Revert "swiotlb: add a IO_TLB_SIZE define"
This reverts commit 22163a8ec8.
This fixes the ABI issues in 5.10.35 that we cannot handle at the moment due
to the KABI freeze. These are not patches that mean much for Android
systems, and the reverts can be undone at the next KABI "reset" point.
Bug: 161946584
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie256b7f1ed5b19f4600ff4c3681efaea5046adda
parent 78957dcb2c
commit 17ba7dfe20

2 changed files with 6 additions and 7 deletions
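Reviewer note (not part of the original commit message): the patch being reverted only introduced IO_TLB_SIZE as a shorthand for 1 << IO_TLB_SHIFT, so this revert is a pure spelling change with no functional effect. A minimal standalone sketch of that equivalence, using the IO_TLB_SHIFT value of 11 visible in the header hunk below:

/* Sketch only, not kernel code; mirrors the two defines touched by this revert. */
#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE  (1 << IO_TLB_SHIFT)	/* the define being removed */

/* Every hunk below swaps IO_TLB_SIZE back to 1 << IO_TLB_SHIFT, which is
 * the same value (2048), so the generated code is unchanged.
 */
_Static_assert(IO_TLB_SIZE == (1 << IO_TLB_SHIFT), "pure renaming, no behaviour change");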
include/linux/swiotlb.h
@@ -29,7 +29,6 @@ enum swiotlb_force {
  * controllable.
  */
 #define IO_TLB_SHIFT 11
-#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)
 
 extern void swiotlb_init(int verbose);
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
kernel/dma/swiotlb.c
@@ -475,20 +475,20 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
 
 	tbl_dma_addr &= mask;
 
-	offset_slots = ALIGN(tbl_dma_addr, IO_TLB_SIZE) >> IO_TLB_SHIFT;
+	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
 	 */
 	max_slots = mask + 1
-		    ? ALIGN(mask + 1, IO_TLB_SIZE) >> IO_TLB_SHIFT
+		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
 		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
 
 	/*
 	 * For mappings greater than or equal to a page, we limit the stride
 	 * (and hence alignment) to a page size.
 	 */
-	nslots = ALIGN(alloc_size, IO_TLB_SIZE) >> IO_TLB_SHIFT;
+	nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	if (alloc_size >= PAGE_SIZE)
 		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
 	else
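Reviewer note: a worked example of the slot arithmetic touched above. SLOT_ALIGN() and example_nslots() are hypothetical helpers for illustration only; SLOT_ALIGN() mimics the kernel's ALIGN() round-up, and IO_TLB_SHIFT = 11 is taken from the header hunk.

/* Sketch: how many 2 KiB swiotlb slots an allocation occupies. */
#define IO_TLB_SHIFT 11
#define SLOT_ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static unsigned long example_nslots(unsigned long alloc_size)
{
	/* e.g. alloc_size == 8192: rounds up to 8192, >> 11 gives 4 slots */
	return SLOT_ALIGN(alloc_size, 1UL << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
}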
@@ -582,7 +582,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 			      enum dma_data_direction dir, unsigned long attrs)
 {
 	unsigned long flags;
-	int i, count, nslots = ALIGN(alloc_size, IO_TLB_SIZE) >> IO_TLB_SHIFT;
+	int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
@@ -633,7 +633,7 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 
 	if (orig_addr == INVALID_PHYS_ADDR)
 		return;
-	orig_addr += (unsigned long)tlb_addr & (IO_TLB_SIZE - 1);
+	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
 
 	switch (target) {
 	case SYNC_FOR_CPU:
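Reviewer note: the masking in the sync hunk above extracts the byte offset of tlb_addr within its 2 KiB slot; the address below is an arbitrary illustration, not taken from the kernel.

/* (1 << 11) - 1 == 0x7ff, so the AND keeps only the low 11 bits. */
unsigned long slot_offset = 0x12345a34UL & ((1UL << 11) - 1);	/* == 0x234 */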
@@ -691,7 +691,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 
 size_t swiotlb_max_mapping_size(struct device *dev)
 {
-	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
+	return ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
 }
 
 bool is_swiotlb_active(void)
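Reviewer note: with the values above, swiotlb_max_mapping_size() still works out to 256 KiB after the revert. IO_TLB_SEGSIZE = 128 is an assumption here (its mainline value); it is not part of this diff.

#include <stddef.h>

#define IO_TLB_SHIFT   11	/* from the header hunk above */
#define IO_TLB_SEGSIZE 128	/* assumption: not shown in this diff */

/* ((size_t)1 << 11) * 128 == 262144 bytes == 256 KiB */
static const size_t max_mapping = ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;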