Revert "swiotlb: factor out a nr_slots helper"

This reverts commit 1f2ef5a0f7.

Fixes the ABI issues in 5.10.35 that, at the moment, we can't handle due
to the KABI freeze.  These are not patches that mean much for Android
systems, and they will be reverted at the next KABI "reset" point.

Bug: 161946584
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I461f44ec54102dcf06ba73959a79172615cdb8fa
commit cb27079661
Author: Greg Kroah-Hartman
Date: 2021-05-08 13:01:02 +02:00

@@ -178,11 +178,6 @@ static inline unsigned long io_tlb_offset(unsigned long val)
 	return val & (IO_TLB_SEGSIZE - 1);
 }
 
-static inline unsigned long nr_slots(u64 val)
-{
-	return DIV_ROUND_UP(val, IO_TLB_SIZE);
-}
-
 /*
  * Early SWIOTLB allocation may be too early to allow an architecture to
  * perform the desired operations.  This function allows the architecture to
@@ -482,20 +477,20 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
 	tbl_dma_addr &= mask;
 
-	offset_slots = nr_slots(tbl_dma_addr);
+	offset_slots = ALIGN(tbl_dma_addr, IO_TLB_SIZE) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
 	 */
 	max_slots = mask + 1
-		? nr_slots(mask + 1)
+		? ALIGN(mask + 1, IO_TLB_SIZE) >> IO_TLB_SHIFT
 		: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
 
 	/*
 	 * For mappings greater than or equal to a page, we limit the stride
 	 * (and hence alignment) to a page size.
 	 */
-	nslots = nr_slots(alloc_size);
+	nslots = ALIGN(alloc_size, IO_TLB_SIZE) >> IO_TLB_SHIFT;
 	if (alloc_size >= PAGE_SIZE)
 		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
 	else
 		stride = 1;
@@ -591,7 +586,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 			      enum dma_data_direction dir, unsigned long attrs)
 {
 	unsigned long flags;
-	int i, count, nslots = nr_slots(alloc_size);
+	int i, count, nslots = ALIGN(alloc_size, IO_TLB_SIZE) >> IO_TLB_SHIFT;
 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
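
For context, the revert is mechanical: since IO_TLB_SIZE is defined as
(1 << IO_TLB_SHIFT), a power of two, nr_slots(val), i.e.
DIV_ROUND_UP(val, IO_TLB_SIZE), computes the same value as the open-coded
ALIGN(val, IO_TLB_SIZE) >> IO_TLB_SHIFT that this patch restores.  Below is
a minimal userspace sketch (not part of the patch) that checks this
equivalence; it assumes IO_TLB_SHIFT == 11 as in this tree, and it re-states
simplified versions of the kernel's DIV_ROUND_UP and ALIGN macros so it
compiles standalone.

/* Standalone equivalence check for nr_slots() vs. the open-coded form. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IO_TLB_SHIFT	11
#define IO_TLB_SIZE	(1UL << IO_TLB_SHIFT)

/* Simplified stand-ins for the kernel macros, for this sketch only. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* The helper removed by this revert. */
static unsigned long nr_slots(uint64_t val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}

int main(void)
{
	uint64_t vals[] = { 0, 1, IO_TLB_SIZE - 1, IO_TLB_SIZE,
			    IO_TLB_SIZE + 1, 5 * IO_TLB_SIZE + 123 };
	size_t i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		/* The open-coded form this patch puts back. */
		unsigned long open_coded =
			ALIGN(vals[i], IO_TLB_SIZE) >> IO_TLB_SHIFT;

		assert(nr_slots(vals[i]) == open_coded);
		printf("val=%llu -> %lu slots\n",
		       (unsigned long long)vals[i], open_coded);
	}
	return 0;
}

Note that neither form is used for the mask == ~0UL case: as the comment in
the hunk above says, mask + 1 then wraps to zero, so the code falls back to
1UL << (BITS_PER_LONG - IO_TLB_SHIFT) instead.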