Revert "Revert "mm: fix struct page layout on 32-bit systems""

This reverts commit c34cd7750e.

Bring back the commit in 5.10.38 that broke the kabi.

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I570fc31f9e1f196136bbbef479fb2413011ddf0e
Greg Kroah-Hartman 2021-05-20 15:50:29 +02:00 committed by Todd Kjos
commit 556758235b
3 changed files with 20 additions and 8 deletions

--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -98,10 +98,10 @@ struct page {
 		};
 		struct {	/* page_pool used by netstack */
 			/**
-			 * @dma_addr: might require a 64-bit value even on
+			 * @dma_addr: might require a 64-bit value on
			 * 32-bit architectures.
 			 */
-			dma_addr_t dma_addr;
+			unsigned long dma_addr[2];
 		};
 		struct {	/* slab, slob and slub */
 			union {
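
Why two unsigned longs instead of one dma_addr_t: on 32-bit architectures that need 64-bit DMA addresses, dma_addr_t is 8 bytes, and on ABIs that give 8-byte integers 8-byte alignment the field inserts padding into struct page, shifting every later field and growing the structure. Two native words hold the same value without changing the union's alignment. A minimal userspace sketch of the alignment effect (not kernel code; the struct and field names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the two layouts: on 32-bit ABIs that align
     * uint64_t to 8 bytes (e.g. ARM EABI), with_u64 gains padding
     * after "flags" and grows, while with_pair does not. */
    struct with_u64  { unsigned long flags; uint64_t dma_addr; };
    struct with_pair { unsigned long flags; unsigned long dma_addr[2]; };

    int main(void)
    {
            printf("with_u64:  size=%zu align=%zu\n",
                   sizeof(struct with_u64), _Alignof(struct with_u64));
            printf("with_pair: size=%zu align=%zu\n",
                   sizeof(struct with_pair), _Alignof(struct with_pair));
            return 0;
    }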

--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -191,7 +191,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-	return page->dma_addr;
+	dma_addr_t ret = page->dma_addr[0];
+
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
+	return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+	page->dma_addr[0] = addr;
+	if (sizeof(dma_addr_t) > sizeof(unsigned long))
+		page->dma_addr[1] = upper_32_bits(addr);
 }
 
 static inline bool is_page_pool_compiled_in(void)
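
The getter reassembles the 64-bit address from the two words, and the "<< 16 << 16" idiom deserves a note: when dma_addr_t is only 32 bits wide, the high-word branch is dead code but still compiled, and a literal "<< 32" would be a shift as wide as the type, which is undefined and draws a compiler warning; two 16-bit shifts are always well defined. A small standalone sketch of the split/reassemble round trip (not the kernel implementation; dma_addr_t is typedef'd locally here for illustration):

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t dma_addr_t;    /* assumed 64-bit DMA addresses */

    static unsigned long slot[2];   /* stand-in for page->dma_addr[2] */

    static void set_addr(dma_addr_t addr)
    {
            slot[0] = (unsigned long)addr;  /* low word */
            if (sizeof(dma_addr_t) > sizeof(unsigned long))
                    slot[1] = (unsigned long)(addr >> 16 >> 16);  /* high word */
    }

    static dma_addr_t get_addr(void)
    {
            dma_addr_t ret = slot[0];

            /* dead branch on 64-bit hosts; on 32-bit it restores the
             * high word without ever shifting by the full type width */
            if (sizeof(dma_addr_t) > sizeof(unsigned long))
                    ret |= (dma_addr_t)slot[1] << 16 << 16;
            return ret;
    }

    int main(void)
    {
            set_addr(0x123456789abcdef0ULL);
            assert(get_addr() == 0x123456789abcdef0ULL);
            return 0;
    }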

--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -172,8 +172,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
 					  struct page *page,
 					  unsigned int dma_sync_size)
 {
+	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
-	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
 					 pool->p.offset, dma_sync_size,
 					 pool->p.dma_dir);
 }
@@ -224,7 +226,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 		put_page(page);
 		return NULL;
 	}
-	page->dma_addr = dma;
+	page_pool_set_dma_addr(page, dma);
 
 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
@@ -292,13 +294,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
 		 */
 		goto skip_dma_unmap;
 
-	dma = page->dma_addr;
+	dma = page_pool_get_dma_addr(page);
 
-	/* When page is unmapped, it cannot be returned our pool */
+	/* When page is unmapped, it cannot be returned to our pool */
 	dma_unmap_page_attrs(pool->p.dev, dma,
 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
 			     DMA_ATTR_SKIP_CPU_SYNC);
-	page->dma_addr = 0;
+	page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
 	/* This may be the last page returned, releasing the pool, so
 	 * it is not safe to reference pool afterwards.
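
Net effect in page_pool.c: every direct load and store of page->dma_addr goes through page_pool_get_dma_addr()/page_pool_set_dma_addr(), so the two-word representation stays private to the accessors. sizeof(struct page) itself does not change, because the page_pool fields sit inside struct page's large union, whose size is pinned by its largest member (the five-word page-cache group); only the member layout shifted, which is what broke the KABI this commit knowingly brings back. A compile-time sketch of the union-sizing argument (illustrative member names, assuming a C11 compiler):

    #include <assert.h>

    union layout {
            struct { unsigned long words[5]; } lru_group;   /* largest member */
            struct { unsigned long dma_addr[2]; } pp_group; /* widened member */
    };

    /* widening a smaller member below the largest one is size-neutral */
    static_assert(sizeof(union layout) == 5 * sizeof(unsigned long),
                  "union size pinned by the largest member");

    int main(void) { return 0; }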