From 8011eb22150e5bb6eb1834ce9913f36081d48ab5 Mon Sep 17 00:00:00 2001
From: Charan Teja Reddy
Date: Fri, 25 Jun 2021 12:30:50 +0530
Subject: [PATCH] ANDROID: mm: provision to add shmem pages to inactive file lru head

Commit 9975da5f43bb ("ANDROID: mm: allow fast reclaim of shmem pages")
allows shmem pages to be added only to the tail of the inactive file
lru, for faster reclaim. Extend the same to allow adding pages to the
head of the inactive file lru as well. This enables users to
selectively add shmem file pages to either the head or the tail of the
inactive file lru.

Bug: 187798288
Change-Id: Icf167e1e3ea68257291478e1f16de678ecbf6320
Signed-off-by: Charan Teja Reddy
---
 include/linux/shmem_fs.h |  2 +-
 include/linux/swap.h     |  2 +-
 mm/shmem.c               |  4 ++--
 mm/swap.c                | 11 ++++++++---
 4 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 51b1dcfb5022..1bf2615449fe 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -85,7 +85,7 @@ extern bool shmem_huge_enabled(struct vm_area_struct *vma);
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 						pgoff_t start, pgoff_t end);
-extern void shmem_mark_page_lazyfree(struct page *page);
+extern void shmem_mark_page_lazyfree(struct page *page, bool tail);
 
 /* Flag allocation requirements to shmem_getpage */
 enum sgp_type {
diff --git a/include/linux/swap.h b/include/linux/swap.h
index f750b27773ea..beda0a50d0b9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -353,7 +353,7 @@ extern void rotate_reclaimable_page(struct page *page);
 extern void deactivate_file_page(struct page *page);
 extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
-extern void mark_page_lazyfree_movetail(struct page *page);
+extern void mark_page_lazyfree_movetail(struct page *page, bool tail);
 extern void swap_setup(void);
 
 extern void __lru_cache_add_inactive_or_unevictable(struct page *page,
diff --git a/mm/shmem.c b/mm/shmem.c
index efcffd4836c3..604c6d89d243 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4295,9 +4295,9 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
 
-void shmem_mark_page_lazyfree(struct page *page)
+void shmem_mark_page_lazyfree(struct page *page, bool tail)
 {
-	mark_page_lazyfree_movetail(page);
+	mark_page_lazyfree_movetail(page, tail);
 }
 EXPORT_SYMBOL_GPL(shmem_mark_page_lazyfree);
 
diff --git a/mm/swap.c b/mm/swap.c
index 09923999ddc2..8d5c61de5a6e 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -634,6 +634,8 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec,
 				     void *arg)
 {
+	bool *add_to_tail = (bool *)arg;
+
 	if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page)) {
 		bool active = PageActive(page);
@@ -642,7 +644,10 @@ static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec,
 				   LRU_INACTIVE_ANON + active);
 		ClearPageActive(page);
 		ClearPageReferenced(page);
-		add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE);
+		if (add_to_tail && *add_to_tail)
+			add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE);
+		else
+			add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 	}
 }
 
@@ -769,7 +774,7 @@ void mark_page_lazyfree(struct page *page)
 * mark_page_lazyfree_movetail() moves @page to the tail of inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
-void mark_page_lazyfree_movetail(struct page *page)
+void mark_page_lazyfree_movetail(struct page *page, bool tail)
 {
 	if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page)) {
@@ -780,7 +785,7 @@ void mark_page_lazyfree_movetail(struct page *page)
 		get_page(page);
 		if (pagevec_add_and_need_flush(pvec, page))
 			pagevec_lru_move_fn(pvec,
-					lru_lazyfree_movetail_fn, NULL);
+					lru_lazyfree_movetail_fn, &tail);
 		local_unlock(&lru_pvecs.lock);
 	}
 }
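
Usage sketch (not part of the patch): a minimal, hypothetical caller
showing how the extended API selects head vs. tail placement on the
inactive file lru. The function example_lazyfree_range and its
reclaim_soon flag are illustrative assumptions; only
shmem_mark_page_lazyfree(page, tail) comes from this patch.

#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

/*
 * Lazy-free the shmem pages backing [start, end) of @mapping.
 *
 * reclaim_soon == true keeps the prior fast-reclaim behaviour: pages
 * go to the tail of the inactive file lru, so reclaim scans them
 * first.  reclaim_soon == false uses the behaviour added by this
 * patch: pages are parked at the head, getting the usual residency
 * time on the lru before reclaim considers them.
 */
static void example_lazyfree_range(struct address_space *mapping,
				   pgoff_t start, pgoff_t end,
				   bool reclaim_soon)
{
	pgoff_t index;

	for (index = start; index < end; index++) {
		struct page *page = find_get_page(mapping, index);

		if (!page)
			continue;
		shmem_mark_page_lazyfree(page, reclaim_soon);
		put_page(page);
	}
}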