mm: remove CONFIG_UNEVICTABLE_LRU config option
Currently, nobody wants to turn UNEVICTABLE_LRU off; thus, this configurability is unnecessary. Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Andi Kleen <andi@firstfloor.org> Acked-by: Minchan Kim <minchan.kim@gmail.com> Cc: David Woodhouse <dwmw2@infradead.org> Cc: Matt Mackall <mpm@selenic.com> Cc: Rik van Riel <riel@redhat.com> Cc: Lee Schermerhorn <lee.schermerhorn@hp.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
bce7394a3e
commit
6837765963
17 changed files with 3 additions and 153 deletions
|
@@ -83,13 +83,8 @@ enum zone_stat_item {
|
|||
NR_ACTIVE_ANON, /* " " " " " */
|
||||
NR_INACTIVE_FILE, /* " " " " " */
|
||||
NR_ACTIVE_FILE, /* " " " " " */
|
||||
#ifdef CONFIG_UNEVICTABLE_LRU
|
||||
NR_UNEVICTABLE, /* " " " " " */
|
||||
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
|
||||
#else
|
||||
NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
|
||||
NR_MLOCK = NR_ACTIVE_FILE,
|
||||
#endif
|
||||
NR_ANON_PAGES, /* Mapped anonymous pages */
|
||||
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
|
||||
only modified from process context */
|
||||
|
@@ -132,11 +127,7 @@ enum lru_list {
|
|||
LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
|
||||
LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
|
||||
LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
|
||||
#ifdef CONFIG_UNEVICTABLE_LRU
|
||||
LRU_UNEVICTABLE,
|
||||
#else
|
||||
LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
|
||||
#endif
|
||||
NR_LRU_LISTS
|
||||
};
|
||||
|
||||
|
@@ -156,11 +147,7 @@ static inline int is_active_lru(enum lru_list l)
|
|||
|
||||
static inline int is_unevictable_lru(enum lru_list l)
|
||||
{
|
||||
#ifdef CONFIG_UNEVICTABLE_LRU
|
||||
return (l == LRU_UNEVICTABLE);
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
enum zone_watermarks {
|
||||
|
|
|
@@ -95,9 +95,7 @@ enum pageflags {
|
|||
PG_reclaim, /* To be reclaimed asap */
|
||||
PG_buddy, /* Page is free, on buddy lists */
|
||||
PG_swapbacked, /* Page is backed by RAM/swap */
|
||||
#ifdef CONFIG_UNEVICTABLE_LRU
|
||||
PG_unevictable, /* Page is "unevictable" */
|
||||
#endif
|
||||
#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
|
||||
PG_mlocked, /* Page is vma mlocked */
|
||||
#endif
|
||||
|
@@ -248,14 +246,8 @@ PAGEFLAG_FALSE(SwapCache)
|
|||
SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_UNEVICTABLE_LRU
|
||||
PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
|
||||
TESTCLEARFLAG(Unevictable, unevictable)
|
||||
#else
|
||||
PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
|
||||
SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
|
||||
__CLEARPAGEFLAG_NOOP(Unevictable)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
|
||||
#define MLOCK_PAGES 1
|
||||
|
@@ -382,12 +374,6 @@ static inline void __ClearPageTail(struct page *page)
|
|||
|
||||
#endif /* !PAGEFLAGS_EXTENDED */
|
||||
|
||||
#ifdef CONFIG_UNEVICTABLE_LRU
|
||||
#define __PG_UNEVICTABLE (1 << PG_unevictable)
|
||||
#else
|
||||
#define __PG_UNEVICTABLE 0
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
|
||||
#define __PG_MLOCKED (1 << PG_mlocked)
|
||||
#else
|
||||
|
@@ -403,7 +389,7 @@ static inline void __ClearPageTail(struct page *page)
|
|||
1 << PG_private | 1 << PG_private_2 | \
|
||||
1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \
|
||||
1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
|
||||
__PG_UNEVICTABLE | __PG_MLOCKED)
|
||||
1 << PG_unevictable | __PG_MLOCKED)
|
||||
|
||||
/*
|
||||
* Flags checked when a page is prepped for return by the page allocator.
|
||||
|
|
|
@@ -22,9 +22,7 @@ enum mapping_flags {
|
|||
AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */
|
||||
AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
|
||||
AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
|
||||
#ifdef CONFIG_UNEVICTABLE_LRU
|
||||
AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
|
||||
#endif
|
||||
};
|
||||
|
||||
static inline void mapping_set_error(struct address_space *mapping, int error)
|
||||
|
@@ -37,8 +35,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_UNEVICTABLE_LRU
|
||||
|
||||
static inline void mapping_set_unevictable(struct address_space *mapping)
|
||||
{
|
||||
set_bit(AS_UNEVICTABLE, &mapping->flags);
|
||||
|
@@ -55,14 +51,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
|
|||
return test_bit(AS_UNEVICTABLE, &mapping->flags);
|
||||
return !!mapping;
|
||||
}
|
||||
#else
|
||||
static inline void mapping_set_unevictable(struct address_space *mapping) { }
|
||||
static inline void mapping_clear_unevictable(struct address_space *mapping) { }
|
||||
static inline int mapping_unevictable(struct address_space *mapping)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
|
||||
{
|
||||
|
|
|
@@ -105,18 +105,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
|
|||
*/
|
||||
int page_mkclean(struct page *);
|
||||
|
||||
#ifdef CONFIG_UNEVICTABLE_LRU
|
||||
/*
|
||||
* called in munlock()/munmap() path to check for other vmas holding
|
||||
* the page mlocked.
|
||||
*/
|
||||
int try_to_munlock(struct page *);
|
||||
#else
|
||||
static inline int try_to_munlock(struct page *page)
|
||||
{
|
||||
return 0; /* a.k.a. SWAP_SUCCESS */
|
||||
}
|
||||
#endif
|
||||
|
||||
#else /* !CONFIG_MMU */
|
||||
|
||||
|
|
|
@@ -235,7 +235,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
|
|||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_UNEVICTABLE_LRU
|
||||
extern int page_evictable(struct page *page, struct vm_area_struct *vma);
|
||||
extern void scan_mapping_unevictable_pages(struct address_space *);
|
||||
|
||||
|
@@ -244,24 +243,6 @@ extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
|
|||
void __user *, size_t *, loff_t *);
|
||||
extern int scan_unevictable_register_node(struct node *node);
|
||||
extern void scan_unevictable_unregister_node(struct node *node);
|
||||
#else
|
||||
static inline int page_evictable(struct page *page,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int scan_unevictable_register_node(struct node *node)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void scan_unevictable_unregister_node(struct node *node) { }
|
||||
#endif
|
||||
|
||||
extern int kswapd_run(int nid);
|
||||
|
||||
|
|
|
@@ -41,7 +41,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
|
|||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
|
||||
#endif
|
||||
#ifdef CONFIG_UNEVICTABLE_LRU
|
||||
UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
|
||||
UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
|
||||
UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
|
||||
|
@@ -50,7 +49,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
|
|||
UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
|
||||
UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
|
||||
UNEVICTABLE_MLOCKFREED,
|
||||
#endif
|
||||
NR_VM_EVENT_ITEMS
|
||||
};
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue