mm: clean up mm_counter
Presently, the per-mm statistics counters are defined by macros in sched.h. This patch changes that so the counters are
- defined in mm.h as inline functions
- indexed through an array instead of being generated by macro token pasting of the member name.
The purpose of this patch is to reduce the size of a future patch that changes the implementation of the per-mm counters.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d559db086f
parent 19b629f581
12 changed files with 174 additions and 101 deletions
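The helpers in the mm.h hunk below index a per-counter array, mm->rss_stat.count[], by counter id rather than pasting the counter name into a struct field. The companion definitions belong to the mm_types.h part of this 12-file change, which is not reproduced in this excerpt; as a rough sketch of the layout the helpers assume:

/* Sketch only: counter indices and the array-backed rss_stat container
 * that the mm.h helpers below rely on. The exact mm_types.h hunk is not
 * shown in this excerpt. */
enum {
	MM_FILEPAGES,		/* resident file-backed pages */
	MM_ANONPAGES,		/* resident anonymous pages */
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTLOCKS
struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];	/* updated atomically */
};
#else
struct mm_rss_stat {
	unsigned long count[NR_MM_COUNTERS];	/* guarded by page_table_lock */
};
#endif

/* struct mm_struct then carries a "struct mm_rss_stat rss_stat;" member
 * in place of the old per-counter fields. */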
include/linux/mm.h
@@ -870,6 +870,110 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);

/*
 * per-process(per-mm_struct) statistics.
 */
#if USE_SPLIT_PTLOCKS
/*
 * The mm counters are not protected by its page_table_lock,
 * so must be incremented atomically.
 */
static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_set(&mm->rss_stat.count[member], value);
}

static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	return (unsigned long)atomic_long_read(&mm->rss_stat.count[member]);
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

#else /* !USE_SPLIT_PTLOCKS */
/*
 * The mm counters are protected by its page_table_lock,
 * so can be incremented directly.
 */
static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
{
	mm->rss_stat.count[member] = value;
}

static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	return mm->rss_stat.count[member];
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	mm->rss_stat.count[member] += value;
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	mm->rss_stat.count[member]++;
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	mm->rss_stat.count[member]--;
}

#endif /* !USE_SPLIT_PTLOCKS */

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}


/*
 * A callback you can register to apply pressure to ageable caches.
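For callers, the visible change is the second argument to these helpers: the old sched.h macros pasted it into a struct member name (roughly, anon_rss became mm->_anon_rss), while the new inline functions take one of the MM_* indices. A hypothetical caller, for illustration only and not part of this hunk:

/* Illustrative sketch: charging a newly mapped anonymous page and
 * reading back the combined RSS with the enum-indexed helpers. */
static void example_account_anon_page(struct mm_struct *mm)
{
	unsigned long rss;

	inc_mm_counter(mm, MM_ANONPAGES);	/* one more anonymous page */

	rss = get_mm_rss(mm);			/* MM_FILEPAGES + MM_ANONPAGES */
	update_hiwater_rss(mm);			/* track the RSS high-water mark */
	(void)rss;
}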