mm: memcontrol: take a css reference for each charged page
Charges currently pin the css indirectly by playing tricks during css_offline(): user pages stall the offlining process until all of them have been reparented, whereas kmemcg acquires a keep-alive reference if outstanding kernel pages are detected at that point. In preparation for removing all this complexity, make the pinning explicit and acquire a css reference for every charged page. Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Reviewed-by: Vladimir Davydov <vdavydov@parallels.com> Acked-by: Michal Hocko <mhocko@suse.cz> Cc: David Rientjes <rientjes@google.com> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
					parent
					
						
							
								5ac8fb31ad
							
						
					
				
			
			
				commit
				
					
						e8ea14cc6e
					
				
			
		
					 3 changed files with 92 additions and 24 deletions
				
			
		| 
						 | 
				
			
			@ -146,6 +146,29 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
 | 
			
		|||
	return true;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
 * percpu_ref_get_many - increment a percpu refcount
 | 
			
		||||
 * @ref: percpu_ref to get
 | 
			
		||||
 * @nr: number of references to get
 | 
			
		||||
 *
 | 
			
		||||
 * Analogous to atomic_long_add().
 | 
			
		||||
 *
 | 
			
		||||
 * This function is safe to call as long as @ref is between init and exit.
 | 
			
		||||
 */
 | 
			
		||||
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
 | 
			
		||||
{
 | 
			
		||||
	unsigned long __percpu *percpu_count;
 | 
			
		||||
 | 
			
		||||
	rcu_read_lock_sched();
 | 
			
		||||
 | 
			
		||||
	if (__ref_is_percpu(ref, &percpu_count))
 | 
			
		||||
		this_cpu_add(*percpu_count, nr);
 | 
			
		||||
	else
 | 
			
		||||
		atomic_long_add(nr, &ref->count);
 | 
			
		||||
 | 
			
		||||
	rcu_read_unlock_sched();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	/*
	 * Single-reference get is just the nr == 1 case of
	 * percpu_ref_get_many(); delegating keeps the RCU-protected
	 * percpu-vs-atomic dispatch in exactly one place.
	 */
	percpu_ref_get_many(ref, 1);
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
| 
						 | 
				
			
			@ -230,6 +244,30 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 | 
			
		|||
	return ret;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
 * percpu_ref_put_many - decrement a percpu refcount
 | 
			
		||||
 * @ref: percpu_ref to put
 | 
			
		||||
 * @nr: number of references to put
 | 
			
		||||
 *
 | 
			
		||||
 * Decrement the refcount, and if 0, call the release function (which was passed
 | 
			
		||||
 * to percpu_ref_init())
 | 
			
		||||
 *
 | 
			
		||||
 * This function is safe to call as long as @ref is between init and exit.
 | 
			
		||||
 */
 | 
			
		||||
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
 | 
			
		||||
{
 | 
			
		||||
	unsigned long __percpu *percpu_count;
 | 
			
		||||
 | 
			
		||||
	rcu_read_lock_sched();
 | 
			
		||||
 | 
			
		||||
	if (__ref_is_percpu(ref, &percpu_count))
 | 
			
		||||
		this_cpu_sub(*percpu_count, nr);
 | 
			
		||||
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
 | 
			
		||||
		ref->release(ref);
 | 
			
		||||
 | 
			
		||||
	rcu_read_unlock_sched();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was passed
 * to percpu_ref_init())
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	/*
	 * Single-reference put is the nr == 1 case of
	 * percpu_ref_put_many(); delegating avoids duplicating the
	 * RCU-protected decrement-and-maybe-release logic.
	 */
	percpu_ref_put_many(ref, 1);
}
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
	Add table
		Add a link
		
	
		Reference in a new issue