workqueue: change argument of worker_maybe_bind_and_lock() to @pool
worker_maybe_bind_and_lock() currently takes @worker but only cares
about @worker->pool.  This patch updates worker_maybe_bind_and_lock()
to take @pool instead of @worker.  This will be used to better define
synchronization rules regarding rescuer->pool updates.

This doesn't introduce any functional change.

tj: Updated the comments and description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit f36dc67b27
parent f5faa0774e

1 changed file with 9 additions and 9 deletions
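For readers who want to poke at the new calling convention outside the
kernel, here is a minimal userspace sketch.  It is a toy model, not
kernel code: the struct members, the pthread mutex standing in for
pool->lock, and the trivial "bind" step are illustrative assumptions;
only the worker_maybe_bind_and_lock(pool) signature mirrors this patch.
The diff below shows the real change in kernel/workqueue.c.

/*
 * Toy, userspace-only model of the interface change in this patch.
 * Everything except the function signature is a stand-in; the real
 * implementation and types live in kernel/workqueue.c.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct worker_pool {
	pthread_mutex_t lock;		/* stands in for pool->lock */
	int cpu;			/* CPU the pool is associated with */
	bool online;			/* stands in for "pool is associated" */
};

struct worker {
	struct worker_pool *pool;	/* pool this worker currently serves */
};

/*
 * After the patch the function takes the pool directly.  It never
 * needed anything but @worker->pool, so callers such as the rescuer,
 * which pick a pool right before calling, now pass it explicitly.
 * Like the kernel function, it returns with pool->lock held.
 */
static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
{
	/* stand-in for the set_cpus_allowed_ptr() retry loop */
	pthread_mutex_lock(&pool->lock);
	return pool->online;
}

int main(void)
{
	struct worker_pool pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cpu = 0,
		.online = true,
	};
	struct worker rescuer = { .pool = NULL };

	/* old call sites passed the worker: worker_maybe_bind_and_lock(&rescuer) */
	rescuer.pool = &pool;		/* mirrors "rescuer->pool = pool" */
	if (worker_maybe_bind_and_lock(rescuer.pool))
		printf("bound to pool on cpu %d\n", pool.cpu);
	pthread_mutex_unlock(&pool.lock);
	return 0;
}

Compile with cc -pthread.  The only point of the sketch is that the
call site now names the pool it binds to and locks, which is what the
rescuer hunk at the end of the diff makes explicit.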
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1504,8 +1504,10 @@ static void worker_leave_idle(struct worker *worker)
 }
 
 /**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
- * @worker: self
+ * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
+ * @pool: target worker_pool
+ *
+ * Bind %current to the cpu of @pool if it is associated and lock @pool.
  *
  * Works which are scheduled while the cpu is online must at least be
  * scheduled to a worker which is bound to the cpu so that if they are
@@ -1533,11 +1535,9 @@ static void worker_leave_idle(struct worker *worker)
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
-static bool worker_maybe_bind_and_lock(struct worker *worker)
+static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
 __acquires(&pool->lock)
 {
-	struct worker_pool *pool = worker->pool;
-
 	while (true) {
 		/*
 		 * The following call may fail, succeed or succeed
@@ -1575,7 +1575,7 @@ __acquires(&pool->lock)
 static void idle_worker_rebind(struct worker *worker)
 {
 	/* CPU may go down again inbetween, clear UNBOUND only on success */
-	if (worker_maybe_bind_and_lock(worker))
+	if (worker_maybe_bind_and_lock(worker->pool))
 		worker_clr_flags(worker, WORKER_UNBOUND);
 
 	/* rebind complete, become available again */
@@ -1593,7 +1593,7 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 {
 	struct worker *worker = container_of(work, struct worker, rebind_work);
 
-	if (worker_maybe_bind_and_lock(worker))
+	if (worker_maybe_bind_and_lock(worker->pool))
 		worker_clr_flags(worker, WORKER_UNBOUND);
 
 	spin_unlock_irq(&worker->pool->lock);
@@ -2038,7 +2038,7 @@ static bool manage_workers(struct worker *worker)
 		 * on @pool's current state.  Try it and adjust
 		 * %WORKER_UNBOUND accordingly.
 		 */
-		if (worker_maybe_bind_and_lock(worker))
+		if (worker_maybe_bind_and_lock(pool))
 			worker->flags &= ~WORKER_UNBOUND;
 		else
 			worker->flags |= WORKER_UNBOUND;
@@ -2358,7 +2358,7 @@ repeat:
 
 		/* migrate to the target cpu if possible */
 		rescuer->pool = pool;
-		worker_maybe_bind_and_lock(rescuer);
+		worker_maybe_bind_and_lock(pool);
 
 		/*
 		 * Slurp in all works issued via this workqueue and