workqueue: use %current instead of worker->task in worker_maybe_bind_and_lock()
worker_maybe_bind_and_lock() uses both @worker->task and @current at the same time.  As worker_maybe_bind_and_lock() can only be called by the current worker task, they are always the same.

Update worker_maybe_bind_and_lock() to use %current consistently.

This doesn't introduce any functional change.

tj: Massaged the description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
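The identity the description relies on is the usual kthread pattern: the task_struct pointer a worker records at creation and %current inside that worker's own thread function always name the same task. A minimal sketch of that pattern, assuming hypothetical names (my_worker, my_worker_thread, my_worker_start) rather than anything taken from kernel/workqueue.c:

/* Hypothetical example, not part of this patch. */
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct my_worker {
	struct task_struct *task;	/* analogous to worker->task */
};

static struct my_worker my_worker_instance;

/* Runs in my_worker_instance.task's own context. */
static int my_worker_thread(void *data)
{
	struct my_worker *w = data;

	/*
	 * Inside the thread function, the pointer stored at creation and
	 * current always name the same task -- the same identity the
	 * commit relies on for worker_maybe_bind_and_lock().
	 */
	WARN_ON(w->task != current);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

/* Hypothetical setup: record the task, then let it run. */
static int my_worker_start(void)
{
	struct task_struct *task;

	task = kthread_create(my_worker_thread, &my_worker_instance, "my_worker");
	if (IS_ERR(task))
		return PTR_ERR(task);

	my_worker_instance.task = task;
	wake_up_process(task);
	return 0;
}

Under that assumption, passing %current instead of @worker->task to set_cpus_allowed_ptr() and task_cpu() changes nothing functionally, which is what the diff below does.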
parent 45d9550a0e
commit f5faa0774e

1 changed file with 3 additions and 4 deletions
kernel/workqueue.c
@@ -1512,7 +1512,7 @@ static void worker_leave_idle(struct worker *worker)
  * flushed from cpu callbacks while cpu is going down, they are
  * guaranteed to execute on the cpu.
  *
- * This function is to be used by rogue workers and rescuers to bind
+ * This function is to be used by unbound workers and rescuers to bind
  * themselves to the target cpu and may race with cpu going down or
  * coming online.  kthread_bind() can't be used because it may put the
  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
@@ -1537,7 +1537,6 @@ static bool worker_maybe_bind_and_lock(struct worker *worker)
 __acquires(&pool->lock)
 {
 	struct worker_pool *pool = worker->pool;
-	struct task_struct *task = worker->task;
 
 	while (true) {
 		/*
@@ -1547,12 +1546,12 @@ __acquires(&pool->lock)
 		 * against POOL_DISASSOCIATED.
 		 */
 		if (!(pool->flags & POOL_DISASSOCIATED))
-			set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
+			set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));
 
 		spin_lock_irq(&pool->lock);
 		if (pool->flags & POOL_DISASSOCIATED)
 			return false;
-		if (task_cpu(task) == pool->cpu &&
+		if (task_cpu(current) == pool->cpu &&
 		    cpumask_equal(&current->cpus_allowed,
 				  get_cpu_mask(pool->cpu)))
 			return true;
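For context, a hedged sketch of how a caller might use the function after this change; example_rebind_and_work is a hypothetical caller, not code from this commit. Only the worker task itself may call worker_maybe_bind_and_lock(), and the function returns with pool->lock held whether or not the rebind succeeded (per the __acquires annotation above):

/* Hypothetical caller, for illustration only -- not part of the patch. */
static void example_rebind_and_work(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	/* Called from the worker's own task, so current == worker->task. */
	if (worker_maybe_bind_and_lock(worker)) {
		/* current is now bound to pool->cpu; pool->lock is held. */
	} else {
		/* The CPU went away (POOL_DISASSOCIATED); pool->lock is still held. */
	}

	/* ... per-pool work would be processed here, under pool->lock ... */

	spin_unlock_irq(&pool->lock);
}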