locking/mutexes: Add extra reschedule point
Add in an extra reschedule in an attempt to avoid getting rescheduled the moment we've acquired the lock.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-zah5eyn9gu7qlgwh9r6n2anc@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 34c6bc2c91
parent fb0527bd5e

1 changed file with 7 additions and 0 deletions
@@ -468,6 +468,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	}
 	osq_unlock(&lock->osq);
 slowpath:
+	/*
+	 * If we fell out of the spin path because of need_resched(),
+	 * reschedule now, before we try-lock the mutex. This avoids getting
+	 * scheduled out right after we obtained the mutex.
+	 */
+	if (need_resched())
+		schedule_preempt_disabled();
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
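For illustration only, below is a minimal userspace sketch of the same idea, not the kernel code: when a reschedule is already pending, yield the CPU before taking the lock rather than right after acquiring it, so the critical section is not immediately preempted. The pthread mutex, sched_yield(), and the resched_pending flag standing in for need_resched() are all assumptions made for this sketch.

/*
 * Userspace analogue (assumption, not the kernel implementation):
 * yield before locking if a "reschedule" is pending, so we are not
 * descheduled the moment we hold the mutex.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int resched_pending;   /* stand-in for need_resched() */

static void lock_with_resched_point(void)
{
	/* Give up the CPU now, before acquiring the mutex, not after. */
	if (atomic_load(&resched_pending)) {
		atomic_store(&resched_pending, 0);
		sched_yield();
	}

	pthread_mutex_lock(&lock);
	/* ... critical section runs without an immediate preemption ... */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	atomic_store(&resched_pending, 1);
	lock_with_resched_point();
	puts("done");
	return 0;
}

The design point mirrors the patch: paying the reschedule cost before lock acquisition keeps other waiters from stalling behind a lock holder that was scheduled out right away.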