With various drivers wanting to inject idle time, we get people calling idle routines outside of the idle loop proper. Therefore we need to be extra careful about not missing TIF_NEED_RESCHED -> PREEMPT_NEED_RESCHED propagations. While looking at this, I also realized there's a small window in the existing idle loop where we can miss TIF_NEED_RESCHED; when it hits right after the tif_need_resched() test at the end of the loop but right before the need_resched() test at the start of the loop. So move preempt_fold_need_resched() out of the loop where we're guaranteed to have TIF_NEED_RESCHED set. Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/n/tip-x9jgh45oeayzajz2mjt0y7d6@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
		
			
				
	
	
		
			59 lines
		
	
	
	
		
			1.6 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			59 lines
		
	
	
	
		
			1.6 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
#ifndef _ASM_X86_MWAIT_H
#define _ASM_X86_MWAIT_H

#include <linux/sched.h>

/*
 * MWAIT hint encoding: the low 4 bits select the C-state sub-state,
 * the next 4 bits select the C-state itself.
 */
#define MWAIT_SUBSTATE_MASK		0xf
#define MWAIT_CSTATE_MASK		0xf
#define MWAIT_SUBSTATE_SIZE		4
#define MWAIT_HINT2CSTATE(hint)		(((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
#define MWAIT_HINT2SUBSTATE(hint)	((hint) & MWAIT_CSTATE_MASK)

/* CPUID leaf 5 enumerates MONITOR/MWAIT capabilities. */
#define CPUID_MWAIT_LEAF		5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
#define CPUID5_ECX_INTERRUPT_BREAK	0x2

/* ECX bit for MWAIT: treat masked interrupts as break events. */
#define MWAIT_ECX_INTERRUPT_BREAK	0x1
static inline void __monitor(const void *eax, unsigned long ecx,
 | 
						|
			     unsigned long edx)
 | 
						|
{
 | 
						|
	/* "monitor %eax, %ecx, %edx;" */
 | 
						|
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
 | 
						|
		     :: "a" (eax), "c" (ecx), "d"(edx));
 | 
						|
}
 | 
						|
 | 
						|
static inline void __mwait(unsigned long eax, unsigned long ecx)
 | 
						|
{
 | 
						|
	/* "mwait %eax, %ecx;" */
 | 
						|
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
 | 
						|
		     :: "a" (eax), "c" (ecx));
 | 
						|
}
 | 
						|
 | 
						|
/*
 | 
						|
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 | 
						|
 * which can obviate IPI to trigger checking of need_resched.
 | 
						|
 * We execute MONITOR against need_resched and enter optimized wait state
 | 
						|
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 | 
						|
 * up from MWAIT (without an IPI).
 | 
						|
 *
 | 
						|
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 | 
						|
 * capability.
 | 
						|
 */
 | 
						|
static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 | 
						|
{
 | 
						|
	if (!current_set_polling_and_test()) {
 | 
						|
		if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
 | 
						|
			mb();
 | 
						|
			clflush((void *)¤t_thread_info()->flags);
 | 
						|
			mb();
 | 
						|
		}
 | 
						|
 | 
						|
		__monitor((void *)¤t_thread_info()->flags, 0, 0);
 | 
						|
		if (!need_resched())
 | 
						|
			__mwait(eax, ecx);
 | 
						|
	}
 | 
						|
	current_clr_polling();
 | 
						|
}
 | 
						|
 | 
						|
#endif /* _ASM_X86_MWAIT_H */
 |