f233f7f158
We use the regular paravirt call patching to switch between:

  native_queued_spin_lock_slowpath()	__pv_queued_spin_lock_slowpath()
  native_queued_spin_unlock()		__pv_queued_spin_unlock()

We use a callee saved call for the unlock function which reduces the i-cache footprint and allows 'inlining' of SPIN_UNLOCK functions again.

We further optimize the unlock path by patching the direct call with a "movb $0,%arg1" if we are indeed using the native unlock code. This makes the unlock code almost as fast as the !PARAVIRT case.

This significantly lowers the overhead of having CONFIG_PARAVIRT_SPINLOCKS enabled, even for native code.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-10-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
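For reference, the unlock side that this commit patches is declared as a callee-saved paravirt call, and the patcher asks pv_is_native_spin_unlock() whether the op still points at the native code before collapsing the call site into the single byte store. The sketch below shows roughly how that wiring looks; it is paraphrased rather than quoted from the patch series, so treat the exact declarations (PVOP_VCALLEE1, PV_CALLEE_SAVE_REGS_THUNK, __native_queued_spin_unlock and friends) as assumptions about the surrounding kernel code rather than verbatim source.

/*
 * Rough sketch (not verbatim kernel source) of the pieces the patcher
 * relies on: the unlock op is a callee-saved paravirt call, and
 * pv_is_native_spin_unlock() reports whether a PV guest replaced it.
 */

/* arch/x86/include/asm/paravirt.h, approximately */
static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	/* callee-saved call: only the first argument register is clobbered */
	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

/* arch/x86/kernel/paravirt-spinlocks.c, approximately */
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);	/* release-store of 0 to the locked byte */
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

bool pv_is_native_spin_unlock(void)
{
	/* true while no hypervisor has hooked the unlock op */
	return pv_lock_ops.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}

When that check holds, native_patch() in the file below rewrites the call site with the "movb $0, (%eax)" sequence from DEF_NATIVE(), which is the whole of the native unlock on x86.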
		
			
				
	
	
		
75 lines · 2.2 KiB · C
#include <asm/paravirt.h>

DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
DEF_NATIVE(pv_cpu_ops, iret, "iret");
DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");

#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
#endif

unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	/* arg in %eax, return in %eax */
	return 0;
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	/* arg in %edx:%eax, return in %edx:%eax */
	return 0;
}

extern bool pv_is_native_spin_unlock(void);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

#define PATCH_SITE(ops, x)					\
		case PARAVIRT_PATCH(ops.x):			\
			start = start_##ops##_##x;		\
			end = end_##ops##_##x;			\
			goto patch_site
	switch (type) {
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_cpu_ops, iret);
		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
		PATCH_SITE(pv_cpu_ops, clts);
		PATCH_SITE(pv_cpu_ops, read_tsc);
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
			if (pv_is_native_spin_unlock()) {
				start = start_pv_lock_ops_queued_spin_unlock;
				end   = end_pv_lock_ops_queued_spin_unlock;
				goto patch_site;
			}
#endif

	default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}
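The file above is the 32-bit patcher, where the first argument register is %eax. The 64-bit counterpart (arch/x86/kernel/paravirt_patch_64.c) presumably receives the analogous hunk with %rdi as %arg1; the following is a sketch of just those additions under that assumption, not the complete file.

/* Sketch of the matching x86_64 additions (first argument register is %rdi) */
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
#endif

	/* ... inside native_patch()'s switch (type) ... */
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
	case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
		if (pv_is_native_spin_unlock()) {
			start = start_pv_lock_ops_queued_spin_unlock;
			end   = end_pv_lock_ops_queued_spin_unlock;
			goto patch_site;
		}
#endif

Either way the effect is the same: with the native unlock in place, the unlock call site is patched down to a single store of zero to the lock byte, which is why the unlock path ends up nearly as fast as the !PARAVIRT build.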