816434ec4a
Pull x86 spinlock changes from Ingo Molnar:
 "The biggest change here are paravirtualized ticket spinlocks (PV
  spinlocks), which bring a nice speedup on various benchmarks.

  The KVM host side will come to you via the KVM tree"

* 'x86-spinlocks-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kvm/guest: Fix sparse warning: "symbol 'klock_waiting' was not declared as static"
  kvm: Paravirtual ticketlocks support for linux guests running on KVM hypervisor
  kvm guest: Add configuration support to enable debug information for KVM Guests
  kvm uapi: Add KICK_CPU and PV_UNHALT definition to uapi
  xen, pvticketlock: Allow interrupts to be enabled while blocking
  x86, ticketlock: Add slowpath logic
  jump_label: Split jumplabel ratelimit
  x86, pvticketlock: When paravirtualizing ticket locks, increment by 2
  x86, pvticketlock: Use callee-save for lock_spinning
  xen, pvticketlocks: Add xen_nopvspin parameter to disable xen pv ticketlocks
  xen, pvticketlock: Xen implementation for PV ticket locks
  xen: Defer spinlock setup until boot CPU setup
  x86, ticketlock: Collapse a layer of functions
  x86, ticketlock: Don't inline _spin_unlock when using paravirt spinlocks
  x86, spinlock: Replace pv spinlocks with pv ticketlocks
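For context on how the header below (arch/x86/include/asm/kvm_para.h) gets used by this series: the guest side lets a vCPU that has spun too long on its ticket halt itself, and has the unlocking vCPU kick it awake with a dedicated hypercall. What follows is a minimal sketch of that kick path, assuming the KVM_HC_KICK_CPU and KVM_FEATURE_PV_UNHALT definitions added by the uapi patch; the real implementation lives in arch/x86/kernel/kvm.c, and kvm_pvspin_supported() is only an illustrative helper name.

#include <linux/init.h>
#include <linux/kvm_para.h>	/* wraps the arch header shown below */
#include <asm/smp.h>		/* x86_cpu_to_apicid */

/* Sketch: wake the halted vCPU that now owns the ticket at the head of the queue. */
static void kvm_kick_cpu(int cpu)
{
	unsigned long flags = 0;
	int apicid = per_cpu(x86_cpu_to_apicid, cpu);

	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

/* The pv ticketlock slowpath is only worth enabling when the host advertises PV_UNHALT. */
static bool __init kvm_pvspin_supported(void)
{
	return kvm_para_available() &&
	       kvm_para_has_feature(KVM_FEATURE_PV_UNHALT);
}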
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);

#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

/* This instruction is vmcall.  On non-VT architectures, it will generate a
 * trap that we will then rewrite to the appropriate instruction.
 */
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
 * instruction.  The hypervisor may replace it with something else but only the
 * instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax.  No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */

static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}

static inline uint32_t kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (cpu_has_hypervisor)
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(KVM_CPUID_FEATURES);
}

#ifdef CONFIG_KVM_GUEST
void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_guest_init() do {} while (0)
#define kvm_async_pf_task_wait(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)

static inline u32 kvm_read_and_reset_pf_reason(void)
{
	return 0;
}

static inline void kvm_disable_steal_time(void)
{
	return;
}
#endif

#endif /* _ASM_X86_KVM_PARA_H */
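Finally, a hypothetical usage sketch (not part of this header): a guest component would normally probe for KVM with kvm_para_available(), look at the feature bits reported by kvm_arch_para_features(), and only then issue hypercalls through the wrappers above, which place the number in RAX and up to four arguments in RBX/RCX/RDX/RSI as the comment describes. The function example_pv_probe() and its use of KVM_HC_VAPIC_POLL_IRQ are illustrative only.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>	/* pulls in asm/kvm_para.h plus the generic KVM_HC_* numbers */

static int __init example_pv_probe(void)
{
	/* Bail out when not running as a KVM guest. */
	if (!kvm_para_available())
		return -ENODEV;

	/* Feature bits come back in EAX of the KVM_CPUID_FEATURES leaf. */
	pr_info("KVM paravirt features: 0x%x\n", kvm_arch_para_features());

	/* Hypercall number in RAX, no arguments here; the host's result returns in RAX. */
	return kvm_hypercall0(KVM_HC_VAPIC_POLL_IRQ);
}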