KVM updates for the 3.7 merge window
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.12 (GNU/Linux)

iQIcBAABAgAGBQJQbY/2AAoJEI7yEDeUysxlymQQAIv5svpAI/FUe3FhvBi3IW2h
WWMIpbdhHyocaINT18qNp8prO0iwoaBfgsnU8zuB34MrbdUgiwSHgM6T4Ff4NGa+
R4u+gpyKYwxNQYKeJyj04luXra/krxwHL1u9OwN7o44JuQXAmzrw2tZ9ad1ArvL3
eoZ6kGsPcdHPZMZWw2jN5xzBsRtqybm0GPPQh1qPXdn8UlPPd1X7owvbaud2y4+e
StVIpGY6wrsO36f7UcA4Gm1EP/1E6Lm5KMXJyHgM9WBRkEfp92jTY5+XKv91vK8Z
VKUd58QMdZE5NCNBkAR9U5N9aH0oSXnFU/g8hgiwGvrhS3IsSkKUePE6sVyMVTIO
VptKRYe0AdmD/g25p6ApJsguV7ITlgoCPaE4rMmRcW9/bw8+iY098r7tO7w11H8M
TyFOXihc3B+rlH8WdzOblwxHMC4yRuiPIktaA3WwbX7eA7Xv/ZRtdidifXKtgsVE
rtubVqwGyYcHoX1Y+JiByIW1NN0pYncJhPEdc8KbRe2wKs3amA9rio1mUpBYYBPO
B0ygcITftyXbhcTtssgcwBDGXB0AAGqI7wqdtJhFeIrKwHXD7fNeAGRwO8oKxmlj
0aPwo9fDtpI+e6BFTohEgjZBocRvXXNWLnDSFB0E7xDR31bACck2FG5FAp1DxdS7
lb/nbAsXf9UJLgGir4I1
=kN6V
-----END PGP SIGNATURE-----

Merge tag 'kvm-3.7-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM updates from Avi Kivity:
 "Highlights of the changes for this release include support for vfio
  level triggered interrupts, improved big real mode support on older
  Intels, a streamlined guest page table walker, guest APIC speedups,
  PIO optimizations, better overcommit handling, and read-only memory."

* tag 'kvm-3.7-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (138 commits)
  KVM: s390: Fix vcpu_load handling in interrupt code
  KVM: x86: Fix guest debug across vcpu INIT reset
  KVM: Add resampling irqfds for level triggered interrupts
  KVM: optimize apic interrupt delivery
  KVM: MMU: Eliminate pointless temporary 'ac'
  KVM: MMU: Avoid access/dirty update loop if all is well
  KVM: MMU: Eliminate eperm temporary
  KVM: MMU: Optimize is_last_gpte()
  KVM: MMU: Simplify walk_addr_generic() loop
  KVM: MMU: Optimize pte permission checks
  KVM: MMU: Update accessed and dirty bits after guest pagetable walk
  KVM: MMU: Move gpte_access() out of paging_tmpl.h
  KVM: MMU: Optimize gpte_access() slightly
  KVM: MMU: Push clean gpte write protection out of gpte_access()
  KVM: clarify kvmclock documentation
  KVM: make processes waiting on vcpu mutex killable
  KVM: SVM: Make use of asm.h
  KVM: VMX: Make use of asm.h
  KVM: VMX: Make lto-friendly
  KVM: x86: lapic: Clean up find_highest_vector() and count_vectors()
  ...

Conflicts:
	arch/s390/include/asm/processor.h
	arch/x86/kvm/i8259.c
commit ecefbd94b8

62 changed files with 3009 additions and 1469 deletions
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/rcupdate.h>
 #include <linux/ratelimit.h>
+#include <linux/err.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -34,6 +35,13 @@
 #define KVM_MMIO_SIZE 8
 #endif
 
+/*
+ * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by kvm;
+ * the other bits are visible to userspace and are defined in
+ * include/linux/kvm.h.
+ */
+#define KVM_MEMSLOT_INVALID	(1UL << 16)
+
 /*
  * If we support unaligned MMIO, at most one fragment will be split into two:
  */
@@ -48,6 +56,47 @@
 #define KVM_MAX_MMIO_FRAGMENTS \
 	(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
 
+/*
+ * For the normal pfn, the highest 12 bits should be zero,
+ * so we can mask these bits to indicate the error.
+ */
+#define KVM_PFN_ERR_MASK	(0xfffULL << 52)
+
+#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
+#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
+#define KVM_PFN_ERR_BAD		(KVM_PFN_ERR_MASK + 2)
+#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 3)
+
+static inline bool is_error_pfn(pfn_t pfn)
+{
+	return !!(pfn & KVM_PFN_ERR_MASK);
+}
+
+static inline bool is_noslot_pfn(pfn_t pfn)
+{
+	return pfn == KVM_PFN_ERR_BAD;
+}
+
+static inline bool is_invalid_pfn(pfn_t pfn)
+{
+	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
+}
+
+#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
+#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+	return addr >= PAGE_OFFSET;
+}
+
+#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))
+
+static inline bool is_error_page(struct page *page)
+{
+	return IS_ERR(page);
+}
+
 /*
  * vcpu->requests bit members
  */
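The hunk above replaces the old bad_page/fault_page machinery (removed further down) with error values encoded directly in the pfn: a valid pfn never has any of its top 12 bits set, so a single mask test identifies every error, and the low bits distinguish the cause. A minimal user-space sketch of the encoding, with pfn_t mocked as uint64_t and the constants copied from the hunk (an illustration only, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pfn_t;			/* stand-in for the kernel's pfn_t */

#define KVM_PFN_ERR_MASK	(0xfffULL << 52)
#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_BAD		(KVM_PFN_ERR_MASK + 2)

static bool is_error_pfn(pfn_t pfn)  { return !!(pfn & KVM_PFN_ERR_MASK); }
static bool is_noslot_pfn(pfn_t pfn) { return pfn == KVM_PFN_ERR_BAD; }

int main(void)
{
	const pfn_t samples[] = {
		0x1234,			/* ordinary frame number: no error */
		KVM_PFN_ERR_FAULT,	/* host page could not be faulted in */
		KVM_PFN_ERR_HWPOISON,	/* host page is poisoned */
		KVM_PFN_ERR_BAD,	/* gfn not covered by any memslot */
	};

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("pfn %#llx: error=%d noslot=%d\n",
		       (unsigned long long)samples[i],
		       is_error_pfn(samples[i]), is_noslot_pfn(samples[i]));
	return 0;
}

Under this scheme, is_invalid_pfn() from the hunk picks out lookups that truly failed (fault, poison) while treating the "no slot" sentinel separately, which is what lets callers handle an unmapped gfn as candidate MMIO rather than as a hard failure.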
@@ -70,7 +119,8 @@
 #define KVM_REQ_PMU               16
 #define KVM_REQ_PMI               17
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
+#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
 
 struct kvm;
 struct kvm_vcpu;
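The new IRQ source ID backs the resampling irqfds introduced for level-triggered interrupts ("KVM: Add resampling irqfds for level triggered interrupts" in the shortlog): userspace asserts the line through one eventfd, and on guest EOI, KVM de-asserts the line and signals a second "resample" eventfd so the device model (vfio, for instance) can decide whether to re-assert. A sketch of the userspace registration side, using the KVM_IRQFD uapi as extended by this series; vm_fd and gsi are assumed to come from the caller, and error handling is elided:

#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Register a level-triggered irqfd with a resample eventfd. */
int register_resample_irqfd(int vm_fd, unsigned int gsi)
{
	int trigger_fd  = eventfd(0, 0);	/* written to assert the IRQ */
	int resample_fd = eventfd(0, 0);	/* signaled by KVM on guest EOI */
	struct kvm_irqfd irqfd;

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd         = trigger_fd;
	irqfd.gsi        = gsi;
	irqfd.flags      = KVM_IRQFD_FLAG_RESAMPLE;
	irqfd.resamplefd = resample_fd;

	return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}

The resampler_list/resampler_lock fields added to struct kvm's irqfds block later in this diff track these registrations, since several irqfds may share one GSI.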
@@ -183,6 +233,18 @@ struct kvm_vcpu {
 	} async_pf;
 #endif
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+	/*
+	 * Cpu relax intercept or pause loop exit optimization
+	 * in_spin_loop: set when a vcpu does a pause loop exit
+	 *  or cpu relax intercepted.
+	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
+	 */
+	struct {
+		bool in_spin_loop;
+		bool dy_eligible;
+	} spin_loop;
+#endif
 	struct kvm_vcpu_arch arch;
 };
 
@@ -201,7 +263,6 @@ struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long flags;
-	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
 	struct kvm_arch_memory_slot arch;
 	unsigned long userspace_addr;
@@ -283,6 +344,8 @@ struct kvm {
 	struct {
 		spinlock_t lock;
 		struct list_head items;
+		struct list_head resampler_list;
+		struct mutex resampler_lock;
 	} irqfds;
 	struct list_head ioeventfds;
 #endif
@@ -348,7 +411,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
-void vcpu_load(struct kvm_vcpu *vcpu);
+int __must_check vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
 
 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
@@ -378,23 +441,6 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 	return slot;
 }
 
-#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
-#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
-static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
-
-extern struct page *bad_page;
-extern struct page *fault_page;
-
-extern pfn_t bad_pfn;
-extern pfn_t fault_pfn;
-
-int is_error_page(struct page *page);
-int is_error_pfn(pfn_t pfn);
-int is_hwpoison_pfn(pfn_t pfn);
-int is_fault_pfn(pfn_t pfn);
-int is_noslot_pfn(pfn_t pfn);
-int is_invalid_pfn(pfn_t pfn);
-int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
 			  int user_alloc);
@@ -415,28 +461,33 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   int user_alloc);
 bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
-void kvm_arch_flush_shadow(struct kvm *kvm);
+/* flush all memory translations */
+void kvm_arch_flush_shadow_all(struct kvm *kvm);
+/* flush memory translations pointing to 'slot' */
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+				   struct kvm_memory_slot *slot);
 
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 			    int nr_pages);
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
-pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
 		       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable);
-pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
-			 struct kvm_memory_slot *slot, gfn_t gfn);
-void kvm_release_pfn_dirty(pfn_t);
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
+
+void kvm_release_pfn_dirty(pfn_t pfn);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);
@@ -494,6 +545,7 @@ int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 				   struct
 				   kvm_userspace_memory_region *mem,
 				   int user_alloc);
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
 
@@ -573,7 +625,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
-int kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_mmio_pfn(pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
@@ -728,6 +780,12 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
 	return search_memslots(slots, gfn);
 }
 
+static inline unsigned long
+__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+}
+
 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn_to_memslot(kvm, gfn)->id;
@@ -740,10 +798,12 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
 }
 
-static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
-					       gfn_t gfn)
+static inline gfn_t
+hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
 {
-	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
+
+	return slot->base_gfn + gfn_offset;
 }
 
 static inline gpa_t gfn_to_gpa(gfn_t gfn)
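The __gfn_to_hva_memslot() helper added two hunks up and the new hva_to_gfn_memslot() conversion above are inverses over a slot: a memslot maps npages guest frames starting at base_gfn onto a contiguous userspace range starting at userspace_addr, so both directions are plain offset arithmetic. A small user-space sketch of the round trip (types mocked; illustration only):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef uint64_t gfn_t;

struct memslot {			/* mock of struct kvm_memory_slot */
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long userspace_addr;
};

static unsigned long gfn_to_hva(struct memslot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static gfn_t hva_to_gfn(unsigned long hva, struct memslot *slot)
{
	return slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct memslot slot = { .base_gfn = 0x100, .npages = 512,
				.userspace_addr = 0x7f0000000000UL };
	gfn_t gfn = 0x180;
	unsigned long hva = gfn_to_hva(&slot, gfn);

	/* The round trip recovers the original guest frame number. */
	printf("gfn %#llx -> hva %#lx -> gfn %#llx\n",
	       (unsigned long long)gfn, hva,
	       (unsigned long long)hva_to_gfn(hva, &slot));
	return 0;
}

The reverse mapping is what the MMU notifier paths need: given a host virtual range being invalidated, they can find the guest frames it backs without the per-slot rmap array removed earlier in this diff.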
@@ -899,5 +959,32 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 	}
 }
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.in_spin_loop = val;
+}
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.dy_eligible = val;
+}
+
+#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+{
+	return true;
+}
+
+#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
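These accessors maintain the spin_loop state added to struct kvm_vcpu earlier in the diff; the pause-loop-exit/directed-yield path uses them to prefer yield candidates that are not themselves spinning. A user-space sketch of the eligibility heuristic, loosely modeled on kvm_vcpu_eligible_for_directed_yield() in virt/kvm/kvm_main.c from this series (types mocked; a simplified illustration, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

struct vcpu {				/* mock of struct kvm_vcpu */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
};

/* A vcpu that was not spinning is always a fine yield target; one that
 * was spinning alternates between ineligible and eligible on successive
 * scans, so a spinner is deprioritized but never starved. */
static bool eligible_for_directed_yield(struct vcpu *v)
{
	bool eligible = !v->spin_loop.in_spin_loop || v->spin_loop.dy_eligible;

	if (v->spin_loop.in_spin_loop)
		v->spin_loop.dy_eligible = !v->spin_loop.dy_eligible;
	return eligible;
}

int main(void)
{
	struct vcpu v = { .spin_loop = { .in_spin_loop = true,
					 .dy_eligible = false } };

	for (int scan = 0; scan < 4; scan++)
		printf("scan %d: eligible=%d\n",
		       scan, eligible_for_directed_yield(&v));
	return 0;
}

The toggle on dy_eligible means a spinning vcpu is skipped on one scan but reconsidered on the next, which is how the overcommit handling mentioned in the pull message avoids wasting cycles yielding to vcpus that are themselves just waiting on a lock.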