Merge branch 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Avi Kivity:
 "Changes include timekeeping improvements, support for assigning host
  PCI devices that share interrupt lines, s390 user-controlled guests,
  a large ppc update, and random fixes."

This is with the sign-offs fixed; hopefully next merge window we won't
have rebased commits.

* 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (130 commits)
  KVM: Convert intx_mask_lock to spin lock
  KVM: x86: fix kvm_write_tsc() TSC matching thinko
  x86: kvmclock: abstract save/restore sched_clock_state
  KVM: nVMX: Fix erroneous exception bitmap check
  KVM: Ignore the writes to MSR_K7_HWCR(3)
  KVM: MMU: make use of ->root_level in reset_rsvds_bits_mask
  KVM: PMU: add proper support for fixed counter 2
  KVM: PMU: Fix raw event check
  KVM: PMU: warn when pin control is set in eventsel msr
  KVM: VMX: Fix delayed load of shared MSRs
  KVM: use correct tlbs dirty type in cmpxchg
  KVM: Allow host IRQ sharing for assigned PCI 2.3 devices
  KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
  KVM: x86 emulator: Allow PM/VM86 switch during task switch
  KVM: SVM: Fix CPL updates
  KVM: x86 emulator: VM86 segments must have DPL 3
  KVM: x86 emulator: Fix task switch privilege checks
  arch/powerpc/kvm/book3s_hv.c: included linux/sched.h twice
  KVM: x86 emulator: correctly mask pmc index bits in RDPMC instruction emulation
  KVM: mmu_notifier: Flush TLBs before releasing mmu_lock
  ...
commit 2e7580b0e7
82 changed files with 5827 additions and 1686 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -172,11 +172,6 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
  */
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
-struct kvm_lpage_info {
-        unsigned long rmap_pde;
-        int write_count;
-};
-
 struct kvm_memory_slot {
         gfn_t base_gfn;
         unsigned long npages;
@@ -185,7 +180,7 @@ struct kvm_memory_slot {
         unsigned long *dirty_bitmap;
         unsigned long *dirty_bitmap_head;
         unsigned long nr_dirty_pages;
-        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+        struct kvm_arch_memory_slot arch;
         unsigned long userspace_addr;
         int user_alloc;
         int id;
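The lpage_info arrays do not disappear; they move behind the new kvm_arch_memory_slot member, so each architecture can hang its own per-slot metadata off the generic slot. As a sketch of the idea, the x86 side of this series keeps the old arrays there, roughly:

    /* sketch: per-arch slot metadata introduced by this hunk;
     * on x86 it carries the lpage_info arrays removed above */
    struct kvm_arch_memory_slot {
            struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
    };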
@@ -377,6 +372,9 @@ int kvm_set_memory_region(struct kvm *kvm,
 int __kvm_set_memory_region(struct kvm *kvm,
                             struct kvm_userspace_memory_region *mem,
                             int user_alloc);
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                           struct kvm_memory_slot *dont);
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot,
                                    struct kvm_memory_slot old,
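kvm_arch_create_memslot() and kvm_arch_free_memslot() pair up around the new arch member: create allocates whatever per-slot metadata the architecture wants, free releases anything present in "free" but absent from "dont". An architecture with no such metadata can stub both out; a minimal sketch (several arches in this series do essentially this):

    /* sketch: no-op hooks for an arch with no per-slot metadata */
    int kvm_arch_create_memslot(struct kvm_memory_slot *slot,
                                unsigned long npages)
    {
            return 0;
    }

    void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                               struct kvm_memory_slot *dont)
    {
    }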
@@ -386,6 +384,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                                    struct kvm_userspace_memory_region *mem,
                                    struct kvm_memory_slot old,
                                    int user_alloc);
+bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 
@@ -451,6 +450,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
 long kvm_arch_vcpu_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg);
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
 
 int kvm_dev_ioctl_check_extension(long ext);
 
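kvm_arch_vcpu_fault() lets an architecture service page faults on the vcpu file mapping; it exists for the s390 user-controlled guests in this pull, and any other architecture can simply refuse the fault. A sketch of that default, along the lines of the non-s390 implementations in this series:

    /* sketch: an arch with nothing to mmap just refuses the fault */
    int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
    {
            return VM_FAULT_SIGBUS;
    }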
@@ -521,7 +521,7 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
 }
 #endif
 
-int kvm_arch_init_vm(struct kvm *kvm);
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
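The new type argument is the value userspace passes to KVM_CREATE_VM, which until now had to be zero; s390 uses it to request a user-controlled guest (KVM_VM_S390_UCONTROL). A minimal sketch of the userspace side, assuming a host with /dev/kvm and error handling omitted:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            /* KVM_CREATE_VM's argument is now a machine type; 0 keeps
             * the old behaviour, s390 accepts KVM_VM_S390_UCONTROL. */
            int kvm_fd = open("/dev/kvm", O_RDWR);
            int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0UL);

            return vm_fd < 0 ? 1 : 0;
    }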
@@ -547,6 +547,7 @@ struct kvm_assigned_dev_kernel {
         unsigned int entries_nr;
         int host_irq;
         bool host_irq_disabled;
+        bool pci_2_3;
         struct msix_entry *host_msix_entries;
         int guest_irq;
         struct msix_entry *guest_msix_entries;
@@ -556,6 +557,7 @@ struct kvm_assigned_dev_kernel {
         struct pci_dev *dev;
         struct kvm *kvm;
         spinlock_t intx_lock;
+        spinlock_t intx_mask_lock;
         char irq_name[32];
         struct pci_saved_state *pci_saved_state;
 };
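Together, pci_2_3 and the spin locks are what make host IRQ sharing work: the hard-IRQ handler must not sleep (hence a spin lock rather than a mutex for intx_mask_lock), and for a PCI 2.3 device it can mask INTx at the device itself and tell the kernel whether the interrupt was really ours. Roughly, the hard-IRQ half added by this series looks like the simplified fragment below; the local name adev is just shorthand:

    /* simplified sketch of the shared-INTx hard irq handler:
     * mask at the device, defer injection to a threaded handler */
    static irqreturn_t kvm_assigned_dev_intx(int irq, void *dev_id)
    {
            struct kvm_assigned_dev_kernel *adev = dev_id;
            int ret;

            spin_lock(&adev->intx_lock);
            if (pci_check_and_mask_intx(adev->dev)) {
                    adev->host_irq_disabled = true;
                    ret = IRQ_WAKE_THREAD;  /* it was ours */
            } else {
                    ret = IRQ_NONE;         /* shared line, not ours */
            }
            spin_unlock(&adev->intx_lock);

            return ret;
    }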
@@ -651,11 +653,43 @@ static inline void kvm_guest_exit(void)
         current->flags &= ~PF_VCPU;
 }
 
+/*
+ * search_memslots() and __gfn_to_memslot() are here because they are
+ * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
+ * gfn_to_memslot() itself isn't here as an inline because that would
+ * bloat other code too much.
+ */
+static inline struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+{
+        struct kvm_memory_slot *memslot;
+
+        kvm_for_each_memslot(memslot, slots)
+                if (gfn >= memslot->base_gfn &&
+                    gfn < memslot->base_gfn + memslot->npages)
+                        return memslot;
+
+        return NULL;
+}
+
+static inline struct kvm_memory_slot *
+__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
+{
+        return search_memslots(slots, gfn);
+}
+
 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
 {
         return gfn_to_memslot(kvm, gfn)->id;
 }
 
+static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
+{
+        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
+        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+}
+
 static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
                                                gfn_t gfn)
 {
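search_memslots() is a plain linear scan; in this era the memslot array is kept sorted by slot size, so the largest slots are tested first and the common case exits early. A self-contained user-space sketch of the same lookup, with hypothetical mini types (struct slot, find_slot) standing in for the kernel's:

    #include <stdio.h>

    typedef unsigned long long gfn_t;

    struct slot { gfn_t base_gfn; unsigned long npages; int id; };

    /* linear search, same containment test as search_memslots() */
    static struct slot *find_slot(struct slot *slots, int n, gfn_t gfn)
    {
            for (int i = 0; i < n; i++)
                    if (gfn >= slots[i].base_gfn &&
                        gfn < slots[i].base_gfn + slots[i].npages)
                            return &slots[i];
            return NULL;    /* no slot covers this gfn */
    }

    int main(void)
    {
            struct slot slots[] = {
                    { 0x100000, 512, 1 },   /* biggest slot first */
                    { 0x0,      256, 0 },
            };
            struct slot *s = find_slot(slots, 2, 0x100010);

            printf("gfn 0x100010 -> slot %d\n", s ? s->id : -1);
            return 0;
    }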
@@ -702,12 +736,16 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
         if (unlikely(vcpu->kvm->mmu_notifier_count))
                 return 1;
         /*
-         * Both reads happen under the mmu_lock and both values are
-         * modified under mmu_lock, so there's no need of smb_rmb()
-         * here in between, otherwise mmu_notifier_count should be
-         * read before mmu_notifier_seq, see
-         * mmu_notifier_invalidate_range_end write side.
+         * Ensure the read of mmu_notifier_count happens before the read
+         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
+         * mmu_notifier_invalidate_range_end to make sure that the caller
+         * either sees the old (non-zero) value of mmu_notifier_count or
+         * the new (incremented) value of mmu_notifier_seq.
+         * PowerPC Book3s HV KVM calls this under a per-page lock
+         * rather than under kvm->mmu_lock, for scalability, so
+         * can't rely on kvm->mmu_lock to keep things ordered.
          */
+        smp_rmb();
         if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
                 return 1;
         return 0;
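The smp_rmb() added here pairs with a write barrier on the invalidate path: mmu_notifier_invalidate_range_end() bumps mmu_notifier_seq before dropping mmu_notifier_count, with an smp_wmb() in between, so a reader that sees count back at zero is guaranteed to see the new seq and retry. A sketch of that paired write side, essentially as it appears in virt/kvm/kvm_main.c:

    /* sketch of kvm_mmu_notifier_invalidate_range_end() ordering */
    kvm->mmu_notifier_seq++;        /* publish the new seq first ... */
    smp_wmb();
    kvm->mmu_notifier_count--;      /* ... then let readers proceed */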
@@ -770,6 +808,13 @@ static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 {
         return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
 }
+
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
+
+#else
+
+static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
+
 #endif
 
 #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
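kvm_vcpu_compatible() backs the "Ensure all vcpus are consistent with in-kernel irqchip settings" fix in the list above: a vcpu created before the in-kernel irqchip must not coexist with one created after. On x86 the check reduces to comparing the two configurations; a sketch along the lines of the x86 version:

    /* sketch: a vcpu is compatible iff its local APIC choice matches
     * the VM's in-kernel-irqchip choice */
    bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
    {
            return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
    }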