KVM: Call common update function when ioapic entry changed.
Both the TMR and the EOI exit bitmap need to be updated when the ioapic changes or when a vcpu's id/ldr/dfr changes, so use a common update function instead of the EOI-exit-bitmap-specific one.

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
commit 3d81bc7e96
parent 01e439be77

9 changed files with 32 additions and 30 deletions
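For context, the net effect of the diff below is that every place an ioapic entry or a vcpu's id/ldr/dfr can change now raises one generic "scan ioapic" request, and each vcpu rebuilds its EOI exit bitmap at the next guest entry. The following is a minimal, self-contained user-space sketch of that request pattern, not kernel code: the struct kvm/struct vcpu layouts, the REQ_SCAN_IOAPIC flag, and the helper names are simplified stand-ins used only for illustration.

/*
 * Minimal user-space sketch (NOT kernel code) of the request pattern this
 * commit converges on: every ioapic change raises one common "scan ioapic"
 * request, and each vcpu rebuilds its EOI exit bitmap on the next guest
 * entry.  All types and names here are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_VCPUS        2
#define IOAPIC_NUM_PINS 24
#define REQ_SCAN_IOAPIC (1u << 0)

struct vcpu {
	unsigned int requests;
	uint64_t eoi_exit_bitmap[4];            /* 256 vectors, 4 x 64 bits */
};

struct kvm {
	struct vcpu vcpus[NR_VCPUS];
	uint8_t pin_vector[IOAPIC_NUM_PINS];    /* redirection table, simplified */
	bool pin_masked[IOAPIC_NUM_PINS];
};

/* Stand-in for kvm_make_scan_ioapic_request(): flag every vcpu. */
static void make_scan_ioapic_request(struct kvm *kvm)
{
	for (int i = 0; i < NR_VCPUS; i++)
		kvm->vcpus[i].requests |= REQ_SCAN_IOAPIC;
}

/* Any ioapic entry change goes through the one common notification path. */
static void ioapic_set_entry(struct kvm *kvm, int pin, uint8_t vector, bool masked)
{
	kvm->pin_vector[pin] = vector;
	kvm->pin_masked[pin] = masked;
	make_scan_ioapic_request(kvm);
}

/* Stand-in for the per-vcpu scan: rebuild the EOI exit bitmap from the table. */
static void scan_ioapic(struct kvm *kvm, struct vcpu *vcpu)
{
	memset(vcpu->eoi_exit_bitmap, 0, sizeof(vcpu->eoi_exit_bitmap));
	for (int pin = 0; pin < IOAPIC_NUM_PINS; pin++) {
		if (kvm->pin_masked[pin])
			continue;
		uint8_t vec = kvm->pin_vector[pin];
		vcpu->eoi_exit_bitmap[vec / 64] |= 1ull << (vec % 64);
	}
}

/* Stand-in for the request check done before entering the guest. */
static void enter_guest(struct kvm *kvm, struct vcpu *vcpu)
{
	if (vcpu->requests & REQ_SCAN_IOAPIC) {
		vcpu->requests &= ~REQ_SCAN_IOAPIC;
		scan_ioapic(kvm, vcpu);
	}
	/* ... run the guest ... */
}

int main(void)
{
	struct kvm kvm = { 0 };

	for (int pin = 0; pin < IOAPIC_NUM_PINS; pin++)
		kvm.pin_masked[pin] = true;

	ioapic_set_entry(&kvm, 2, 0x30, false);  /* route pin 2 to vector 0x30 */
	for (int i = 0; i < NR_VCPUS; i++)
		enter_guest(&kvm, &kvm.vcpus[i]);

	printf("vcpu0 eoi_exit_bitmap[0] = %#llx\n",
	       (unsigned long long)kvm.vcpus[0].eoi_exit_bitmap[0]);
	return 0;
}

Funneling every change through one request like this keeps the recomputation in a single place, which is the point of replacing the bitmap-specific request with the common one in the patch below.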
				
			
arch/ia64/kvm/lapic.h
@@ -27,10 +27,4 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
 #define kvm_apic_present(x) (true)
 #define kvm_lapic_enabled(x) (true)
 
-static inline bool kvm_apic_vid_enabled(void)
-{
-	/* IA64 has no apicv supporting, do nothing here */
-	return false;
-}
-
 #endif
arch/x86/kvm/lapic.c
@@ -217,7 +217,7 @@ out:
 	if (old)
 		kfree_rcu(old, rcu);
 
-	kvm_ioapic_make_eoibitmap_request(kvm);
+	kvm_vcpu_request_scan_ioapic(kvm);
 }
 
 static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
arch/x86/kvm/vmx.c
@@ -6414,6 +6414,9 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
 
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
+	if (!vmx_vm_has_apicv(vcpu->kvm))
+		return;
+
 	vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
 	vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
 	vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
arch/x86/kvm/x86.c
@@ -5661,13 +5661,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 #endif
 }
 
-static void update_eoi_exitmap(struct kvm_vcpu *vcpu)
+static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
 	u64 eoi_exit_bitmap[4];
 
+	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
+		return;
+
 	memset(eoi_exit_bitmap, 0, 32);
 
-	kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap);
+	kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap);
 	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
 }
 
@@ -5724,8 +5727,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			kvm_handle_pmu_event(vcpu);
 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
 			kvm_deliver_pmi(vcpu);
-		if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
-			update_eoi_exitmap(vcpu);
+		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
+			vcpu_scan_ioapic(vcpu);
 	}
 
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
include/linux/kvm_host.h
@@ -126,7 +126,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_MASTERCLOCK_UPDATE 19
 #define KVM_REQ_MCLOCK_INPROGRESS 20
 #define KVM_REQ_EPR_EXIT          21
-#define KVM_REQ_EOIBITMAP         22
+#define KVM_REQ_SCAN_IOAPIC       22
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
@@ -575,7 +575,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 void kvm_make_mclock_inprogress_request(struct kvm *kvm);
-void kvm_make_update_eoibitmap_request(struct kvm *kvm);
+void kvm_make_scan_ioapic_request(struct kvm *kvm);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
virt/kvm/ioapic.c
@@ -193,15 +193,13 @@ static void update_handled_vectors(struct kvm_ioapic *ioapic)
 	smp_wmb();
 }
 
-void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
-					u64 *eoi_exit_bitmap)
+void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
 	union kvm_ioapic_redirect_entry *e;
 	int index;
 
 	spin_lock(&ioapic->lock);
-	/* traverse ioapic entry to set eoi exit bitmap*/
 	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
 		e = &ioapic->redirtbl[index];
 		if (!e->fields.mask &&
@@ -215,16 +213,22 @@ void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
 	}
 	spin_unlock(&ioapic->lock);
 }
-EXPORT_SYMBOL_GPL(kvm_ioapic_calculate_eoi_exitmap);
 
-void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm)
+#ifdef CONFIG_X86
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
-	if (!kvm_apic_vid_enabled(kvm) || !ioapic)
+	if (!ioapic)
 		return;
-	kvm_make_update_eoibitmap_request(kvm);
+	kvm_make_scan_ioapic_request(kvm);
 }
+#else
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
+{
+	return;
+}
+#endif
 
 static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
@@ -267,7 +271,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
 		    && ioapic->irr & (1 << index))
 			ioapic_service(ioapic, index, false);
-		kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
+		kvm_vcpu_request_scan_ioapic(ioapic->kvm);
 		break;
 	}
 }
@@ -586,7 +590,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
 	spin_lock(&ioapic->lock);
 	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
 	update_handled_vectors(ioapic);
-	kvm_ioapic_make_eoibitmap_request(kvm);
+	kvm_vcpu_request_scan_ioapic(kvm);
 	kvm_rtc_eoi_tracking_restore_all(ioapic);
 	spin_unlock(&ioapic->lock);
 	return 0;
virt/kvm/ioapic.h
@@ -96,9 +96,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 		struct kvm_lapic_irq *irq, unsigned long *dest_map);
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
-void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm);
-void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
-					u64 *eoi_exit_bitmap);
-
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 
 #endif
virt/kvm/irq_comm.c
@@ -285,7 +285,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
 	mutex_lock(&kvm->irq_lock);
 	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
 	mutex_unlock(&kvm->irq_lock);
-	kvm_ioapic_make_eoibitmap_request(kvm);
+	kvm_vcpu_request_scan_ioapic(kvm);
 }
 
 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -295,7 +295,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 	hlist_del_init_rcu(&kian->link);
 	mutex_unlock(&kvm->irq_lock);
 	synchronize_rcu();
-	kvm_ioapic_make_eoibitmap_request(kvm);
+	kvm_vcpu_request_scan_ioapic(kvm);
 }
 
 int kvm_request_irq_source_id(struct kvm *kvm)
virt/kvm/kvm_main.c
@@ -217,9 +217,9 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm)
 	make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 }
 
-void kvm_make_update_eoibitmap_request(struct kvm *kvm)
+void kvm_make_scan_ioapic_request(struct kvm *kvm)
 {
-	make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
+	make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
 }
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)