KVM: move DR register access handling into generic code
Currently both SVM and VMX have their own DR handling code. Move it to x86.c.

Acked-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
commit 020df0794f
parent 6bc31bdc55

4 changed files with 93 additions and 134 deletions
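For orientation before the diff: a minimal sketch, not part of the patch, of how a vendor exit handler is expected to use the new generic helpers. It mirrors the reworked handle_dr() in vmx.c below; the function name example_handle_dr_access and its dr/reg/is_write parameters are hypothetical stand-ins for whatever the vendor code decodes from its exit information.

	/* Illustrative only: hypothetical vendor exit handler built on the new
	 * generic DR helpers in x86.c. */
	static int example_handle_dr_access(struct kvm_vcpu *vcpu, int dr, int reg,
					    bool is_write)
	{
		if (!is_write) {
			unsigned long val;

			/* kvm_get_dr() returns 0 on success; it queues #UD itself
			 * for DR4/DR5 aliases when CR4.DE is set. */
			if (!kvm_get_dr(vcpu, dr, &val))
				kvm_register_write(vcpu, reg, val);
		} else {
			/* kvm_set_dr() validates the value, injects #GP/#UD as
			 * needed, and pushes DR7 updates back to the vendor code
			 * through the new kvm_x86_ops->set_dr7() hook. */
			kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
		}

		return 1;	/* exit handled in kernel, resume the guest */
	}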
				
			
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -496,8 +496,7 @@ struct kvm_x86_ops {
 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
-	int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
-	int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
+	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -602,6 +601,8 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1307,70 +1307,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 	svm->vmcb->control.asid = sd->next_asid++;
 }
 
-static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
+static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	switch (dr) {
-	case 0 ... 3:
-		*dest = vcpu->arch.db[dr];
-		break;
-	case 4:
-		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-			return EMULATE_FAIL; /* will re-inject UD */
-		/* fall through */
-	case 6:
-		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-			*dest = vcpu->arch.dr6;
-		else
-			*dest = svm->vmcb->save.dr6;
-		break;
-	case 5:
-		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-			return EMULATE_FAIL; /* will re-inject UD */
-		/* fall through */
-	case 7:
-		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-			*dest = vcpu->arch.dr7;
-		else
-			*dest = svm->vmcb->save.dr7;
-		break;
-	}
-
-	return EMULATE_DONE;
-}
-
-static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	switch (dr) {
-	case 0 ... 3:
-		vcpu->arch.db[dr] = value;
-		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
-			vcpu->arch.eff_db[dr] = value;
-		break;
-	case 4:
-		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-			return EMULATE_FAIL; /* will re-inject UD */
-		/* fall through */
-	case 6:
-		vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
-		break;
-	case 5:
-		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-			return EMULATE_FAIL; /* will re-inject UD */
-		/* fall through */
-	case 7:
-		vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
-		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
-			svm->vmcb->save.dr7 = vcpu->arch.dr7;
-			vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
-		}
-		break;
-	}
-
-	return EMULATE_DONE;
+	svm->vmcb->save.dr7 = value;
 }
 
 static int pf_interception(struct vcpu_svm *svm)
@@ -3302,8 +3243,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_idt = svm_set_idt,
 	.get_gdt = svm_get_gdt,
 	.set_gdt = svm_set_gdt,
-	.get_dr = svm_get_dr,
-	.set_dr = svm_set_dr,
+	.set_dr7 = svm_set_dr7,
 	.cache_reg = svm_cache_reg,
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3089,19 +3089,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int check_dr_alias(struct kvm_vcpu *vcpu)
-{
-	if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
-		kvm_queue_exception(vcpu, UD_VECTOR);
-		return -1;
-	}
-	return 0;
-}
-
 static int handle_dr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification;
-	unsigned long val;
 	int dr, reg;
 
 	/* Do not handle if the CPL > 0, will trigger GP on re-entry */
@@ -3136,67 +3126,20 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
 	reg = DEBUG_REG_ACCESS_REG(exit_qualification);
 	if (exit_qualification & TYPE_MOV_FROM_DR) {
-		switch (dr) {
-		case 0 ... 3:
-			val = vcpu->arch.db[dr];
-			break;
-		case 4:
-			if (check_dr_alias(vcpu) < 0)
-				return 1;
-			/* fall through */
-		case 6:
-			val = vcpu->arch.dr6;
-			break;
-		case 5:
-			if (check_dr_alias(vcpu) < 0)
-				return 1;
-			/* fall through */
-		default: /* 7 */
-			val = vcpu->arch.dr7;
-			break;
-		}
-		kvm_register_write(vcpu, reg, val);
-	} else {
-		val = vcpu->arch.regs[reg];
-		switch (dr) {
-		case 0 ... 3:
-			vcpu->arch.db[dr] = val;
-			if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
-				vcpu->arch.eff_db[dr] = val;
-			break;
-		case 4:
-			if (check_dr_alias(vcpu) < 0)
-				return 1;
-			/* fall through */
-		case 6:
-			if (val & 0xffffffff00000000ULL) {
-				kvm_inject_gp(vcpu, 0);
-				return 1;
-			}
-			vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
-			break;
-		case 5:
-			if (check_dr_alias(vcpu) < 0)
-				return 1;
-			/* fall through */
-		default: /* 7 */
-			if (val & 0xffffffff00000000ULL) {
-				kvm_inject_gp(vcpu, 0);
-				return 1;
-			}
-			vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
-			if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
-				vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
-				vcpu->arch.switch_db_regs =
-					(val & DR7_BP_EN_MASK);
-			}
-			break;
-		}
-	}
+		unsigned long val;
+		if (!kvm_get_dr(vcpu, dr, &val))
+			kvm_register_write(vcpu, reg, val);
+	} else
+		kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
 	skip_emulated_instruction(vcpu);
 	return 1;
 }
 
+static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	vmcs_writel(GUEST_DR7, val);
+}
+
 static int handle_cpuid(struct kvm_vcpu *vcpu)
 {
 	kvm_emulate_cpuid(vcpu);
@@ -4187,6 +4130,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_idt = vmx_set_idt,
 	.get_gdt = vmx_get_gdt,
 	.set_gdt = vmx_set_gdt,
+	.set_dr7 = vmx_set_dr7,
 	.cache_reg = vmx_cache_reg,
 	.get_rflags = vmx_get_rflags,
 	.set_rflags = vmx_set_rflags,
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -562,6 +562,80 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_cr8);
 
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
+{
+	switch (dr) {
+	case 0 ... 3:
+		vcpu->arch.db[dr] = val;
+		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+			vcpu->arch.eff_db[dr] = val;
+		break;
+	case 4:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+			kvm_queue_exception(vcpu, UD_VECTOR);
+			return 1;
+		}
+		/* fall through */
+	case 6:
+		if (val & 0xffffffff00000000ULL) {
+			kvm_inject_gp(vcpu, 0);
+			return 1;
+		}
+		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
+		break;
+	case 5:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+			kvm_queue_exception(vcpu, UD_VECTOR);
+			return 1;
+		}
+		/* fall through */
+	default: /* 7 */
+		if (val & 0xffffffff00000000ULL) {
+			kvm_inject_gp(vcpu, 0);
+			return 1;
+		}
+		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
+		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+			kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
+			vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
+		}
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_set_dr);
+
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+{
+	switch (dr) {
+	case 0 ... 3:
+		*val = vcpu->arch.db[dr];
+		break;
+	case 4:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+			kvm_queue_exception(vcpu, UD_VECTOR);
+			return 1;
+		}
+		/* fall through */
+	case 6:
+		*val = vcpu->arch.dr6;
+		break;
+	case 5:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+			kvm_queue_exception(vcpu, UD_VECTOR);
+			return 1;
+		}
+		/* fall through */
+	default: /* 7 */
+		*val = vcpu->arch.dr7;
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_get_dr);
+
 static inline u32 bit(int bitno)
 {
 	return 1 << (bitno & 31);
@@ -3483,14 +3557,14 @@ int emulate_clts(struct kvm_vcpu *vcpu)
 
 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 {
-	return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
+	return kvm_get_dr(ctxt->vcpu, dr, dest);
 }
 
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 {
 	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
 
-	return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
+	return kvm_set_dr(ctxt->vcpu, dr, value & mask);
 }
 
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)