From c0c8e5258f6aa8e5fa65ce118333b7c1e484c607 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <pbonzini@redhat.com>
Date: Tue, 25 Oct 2022 15:47:19 +0300
Subject: [PATCH] KVM: x86: start moving SMM-related functions to new files

Create a new header and source with code related to system management
mode emulation. Entry and exit will move there too; for now,
opportunistically rename put_smstate to PUT_SMSTATE while moving
it to smm.h, and adjust the SMM state saving code.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |   6 --
 arch/x86/kvm/Makefile           |   1 +
 arch/x86/kvm/emulate.c          |   1 +
 arch/x86/kvm/kvm_cache_regs.h   |   5 --
 arch/x86/kvm/lapic.c            |   1 +
 arch/x86/kvm/mmu/mmu.c          |   1 +
 arch/x86/kvm/smm.c              |  37 ++++++++
 arch/x86/kvm/smm.h              |  25 ++++++
 arch/x86/kvm/svm/nested.c       |   1 +
 arch/x86/kvm/svm/svm.c          |   5 +-
 arch/x86/kvm/vmx/nested.c       |   1 +
 arch/x86/kvm/vmx/vmx.c          |   1 +
 arch/x86/kvm/x86.c              | 148 ++++++++++++--------------------
 arch/x86/kvm/x86.h              |   1 +
 14 files changed, 128 insertions(+), 106 deletions(-)
 create mode 100644 arch/x86/kvm/smm.c
 create mode 100644 arch/x86/kvm/smm.h
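
(Not part of the patch: a minimal sketch of the SMRAM addressing convention
behind the GET_SMSTATE/PUT_SMSTATE macros that this patch moves into
arch/x86/kvm/smm.h. The buffer KVM passes around covers only the last 512
bytes of the SMRAM state-save area, so offsets such as 0x7efc are rebased by
subtracting 0x7e00. The standalone program below is hypothetical, using
<stdint.h> types instead of the kernel's u32/u64, but the macro bodies match
the ones added by the patch, and 0x7efc is the revision-id slot written by
enter_smm_save_state_32().)

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Same shape as the macros in arch/x86/kvm/smm.h: offsets name
	 * SMRAM state-save locations in 0x7e00..0x7fff, and the buffer
	 * holds only that last 512-byte page, hence the -0x7e00 rebase.
	 */
	#define GET_SMSTATE(type, buf, offset) \
		(*(type *)((buf) + (offset) - 0x7e00))
	#define PUT_SMSTATE(type, buf, offset, val) \
		*(type *)((buf) + (offset) - 0x7e00) = val

	int main(void)
	{
		char buf[512];	/* hypothetical stand-in for the SMRAM state-save page */

		memset(buf, 0, sizeof(buf));
		/* 32-bit save-state revision id, as written by enter_smm_save_state_32() */
		PUT_SMSTATE(uint32_t, buf, 0x7efc, 0x00020000);
		printf("revision id: 0x%08x\n", GET_SMSTATE(uint32_t, buf, 0x7efc));
		return 0;
	}
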
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index aa381ab69a19..eed72a164a5c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2077,12 +2077,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #endif
 }
 
-#define put_smstate(type, buf, offset, val) \
-	*(type *)((buf) + (offset) - 0x7e00) = val
-
-#define GET_SMSTATE(type, buf, offset) \
-	(*(type *)((buf) + (offset) - 0x7e00))
-
 int kvm_cpu_dirty_log_size(void);
 
 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 30f244b64523..ec6f7656254b 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -20,6 +20,7 @@ endif
 
 kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
 kvm-$(CONFIG_KVM_XEN) += xen.o
+kvm-y += smm.o
 
 kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
 		vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index aacb28c83e43..3c3bf6f66a7e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -30,6 +30,7 @@
 #include "tss.h"
 #include "mmu.h"
 #include "pmu.h"
+#include "smm.h"
 
 /*
  * Operand types
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 3febc342360c..c09174f73a34 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -200,9 +200,4 @@ static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
 	return vcpu->arch.hflags & HF_GUEST_MASK;
 }
 
-static inline bool is_smm(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.hflags & HF_SMM_MASK;
-}
-
 #endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9dda989a1cf0..7460d9566119 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -42,6 +42,7 @@
 #include "x86.h"
 #include "cpuid.h"
 #include "hyperv.h"
+#include "smm.h"
 
 #ifndef CONFIG_X86_64
 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3552e6af3684..60a2c5c75095 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -22,6 +22,7 @@
 #include "tdp_mmu.h"
 #include "x86.h"
 #include "kvm_cache_regs.h"
+#include "smm.h"
 #include "kvm_emulate.h"
 #include "cpuid.h"
 #include "spte.h"
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
new file mode 100644
index 000000000000..b91c48d91f6e
--- /dev/null
+++ b/arch/x86/kvm/smm.c
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/kvm_host.h>
+#include "x86.h"
+#include "kvm_cache_regs.h"
+#include "kvm_emulate.h"
+#include "smm.h"
+#include "trace.h"
+
+void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
+{
+	trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
+
+	if (entering_smm) {
+		vcpu->arch.hflags |= HF_SMM_MASK;
+	} else {
+		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
+
+		/* Process a latched INIT or SMI, if any. */
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+		/*
+		 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
+		 * on SMM exit we still need to reload them from
+		 * guest memory
+		 */
+		vcpu->arch.pdptrs_from_userspace = false;
+	}
+
+	kvm_mmu_reset_context(vcpu);
+}
+
+void process_smi(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.smi_pending = true;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+}
diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
new file mode 100644
index 000000000000..d85d4ccd32dd
--- /dev/null
+++ b/arch/x86/kvm/smm.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_KVM_SMM_H
+#define ASM_KVM_SMM_H
+
+#define GET_SMSTATE(type, buf, offset) \
+	(*(type *)((buf) + (offset) - 0x7e00))
+
+#define PUT_SMSTATE(type, buf, offset, val) \
+	*(type *)((buf) + (offset) - 0x7e00) = val
+
+static inline int kvm_inject_smi(struct kvm_vcpu *vcpu)
+{
+	kvm_make_request(KVM_REQ_SMI, vcpu);
+	return 0;
+}
+
+static inline bool is_smm(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hflags & HF_SMM_MASK;
+}
+
+void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm);
+void process_smi(struct kvm_vcpu *vcpu);
+
+#endif
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 76dcc8a3e849..d6cc9963b04a 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -25,6 +25,7 @@
 #include "trace.h"
 #include "mmu.h"
 #include "x86.h"
+#include "smm.h"
 #include "cpuid.h"
 #include "lapic.h"
 #include "svm.h"
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f3813dbacb9f..f4ed4a02b109 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -6,6 +6,7 @@
 #include "mmu.h"
 #include "kvm_cache_regs.h"
 #include "x86.h"
+#include "smm.h"
 #include "cpuid.h"
 #include "pmu.h"
 
@@ -4443,9 +4444,9 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 		return 0;
 
 	/* FED8h - SVM Guest */
-	put_smstate(u64, smstate, 0x7ed8, 1);
+	PUT_SMSTATE(u64, smstate, 0x7ed8, 1);
 	/* FEE0h - SVM Guest VMCB Physical Address */
-	put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+	PUT_SMSTATE(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
 
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ddd4367d4826..e8197915b8b0 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -16,6 +16,7 @@
 #include "trace.h"
 #include "vmx.h"
 #include "x86.h"
+#include "smm.h"
 
 static bool __read_mostly enable_shadow_vmcs = 1;
 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c9b49a09e6b5..dc75de78ceb6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -66,6 +66,7 @@
 #include "vmcs12.h"
 #include "vmx.h"
 #include "x86.h"
+#include "smm.h"
 
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b0c47b41c264..7e60b4c12b91 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -30,6 +30,7 @@
 #include "hyperv.h"
 #include "lapic.h"
 #include "xen.h"
+#include "smm.h"
 
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -119,7 +120,6 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
-static void process_smi(struct kvm_vcpu *vcpu);
 static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 static void store_regs(struct kvm_vcpu *vcpu);
@@ -4878,13 +4878,6 @@ static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
-{
-	kvm_make_request(KVM_REQ_SMI, vcpu);
-
-	return 0;
-}
-
 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
 					   struct kvm_tpr_access_ctl *tac)
 {
@@ -5095,8 +5088,6 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
-static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm);
-
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
@@ -5536,7 +5527,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_SMI: {
-		r = kvm_vcpu_ioctl_smi(vcpu);
+		r = kvm_inject_smi(vcpu);
 		break;
 	}
 	case KVM_SET_CPUID: {
@@ -8470,29 +8461,6 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
 
-static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
-{
-	trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
-
-	if (entering_smm) {
-		vcpu->arch.hflags |= HF_SMM_MASK;
-	} else {
-		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
-
-		/* Process a latched INIT or SMI, if any. */
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
-
-		/*
-		 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
-		 * on SMM exit we still need to reload them from
-		 * guest memory
-		 */
-		vcpu->arch.pdptrs_from_userspace = false;
-	}
-
-	kvm_mmu_reset_context(vcpu);
-}
-
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
 				unsigned long *db)
 {
@@ -9853,16 +9821,16 @@ static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
 	int offset;
 
 	kvm_get_segment(vcpu, &seg, n);
-	put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
+	PUT_SMSTATE(u32, buf, 0x7fa8 + n * 4, seg.selector);
 
 	if (n < 3)
 		offset = 0x7f84 + n * 12;
 	else
 		offset = 0x7f2c + (n - 3) * 12;
 
-	put_smstate(u32, buf, offset + 8, seg.base);
-	put_smstate(u32, buf, offset + 4, seg.limit);
-	put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
+	PUT_SMSTATE(u32, buf, offset + 8, seg.base);
+	PUT_SMSTATE(u32, buf, offset + 4, seg.limit);
+	PUT_SMSTATE(u32, buf, offset, enter_smm_get_segment_flags(&seg));
 }
 
 #ifdef CONFIG_X86_64
@@ -9876,10 +9844,10 @@ static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
 	offset = 0x7e00 + n * 16;
 
 	flags = enter_smm_get_segment_flags(&seg) >> 8;
-	put_smstate(u16, buf, offset, seg.selector);
-	put_smstate(u16, buf, offset + 2, flags);
-	put_smstate(u32, buf, offset + 4, seg.limit);
-	put_smstate(u64, buf, offset + 8, seg.base);
+	PUT_SMSTATE(u16, buf, offset, seg.selector);
+	PUT_SMSTATE(u16, buf, offset + 2, flags);
+	PUT_SMSTATE(u32, buf, offset + 4, seg.limit);
+	PUT_SMSTATE(u64, buf, offset + 8, seg.base);
 }
 #endif
 
|
|
@@ -9890,47 +9858,47 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
|
|
unsigned long val;
|
|
int i;
|
|
|
|
- put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
|
|
- put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
|
|
- put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
|
|
- put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
|
|
+ PUT_SMSTATE(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
|
|
+ PUT_SMSTATE(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
|
|
+ PUT_SMSTATE(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
|
|
+ PUT_SMSTATE(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
|
|
|
|
for (i = 0; i < 8; i++)
|
|
- put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
|
|
+ PUT_SMSTATE(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
|
|
|
|
kvm_get_dr(vcpu, 6, &val);
|
|
- put_smstate(u32, buf, 0x7fcc, (u32)val);
|
|
+ PUT_SMSTATE(u32, buf, 0x7fcc, (u32)val);
|
|
kvm_get_dr(vcpu, 7, &val);
|
|
- put_smstate(u32, buf, 0x7fc8, (u32)val);
|
|
+ PUT_SMSTATE(u32, buf, 0x7fc8, (u32)val);
|
|
|
|
kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
|
|
- put_smstate(u32, buf, 0x7fc4, seg.selector);
|
|
- put_smstate(u32, buf, 0x7f64, seg.base);
|
|
- put_smstate(u32, buf, 0x7f60, seg.limit);
|
|
- put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
|
|
+ PUT_SMSTATE(u32, buf, 0x7fc4, seg.selector);
|
|
+ PUT_SMSTATE(u32, buf, 0x7f64, seg.base);
|
|
+ PUT_SMSTATE(u32, buf, 0x7f60, seg.limit);
|
|
+ PUT_SMSTATE(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
|
|
|
|
kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
|
|
- put_smstate(u32, buf, 0x7fc0, seg.selector);
|
|
- put_smstate(u32, buf, 0x7f80, seg.base);
|
|
- put_smstate(u32, buf, 0x7f7c, seg.limit);
|
|
- put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
|
|
+ PUT_SMSTATE(u32, buf, 0x7fc0, seg.selector);
|
|
+ PUT_SMSTATE(u32, buf, 0x7f80, seg.base);
|
|
+ PUT_SMSTATE(u32, buf, 0x7f7c, seg.limit);
|
|
+ PUT_SMSTATE(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
|
|
|
|
static_call(kvm_x86_get_gdt)(vcpu, &dt);
|
|
- put_smstate(u32, buf, 0x7f74, dt.address);
|
|
- put_smstate(u32, buf, 0x7f70, dt.size);
|
|
+ PUT_SMSTATE(u32, buf, 0x7f74, dt.address);
|
|
+ PUT_SMSTATE(u32, buf, 0x7f70, dt.size);
|
|
|
|
static_call(kvm_x86_get_idt)(vcpu, &dt);
|
|
- put_smstate(u32, buf, 0x7f58, dt.address);
|
|
- put_smstate(u32, buf, 0x7f54, dt.size);
|
|
+ PUT_SMSTATE(u32, buf, 0x7f58, dt.address);
|
|
+ PUT_SMSTATE(u32, buf, 0x7f54, dt.size);
|
|
|
|
for (i = 0; i < 6; i++)
|
|
enter_smm_save_seg_32(vcpu, buf, i);
|
|
|
|
- put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
|
|
+ PUT_SMSTATE(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
|
|
|
|
/* revision id */
|
|
- put_smstate(u32, buf, 0x7efc, 0x00020000);
|
|
- put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
|
|
+ PUT_SMSTATE(u32, buf, 0x7efc, 0x00020000);
|
|
+ PUT_SMSTATE(u32, buf, 0x7ef8, vcpu->arch.smbase);
|
|
}
|
|
|
|
#ifdef CONFIG_X86_64
|
|
@@ -9942,46 +9910,46 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 	int i;
 
 	for (i = 0; i < 16; i++)
-		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
+		PUT_SMSTATE(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
 
-	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
-	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f78, kvm_rip_read(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
 
 	kvm_get_dr(vcpu, 6, &val);
-	put_smstate(u64, buf, 0x7f68, val);
+	PUT_SMSTATE(u64, buf, 0x7f68, val);
 	kvm_get_dr(vcpu, 7, &val);
-	put_smstate(u64, buf, 0x7f60, val);
+	PUT_SMSTATE(u64, buf, 0x7f60, val);
 
-	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
-	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
-	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
 
-	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
+	PUT_SMSTATE(u32, buf, 0x7f00, vcpu->arch.smbase);
 
 	/* revision id */
-	put_smstate(u32, buf, 0x7efc, 0x00020064);
+	PUT_SMSTATE(u32, buf, 0x7efc, 0x00020064);
 
-	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
+	PUT_SMSTATE(u64, buf, 0x7ed0, vcpu->arch.efer);
 
 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
-	put_smstate(u16, buf, 0x7e90, seg.selector);
-	put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
-	put_smstate(u32, buf, 0x7e94, seg.limit);
-	put_smstate(u64, buf, 0x7e98, seg.base);
+	PUT_SMSTATE(u16, buf, 0x7e90, seg.selector);
+	PUT_SMSTATE(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
+	PUT_SMSTATE(u32, buf, 0x7e94, seg.limit);
+	PUT_SMSTATE(u64, buf, 0x7e98, seg.base);
 
 	static_call(kvm_x86_get_idt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7e84, dt.size);
-	put_smstate(u64, buf, 0x7e88, dt.address);
+	PUT_SMSTATE(u32, buf, 0x7e84, dt.size);
+	PUT_SMSTATE(u64, buf, 0x7e88, dt.address);
 
 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
-	put_smstate(u16, buf, 0x7e70, seg.selector);
-	put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
-	put_smstate(u32, buf, 0x7e74, seg.limit);
-	put_smstate(u64, buf, 0x7e78, seg.base);
+	PUT_SMSTATE(u16, buf, 0x7e70, seg.selector);
+	PUT_SMSTATE(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
+	PUT_SMSTATE(u32, buf, 0x7e74, seg.limit);
+	PUT_SMSTATE(u64, buf, 0x7e78, seg.base);
 
 	static_call(kvm_x86_get_gdt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7e64, dt.size);
-	put_smstate(u64, buf, 0x7e68, dt.address);
+	PUT_SMSTATE(u32, buf, 0x7e64, dt.size);
+	PUT_SMSTATE(u64, buf, 0x7e68, dt.address);
 
 	for (i = 0; i < 6; i++)
 		enter_smm_save_seg_64(vcpu, buf, i);
@@ -10067,12 +10035,6 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 }
 
-static void process_smi(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.smi_pending = true;
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
-}
-
 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
 				       unsigned long *vcpu_bitmap)
 {
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 1926d2cb8e79..cb64fa05405f 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -7,6 +7,7 @@
 #include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
 #include "kvm_emulate.h"
+#include "smm.h"
 
 struct kvm_caps {
 	/* control of guest tsc rate supported? */
-- 
2.38.1