x86/hvm: Don't raise #GP behind the emulator's back for MSR accesses
author Andrew Cooper <andrew.cooper3@citrix.com>
Wed, 2 Nov 2016 14:36:49 +0000 (14:36 +0000)
committer Andrew Cooper <andrew.cooper3@citrix.com>
Tue, 21 Feb 2017 13:47:49 +0000 (13:47 +0000)
The current hvm_msr_{read,write}_intercept() infrastructure calls
hvm_inject_hw_exception() directly to latch a fault, and returns
X86EMUL_EXCEPTION to its caller.

This behaviour is problematic for the hvmemul_{read,write}_msr() paths, as the
fault is raised behind the back of the x86 emulator.

Alter the behaviour so that hvm_msr_{read,write}_intercept() simply return
X86EMUL_EXCEPTION, leaving the callers to inject the #GP fault themselves.
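
Callers inside the x86 emulator latch the fault in their x86_emulate_ctxt,
while vmexit handlers and other direct callers inject it straight into the
guest.  A rough sketch of the two resulting call patterns, condensed from the
hunks below (variable names are illustrative):

    /* In-emulator path: record the pending #GP in the emulation context. */
    if ( hvm_msr_read_intercept(reg, val) == X86EMUL_EXCEPTION )
        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);

    /* Direct path (e.g. an MSR vmexit): inject the fault immediately. */
    if ( hvm_msr_write_intercept(msr, val, 1) == X86EMUL_EXCEPTION )
        hvm_inject_hw_exception(TRAP_gp_fault, 0);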

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
xen/arch/x86/hvm/emulate.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/hvm/vmx/vvmx.c
xen/include/asm-x86/hvm/support.h

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 14f9b43b9c7b3e1f990aa725c0bc55181eba5e91..edcae5e64bdd52c1109e3a74b2cf69b2c922f4ec 100644
@@ -1544,7 +1544,12 @@ static int hvmemul_read_msr(
     uint64_t *val,
     struct x86_emulate_ctxt *ctxt)
 {
-    return hvm_msr_read_intercept(reg, val);
+    int rc = hvm_msr_read_intercept(reg, val);
+
+    if ( rc == X86EMUL_EXCEPTION )
+        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+
+    return rc;
 }
 
 static int hvmemul_write_msr(
@@ -1552,7 +1557,12 @@ static int hvmemul_write_msr(
     uint64_t val,
     struct x86_emulate_ctxt *ctxt)
 {
-    return hvm_msr_write_intercept(reg, val, 1);
+    int rc = hvm_msr_write_intercept(reg, val, 1);
+
+    if ( rc == X86EMUL_EXCEPTION )
+        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+
+    return rc;
 }
 
 static int hvmemul_wbinvd(
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6621d629a5ed01f3fc4bdc80f26120dce7e6ee6e..08855c2d8decffc11345c578df4ca12a2969d527 100644
@@ -518,7 +518,10 @@ void hvm_do_resume(struct vcpu *v)
 
         if ( w->do_write.msr )
         {
-            hvm_msr_write_intercept(w->msr, w->value, 0);
+            if ( hvm_msr_write_intercept(w->msr, w->value, 0) ==
+                 X86EMUL_EXCEPTION )
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
             w->do_write.msr = 0;
         }
 
@@ -3455,7 +3458,6 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     return ret;
 
  gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     ret = X86EMUL_EXCEPTION;
     *msr_content = -1ull;
     goto out;
@@ -3600,7 +3602,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
     return ret;
 
 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 79924a5d5f41460a9c7c1b46a1a64401d9b7037d..10149591a4e8d77c05e137d097aa4cb3af5edda4 100644
@@ -1744,7 +1744,6 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     return X86EMUL_OKAY;
 
  gpf:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -1897,7 +1896,6 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     return result;
 
  gpf:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -1924,6 +1922,8 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
 
     if ( rc == X86EMUL_OKAY )
         __update_guest_eip(regs, inst_len);
+    else if ( rc == X86EMUL_EXCEPTION )
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
 }
 
 static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e0c28311001f1df31f0f50b577d786070ee5d010..59fa0aa4af8f2f6da2f0ff0ce3db2934e9670d20 100644
@@ -2673,7 +2673,6 @@ done:
     return X86EMUL_OKAY;
 
 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -2910,7 +2909,6 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     return X86EMUL_OKAY;
 
 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -3603,18 +3601,33 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
     case EXIT_REASON_MSR_READ:
     {
-        uint64_t msr_content;
-        if ( hvm_msr_read_intercept(regs->_ecx, &msr_content) == X86EMUL_OKAY )
+        uint64_t msr_content = 0;
+
+        switch ( hvm_msr_read_intercept(regs->_ecx, &msr_content) )
         {
+        case X86EMUL_OKAY:
             msr_split(regs, msr_content);
             update_guest_eip(); /* Safe: RDMSR */
+            break;
+
+        case X86EMUL_EXCEPTION:
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+            break;
         }
         break;
     }
 
     case EXIT_REASON_MSR_WRITE:
-        if ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) == X86EMUL_OKAY )
+        switch ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) )
+        {
+        case X86EMUL_OKAY:
             update_guest_eip(); /* Safe: WRMSR */
+            break;
+
+        case X86EMUL_EXCEPTION:
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+            break;
+        }
         break;
 
     case EXIT_REASON_VMXOFF:
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 74775dd473c093767b470c1123fbb0cce14a73f0..0aef7a75c13ea4f04b064fcbbd3a7b00a360bd9b 100644
@@ -1032,6 +1032,7 @@ static void load_shadow_guest_state(struct vcpu *v)
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     u32 control;
     u64 cr_gh_mask, cr_read_shadow;
+    int rc;
 
     static const u16 vmentry_fields[] = {
         VM_ENTRY_INTR_INFO,
@@ -1053,8 +1054,12 @@ static void load_shadow_guest_state(struct vcpu *v)
     if ( control & VM_ENTRY_LOAD_GUEST_PAT )
         hvm_set_guest_pat(v, get_vvmcs(v, GUEST_PAT));
     if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
-                                get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+    {
+        rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+                                     get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+        if ( rc == X86EMUL_EXCEPTION )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+    }
 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
@@ -1222,7 +1227,7 @@ static void sync_vvmcs_ro(struct vcpu *v)
 
 static void load_vvmcs_host_state(struct vcpu *v)
 {
-    int i;
+    int i, rc;
     u64 r;
     u32 control;
 
@@ -1240,8 +1245,12 @@ static void load_vvmcs_host_state(struct vcpu *v)
     if ( control & VM_EXIT_LOAD_HOST_PAT )
         hvm_set_guest_pat(v, get_vvmcs(v, HOST_PAT));
     if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
-                                get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+    {
+        rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+                                     get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+        if ( rc == X86EMUL_EXCEPTION )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+    }
 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 262955dc435653d3e406cbc6485f565c0c36a00d..5e256989ab5ae7beee03596a9995928cac8fff6f 100644
@@ -121,13 +121,19 @@ int hvm_set_efer(uint64_t value);
 int hvm_set_cr0(unsigned long value, bool_t may_defer);
 int hvm_set_cr3(unsigned long value, bool_t may_defer);
 int hvm_set_cr4(unsigned long value, bool_t may_defer);
-int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
-int hvm_msr_write_intercept(
-    unsigned int msr, uint64_t msr_content, bool_t may_defer);
 int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
 int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
 void hvm_ud_intercept(struct cpu_user_regs *);
 
+/*
+ * May return X86EMUL_EXCEPTION, at which point the caller is responsible for
+ * injecting a #GP fault.  Used to support speculative reads.
+ */
+int __must_check hvm_msr_read_intercept(
+    unsigned int msr, uint64_t *msr_content);
+int __must_check hvm_msr_write_intercept(
+    unsigned int msr, uint64_t msr_content, bool_t may_defer);
+
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
 
 /*