vmx/hvm: move mov-cr handling functions to generic HVM code
authorKeir Fraser <keir@xen.org>
Mon, 18 Apr 2011 08:47:12 +0000 (09:47 +0100)
committerKeir Fraser <keir@xen.org>
Mon, 18 Apr 2011 08:47:12 +0000 (09:47 +0100)
Currently, CR access intercepts are handled very differently in SVM and
VMX. To allow future reuse, move the VMX implementation into the generic
HVM path and use the exported functions.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Keir Fraser <keir@xen.org>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/traps.c
xen/include/asm-x86/hvm/support.h
xen/include/asm-x86/hvm/vmx/vmx.h
xen/include/asm-x86/processor.h

index edeffe06cf901f1e4dda30ee278f81bf06eea5f1..085230322b02893f00b94abd2112678582d3fc97 100644 (file)
@@ -1409,6 +1409,108 @@ static void hvm_set_uc_mode(struct vcpu *v, bool_t is_in_uc_mode)
         return hvm_funcs.set_uc_mode(v);
 }
 
+/*
+ * Handle a guest "MOV to CRn": load the value of GPR 'gpr' into control
+ * register 'cr'.  Returns an X86EMUL_* status; crashes the domain on
+ * malformed input.
+ */
+int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
+{
+    struct vcpu *curr = current;
+    unsigned long val, old, *reg;
+    int rc;
+
+    if ( (reg = get_x86_gpr(guest_cpu_user_regs(), gpr)) == NULL )
+    {
+        gdprintk(XENLOG_ERR, "invalid gpr: %u\n", gpr);
+        goto exit_and_crash;
+    }
+
+    val = *reg;
+    HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(val));
+    HVM_DBG_LOG(DBG_LEVEL_1, "CR%u, value = %lx", cr, val);
+
+    switch ( cr )
+    {
+    case 0:
+        /* Preserve the mem_event notifications the VMX mov_to_cr code made. */
+        old = curr->arch.hvm_vcpu.guest_cr[0];
+        rc = hvm_set_cr0(val);
+        if ( rc == X86EMUL_OKAY )
+            hvm_memory_event_cr0(val, old);
+        return rc;
+
+    case 3:
+        old = curr->arch.hvm_vcpu.guest_cr[3];
+        rc = hvm_set_cr3(val);
+        if ( rc == X86EMUL_OKAY )
+            hvm_memory_event_cr3(val, old);
+        return rc;
+
+    case 4:
+        old = curr->arch.hvm_vcpu.guest_cr[4];
+        rc = hvm_set_cr4(val);
+        if ( rc == X86EMUL_OKAY )
+            hvm_memory_event_cr4(val, old);
+        return rc;
+
+    case 8:
+        vlapic_set_reg(vcpu_vlapic(curr), APIC_TASKPRI, ((val & 0x0f) << 4));
+        break;
+
+    default:
+        gdprintk(XENLOG_ERR, "invalid cr: %u\n", cr);
+        goto exit_and_crash;
+    }
+
+    return X86EMUL_OKAY;
+
+ exit_and_crash:
+    domain_crash(curr->domain);
+    return X86EMUL_UNHANDLEABLE;
+}
+
+/*
+ * Handle a guest "MOV from CRn": copy control register 'cr' into GPR 'gpr'.
+ * Returns an X86EMUL_* status; crashes the domain on malformed input.
+ */
+int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
+{
+    struct vcpu *curr = current;
+    unsigned long val = 0, *reg;
+
+    if ( (reg = get_x86_gpr(guest_cpu_user_regs(), gpr)) == NULL )
+    {
+        gdprintk(XENLOG_ERR, "invalid gpr: %u\n", gpr);
+        goto exit_and_crash;
+    }
+
+    switch ( cr )
+    {
+    case 0:
+    case 2:
+    case 3:
+    case 4:
+        val = curr->arch.hvm_vcpu.guest_cr[cr];
+        break;
+    case 8:
+        val = (vlapic_get_reg(vcpu_vlapic(curr), APIC_TASKPRI) & 0xf0) >> 4;
+        break;
+    default:
+        gdprintk(XENLOG_ERR, "invalid cr: %u\n", cr);
+        goto exit_and_crash;
+    }
+
+    *reg = val;
+    HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(val));
+    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%u, value = %lx", cr, val);
+
+    return X86EMUL_OKAY;
+
+ exit_and_crash:
+    domain_crash(curr->domain);
+    return X86EMUL_UNHANDLEABLE;
+}
+
 int hvm_set_cr0(unsigned long value)
 {
     struct vcpu *v = current;
index 2f4c74e2b4c59502ed2f31f255f2d0b04da8a97e..0d291efe4b511c0d2bb2eea26c606c4acaf1eeb4 100644 (file)
@@ -1554,182 +1554,42 @@ static void vmx_invlpg_intercept(unsigned long vaddr)
         vpid_sync_vcpu_gva(curr, vaddr);
 }
 
-#define CASE_SET_REG(REG, reg)      \
-    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: regs->reg = value; break
-#define CASE_GET_REG(REG, reg)      \
-    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: value = regs->reg; break
-
-#define CASE_EXTEND_SET_REG         \
-    CASE_EXTEND_REG(S)
-#define CASE_EXTEND_GET_REG         \
-    CASE_EXTEND_REG(G)
-
-#ifdef __i386__
-#define CASE_EXTEND_REG(T)
-#else
-#define CASE_EXTEND_REG(T)          \
-    CASE_ ## T ## ET_REG(R8, r8);   \
-    CASE_ ## T ## ET_REG(R9, r9);   \
-    CASE_ ## T ## ET_REG(R10, r10); \
-    CASE_ ## T ## ET_REG(R11, r11); \
-    CASE_ ## T ## ET_REG(R12, r12); \
-    CASE_ ## T ## ET_REG(R13, r13); \
-    CASE_ ## T ## ET_REG(R14, r14); \
-    CASE_ ## T ## ET_REG(R15, r15)
-#endif
-
-static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
+static int vmx_cr_access(unsigned long exit_qualification)
 {
-    unsigned long value;
-    struct vcpu *v = current;
-    struct vlapic *vlapic = vcpu_vlapic(v);
-    int rc = 0;
-    unsigned long old;
-
-    switch ( gp )
-    {
-    CASE_GET_REG(EAX, eax);
-    CASE_GET_REG(ECX, ecx);
-    CASE_GET_REG(EDX, edx);
-    CASE_GET_REG(EBX, ebx);
-    CASE_GET_REG(EBP, ebp);
-    CASE_GET_REG(ESI, esi);
-    CASE_GET_REG(EDI, edi);
-    CASE_GET_REG(ESP, esp);
-    CASE_EXTEND_GET_REG;
-    default:
-        gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
-        goto exit_and_crash;
-    }
-
-    HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(value));
-
-    HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
-
-    switch ( cr )
-    {
-    case 0:
-        old = v->arch.hvm_vcpu.guest_cr[0];
-        rc = !hvm_set_cr0(value);
-        if (rc)
-            hvm_memory_event_cr0(value, old);
-        return rc;
-
-    case 3:
-        old = v->arch.hvm_vcpu.guest_cr[3];
-        rc = !hvm_set_cr3(value);
-        if (rc)
-            hvm_memory_event_cr3(value, old);        
-        return rc;
-
-    case 4:
-        old = v->arch.hvm_vcpu.guest_cr[4];
-        rc = !hvm_set_cr4(value);
-        if (rc)
-            hvm_memory_event_cr4(value, old);
-        return rc; 
-
-    case 8:
-        vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
-        break;
-
-    default:
-        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
-        goto exit_and_crash;
-    }
-
-    return 1;
-
- exit_and_crash:
-    domain_crash(v->domain);
-    return 0;
-}
-
-/*
- * Read from control registers. CR0 and CR4 are read from the shadow.
- */
-static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
-{
-    unsigned long value = 0;
-    struct vcpu *v = current;
-    struct vlapic *vlapic = vcpu_vlapic(v);
+    struct vcpu *curr = current;
 
-    switch ( cr )
+    switch ( VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification) )
     {
-    case 3:
-        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
-        break;
-    case 8:
-        value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
-        value = (value & 0xF0) >> 4;
-        break;
-    default:
-        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
-        domain_crash(v->domain);
-        break;
+    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR: {
+        unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
+        unsigned long cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
+        return hvm_mov_to_cr(cr, gp);
     }
-
-    switch ( gp ) {
-    CASE_SET_REG(EAX, eax);
-    CASE_SET_REG(ECX, ecx);
-    CASE_SET_REG(EDX, edx);
-    CASE_SET_REG(EBX, ebx);
-    CASE_SET_REG(EBP, ebp);
-    CASE_SET_REG(ESI, esi);
-    CASE_SET_REG(EDI, edi);
-    CASE_SET_REG(ESP, esp);
-    CASE_EXTEND_SET_REG;
-    default:
-        printk("invalid gp: %d\n", gp);
-        domain_crash(v->domain);
-        break;
+    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR: {
+        unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
+        unsigned long cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
+        return hvm_mov_from_cr(cr, gp);
     }
-
-    HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(value));
-
-    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
-}
-
-static int vmx_cr_access(unsigned long exit_qualification,
-                         struct cpu_user_regs *regs)
-{
-    unsigned int gp, cr;
-    unsigned long value;
-    struct vcpu *v = current;
-
-    switch ( exit_qualification & VMX_CONTROL_REG_ACCESS_TYPE )
-    {
-    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
-        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
-        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
-        return mov_to_cr(gp, cr, regs);
-    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR:
-        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
-        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
-        mov_from_cr(cr, gp, regs);
-        break;
-    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: 
-    {
-        unsigned long old = v->arch.hvm_vcpu.guest_cr[0];
-        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
-        vmx_update_guest_cr(v, 0);
-
-        hvm_memory_event_cr0(v->arch.hvm_vcpu.guest_cr[0], old);
-
+    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: {
+        unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
+        curr->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
+        vmx_update_guest_cr(curr, 0);
+        hvm_memory_event_cr0(curr->arch.hvm_vcpu.guest_cr[0], old);
         HVMTRACE_0D(CLTS);
         break;
     }
-    case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
-        value = v->arch.hvm_vcpu.guest_cr[0];
+    case VMX_CONTROL_REG_ACCESS_TYPE_LMSW: {
+        unsigned long value = curr->arch.hvm_vcpu.guest_cr[0];
         /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
         value = (value & ~0xe) | ((exit_qualification >> 16) & 0xf);
         HVMTRACE_LONG_1D(LMSW, value);
-        return !hvm_set_cr0(value);
+        return hvm_set_cr0(value);
+    }
     default:
         BUG();
     }
 
-    return 1;
+    return X86EMUL_OKAY;
 }
 
 static const struct lbr_info {
@@ -2534,7 +2394,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
     case EXIT_REASON_CR_ACCESS:
     {
         exit_qualification = __vmread(EXIT_QUALIFICATION);
-        if ( vmx_cr_access(exit_qualification, regs) )
+        if ( vmx_cr_access(exit_qualification) == X86EMUL_OKAY )
             update_guest_eip(); /* Safe: MOV Cn, LMSW, CLTS */
         break;
     }
index 0fcc5b9a9b3d239ca180ce36bae7912f3ec8e437..129dc999ec33783ab8ddd3c6a20ee522a4d14ebb 100644 (file)
@@ -368,6 +368,36 @@ void vcpu_show_execution_state(struct vcpu *v)
     vcpu_unpause(v);
 }
 
+unsigned long *get_x86_gpr(struct cpu_user_regs *regs, unsigned int modrm_reg)
+{
+    void *p;
+
+    switch ( modrm_reg )
+    {
+    case  0: p = &regs->eax; break;
+    case  1: p = &regs->ecx; break;
+    case  2: p = &regs->edx; break;
+    case  3: p = &regs->ebx; break;
+    case  4: p = &regs->esp; break;
+    case  5: p = &regs->ebp; break;
+    case  6: p = &regs->esi; break;
+    case  7: p = &regs->edi; break;
+#if defined(__x86_64__)
+    case  8: p = &regs->r8;  break;
+    case  9: p = &regs->r9;  break;
+    case 10: p = &regs->r10; break;
+    case 11: p = &regs->r11; break;
+    case 12: p = &regs->r12; break;
+    case 13: p = &regs->r13; break;
+    case 14: p = &regs->r14; break;
+    case 15: p = &regs->r15; break;
+#endif
+    default: p = NULL; break;
+    }
+
+    return p;
+}
+
 static char *trapstr(int trapnr)
 {
     static char *strings[] = { 
index 92e96e30fb2124c3ecafbdd409e87410e8e10c43..d9a4e3cdca86c76ec8be8211c8be2c639f14898d 100644 (file)
@@ -137,5 +137,7 @@ int hvm_set_cr3(unsigned long value);
 int hvm_set_cr4(unsigned long value);
 int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
 int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);
+int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
+int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
 
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
index 23406fa230acf88608f1cc1251f64971ec3709d4..30114a941e668064ab4538ee14a321d5df807242 100644 (file)
@@ -144,31 +144,15 @@ void vmx_update_cpu_exec_control(struct vcpu *v);
  * Exit Qualifications for MOV for Control Register Access
  */
  /* 3:0 - control register number (CRn) */
-#define VMX_CONTROL_REG_ACCESS_NUM      0xf
+#define VMX_CONTROL_REG_ACCESS_NUM(eq)  ((eq) & 0xf)
  /* 5:4 - access type (CR write, CR read, CLTS, LMSW) */
-#define VMX_CONTROL_REG_ACCESS_TYPE     0x30
+#define VMX_CONTROL_REG_ACCESS_TYPE(eq) (((eq) >> 4) & 0x3)
+# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR   0
+# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR 1
+# define VMX_CONTROL_REG_ACCESS_TYPE_CLTS        2
+# define VMX_CONTROL_REG_ACCESS_TYPE_LMSW        3
  /* 10:8 - general purpose register operand */
-#define VMX_CONTROL_REG_ACCESS_GPR      0xf00
-#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR   (0 << 4)
-#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR (1 << 4)
-#define VMX_CONTROL_REG_ACCESS_TYPE_CLTS        (2 << 4)
-#define VMX_CONTROL_REG_ACCESS_TYPE_LMSW        (3 << 4)
-#define VMX_CONTROL_REG_ACCESS_GPR_EAX  (0 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ECX  (1 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EDX  (2 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EBX  (3 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ESP  (4 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EBP  (5 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ESI  (6 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EDI  (7 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R8   (8 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R9   (9 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R10  (10 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R11  (11 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R12  (12 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R13  (13 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R14  (14 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R15  (15 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR(eq)  (((eq) >> 8) & 0xf)
 
 /*
  * Access Rights
index 183815ea14abc4c06f283d076c0a5dec09228ab4..f9fbf39d628ffda3a62f927cba59faf131a3b4d3 100644 (file)
@@ -589,6 +589,8 @@ int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val);
 int microcode_update(XEN_GUEST_HANDLE(const_void), unsigned long len);
 int microcode_resume_cpu(int cpu);
 
+unsigned long *get_x86_gpr(struct cpu_user_regs *regs, unsigned int modrm_reg);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_X86_PROCESSOR_H */