vmx: Clean up and fix guest MSR load/save handling:
author: Keir Fraser <keir.fraser@citrix.com>
Thu, 19 Jun 2008 10:09:10 +0000 (11:09 +0100)
committer: Keir Fraser <keir.fraser@citrix.com>
Thu, 19 Jun 2008 10:09:10 +0000 (11:09 +0100)
 1. msr_bitmap/msr_area/msr_host_area must be freed when a vcpu is
    destroyed
 2. vmx_create_vmcs()/vmx_destroy_vmcs() are only ever called once,
    and can hence be simplified slightly
 3. Change vmx_*_msr() interfaces to make it crystal clear that they
    operate only on current (hence safe against vmwrite() and also
    against concurrency races).
 4. Change vmx_add_*_msr() implementation to make it crystal clear
    that msr_area is not dereferenced before it is allocated.

Only (1) is a bug fix. (2)-(4) are for code clarity.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/hvm/vmx/vmcs.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/hvm/vmx/vpmu_core2.c
xen/include/asm-x86/hvm/vmx/vmcs.h

index 7056251c9e05cef6f13c8f71c217c1fb81128ff9..ff5ca4e7d486ac5f2f19f87698216b9e97079900 100644 (file)
@@ -677,10 +677,11 @@ static int construct_vmcs(struct vcpu *v)
     return 0;
 }
 
-int vmx_read_guest_msr(struct vcpu *v, u32 msr, u64 *val)
+int vmx_read_guest_msr(u32 msr, u64 *val)
 {
-    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
-    const struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+    const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
 
     for ( i = 0; i < msr_count; i++ )
     {
@@ -694,10 +695,11 @@ int vmx_read_guest_msr(struct vcpu *v, u32 msr, u64 *val)
     return -ESRCH;
 }
 
-int vmx_write_guest_msr(struct vcpu *v, u32 msr, u64 val)
+int vmx_write_guest_msr(u32 msr, u64 val)
 {
-    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
-    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
 
     for ( i = 0; i < msr_count; i++ )
     {
@@ -711,61 +713,63 @@ int vmx_write_guest_msr(struct vcpu *v, u32 msr, u64 val)
     return -ESRCH;
 }
 
-int vmx_add_guest_msr(struct vcpu *v, u32 msr)
+int vmx_add_guest_msr(u32 msr)
 {
-    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
-    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
-
-    for ( i = 0; i < msr_count; i++ )
-        if ( msr_area[i].index == msr )
-            return 0;
-
-    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
-        return -ENOSPC;
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
 
     if ( msr_area == NULL )
     {
         if ( (msr_area = alloc_xenheap_page()) == NULL )
             return -ENOMEM;
-        v->arch.hvm_vmx.msr_area = msr_area;
+        curr->arch.hvm_vmx.msr_area = msr_area;
         __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
         __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
     }
 
+    for ( i = 0; i < msr_count; i++ )
+        if ( msr_area[i].index == msr )
+            return 0;
+
+    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
+        return -ENOSPC;
+
     msr_area[msr_count].index = msr;
     msr_area[msr_count].mbz   = 0;
     msr_area[msr_count].data  = 0;
-    v->arch.hvm_vmx.msr_count = ++msr_count;
+    curr->arch.hvm_vmx.msr_count = ++msr_count;
     __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
     __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
 
     return 0;
 }
 
-int vmx_add_host_load_msr(struct vcpu *v, u32 msr)
+int vmx_add_host_load_msr(u32 msr)
 {
-    unsigned int i, msr_count = v->arch.hvm_vmx.host_msr_count;
-    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.host_msr_area;
-
-    for ( i = 0; i < msr_count; i++ )
-        if ( msr_area[i].index == msr )
-            return 0;
-
-    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
-        return -ENOSPC;
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
 
     if ( msr_area == NULL )
     {
         if ( (msr_area = alloc_xenheap_page()) == NULL )
             return -ENOMEM;
-        v->arch.hvm_vmx.host_msr_area = msr_area;
+        curr->arch.hvm_vmx.host_msr_area = msr_area;
         __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
     }
 
+    for ( i = 0; i < msr_count; i++ )
+        if ( msr_area[i].index == msr )
+            return 0;
+
+    if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
+        return -ENOSPC;
+
     msr_area[msr_count].index = msr;
     msr_area[msr_count].mbz   = 0;
     rdmsrl(msr, msr_area[msr_count].data);
-    v->arch.hvm_vmx.host_msr_count = ++msr_count;
+    curr->arch.hvm_vmx.host_msr_count = ++msr_count;
     __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
 
     return 0;
@@ -776,21 +780,17 @@ int vmx_create_vmcs(struct vcpu *v)
     struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
     int rc;
 
-    if ( arch_vmx->vmcs == NULL )
-    {
-        if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
-            return -ENOMEM;
+    if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
+        return -ENOMEM;
 
-        INIT_LIST_HEAD(&arch_vmx->active_list);
-        __vmpclear(virt_to_maddr(arch_vmx->vmcs));
-        arch_vmx->active_cpu = -1;
-        arch_vmx->launched   = 0;
-    }
+    INIT_LIST_HEAD(&arch_vmx->active_list);
+    __vmpclear(virt_to_maddr(arch_vmx->vmcs));
+    arch_vmx->active_cpu = -1;
+    arch_vmx->launched   = 0;
 
     if ( (rc = construct_vmcs(v)) != 0 )
     {
         vmx_free_vmcs(arch_vmx->vmcs);
-        arch_vmx->vmcs = NULL;
         return rc;
     }
 
@@ -801,13 +801,13 @@ void vmx_destroy_vmcs(struct vcpu *v)
 {
     struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
 
-    if ( arch_vmx->vmcs == NULL )
-        return;
-
     vmx_clear_vmcs(v);
 
     vmx_free_vmcs(arch_vmx->vmcs);
-    arch_vmx->vmcs = NULL;
+
+    free_xenheap_page(v->arch.hvm_vmx.host_msr_area);
+    free_xenheap_page(v->arch.hvm_vmx.msr_area);
+    free_xenheap_page(v->arch.hvm_vmx.msr_bitmap);
 }
 
 void vm_launch_fail(void)
index 16dfe29e10de0f7331fcfbecced4b5604b71f3dd..5f048ab5501c56a00bf34553752803ff3a1fd321 100644 (file)
@@ -1655,7 +1655,7 @@ static int vmx_msr_read_intercept(struct cpu_user_regs *regs)
                 goto done;
         }
 
-        if ( vmx_read_guest_msr(v, ecx, &msr_content) == 0 )
+        if ( vmx_read_guest_msr(ecx, &msr_content) == 0 )
             break;
 
         if ( is_last_branch_msr(ecx) )
@@ -1817,12 +1817,12 @@ static int vmx_msr_write_intercept(struct cpu_user_regs *regs)
 
             for ( ; (rc == 0) && lbr->count; lbr++ )
                 for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
-                    if ( (rc = vmx_add_guest_msr(v, lbr->base + i)) == 0 )
+                    if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
                         vmx_disable_intercept_for_msr(v, lbr->base + i);
         }
 
         if ( (rc < 0) ||
-             (vmx_add_host_load_msr(v, ecx) < 0) )
+             (vmx_add_host_load_msr(ecx) < 0) )
             vmx_inject_hw_exception(v, TRAP_machine_check, 0);
         else
         {
@@ -1842,7 +1842,7 @@ static int vmx_msr_write_intercept(struct cpu_user_regs *regs)
         switch ( long_mode_do_msr_write(regs) )
         {
             case HNDL_unhandled:
-                if ( (vmx_write_guest_msr(v, ecx, msr_content) != 0) &&
+                if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&
                      !is_last_branch_msr(ecx) )
                     wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
                 break;
index 55398747745d2a83a5e9cef93e8195d0d0f0c542..5062d9c28d462c7513f1594ffb3dd948099e2b08 100644 (file)
@@ -219,12 +219,12 @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
         return 0;
 
     wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
-    if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
+    if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
         return 0;
 
-    if ( vmx_add_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
+    if ( vmx_add_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
         return 0;
-    vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, -1ULL);
+    vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, -1ULL);
 
     pmu_enable = xmalloc_bytes(sizeof(struct core2_pmu_enable) +
                  (core2_get_pmc_count()-1)*sizeof(char));
@@ -347,7 +347,7 @@ static int core2_vpmu_do_wrmsr(struct cpu_user_regs *regs)
         break;
     case MSR_CORE_PERF_FIXED_CTR_CTRL:
         non_global_ctrl = msr_content;
-        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
         global_ctrl >>= 32;
         for ( i = 0; i < 3; i++ )
         {
@@ -359,7 +359,7 @@ static int core2_vpmu_do_wrmsr(struct cpu_user_regs *regs)
         break;
     default:
         tmp = ecx - MSR_P6_EVNTSEL0;
-        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
         if ( tmp >= 0 && tmp < core2_get_pmc_count() )
             core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
                 (global_ctrl >> tmp) & (msr_content >> 22) & 1;
@@ -385,7 +385,7 @@ static int core2_vpmu_do_wrmsr(struct cpu_user_regs *regs)
     if ( type != MSR_TYPE_GLOBAL )
         wrmsrl(ecx, msr_content);
     else
-        vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+        vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
 
     return 1;
 }
@@ -410,7 +410,7 @@ static int core2_vpmu_do_rdmsr(struct cpu_user_regs *regs)
         msr_content = core2_vpmu_cxt->global_ovf_status;
         break;
     case MSR_CORE_PERF_GLOBAL_CTRL:
-        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
+        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
         break;
     default:
         rdmsrl(regs->ecx, msr_content);
index 4ac9b43e4f63863f2567b8fe55b73149e6be9882..9d900996cbb9430963e972e110913b08b89efbfd 100644 (file)
@@ -333,10 +333,10 @@ enum vmcs_field {
 #define VMCS_VPID_WIDTH 16
 
 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr);
-int vmx_read_guest_msr(struct vcpu *v, u32 msr, u64 *val);
-int vmx_write_guest_msr(struct vcpu *v, u32 msr, u64 val);
-int vmx_add_guest_msr(struct vcpu *v, u32 msr);
-int vmx_add_host_load_msr(struct vcpu *v, u32 msr);
+int vmx_read_guest_msr(u32 msr, u64 *val);
+int vmx_write_guest_msr(u32 msr, u64 val);
+int vmx_add_guest_msr(u32 msr);
+int vmx_add_host_load_msr(u32 msr);
 
 #endif /* ASM_X86_HVM_VMX_VMCS_H__ */