x86/vmx: Support removing MSRs from the host/guest load/save lists
author     Andrew Cooper <andrew.cooper3@citrix.com>
           Mon, 7 May 2018 10:57:00 +0000 (11:57 +0100)
committer  Andrew Cooper <andrew.cooper3@citrix.com>
           Wed, 4 Jul 2018 11:12:15 +0000 (12:12 +0100)
Up until this point, the MSR load/save lists have only ever accumulated
content.  Introduce vmx_del_msr() as a companion to vmx_add_msr().

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
xen/arch/x86/hvm/vmx/vmcs.c
xen/include/asm-x86/hvm/vmx/vmcs.h

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 68582817017be2b2989ae7fab53363cea040a0ca..cce47f428270ecd84296c8f8de20b8c639488d9d 100644
@@ -1454,6 +1454,74 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
     return rc;
 }
 
+int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
+{
+    struct arch_vmx_struct *vmx = &v->arch.hvm_vmx;
+    struct vmx_msr_entry *start = NULL, *ent, *end;
+    unsigned int substart, subend, total;
+
+    ASSERT(v == current || !vcpu_runnable(v));
+
+    switch ( type )
+    {
+    case VMX_MSR_HOST:
+        start    = vmx->host_msr_area;
+        substart = 0;
+        subend   = vmx->host_msr_count;
+        total    = subend;
+        break;
+
+    case VMX_MSR_GUEST:
+        start    = vmx->msr_area;
+        substart = 0;
+        subend   = vmx->msr_save_count;
+        total    = vmx->msr_load_count;
+        break;
+
+    case VMX_MSR_GUEST_LOADONLY:
+        start    = vmx->msr_area;
+        substart = vmx->msr_save_count;
+        subend   = vmx->msr_load_count;
+        total    = subend;
+        break;
+
+    default:
+        ASSERT_UNREACHABLE();
+    }
+
+    if ( !start )
+        return -ESRCH;
+
+    end = start + total;
+    ent = locate_msr_entry(start + substart, start + subend, msr);
+
+    if ( (ent == end) || (ent->index != msr) )
+        return -ESRCH;
+
+    memmove(ent, ent + 1, sizeof(*ent) * (end - ent - 1));
+
+    vmx_vmcs_enter(v);
+
+    switch ( type )
+    {
+    case VMX_MSR_HOST:
+        __vmwrite(VM_EXIT_MSR_LOAD_COUNT, --vmx->host_msr_count);
+        break;
+
+    case VMX_MSR_GUEST:
+        __vmwrite(VM_EXIT_MSR_STORE_COUNT, --vmx->msr_save_count);
+
+        /* Fallthrough */
+    case VMX_MSR_GUEST_LOADONLY:
+        __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, --vmx->msr_load_count);
+        break;
+    }
+
+    vmx_vmcs_exit(v);
+
+    return 0;
+}
+
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
 {
     if ( !test_and_set_bit(vector, v->arch.hvm_vmx.eoi_exit_bitmap) )
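
For illustration only, and not part of the patch above: a minimal sketch of how a
caller might pair vmx_add_msr() and vmx_del_msr() on the guest load/save list,
treating -ESRCH from deletion as "already absent".  The function name and MSR
index below are hypothetical placeholders, not taken from this change.

    /* Hypothetical example MSR index, not a real architectural MSR. */
    #define MSR_EXAMPLE_INDEX 0x00000123

    /* Illustrative caller: enable/disable load/save handling of the MSR. */
    static int example_toggle_guest_msr(struct vcpu *v, bool enable,
                                        uint64_t val)
    {
        int rc;

        if ( enable )
            /* Insert (or update) the entry on the guest load/save list. */
            rc = vmx_add_msr(v, MSR_EXAMPLE_INDEX, val, VMX_MSR_GUEST);
        else
        {
            /* Remove the entry; an already-absent entry is not an error here. */
            rc = vmx_del_msr(v, MSR_EXAMPLE_INDEX, VMX_MSR_GUEST);
            if ( rc == -ESRCH )
                rc = 0;
        }

        return rc;
    }
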
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 57e5098b99fd8bc689082f8d8e49f18e6d4a3c71..66e86f8786200e2ccc0abf9816eb7ed14f5418b8 100644
@@ -559,6 +559,12 @@ enum vmx_msr_list_type {
 int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
                 enum vmx_msr_list_type type);
 
+/**
+ * Remove an MSR entry from an MSR list.  Returns -ESRCH if the MSR was not
+ * found in the list.
+ */
+int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type);
+
 static inline int vmx_add_guest_msr(struct vcpu *v, uint32_t msr, uint64_t val)
 {
     return vmx_add_msr(v, msr, val, VMX_MSR_GUEST);
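
As a further sketch only, and not part of this change: a caller wanting a
shorthand symmetric to vmx_add_guest_msr() could wrap the new function in an
equivalent inline.  The name vmx_del_guest_msr() below is hypothetical and is
not introduced by this patch.

    /* Hypothetical convenience wrapper, mirroring vmx_add_guest_msr(). */
    static inline int vmx_del_guest_msr(struct vcpu *v, uint32_t msr)
    {
        /* Remove 'msr' from the guest load/save list. */
        return vmx_del_msr(v, msr, VMX_MSR_GUEST);
    }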