x86/vmx: optimize vmx_read/write_guest_msr()
author Sergey Dyasli <sergey.dyasli@citrix.com>
Thu, 23 Feb 2017 09:33:26 +0000 (09:33 +0000)
committer Andrew Cooper <andrew.cooper3@citrix.com>
Mon, 27 Feb 2017 10:22:20 +0000 (10:22 +0000)
Replace the linear scan with vmx_find_msr().  This reduces the time
complexity of searching for the required MSR from linear to logarithmic.
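For illustration, a minimal sketch of the kind of logarithmic lookup
vmx_find_msr() performs over an MSR save/load area kept sorted by MSR
index.  The standalone helper find_msr_entry(), the explicit array/count
parameters and the simplified struct layout below are assumptions for the
sketch, not the actual Xen interface.

/* Illustrative sketch only: binary search over an MSR area sorted by
 * MSR index, giving O(log n) lookup instead of an O(n) scan.  Struct
 * layout and helper are simplified assumptions, not the Xen code. */
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vmx_msr_entry {
    uint32_t index;     /* MSR number */
    uint32_t mbz;       /* reserved, must be zero */
    uint64_t data;      /* MSR value */
};

/* Hypothetical helper: binary search a sorted MSR area for 'msr'. */
static struct vmx_msr_entry *find_msr_entry(struct vmx_msr_entry *area,
                                            unsigned int count, uint32_t msr)
{
    unsigned int lo = 0, hi = count;

    while ( lo < hi )
    {
        unsigned int mid = lo + (hi - lo) / 2;

        if ( area[mid].index == msr )
            return &area[mid];
        if ( area[mid].index < msr )
            lo = mid + 1;
        else
            hi = mid;
    }

    return NULL;
}

int main(void)
{
    /* Example area, pre-sorted by MSR index. */
    struct vmx_msr_entry area[] = {
        { .index = 0x010, .data = 0x1 },   /* IA32_TIME_STAMP_COUNTER */
        { .index = 0x1d9, .data = 0x2 },   /* IA32_DEBUGCTL */
        { .index = 0x277, .data = 0x3 },   /* IA32_PAT */
    };
    struct vmx_msr_entry *ent = find_msr_entry(area, 3, 0x1d9);

    printf("found: %d, data: %#" PRIx64 "\n",
           ent != NULL, ent ? ent->data : 0);
    return 0;
}

The callers then reduce to a single lookup plus a NULL check, exactly as
in the hunks below.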

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/hvm/vmx/vmcs.c

index ba78d8ad0acb88d830340ad777de6b326186649a..03e68ad2a2392866fbda0d9d90ddf7a21ecdaee9 100644
@@ -1358,17 +1358,12 @@ struct vmx_msr_entry *vmx_find_msr(u32 msr, int type)
 
 int vmx_read_guest_msr(u32 msr, u64 *val)
 {
-    struct vcpu *curr = current;
-    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
-    const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+    struct vmx_msr_entry *ent;
 
-    for ( i = 0; i < msr_count; i++ )
+    if ( (ent = vmx_find_msr(msr, VMX_GUEST_MSR)) != NULL )
     {
-        if ( msr_area[i].index == msr )
-        {
-            *val = msr_area[i].data;
-            return 0;
-        }
+        *val = ent->data;
+        return 0;
     }
 
     return -ESRCH;
@@ -1376,17 +1371,12 @@ int vmx_read_guest_msr(u32 msr, u64 *val)
 
 int vmx_write_guest_msr(u32 msr, u64 val)
 {
-    struct vcpu *curr = current;
-    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
-    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+    struct vmx_msr_entry *ent;
 
-    for ( i = 0; i < msr_count; i++ )
+    if ( (ent = vmx_find_msr(msr, VMX_GUEST_MSR)) != NULL )
     {
-        if ( msr_area[i].index == msr )
-        {
-            msr_area[i].data = val;
-            return 0;
-        }
+        ent->data = val;
+        return 0;
     }
 
     return -ESRCH;