hvm: Share ASID logic between VMX and SVM.
authorKeir Fraser <keir.fraser@citrix.com>
Tue, 8 Dec 2009 14:14:27 +0000 (14:14 +0000)
committerKeir Fraser <keir.fraser@citrix.com>
Tue, 8 Dec 2009 14:14:27 +0000 (14:14 +0000)
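Move tagged-TLB (ASID/VPID) management into the common allocator in
xen/arch/x86/hvm/asid.c instead of keeping separate SVM and VMX code paths.
hvm_asid_invalidate_asid() is renamed to hvm_asid_flush_vcpu(), the
per-vendor flush_guest_tlbs() hooks are dropped in favour of calling
hvm_asid_flush_core() directly, and VMX no longer allocates a static
per-vcpu VPID from a bitmap at vcpu-creation time.  Instead, vmx_cpu_up()
initialises the allocator with 1u << VMCS_VPID_WIDTH ASIDs (or 0 if VPID is
unavailable), and the new vmx_vmenter_helper() (which replaces
vmx_trace_vmentry in the entry path) obtains a VPID from
hvm_asid_handle_vmenter() on each VMENTER, updates VIRTUAL_PROCESSOR_ID and
SECONDARY_EXEC_ENABLE_VPID accordingly, and issues vpid_sync_all() when the
allocator requests a flush.

For reference, below is a rough sketch of the generation-based scheme the
shared asid.c implements.  It is only an illustration: the asid_core and
asid_vcpu structures, the asid_*() helper names, and the standalone C types
are stand-ins for the real per-CPU data and hvm_asid_*() functions, and the
bodies are simplified reconstructions rather than the code in the tree.

    #include <stdbool.h>
    #include <stdint.h>

    /* Per-vCPU state, mirroring v->arch.hvm_vcpu.{asid,asid_generation}. */
    struct asid_vcpu {
        uint64_t generation;
        uint32_t asid;
    };

    /* Per-physical-CPU allocator state, set up by hvm_asid_init(nasids). */
    struct asid_core {
        uint64_t generation;   /* bumped whenever this core's TLB is flushed */
        uint32_t next_asid;    /* next ASID/VPID to hand out; 0 => disabled  */
        uint32_t max_asid;
    };

    /* hvm_asid_flush_vcpu(): force a fresh ASID at the next VMENTER. */
    static void asid_flush_vcpu(struct asid_vcpu *v)
    {
        v->generation = 0;
    }

    /* hvm_asid_flush_core(): invalidate every ASID handed out on this core. */
    static void asid_flush_core(struct asid_core *c)
    {
        c->generation++;
        c->next_asid = 1;      /* ASID/VPID 0 stays reserved for the host */
    }

    /*
     * hvm_asid_handle_vmenter(): run on every VMENTER (see the new
     * vmx_vmenter_helper() below).  Returns true when the caller must
     * flush the tagged TLB because a new generation has just started.
     */
    static bool asid_handle_vmenter(struct asid_core *c, struct asid_vcpu *v)
    {
        if ( c->next_asid == 0 )           /* ASIDs/VPIDs not in use */
        {
            v->asid = 0;
            return false;
        }

        if ( v->generation == c->generation )
            return false;                  /* current ASID still valid */

        if ( c->next_asid > c->max_asid )
            asid_flush_core(c);            /* space exhausted: new generation */

        v->asid = c->next_asid++;
        v->generation = c->generation;

        return v->asid == 1;               /* first ASID of a generation */
    }
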
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
12 files changed:
xen/arch/x86/hvm/asid.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/svm/vmcb.c
xen/arch/x86/hvm/vmx/entry.S
xen/arch/x86/hvm/vmx/vmcs.c
xen/arch/x86/hvm/vmx/vmx.c
xen/include/asm-x86/hvm/asid.h
xen/include/asm-x86/hvm/hvm.h
xen/include/asm-x86/hvm/svm/asid.h
xen/include/asm-x86/hvm/vmx/vmcs.h
xen/include/asm-x86/hvm/vmx/vmx.h

diff --git a/xen/arch/x86/hvm/asid.c b/xen/arch/x86/hvm/asid.c
index 85754d18bbddc775708cdc1d96747247b8b37351..69f3f577f822c8db59e01269c1da4361453611d6 100644
@@ -20,7 +20,9 @@
 #include <xen/config.h>
 #include <xen/init.h>
 #include <xen/lib.h>
-#include <xen/perfc.h>
+#include <xen/sched.h>
+#include <xen/smp.h>
+#include <xen/percpu.h>
 #include <asm/hvm/asid.h>
 
 /*
@@ -80,7 +82,7 @@ void hvm_asid_init(int nasids)
     data->next_asid = 1;
 }
 
-void hvm_asid_invalidate_asid(struct vcpu *v)
+void hvm_asid_flush_vcpu(struct vcpu *v)
 {
     v->arch.hvm_vcpu.asid_generation = 0;
 }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 42121fec8073ecda65ef99568affc34bb971ffe2..d2e8a7162ec4832d0e57dac3b4e28e59e97f90d1 100644
@@ -756,6 +756,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
 
+    hvm_asid_flush_vcpu(v);
+
     if ( cpu_has_xsave )
     {
         /* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 7695f3b831f259dacdfcd3de5869b12ff292c26b..c297b0b509c9e353becc61391fff682b22a1a84d 100644
@@ -424,7 +424,7 @@ static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
         break;
     case 3:
         vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
-        hvm_asid_invalidate_asid(v);
+        hvm_asid_flush_vcpu(v);
         break;
     case 4:
         vmcb->cr4 = HVM_CR4_HOST_MASK;
@@ -455,14 +455,6 @@ static void svm_update_guest_efer(struct vcpu *v)
     svm_intercept_msr(v, MSR_IA32_SYSENTER_EIP, lma);
 }
 
-static void svm_flush_guest_tlbs(void)
-{
-    /* Roll over the CPU's ASID generation, so it gets a clean TLB when we
-     * next VMRUN.  (If ASIDs are disabled, the whole TLB is flushed on
-     * VMRUN anyway). */
-    hvm_asid_flush_core();
-}
-
 static void svm_sync_vmcb(struct vcpu *v)
 {
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
@@ -704,7 +696,7 @@ static void svm_do_resume(struct vcpu *v)
         hvm_migrate_timers(v);
 
         /* Migrating to another ASID domain.  Request a new ASID. */
-        hvm_asid_invalidate_asid(v);
+        hvm_asid_flush_vcpu(v);
     }
 
     /* Reflect the vlapic's TPR in the hardware vtpr */
@@ -1250,7 +1242,6 @@ static struct hvm_function_table __read_mostly svm_function_table = {
     .update_host_cr3      = svm_update_host_cr3,
     .update_guest_cr      = svm_update_guest_cr,
     .update_guest_efer    = svm_update_guest_efer,
-    .flush_guest_tlbs     = svm_flush_guest_tlbs,
     .set_tsc_offset       = svm_set_tsc_offset,
     .inject_exception     = svm_inject_exception,
     .init_hypercall_page  = svm_init_hypercall_page,
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 71302cc194abc2694a4a0d5462d4a0a63cf022ad..9218028221f29720dad2498cc2994abc38041863 100644
@@ -114,9 +114,6 @@ static int construct_vmcb(struct vcpu *v)
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
     struct vmcb_struct *vmcb = arch_svm->vmcb;
 
-    /* TLB control, and ASID assigment. */
-    hvm_asid_invalidate_asid(v);
-
     vmcb->general1_intercepts = 
         GENERAL1_INTERCEPT_INTR        | GENERAL1_INTERCEPT_NMI         |
         GENERAL1_INTERCEPT_SMI         | GENERAL1_INTERCEPT_INIT        |
diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
index 8720efcceecba993c7416285e5d08e0d9dc5c9fa..9fb7ecb97cee0e6204efcc7fc12ef1cc79da5895 100644
@@ -142,9 +142,9 @@ vmx_asm_do_vmentry:
         call_with_regs(vmx_enter_realmode) 
 
 .Lvmx_not_realmode:
+        call vmx_vmenter_helper
         mov  VCPU_hvm_guest_cr2(r(bx)),r(ax)
         mov  r(ax),%cr2
-        call vmx_trace_vmentry
 
         lea  UREGS_rip(r(sp)),r(di)
         mov  $GUEST_RIP,%eax
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index bcbf7ccb65b77a264eb3b635f62849e30ea840ef..8c974b79caaafa41e8f1db8cf4b2d6360a3df844 100644
@@ -400,9 +400,12 @@ int vmx_cpu_up(void)
         BUG();
     }
 
+    hvm_asid_init(cpu_has_vmx_vpid ? (1u << VMCS_VPID_WIDTH) : 0);
+
     ept_sync_all();
 
-    vpid_sync_all();
+    if ( cpu_has_vmx_vpid )
+        vpid_sync_all();
 
     return 1;
 }
@@ -559,6 +562,9 @@ static int construct_vmcs(struct vcpu *v)
 
     v->arch.hvm_vmx.secondary_exec_control = vmx_secondary_exec_control;
 
+    /* Disable VPID for now: we decide when to enable it on VMENTER. */
+    v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
+
     if ( paging_mode_hap(d) )
     {
         v->arch.hvm_vmx.exec_control &= ~(CPU_BASED_INVLPG_EXITING |
@@ -736,7 +742,7 @@ static int construct_vmcs(struct vcpu *v)
     }
 
     if ( cpu_has_vmx_vpid )
-        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vmx.vpid);
+        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vcpu.asid);
 
     if ( cpu_has_vmx_pat && paging_mode_hap(d) )
     {
@@ -946,7 +952,7 @@ void vmx_do_resume(struct vcpu *v)
         hvm_migrate_timers(v);
         hvm_migrate_pirqs(v);
         vmx_set_host_env(v);
-        vpid_sync_vcpu_all(v);
+        hvm_asid_flush_vcpu(v);
     }
 
     debug_state = v->domain->debugger_attached;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 20c30c561232c690384cdc6c2f2639adcc881854..e2b55a5a04c19159b825f5660c54caadc5bf5677 100644
@@ -60,8 +60,6 @@ static void vmx_ctxt_switch_to(struct vcpu *v);
 
 static int  vmx_alloc_vlapic_mapping(struct domain *d);
 static void vmx_free_vlapic_mapping(struct domain *d);
-static int  vmx_alloc_vpid(struct vcpu *v);
-static void vmx_free_vpid(struct vcpu *v);
 static void vmx_install_vlapic_mapping(struct vcpu *v);
 static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
 static void vmx_update_guest_efer(struct vcpu *v);
@@ -104,9 +102,6 @@ static int vmx_vcpu_initialise(struct vcpu *v)
 
     spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
 
-    if ( (rc = vmx_alloc_vpid(v)) != 0 )
-        return rc;
-
     v->arch.schedule_tail    = vmx_do_resume;
     v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
     v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
@@ -116,7 +111,6 @@ static int vmx_vcpu_initialise(struct vcpu *v)
         dprintk(XENLOG_WARNING,
                 "Failed to create VMCS for vcpu %d: err=%d.\n",
                 v->vcpu_id, rc);
-        vmx_free_vpid(v);
         return rc;
     }
 
@@ -136,7 +130,6 @@ static void vmx_vcpu_destroy(struct vcpu *v)
     vmx_destroy_vmcs(v);
     vpmu_destroy(v);
     passive_domain_destroy(v);
-    vmx_free_vpid(v);
 }
 
 #ifdef __x86_64__
@@ -1168,7 +1161,7 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
         }
  
         __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
-        vpid_sync_vcpu_all(v);
+        hvm_asid_flush_vcpu(v);
         break;
     case 4:
         v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK;
@@ -1214,19 +1207,6 @@ static void vmx_update_guest_efer(struct vcpu *v)
                    (v->arch.hvm_vcpu.guest_efer & EFER_SCE));
 }
 
-static void vmx_flush_guest_tlbs(void)
-{
-    /*
-     * If VPID (i.e. tagged TLB support) is not enabled, the fact that
-     * we're in Xen at all means any guest will have a clean TLB when
-     * it's next run, because VMRESUME will flush it for us.
-     *
-     * If enabled, we invalidate all translations associated with all
-     * VPID values.
-     */
-    vpid_sync_all();
-}
-
 static void __ept_sync_domain(void *info)
 {
     struct domain *d = info;
@@ -1358,7 +1338,7 @@ static void vmx_set_uc_mode(struct vcpu *v)
     if ( paging_mode_hap(v->domain) )
         ept_change_entry_emt_with_range(
             v->domain, 0, v->domain->arch.p2m->max_mapped_pfn);
-    vpid_sync_all();
+    hvm_asid_flush_vcpu(v);
 }
 
 static void vmx_set_info_guest(struct vcpu *v)
@@ -1405,7 +1385,6 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .update_host_cr3      = vmx_update_host_cr3,
     .update_guest_cr      = vmx_update_guest_cr,
     .update_guest_efer    = vmx_update_guest_efer,
-    .flush_guest_tlbs     = vmx_flush_guest_tlbs,
     .set_tsc_offset       = vmx_set_tsc_offset,
     .inject_exception     = vmx_inject_exception,
     .init_hypercall_page  = vmx_init_hypercall_page,
@@ -1424,9 +1403,6 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .set_rdtsc_exiting    = vmx_set_rdtsc_exiting
 };
 
-static unsigned long *vpid_bitmap;
-#define VPID_BITMAP_SIZE (1u << VMCS_VPID_WIDTH)
-
 void start_vmx(void)
 {
     static bool_t bootstrapped;
@@ -1461,17 +1437,6 @@ void start_vmx(void)
     if ( cpu_has_vmx_ept )
         vmx_function_table.hap_supported = 1;
 
-    if ( cpu_has_vmx_vpid )
-    {
-        vpid_bitmap = xmalloc_array(
-            unsigned long, BITS_TO_LONGS(VPID_BITMAP_SIZE));
-        BUG_ON(vpid_bitmap == NULL);
-        memset(vpid_bitmap, 0, BITS_TO_LONGS(VPID_BITMAP_SIZE) * sizeof(long));
-
-        /* VPID 0 is used by VMX root mode (the hypervisor). */
-        __set_bit(0, vpid_bitmap);
-    }
-
     setup_vmcs_dump();
 
     hvm_enable(&vmx_function_table);
@@ -1584,7 +1549,7 @@ static void vmx_invlpg_intercept(unsigned long vaddr)
 {
     struct vcpu *curr = current;
     HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
-    if ( paging_invlpg(curr, vaddr) )
+    if ( paging_invlpg(curr, vaddr) && cpu_has_vmx_vpid )
         vpid_sync_vcpu_gva(curr, vaddr);
 }
 
@@ -1931,36 +1896,6 @@ static void vmx_free_vlapic_mapping(struct domain *d)
         free_xenheap_page(mfn_to_virt(mfn));
 }
 
-static int vmx_alloc_vpid(struct vcpu *v)
-{
-    int idx;
-
-    if ( !cpu_has_vmx_vpid )
-        return 0;
-
-    do {
-        idx = find_first_zero_bit(vpid_bitmap, VPID_BITMAP_SIZE);
-        if ( idx >= VPID_BITMAP_SIZE )
-        {
-            dprintk(XENLOG_WARNING, "VMX VPID space exhausted.\n");
-            return -EBUSY;
-        }
-    }
-    while ( test_and_set_bit(idx, vpid_bitmap) );
-
-    v->arch.hvm_vmx.vpid = idx;
-    return 0;
-}
-
-static void vmx_free_vpid(struct vcpu *v)
-{
-    if ( !cpu_has_vmx_vpid )
-        return;
-
-    if ( v->arch.hvm_vmx.vpid )
-        clear_bit(v->arch.hvm_vmx.vpid, vpid_bitmap);
-}
-
 static void vmx_install_vlapic_mapping(struct vcpu *v)
 {
     paddr_t virt_page_ma, apic_page_ma;
@@ -2675,8 +2610,44 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
     }
 }
 
-asmlinkage void vmx_trace_vmentry(void)
+asmlinkage void vmx_vmenter_helper(void)
 {
+    struct vcpu *curr = current;
+    u32 new_asid, old_asid;
+    bool_t need_flush;
+
+    if ( !cpu_has_vmx_vpid )
+        goto out;
+
+    old_asid = curr->arch.hvm_vcpu.asid;
+    need_flush = hvm_asid_handle_vmenter();
+    new_asid = curr->arch.hvm_vcpu.asid;
+
+    if ( unlikely(new_asid != old_asid) )
+    {
+        __vmwrite(VIRTUAL_PROCESSOR_ID, new_asid);
+        if ( !old_asid && new_asid )
+        {
+            /* VPID was disabled: now enabled. */
+            curr->arch.hvm_vmx.secondary_exec_control |=
+                SECONDARY_EXEC_ENABLE_VPID;
+            __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+                      curr->arch.hvm_vmx.secondary_exec_control);
+        }
+        else if ( old_asid && !new_asid )
+        {
+            /* VPID was enabled: now disabled. */
+            curr->arch.hvm_vmx.secondary_exec_control &=
+                ~SECONDARY_EXEC_ENABLE_VPID;
+            __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+                      curr->arch.hvm_vmx.secondary_exec_control);
+        }
+    }
+
+    if ( unlikely(need_flush) )
+        vpid_sync_all();
+
+ out:
     HVMTRACE_ND (VMENTRY, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
 }
 
diff --git a/xen/include/asm-x86/hvm/asid.h b/xen/include/asm-x86/hvm/asid.h
index 336e61dd54c57b328f9c40f517027b9bb526647f..4ee520f1dba80a4d0e3e7cdd30af507d199fd319 100644
 #define __ASM_X86_HVM_ASID_H__
 
 #include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/processor.h>
+
+struct vcpu;
 
 /* Initialise ASID management for the current physical CPU. */
 void hvm_asid_init(int nasids);
 
 /* Invalidate a VCPU's current ASID allocation: forces re-allocation. */
-void hvm_asid_invalidate_asid(struct vcpu *v);
+void hvm_asid_flush_vcpu(struct vcpu *v);
 
 /* Flush all ASIDs on this processor core. */
 void hvm_asid_flush_core(void);
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index cbff2e0a0d5b2713d02ee82d1557650b3bfbb3df..8643f92926400a452d0d6202838daddccd2f2c85 100644
@@ -23,6 +23,7 @@
 
 #include <asm/current.h>
 #include <asm/x86_emulate.h>
+#include <asm/hvm/asid.h>
 #include <public/domctl.h>
 #include <public/hvm/save.h>
 
@@ -100,13 +101,6 @@ struct hvm_function_table {
     void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
     void (*update_guest_efer)(struct vcpu *v);
 
-    /*
-     * Called to ensure than all guest-specific mappings in a tagged TLB
-     * are flushed; does *not* flush Xen's TLB entries, and on
-     * processors without a tagged TLB it will be a noop.
-     */
-    void (*flush_guest_tlbs)(void);
-
     void (*set_tsc_offset)(struct vcpu *v, u64 offset);
 
     void (*inject_exception)(unsigned int trapnr, int errcode,
@@ -201,11 +195,15 @@ static inline void hvm_update_guest_efer(struct vcpu *v)
     hvm_funcs.update_guest_efer(v);
 }
 
-static inline void 
-hvm_flush_guest_tlbs(void)
+/*
+ * Called to ensure that all guest-specific mappings in a tagged TLB are
+ * flushed; does *not* flush Xen's TLB entries, and on processors without a 
+ * tagged TLB it will be a noop.
+ */
+static inline void hvm_flush_guest_tlbs(void)
 {
     if ( hvm_enabled )
-        hvm_funcs.flush_guest_tlbs();
+        hvm_asid_flush_core();
 }
 
 void hvm_hypercall_page_initialise(struct domain *d,
diff --git a/xen/include/asm-x86/hvm/svm/asid.h b/xen/include/asm-x86/hvm/svm/asid.h
index 25a8835e7e034edc8fb7c9ee12d271c0fccf2814..a484f3eff3b567dbe9799fb8f2478a89e4f5e657 100644
@@ -41,7 +41,7 @@ static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
 #endif
 
     /* Safe fallback. Take a new ASID. */
-    hvm_asid_invalidate_asid(v);
+    hvm_asid_flush_vcpu(v);
 }
 
 #endif /* __ASM_X86_HVM_SVM_ASID_H__ */
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index f7c3c78cf71ed2b41fddbb9f47091a184d6e48bc..a24985bd645223c1c1d8ee81d7e37da5853dcd72 100644
@@ -90,8 +90,6 @@ struct arch_vmx_struct {
     u32                  exec_control;
     u32                  secondary_exec_control;
 
-    u16                  vpid;
-
     /* PMU */
     struct vpmu_struct   vpmu;
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index ddda6c09aa623a3a298efb9efece4bffa1978363..8894dbf6d86b4581fccf633a08c8d805d6ad9bca 100644
@@ -314,20 +314,12 @@ void ept_sync_domain(struct domain *d);
 
 static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva)
 {
-    if ( cpu_has_vmx_vpid )
-        __invvpid(0, v->arch.hvm_vmx.vpid, (u64)gva);
-}
-
-static inline void vpid_sync_vcpu_all(struct vcpu *v)
-{
-    if ( cpu_has_vmx_vpid )
-        __invvpid(1, v->arch.hvm_vmx.vpid, 0);
+    __invvpid(0, v->arch.hvm_vcpu.asid, (u64)gva);
 }
 
 static inline void vpid_sync_all(void)
 {
-    if ( cpu_has_vmx_vpid )
-        __invvpid(2, 0, 0);
+    __invvpid(2, 0, 0);
 }
 
 static inline void __vmxoff(void)