x86/pv: Provide custom cpumasks for PV domains
author    Andrew Cooper <andrew.cooper3@citrix.com>
          Thu, 26 Nov 2015 18:56:43 +0000 (18:56 +0000)
committer Andrew Cooper <andrew.cooper3@citrix.com>
          Fri, 8 Apr 2016 20:54:39 +0000 (21:54 +0100)

And use them in preference to cpuidmask_defaults on context switch.  HVM domains
must not be masked (to avoid interfering with cpuid calls within the guest),
so always lazily context switch to the host default.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <JBeulich@suse.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
xen/arch/x86/cpu/amd.c
xen/arch/x86/cpu/intel.c
xen/arch/x86/domain.c
xen/include/asm-x86/domain.h
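
For orientation, a minimal standalone sketch of the selection logic this patch adds to
the vendor ctxt_switch_levelling() hooks.  The struct layouts, select_masks() and the
is_pv field are simplified stand-ins for illustration only, not the real Xen definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the Xen structures touched by this patch. */
struct cpuidmasks { uint64_t _1cd; };
struct pv_domain_info { struct cpuidmasks *cpuidmasks; };
struct domain { bool is_pv; struct pv_domain_info pv; };

static struct cpuidmasks cpuidmask_defaults = { ._1cd = ~0ULL };

/* Mirrors the ternary added to {amd,intel}_ctxt_switch_levelling(): a PV
 * domain with its own masks uses them; anything else (idle, HVM) falls
 * back to the host defaults, so HVM guests are never masked. */
static const struct cpuidmasks *select_masks(const struct domain *nextd)
{
    return (nextd && nextd->is_pv && nextd->pv.cpuidmasks)
           ? nextd->pv.cpuidmasks : &cpuidmask_defaults;
}

int main(void)
{
    struct cpuidmasks pv_masks = { ._1cd = 0x00ffffff00ffffffULL };
    struct domain pv  = { .is_pv = true,  .pv = { .cpuidmasks = &pv_masks } };
    struct domain hvm = { .is_pv = false, .pv = { .cpuidmasks = NULL } };

    printf("%d %d\n",
           select_masks(&pv)  == &pv_masks,            /* 1: private masks  */
           select_masks(&hvm) == &cpuidmask_defaults); /* 1: host defaults  */
    return 0;
}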

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 3e2f4a8f45ea0a4dad15bef39f750610123cbed6..d5afc3ea0e1707475123521175b0889c4484e022 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -206,7 +206,9 @@ static void __init noinline probe_masking_msrs(void)
 static void amd_ctxt_switch_levelling(const struct domain *nextd)
 {
        struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
-       const struct cpuidmasks *masks = &cpuidmask_defaults;
+       const struct cpuidmasks *masks =
+               (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+               ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
 
 #define LAZY(cap, msr, field)                                          \
        ({                                                              \
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index e21c32d695c33bc38f6ab45e0cd07c94868bd3c2..fe4736edc781087a2f919134cc796a3232c670d6 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -154,7 +154,7 @@ static void __init probe_masking_msrs(void)
 static void intel_ctxt_switch_levelling(const struct domain *nextd)
 {
        struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
-       const struct cpuidmasks *masks = &cpuidmask_defaults;
+       const struct cpuidmasks *masks;
 
        if (cpu_has_cpuid_faulting) {
                /*
@@ -178,6 +178,9 @@ static void intel_ctxt_switch_levelling(const struct domain *nextd)
                return;
        }
 
+       masks = (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+               ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
+
 #define LAZY(msr, field)                                               \
        ({                                                              \
                if (unlikely(these_masks->field != masks->field) &&     \
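
The LAZY() macros partially visible above apply the masks with a write-only-on-change
pattern.  Below is a minimal sketch of that idea; cached_value and write_mask_msr() are
hypothetical stand-ins for the per-CPU this_cpu(cpuidmasks) cache and the vendor-specific
MSR write, and the MSR index is a dummy value:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the per-CPU cache and the MSR write. */
static uint64_t cached_value;

static void write_mask_msr(uint32_t msr, uint64_t val)
{
    printf("wrmsr(0x%x, 0x%llx)\n", (unsigned)msr, (unsigned long long)val);
}

/* Only touch the (slow) MSR when the target differs from what this CPU
 * already has loaded, then remember the new value -- the same check as
 * "these_masks->field != masks->field" above. */
static void lazy_update(uint32_t msr, uint64_t target)
{
    if (cached_value != target) {
        write_mask_msr(msr, target);
        cached_value = target;
    }
}

int main(void)
{
    lazy_update(0x1234, 0x00ffffffULL); /* dummy MSR index: writes once      */
    lazy_update(0x1234, 0x00ffffffULL); /* no-op: cached value already matches */
    return 0;
}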
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index d7b6c2bf451f549072d20a0b74cbe86d638d2f0f..e93ff2056a8ecb0edd77b5fb2f05a51f04fdcc9f 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -577,6 +577,14 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
             goto fail;
         clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
 
+        if ( levelling_caps & ~LCAP_faulting )
+        {
+            d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
+            if ( !d->arch.pv_domain.cpuidmasks )
+                goto fail;
+            *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
+        }
+
         rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
                                       GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
                                       NULL, NULL);
@@ -672,7 +680,10 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
         paging_final_teardown(d);
     free_perdomain_mappings(d);
     if ( is_pv_domain(d) )
+    {
+        xfree(d->arch.pv_domain.cpuidmasks);
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+    }
     psr_domain_free(d);
     return rc;
 }
@@ -692,7 +703,10 @@ void arch_domain_destroy(struct domain *d)
 
     free_perdomain_mappings(d);
     if ( is_pv_domain(d) )
+    {
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+        xfree(d->arch.pv_domain.cpuidmasks);
+    }
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 5f2f27f354759a85438438d4bba42c36a961d1ab..d393ed24aa86a90c442fabc7e5c31fb42e161909 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -252,6 +252,8 @@ struct pv_domain
 
     /* map_domain_page() mapping cache. */
     struct mapcache_domain mapcache;
+
+    struct cpuidmasks *cpuidmasks;
 };
 
 struct monitor_write_data {