x86/cpuid: Move x86_vendor from arch_domain to cpuid_policy
author: Andrew Cooper <andrew.cooper3@citrix.com>
Thu, 12 Jan 2017 11:45:10 +0000 (11:45 +0000)
committer: Andrew Cooper <andrew.cooper3@citrix.com>
Wed, 18 Jan 2017 12:45:52 +0000 (12:45 +0000)
No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
13 files changed:
xen/arch/x86/cpuid.c
xen/arch/x86/domain.c
xen/arch/x86/domctl.c
xen/arch/x86/hvm/emulate.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/ioreq.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/mm.c
xen/arch/x86/mm/shadow/common.c
xen/arch/x86/traps.c
xen/include/asm-x86/cpuid.h
xen/include/asm-x86/domain.h

index 95040f9bd46254a22dea234bb176174778339104..bcdac03dca82979cee5888d6fba37f2cb784088e 100644 (file)
@@ -130,6 +130,8 @@ static void __init calculate_raw_policy(void)
     for ( i = 1; i < min(ARRAY_SIZE(p->extd.raw),
                          p->extd.max_leaf + 1 - 0x80000000ul); ++i )
         cpuid_leaf(0x80000000 + i, &p->extd.raw[i]);
+
+    p->x86_vendor = boot_cpu_data.x86_vendor;
 }
 
 static void __init calculate_host_policy(void)
@@ -592,7 +594,7 @@ static void pv_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res)
         res->d = p->extd.e1d;
 
         /* If not emulating AMD, clear the duplicated features in e1d. */
-        if ( currd->arch.x86_vendor != X86_VENDOR_AMD )
+        if ( p->x86_vendor != X86_VENDOR_AMD )
             res->d &= ~CPUID_COMMON_1D_FEATURES;
 
         /*
@@ -805,7 +807,7 @@ static void hvm_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res)
         res->d = p->extd.e1d;
 
         /* If not emulating AMD, clear the duplicated features in e1d. */
-        if ( d->arch.x86_vendor != X86_VENDOR_AMD )
+        if ( p->x86_vendor != X86_VENDOR_AMD )
             res->d &= ~CPUID_COMMON_1D_FEATURES;
         /* fast-forward MSR_APIC_BASE.EN if it hasn't already been clobbered. */
         else if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
@@ -829,8 +831,7 @@ static void hvm_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res)
             res->d &= ~cpufeat_mask(X86_FEATURE_PSE36);
 
         /* SYSCALL is hidden outside of long mode on Intel. */
-        if ( d->arch.x86_vendor == X86_VENDOR_INTEL &&
-             !hvm_long_mode_enabled(v))
+        if ( p->x86_vendor == X86_VENDOR_INTEL && !hvm_long_mode_enabled(v) )
             res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
 
         break;
index 354f3866a840a211372cdd0a7a5f3223c454b6f2..340905475666e43300ba85ab1c33d577485c2922 100644 (file)
@@ -608,8 +608,6 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
         if ( (rc = init_domain_cpuid_policy(d)) )
             goto fail;
 
-        d->arch.x86_vendor = boot_cpu_data.x86_vendor;
-
         d->arch.ioport_caps = 
             rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
         rc = -ENOMEM;
index 0458d8f47804f164551bef0b0651a404961b04f7..969df12e464713461cf3ae612bcc6eb3129baf25 100644 (file)
@@ -154,12 +154,11 @@ static int update_domain_cpuid_info(struct domain *d,
     switch ( ctl->input[0] )
     {
     case 0: {
-        int old_vendor = d->arch.x86_vendor;
+        int old_vendor = p->x86_vendor;
 
-        d->arch.x86_vendor = get_cpu_vendor(
-            ctl->ebx, ctl->ecx, ctl->edx, gcv_guest);
+        p->x86_vendor = get_cpu_vendor(ctl->ebx, ctl->ecx, ctl->edx, gcv_guest);
 
-        if ( is_hvm_domain(d) && (d->arch.x86_vendor != old_vendor) )
+        if ( is_hvm_domain(d) && (p->x86_vendor != old_vendor) )
         {
             struct vcpu *v;
 
@@ -290,7 +289,7 @@ static int update_domain_cpuid_info(struct domain *d,
                 ecx |= cpufeat_mask(X86_FEATURE_CMP_LEGACY);
 
             /* If not emulating AMD, clear the duplicated features in e1d. */
-            if ( d->arch.x86_vendor != X86_VENDOR_AMD )
+            if ( p->x86_vendor != X86_VENDOR_AMD )
                 edx &= ~CPUID_COMMON_1D_FEATURES;
 
             switch ( boot_cpu_data.x86_vendor )
index e22740fd578acaa77820ae704421bd880effbe85..0d21fe190246f9396fef3e9b5ada3ced81554bf8 100644 (file)
@@ -1910,7 +1910,7 @@ void hvm_emulate_init_once(
 
     hvmemul_ctxt->validate = validate;
     hvmemul_ctxt->ctxt.regs = regs;
-    hvmemul_ctxt->ctxt.vendor = curr->domain->arch.x86_vendor;
+    hvmemul_ctxt->ctxt.vendor = curr->domain->arch.cpuid->x86_vendor;
     hvmemul_ctxt->ctxt.force_writeback = true;
 
     if ( cpu_has_vmx )
index 2ec08000dbe39906364de37550b9361a165b53e6..63748dc049dd6024184a312172d7913ed96133a6 100644 (file)
@@ -3619,7 +3619,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
 {
     struct vcpu *cur = current;
     bool should_emulate =
-        cur->domain->arch.x86_vendor != boot_cpu_data.x86_vendor;
+        cur->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor;
     struct hvm_emulate_ctxt ctxt;
 
     hvm_emulate_init_once(&ctxt, opt_hvm_fep ? NULL : is_cross_vendor, regs);
index 8ad84654245a52f8c649f59342f90e1d2d7a4b74..26a0cb80357a15d3e0022f56944b6943038af396 100644 (file)
@@ -1140,7 +1140,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                (p->addr & 3);
         /* AMD extended configuration space access? */
         if ( CF8_ADDR_HI(cf8) &&
-             d->arch.x86_vendor == X86_VENDOR_AMD &&
+             d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
              (x86_fam = get_cpu_family(
                  d->arch.cpuid->basic.raw_fms, NULL, NULL)) > 0x10 &&
              x86_fam <= 0x17 )
index ae8e2c47b2f55202fa14ca94d1001203491760e6..e8ef88da4413e25e1228a1d6814f2d32f0d6b0ae 100644 (file)
@@ -591,7 +591,7 @@ static void svm_update_guest_vendor(struct vcpu *v)
     u32 bitmap = vmcb_get_exception_intercepts(vmcb);
 
     if ( opt_hvm_fep ||
-         (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+         (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
         bitmap |= (1U << TRAP_invalid_op);
     else
         bitmap &= ~(1U << TRAP_invalid_op);
index 61925cfa9e03771785d34845cb1f51a0707f27bf..a5e5ffdcac793cebf933d642a752b2fc743733f1 100644 (file)
@@ -544,7 +544,7 @@ void vmx_update_exception_bitmap(struct vcpu *v)
 static void vmx_update_guest_vendor(struct vcpu *v)
 {
     if ( opt_hvm_fep ||
-         (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+         (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
         v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
     else
         v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
index d707d1c30c39b8753491c878b6d4d28f31d8b079..a5521f186e48685665e201f016dbfea6b9eeb04d 100644 (file)
@@ -5358,7 +5358,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
     struct ptwr_emulate_ctxt ptwr_ctxt = {
         .ctxt = {
             .regs = regs,
-            .vendor = d->arch.x86_vendor,
+            .vendor = d->arch.cpuid->x86_vendor,
             .addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .sp_size   = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .swint_emulate = x86_swint_emulate_none,
@@ -5514,7 +5514,7 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
     struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = addr };
     struct x86_emulate_ctxt ctxt = {
         .regs = regs,
-        .vendor = v->domain->arch.x86_vendor,
+        .vendor = v->domain->arch.cpuid->x86_vendor,
         .addr_size = addr_size,
         .sp_size = addr_size,
         .swint_emulate = x86_swint_emulate_none,
index 4113351e31190ace44bd0891b780a85828cade8a..e4ccf92e6c267587bd64207bf819d85b50593b70 100644 (file)
@@ -330,7 +330,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
     memset(sh_ctxt, 0, sizeof(*sh_ctxt));
 
     sh_ctxt->ctxt.regs = regs;
-    sh_ctxt->ctxt.vendor = v->domain->arch.x86_vendor;
+    sh_ctxt->ctxt.vendor = v->domain->arch.cpuid->x86_vendor;
     sh_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
 
     /* Segment cache initialisation. Primed with CS. */
index ea0ce528f345c9e0852fe5f6fce603c444eb2a7f..691c9a2a5bf5d4244e9ac01a4a6240021c14b87f 100644 (file)
@@ -2981,7 +2981,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
     struct domain *currd = curr->domain;
     struct priv_op_ctxt ctxt = {
         .ctxt.regs = regs,
-        .ctxt.vendor = currd->arch.x86_vendor,
+        .ctxt.vendor = currd->arch.cpuid->x86_vendor,
     };
     int rc;
     unsigned int eflags, ar;
index c56190b762195731d15d1390d4e0c8c0ffcde6eb..24ad3e0b86a10f5830cd1cbf2fc6c13092b80f9c 100644 (file)
@@ -204,6 +204,9 @@ struct cpuid_policy
     /* Toolstack selected Hypervisor max_leaf (if non-zero). */
     uint8_t hv_limit, hv2_limit;
 
+    /* Value calculated from raw data above. */
+    uint8_t x86_vendor;
+
     /* Temporary: Legacy data array. */
 #define MAX_CPUID_INPUT 40
     xen_domctl_cpuid_t legacy[MAX_CPUID_INPUT];
index 82296c8756c48ab59fcb094152f11fa5d2131287..e6c7e13354249bc1abb383416f1af7217776e51e 100644 (file)
@@ -337,9 +337,6 @@ struct arch_domain
     /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
     bool_t auto_unmask;
 
-    /* Values snooped from updates to cpuids[] (below). */
-    u8 x86_vendor;           /* CPU vendor */
-
     /*
      * The width of the FIP/FDP register in the FPU that needs to be
      * saved/restored during a context switch.  This is needed because