x86/levelling: Provide architectural OSXSAVE handling to masked native CPUID
author     Andrew Cooper <andrew.cooper3@citrix.com>
           Mon, 22 Aug 2016 16:50:55 +0000 (17:50 +0100)
committer  Andrew Cooper <andrew.cooper3@citrix.com>
           Thu, 1 Sep 2016 10:41:07 +0000 (11:41 +0100)
Contrary to c/s b2507fe7 "x86/domctl: Update PV domain cpumasks when setting
cpuid policy", Intel CPUID masks are applied after fast-forwarding hardware
state, rather than before.  (All behaviour in this regard appears completely
undocumented by both Intel and AMD.)

Therefore, a set bit in the MSR causes hardware state to be fast-forwarded,
while a clear bit forces the guest's view to 0, even if Xen's CR4.OSXSAVE is
actually set.

This allows Xen to provide an architectural view of a guest kernel's
CR4.OSXSAVE setting to any native CPUID instruction issued by the guest kernel
or userspace, even when masking is used.
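
As an illustration only (not Xen code; the helper and constant names here are
made up for this sketch), the observed Intel ordering amounts to:

    #include <stdbool.h>
    #include <stdint.h>

    #define CPUID1_ECX_OSXSAVE (1u << 27)   /* CPUID.1:ECX.OSXSAVE */

    /*
     * Sketch of the observed behaviour: OSXSAVE is fast-forwarded from the
     * currently loaded CR4 into the raw CPUID.1.ECX value, and the masking
     * MSR is ANDed in afterwards.
     */
    static uint32_t masked_cpuid1_ecx(uint32_t raw_ecx, bool cr4_osxsave,
                                      uint32_t mask_ecx)
    {
        /* Fast-forward: the bit reflects the CR4.OSXSAVE currently in force. */
        if (cr4_osxsave)
            raw_ecx |= CPUID1_ECX_OSXSAVE;
        else
            raw_ecx &= ~CPUID1_ECX_OSXSAVE;

        /*
         * Mask applied after fast-forwarding: a clear mask bit forces the
         * guest's view to 0, a set bit passes the fast-forwarded value.
         */
        return raw_ecx & mask_ecx;
    }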

The masking value defaults to 1 (if the guest has XSAVE available) to cause
fast-forwarding to occur for the HVM and idle vcpus.

When setting the MSRs, a PV guest kernel's choice of OSXSAVE is taken into
account, and the bit is clobbered in the MSR if it is not set.  This prevents
the fast-forwarding of Xen's CR4 state.

As a side effect, however, levelling potentially needs updating on all PV CR4
changes.
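
A minimal sketch of the resulting policy (hypothetical helper, not the Xen
implementation; constant as in the sketch above, Intel _1cd layout edx:ecx
assumed, whereas AMD swaps the halves, hence the << 32 in the AMD hunk):

    #include <stdbool.h>
    #include <stdint.h>

    #define CPUID1_ECX_OSXSAVE (1u << 27)

    /*
     * The mask starts with OSXSAVE set so hardware fast-forwards Xen's
     * CR4.OSXSAVE; it is cleared only when a non-idle PV kernel has disabled
     * OSXSAVE in its virtual CR4.  Hence every PV CR4 write may require the
     * levelling MSRs to be refreshed.
     */
    static uint64_t choose_1cd_mask(uint64_t default_1cd, bool pv_vcpu,
                                    bool guest_cr4_osxsave)
    {
        uint64_t val = default_1cd;

        if (pv_vcpu && !guest_cr4_osxsave)
            val &= ~(uint64_t)CPUID1_ECX_OSXSAVE; /* force the guest's view to 0 */

        return val;
    }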

Reported-by: Jan Beulich <JBeulich@suse.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/cpu/amd.c
xen/arch/x86/cpu/intel.c
xen/arch/x86/domctl.c
xen/arch/x86/traps.c

index 784fa40c1d66d3adade7b6e9cc4b334449e35b28..23175469996541992b4d3bb5fc88db568bfed129 100644 (file)
@@ -211,6 +211,24 @@ static void amd_ctxt_switch_levelling(const struct vcpu *next)
                (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
                ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
 
+       if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
+               uint64_t val = masks->_1cd;
+
+               /*
+                * OSXSAVE defaults to 1, which causes fast-forwarding of
+                * Xen's real setting.  Clobber it if disabled by the guest
+                * kernel.
+                */
+               if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
+                   !(next->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE))
+                       val &= ~((uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE) << 32);
+
+               if (unlikely(these_masks->_1cd != val)) {
+                       wrmsr_amd(MSR_K8_FEATURE_MASK, val);
+                       these_masks->_1cd = val;
+               }
+       }
+
 #define LAZY(cap, msr, field)                                          \
        ({                                                              \
                if (unlikely(these_masks->field != masks->field) &&     \
@@ -221,7 +239,6 @@ static void amd_ctxt_switch_levelling(const struct vcpu *next)
                }                                                       \
        })
 
-       LAZY(LCAP_1cd,  MSR_K8_FEATURE_MASK,       _1cd);
        LAZY(LCAP_e1cd, MSR_K8_EXT_FEATURE_MASK,   e1cd);
        LAZY(LCAP_7ab0, MSR_AMD_L7S0_FEATURE_MASK, _7ab0);
        LAZY(LCAP_6c,   MSR_AMD_THRM_FEATURE_MASK, _6c);
index 3491638931a1807598e92ad4fe5f4b9d5532851c..a9355cbfa1cdf96c5d2ded7569708fb31ad75b79 100644 (file)
@@ -182,6 +182,24 @@ static void intel_ctxt_switch_levelling(const struct vcpu *next)
        masks = (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
                ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
 
+        if (msr_basic) {
+               uint64_t val = masks->_1cd;
+
+               /*
+                * OSXSAVE defaults to 1, which causes fast-forwarding of
+                * Xen's real setting.  Clobber it if disabled by the guest
+                * kernel.
+                */
+               if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
+                   !(next->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE))
+                       val &= ~cpufeat_mask(X86_FEATURE_OSXSAVE);
+
+               if (unlikely(these_masks->_1cd != val)) {
+                       wrmsrl(msr_basic, val);
+                       these_masks->_1cd = val;
+               }
+        }
+
 #define LAZY(msr, field)                                               \
        ({                                                              \
                if (unlikely(these_masks->field != masks->field) &&     \
@@ -192,7 +210,6 @@ static void intel_ctxt_switch_levelling(const struct vcpu *next)
                }                                                       \
        })
 
-       LAZY(msr_basic, _1cd);
        LAZY(msr_ext,   e1cd);
        LAZY(msr_xsave, Da1);
 
@@ -218,6 +235,11 @@ static void __init noinline intel_init_levelling(void)
                ecx &= opt_cpuid_mask_ecx;
                edx &= opt_cpuid_mask_edx;
 
+               /* Fast-forward bits - Must be set. */
+               if (ecx & cpufeat_mask(X86_FEATURE_XSAVE))
+                       ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE);
+               edx |= cpufeat_mask(X86_FEATURE_APIC);
+
                cpuidmask_defaults._1cd &= ((u64)edx << 32) | ecx;
        }
 
index bed70aad66fbe3dc420fd0e9295bb4cac2165a3f..a904fd613015cd13e1362288cc38cb3f41961b1a 100644 (file)
@@ -110,10 +110,18 @@ static void update_domain_cpuid_info(struct domain *d,
             case X86_VENDOR_INTEL:
                 /*
                  * Intel masking MSRs are documented as AND masks.
-                 * Experimentally, they are applied before OSXSAVE and APIC
+                 * Experimentally, they are applied after OSXSAVE and APIC
                  * are fast-forwarded from real hardware state.
                  */
                 mask &= ((uint64_t)edx << 32) | ecx;
+
+                if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
+                    ecx = cpufeat_mask(X86_FEATURE_OSXSAVE);
+                else
+                    ecx = 0;
+                edx = cpufeat_mask(X86_FEATURE_APIC);
+
+                mask |= ((uint64_t)edx << 32) | ecx;
                 break;
 
             case X86_VENDOR_AMD:
index ce924d8a5dd78381b89dcb36b4768add1129bd59..bab374dd36a0b128ce4a3bfcb56c149e7115e297 100644 (file)
@@ -2737,6 +2737,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         case 4: /* Write CR4 */
             v->arch.pv_vcpu.ctrlreg[4] = pv_guest_cr4_fixup(v, *reg);
             write_cr4(pv_guest_cr4_to_real_cr4(v));
+            ctxt_switch_levelling(v);
             break;
 
         default: