x86/spec-ctrl: Clean up MSR_MCU_OPT_CTRL handling
author Andrew Cooper <andrew.cooper3@citrix.com>
Wed, 19 May 2021 18:40:28 +0000 (19:40 +0100)
committer Andrew Cooper <andrew.cooper3@citrix.com>
Tue, 8 Feb 2022 18:00:08 +0000 (18:00 +0000)
Introduce cpu_has_srbds_ctrl as more users are going to appear shortly.

MSR_MCU_OPT_CTRL is gaining extra functionality, meaning that the current
default_xen_mcu_opt_ctrl is no longer a good fit.

Introduce two new helpers: update_mcu_opt_ctrl(), which does a full RMW cycle
on the MSR, and set_in_mcu_opt_ctrl(), which lets callers configure specific
bits at a time without clobbering each other's settings.
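
As a rough usage sketch (not part of this patch; MCU_OPT_CTRL_RNGDS_MITG_DIS
and opt_srb_lock are the names used in the spec_ctrl.c hunk below, and the
split between the two calls simply mirrors that hunk):

    /*
     * Boot path: record Xen's choice of value for the bit(s) it owns, and
     * apply it to the local MSR via the RMW helper.
     */
    set_in_mcu_opt_ctrl(MCU_OPT_CTRL_RNGDS_MITG_DIS,
                        opt_srb_lock ? 0 : MCU_OPT_CTRL_RNGDS_MITG_DIS);

    /*
     * AP bringup / S3 resume: re-apply only the bits Xen has taken
     * ownership of, leaving the rest of MSR_MCU_OPT_CTRL untouched.
     */
    update_mcu_opt_ctrl();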

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/acpi/power.c
xen/arch/x86/cpu/intel.c
xen/arch/x86/include/asm/cpufeature.h
xen/arch/x86/include/asm/processor.h
xen/arch/x86/include/asm/spec_ctrl.h
xen/arch/x86/smpboot.c
xen/arch/x86/spec_ctrl.c

index d4bdc3e7df426028294624ad621ac4537a341476..5eaa77f66a28409dc961fc7c1ce6509f105cb4ba 100644 (file)
@@ -301,8 +301,7 @@ static int enter_state(u32 state)
         ci->last_spec_ctrl = default_xen_spec_ctrl;
     }
 
-    if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
-        wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
+    update_mcu_opt_ctrl();
 
     /* (re)initialise SYSCALL/SYSENTER state, amongst other things. */
     percpu_traps_init();
index 9b011c344636bc2ac4a4bbbfa5f7638fd094cd9c..e7d4dd652f9f15ce97a5f2fb6bfc12182a84fa6b 100644 (file)
 
 #include "cpu.h"
 
+/*
+ * MSR_MCU_OPT_CTRL is a collection of unrelated functionality, with separate
+ * enablement requirements, but which want to be consistent across the system.
+ */
+static uint32_t __read_mostly mcu_opt_ctrl_mask;
+static uint32_t __read_mostly mcu_opt_ctrl_val;
+
+void update_mcu_opt_ctrl(void)
+{
+    uint32_t mask = mcu_opt_ctrl_mask, lo, hi;
+
+    if ( !mask )
+        return;
+
+    rdmsr(MSR_MCU_OPT_CTRL, lo, hi);
+
+    lo &= ~mask;
+    lo |= mcu_opt_ctrl_val;
+
+    wrmsr(MSR_MCU_OPT_CTRL, lo, hi);
+}
+
+void __init set_in_mcu_opt_ctrl(uint32_t mask, uint32_t val)
+{
+    mcu_opt_ctrl_mask |= mask;
+
+    mcu_opt_ctrl_val &= ~mask;
+    mcu_opt_ctrl_val |= (val & mask);
+
+    update_mcu_opt_ctrl();
+}
+
 /*
  * Processors which have self-snooping capability can handle conflicting
  * memory type across CPUs by snooping its own cache. However, there exists
index 4754940e23f3c3f45c659358559d58d865f60bbd..a0ab6d7d78eab82b21d282df7c2020fb2054d980 100644 (file)
 #define cpu_has_avx512_4vnniw   boot_cpu_has(X86_FEATURE_AVX512_4VNNIW)
 #define cpu_has_avx512_4fmaps   boot_cpu_has(X86_FEATURE_AVX512_4FMAPS)
 #define cpu_has_avx512_vp2intersect boot_cpu_has(X86_FEATURE_AVX512_VP2INTERSECT)
+#define cpu_has_srbds_ctrl      boot_cpu_has(X86_FEATURE_SRBDS_CTRL)
 #define cpu_has_rtm_always_abort boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)
 #define cpu_has_tsx_force_abort boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)
 #define cpu_has_serialize       boot_cpu_has(X86_FEATURE_SERIALIZE)
index e2e1eaf5bd0d5da019d41c818386acd2e5064db1..23639d5479a3f1f6943e51c0aec5ebaee8dbe831 100644 (file)
@@ -626,6 +626,9 @@ extern int8_t opt_tsx, cpu_has_tsx_ctrl;
 extern bool rtm_disabled;
 void tsx_init(void);
 
+void update_mcu_opt_ctrl(void);
+void set_in_mcu_opt_ctrl(uint32_t mask, uint32_t val);
+
 enum ap_boot_method {
     AP_BOOT_NORMAL,
     AP_BOOT_SKINIT,
index a803d16f9065b6c686271aecf1732ebbc9893c12..f760295236104888ff8d24c125766297e3a987fe 100644 (file)
@@ -54,8 +54,6 @@ extern int8_t opt_pv_l1tf_hwdom, opt_pv_l1tf_domu;
  */
 extern paddr_t l1tf_addr_mask, l1tf_safe_maddr;
 
-extern uint64_t default_xen_mcu_opt_ctrl;
-
 static inline void init_shadow_spec_ctrl_state(void)
 {
     struct cpu_info *info = get_cpu_info();
index 22ae4c1b2de9e0e66681dd8bc30cf1d1b51aaa53..335129a0104de5bd61c2f2767943b0aa260ed720 100644 (file)
@@ -385,8 +385,7 @@ void start_secondary(void *unused)
         wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
         info->last_spec_ctrl = default_xen_spec_ctrl;
     }
-    if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
-        wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
+    update_mcu_opt_ctrl();
 
     tsx_init(); /* Needs microcode.  May change HLE/RTM feature bits. */
 
index ee862089b7f65fa8344e85d1eb86c0cbd8a2cfba..3628b4b4154f0b39dda4f25cb65afeaf71dbd8ec 100644 (file)
@@ -67,7 +67,6 @@ static bool __initdata cpu_has_bug_msbds_only; /* => minimal HT impact. */
 static bool __initdata cpu_has_bug_mds; /* Any other M{LP,SB,FB}DS combination. */
 
 static int8_t __initdata opt_srb_lock = -1;
-uint64_t __read_mostly default_xen_mcu_opt_ctrl;
 
 static int __init parse_spec_ctrl(const char *s)
 {
@@ -376,7 +375,7 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
            (default_xen_spec_ctrl & SPEC_CTRL_SSBD)  ? " SSBD+" : " SSBD-",
            !(caps & ARCH_CAPS_TSX_CTRL)              ? "" :
            (opt_tsx & 1)                             ? " TSX+" : " TSX-",
-           !boot_cpu_has(X86_FEATURE_SRBDS_CTRL)     ? "" :
+           !cpu_has_srbds_ctrl                       ? "" :
            opt_srb_lock                              ? " SRB_LOCK+" : " SRB_LOCK-",
            opt_ibpb                                  ? " IBPB"  : "",
            opt_l1d_flush                             ? " L1D_FLUSH" : "",
@@ -1251,32 +1250,24 @@ void __init init_speculation_mitigations(void)
         tsx_init();
     }
 
-    /* Calculate suitable defaults for MSR_MCU_OPT_CTRL */
-    if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
+    /*
+     * On some SRBDS-affected hardware, it may be safe to relax srb-lock by
+     * default.
+     *
+     * On parts which enumerate MDS_NO and not TAA_NO, TSX is the only known
+     * way to access the Fill Buffer.  If TSX isn't available (inc. SKU
+     * reasons on some models), or TSX is explicitly disabled, then there is
+     * no need for the extra overhead to protect RDRAND/RDSEED.
+     */
+    if ( cpu_has_srbds_ctrl )
     {
-        uint64_t val;
-
-        rdmsrl(MSR_MCU_OPT_CTRL, val);
-
-        /*
-         * On some SRBDS-affected hardware, it may be safe to relax srb-lock
-         * by default.
-         *
-         * On parts which enumerate MDS_NO and not TAA_NO, TSX is the only way
-         * to access the Fill Buffer.  If TSX isn't available (inc. SKU
-         * reasons on some models), or TSX is explicitly disabled, then there
-         * is no need for the extra overhead to protect RDRAND/RDSEED.
-         */
         if ( opt_srb_lock == -1 &&
              (caps & (ARCH_CAPS_MDS_NO|ARCH_CAPS_TAA_NO)) == ARCH_CAPS_MDS_NO &&
              (!cpu_has_hle || ((caps & ARCH_CAPS_TSX_CTRL) && rtm_disabled)) )
             opt_srb_lock = 0;
 
-        val &= ~MCU_OPT_CTRL_RNGDS_MITG_DIS;
-        if ( !opt_srb_lock )
-            val |= MCU_OPT_CTRL_RNGDS_MITG_DIS;
-
-        default_xen_mcu_opt_ctrl = val;
+        set_in_mcu_opt_ctrl(MCU_OPT_CTRL_RNGDS_MITG_DIS,
+                            opt_srb_lock ? 0 : MCU_OPT_CTRL_RNGDS_MITG_DIS);
     }
 
     print_details(thunk, caps);
@@ -1314,9 +1305,6 @@ void __init init_speculation_mitigations(void)
         wrmsrl(MSR_SPEC_CTRL, val);
         info->last_spec_ctrl = val;
     }
-
-    if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
-        wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
 }
 
 static void __init __maybe_unused build_assertions(void)