c->x86_capability);
}
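+/*
+ * Probe whether loading a NULL selector clears the segment base (NSCB):
+ * set a non-zero %fs base, load a NULL selector into %fs, and check
+ * whether the hardware zeroed the base again.
+ */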
+void __init detect_zen2_null_seg_behaviour(void)
+{
+ uint64_t base;
+
+ wrmsrl(MSR_FS_BASE, 1);
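+ /* Load a NULL selector into %fs. */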
+ asm volatile ( "mov %0, %%fs" :: "rm" (0) );
+ rdmsrl(MSR_FS_BASE, base);
+
+ if (base == 0)
+ setup_force_cpu_cap(X86_FEATURE_NSCB);
+}
+
static void init_amd(struct cpuinfo_x86 *c)
{
u32 l, h;
else /* Implicitly "== 0x10 || >= 0x12" by being 64bit. */
amd_init_lfence(c);
+ /* Probe for NSCB on Zen2 CPUs when not virtualised */
+ if (!cpu_has_hypervisor && !cpu_has_nscb && c == &boot_cpu_data &&
+ c->x86 == 0x17)
+ detect_zen2_null_seg_behaviour();
+
/*
* If the user has explicitly chosen to disable Memory Disambiguation
* to mitigate Speculative Store Bypass, poke the appropriate MSR.
void early_init_amd(struct cpuinfo_x86 *c);
void amd_log_freq(const struct cpuinfo_x86 *c);
void amd_init_lfence(struct cpuinfo_x86 *c);
+void detect_zen2_null_seg_behaviour(void);
amd_init_lfence(c);
+ /* Probe for NSCB on Zen2 CPUs when not virtualised */
+ if (!cpu_has_hypervisor && !cpu_has_nscb && c == &boot_cpu_data &&
+ c->x86 == 0x18)
+ detect_zen2_null_seg_behaviour();
+
/*
* If the user has explicitly chosen to disable Memory Disambiguation
* to mitigate Speculative Store Bypass, poke the appropriate MSR.
#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
#define cpu_has_aperfmperf boot_cpu_has(X86_FEATURE_APERFMPERF)
#define cpu_has_lfence_dispatch boot_cpu_has(X86_FEATURE_LFENCE_DISPATCH)
+#define cpu_has_nscb boot_cpu_has(X86_FEATURE_NSCB)
#define cpu_has_xen_lbr boot_cpu_has(X86_FEATURE_XEN_LBR)
#define cpu_has_xen_shstk boot_cpu_has(X86_FEATURE_XEN_SHSTK)