{"avx512-4fmaps",0x00000007, 0, CPUID_REG_EDX, 3, 1},
{"ibrsb", 0x00000007, 0, CPUID_REG_EDX, 26, 1},
{"stibp", 0x00000007, 0, CPUID_REG_EDX, 27, 1},
+ {"arch-caps", 0x00000007, 0, CPUID_REG_EDX, 29, 1},
{"lahfsahf", 0x80000001, NA, CPUID_REG_ECX, 0, 1},
{"cmplegacy", 0x80000001, NA, CPUID_REG_ECX, 1, 1},
[ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
[26] = "ibrsb", [27] = "stibp",
+ /* 28 */ [29] = "arch_caps",
};
static struct {
* to the page lock we hold, its pinned status, and uses on
* this (v)CPU.
*/
- if ( !rc && this_cpu(root_pgt) &&
+ if ( !rc && !cpu_has_no_xpti &&
((page->u.inuse.type_info & PGT_count_mask) >
(1 + !!(page->u.inuse.type_info & PGT_pinned) +
(pagetable_get_pfn(curr->arch.guest_table) == mfn) +
}
custom_param("smap", parse_smap_param);
+static int8_t __initdata opt_xpti = -1;
+boolean_param("xpti", opt_xpti);
+
bool __read_mostly acpi_disabled;
bool __initdata acpi_force;
static char __initdata acpi_param[10] = "";
cr4_pv32_mask = mmu_cr4_features & XEN_CR4_PV32_BITS;
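+ /*
+  * Default: enable XPTI only where the hardware is vulnerable to
+  * Meltdown.  AMD CPUs are not affected; other CPUs may advertise
+  * immunity via the RDCL_NO bit of MSR_ARCH_CAPABILITIES.
+  */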
+ if ( opt_xpti < 0 )
+ {
+ uint64_t caps = 0;
+
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ caps = ARCH_CAPABILITIES_RDCL_NO;
+ else if ( boot_cpu_has(X86_FEATURE_ARCH_CAPS) )
+ rdmsrl(MSR_ARCH_CAPABILITIES, caps);
+
+ opt_xpti = !(caps & ARCH_CAPABILITIES_RDCL_NO);
+ }
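+ /*
+  * Record the decision as a synthetic feature, so later code (e.g.
+  * setup_cpu_root_pgt()) can simply test cpu_has_no_xpti.
+  */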
+ if ( opt_xpti )
+ setup_clear_cpu_cap(X86_FEATURE_NO_XPTI);
+ else
+ setup_force_cpu_cap(X86_FEATURE_NO_XPTI);
+
if ( cpu_has_fsgsbase )
set_in_cr4(X86_CR4_FSGSBASE);
return 0;
}
-static __read_mostly int8_t opt_xpti = -1;
-boolean_param("xpti", opt_xpti);
DEFINE_PER_CPU(root_pgentry_t *, root_pgt);
static root_pgentry_t common_pgt;
unsigned int off;
int rc;
- if ( !opt_xpti )
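+ /* Without XPTI, no per-CPU shadow root page table is needed. */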
+ if ( cpu_has_no_xpti )
return 0;
rpt = alloc_xen_pagetable();
stack_base[0] = stack_start;
- if ( opt_xpti < 0 )
- opt_xpti = boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-
rc = setup_cpu_root_pgt(0);
if ( rc )
panic("Error %d setting up PV root page table\n", rc);
#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
#define cpu_has_aperfmperf boot_cpu_has(X86_FEATURE_APERFMPERF)
#define cpu_has_lfence_dispatch boot_cpu_has(X86_FEATURE_LFENCE_DISPATCH)
+#define cpu_has_no_xpti boot_cpu_has(X86_FEATURE_NO_XPTI)
enum _cache_type {
CACHE_TYPE_NULL = 0,
XEN_CPUFEATURE(XEN_IBRS_SET, (FSCAPINTS+0)*32+16) /* IBRSB && IBRS set in Xen */
XEN_CPUFEATURE(XEN_IBRS_CLEAR, (FSCAPINTS+0)*32+17) /* IBRSB && IBRS clear in Xen */
XEN_CPUFEATURE(RSB_NATIVE, (FSCAPINTS+0)*32+18) /* RSB overwrite needed for native */
-XEN_CPUFEATURE(RSB_VMEXIT, (FSCAPINTS+0)*32+20) /* RSB overwrite needed for vmexit */
+XEN_CPUFEATURE(RSB_VMEXIT, (FSCAPINTS+0)*32+19) /* RSB overwrite needed for vmexit */
+XEN_CPUFEATURE(NO_XPTI, (FSCAPINTS+0)*32+20) /* XPTI mitigation not in use */
#define PRED_CMD_IBPB (_AC(1, ULL) << 0)
#define MSR_ARCH_CAPABILITIES 0x0000010a
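+/*
+ * RDCL_NO: CPU is not susceptible to Rogue Data Cache Load (Meltdown).
+ * IBRS_ALL: enhanced IBRS is supported.
+ */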
+#define ARCH_CAPABILITIES_RDCL_NO (_AC(1, ULL) << 0)
+#define ARCH_CAPABILITIES_IBRS_ALL (_AC(1, ULL) << 1)
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_PERFCTR0 0x000000c1
XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A AVX512 Multiply Accumulation Single Precision */
XEN_CPUFEATURE(IBRSB, 9*32+26) /*A IBRS and IBPB support (used by Intel) */
XEN_CPUFEATURE(STIBP, 9*32+27) /*A! STIBP */
+XEN_CPUFEATURE(ARCH_CAPS, 9*32+29) /* IA32_ARCH_CAPABILITIES MSR */
#endif /* XEN_CPUFEATURE */