{"invtsc", 0x80000007, NA, CPUID_REG_EDX, 8, 1},
+ {"clzero", 0x80000008, NA, CPUID_REG_EBX, 0, 1},
+ {"rstr-fp-err-ptrs", 0x80000008, NA, CPUID_REG_EBX, 2, 1},
{"wbnoinvd", 0x80000008, NA, CPUID_REG_EBX, 9, 1},
{"ibpb", 0x80000008, NA, CPUID_REG_EBX, 12, 1},
+
{"nc", 0x80000008, NA, CPUID_REG_ECX, 0, 8},
{"apicidsize", 0x80000008, NA, CPUID_REG_ECX, 12, 4},
static const char *const str_e8b[32] =
{
[ 0] = "clzero",
+ [ 2] = "rstr-fp-err-ptrs",
/* [ 8] */ [ 9] = "wbnoinvd",
wrmsr_amd_safe(0xc001100d, l, h & ~1);
}
+ /*
+ * Older AMD CPUs don't save/restore FOP/FIP/FDP unless an FPU exception
+ * is pending. Xen works around this at (F)XRSTOR time.
+ */
+ if (!cpu_has(c, X86_FEATURE_RSTR_FP_ERR_PTRS))
+ setup_force_cpu_cap(X86_BUG_FPU_PTRS);
+
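To make the erratum concrete: FNSTENV/FNSAVE always store the internal FPU error pointers, while FXSAVE/FXRSTOR on affected parts skip them whenever no exception is pending, so stale FIP/FDP values from a previous context can survive an otherwise clean FXRSTOR. A rough user-space probe, purely illustrative and not part of this patch (on a CPU with RSTR_FP_ERR_PTRS, or with Xen's workaround in effect, FIP reads back as 0):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        static uint8_t fx[512] __attribute__((aligned(16)));
        struct { uint32_t e[7]; } env; /* FCW, FSW, FTW, FIP, FCS:FOP, FDP, FDS */
        double d = 2.0;

        /* Execute an x87 instruction: latches this insn's address into FIP. */
        asm volatile ( "fldl %0\n\tfstp %%st(0)" :: "m" (d) );

        /* Restore a zeroed ("clean") image; FCW=0x037f keeps exceptions masked. */
        memset(fx, 0, sizeof(fx));
        fx[0] = 0x7f; fx[1] = 0x03;
        asm volatile ( "fxrstor %0" :: "m" (*fx) : "memory" );

        /* FNSTENV stores FIP unconditionally; non-zero here means the
         * pointers survived the FXRSTOR, i.e. the erratum is present. */
        asm volatile ( "fnstenv %0" : "=m" (env) );
        printf("FIP after clean FXRSTOR: %#x\n", env.e[3]);
        return 0;
    }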
/*
* Attempt to set lfence to be Dispatch Serialising. This MSR almost
* certainly isn't virtualised (and Xen at least will leak the real
const typeof(v->arch.xsave_area->fpu_sse) *fpu_ctxt = v->arch.fpu_ctxt;
/*
- * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
+ * Some CPUs don't save/restore FDP/FIP/FOP unless an exception
* is pending. Clear the x87 state here by setting it to fixed
* values. The hypervisor data segment can sometimes be 0 and
* sometimes the new user value. Both should be ok. Use the FPU saved
* data block as a safe address because it should be in L1.
*/
- if ( !(fpu_ctxt->fsw & ~fpu_ctxt->fcw & 0x003f) &&
- boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- {
+ if ( cpu_bug_fpu_ptrs &&
+ !(fpu_ctxt->fsw & ~fpu_ctxt->fcw & 0x003f) )
asm volatile ( "fnclex\n\t"
"ffree %%st(7)\n\t" /* clear stack tag */
"fildl %0" /* load to clear state */
: : "m" (*fpu_ctxt) );
- }
/*
* FXRSTOR can fault if passed a corrupted data block. We handle this
: "=m" (*fpu_ctxt) : "R" (fpu_ctxt) );
/*
- * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
- * is pending.
+ * Some CPUs don't save/restore FDP/FIP/FOP unless an exception is
+ * pending. In this case, the restore side will arrange safe values,
+ * and there is no point trying to collect FCS/FDS in addition.
*/
- if ( !(fpu_ctxt->fsw & 0x0080) &&
- boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ if ( cpu_bug_fpu_ptrs && !(fpu_ctxt->fsw & 0x0080) )
return;
/*
unsigned int faults, prev_faults;
/*
- * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
+ * Some CPUs don't save/restore FDP/FIP/FOP unless an exception
* is pending. Clear the x87 state here by setting it to fixed
* values. The hypervisor data segment can sometimes be 0 and
* sometimes the new user value. Both should be ok. Use the FPU saved
* data block as a safe address because it should be in L1.
*/
- if ( (mask & ptr->xsave_hdr.xstate_bv & X86_XCR0_FP) &&
- !(ptr->fpu_sse.fsw & ~ptr->fpu_sse.fcw & 0x003f) &&
- boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ if ( cpu_bug_fpu_ptrs &&
+ !(ptr->fpu_sse.fsw & ~ptr->fpu_sse.fcw & 0x003f) )
asm volatile ( "fnclex\n\t" /* clear exceptions */
"ffree %%st(7)\n\t" /* clear stack tag */
"fildl %0" /* load to clear state */
#define cpu_has_msr_tsc_aux (cpu_has_rdtscp || cpu_has_rdpid)
+/* Bugs. */
+#define cpu_bug_fpu_ptrs boot_cpu_has(X86_BUG_FPU_PTRS)
+
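For orientation, this predicate is ordinary capability-bitmap plumbing; paraphrasing the pre-existing helpers from memory (shapes approximate, not new code), a bug bit forced on via setup_force_cpu_cap() is then tested exactly like any feature bit:

    /* Approximate shape of the existing helpers: */
    #define cpu_has(c, bit)     test_bit(bit, (c)->x86_capability)
    #define boot_cpu_has(bit)   cpu_has(&boot_cpu_data, bit)

    /* So cpu_bug_fpu_ptrs is effectively:
     *   test_bit(X86_BUG_FPU_PTRS, boot_cpu_data.x86_capability)
     */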
enum _cache_type {
CACHE_TYPE_NULL = 0,
CACHE_TYPE_DATA = 1,
#define X86_NR_BUG 1
#define X86_BUG(x) ((FSCAPINTS + X86_NR_SYNTH) * 32 + (x))
+#define X86_BUG_FPU_PTRS X86_BUG( 0) /* (F)X{SAVE,RSTOR} doesn't save/restore FOP/FIP/FDP. */
+
/* Total number of capability words, inc synth and bug words. */
#define NCAPINTS (FSCAPINTS + X86_NR_SYNTH + X86_NR_BUG) /* N 32-bit words worth of info */
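To make the bit arithmetic concrete (kept symbolic, since FSCAPINTS is generated from the public featureset):

    /*
     * Capability bitmap layout, in 32-bit words:
     *
     *   words 0 .. FSCAPINTS-1                        architectural features
     *   words FSCAPINTS .. FSCAPINTS+X86_NR_SYNTH-1   synthetic caps
     *   words FSCAPINTS+X86_NR_SYNTH onwards          bug caps (X86_NR_BUG words)
     *
     * Hence X86_BUG_FPU_PTRS == X86_BUG(0) is bit 0 of the first bug word,
     * i.e. capability bit number (FSCAPINTS + X86_NR_SYNTH) * 32.
     */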
/* AMD-defined CPU features, CPUID level 0x80000008.ebx, word 8 */
XEN_CPUFEATURE(CLZERO, 8*32+ 0) /*A CLZERO instruction */
+XEN_CPUFEATURE(RSTR_FP_ERR_PTRS, 8*32+ 2) /*A (F)X{SAVE,RSTOR} always saves/restores FPU Error pointers */
XEN_CPUFEATURE(WBNOINVD, 8*32+ 9) /* WBNOINVD instruction */
XEN_CPUFEATURE(IBPB, 8*32+12) /*A IBPB support only (no IBRS, used by AMD) */