*ebx = XSTATE_YMM_OFFSET;
break;
case 1:
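+ /* Sub-leaf 1: advertise XSAVEOPT to the guest only when the host supports it. */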
+ if ( cpu_has_xsaveopt )
+ *eax = XSAVEOPT;
+ break;
default:
break;
}
 * we set the accumulated feature mask before doing the save/restore.
*/
set_xcr0(v->arch.xcr0_accum);
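+ /* Prefer XSAVEOPT: it can skip state left unmodified since the last XRSTOR. */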
- xsave(v);
+ if ( cpu_has_xsaveopt )
+ xsaveopt(v);
+ else
+ xsave(v);
set_xcr0(v->arch.xcr0);
}
else if ( cpu_has_fxsr )
/* Cached xcr0 for fast read */
DEFINE_PER_CPU(uint64_t, xcr0);
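+/* Set by xsave_init() if the CPU supports the XSAVEOPT instruction. */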
+bool_t __read_mostly cpu_has_xsaveopt;
+
void xsave_init(void)
{
u32 eax, ebx, ecx, edx;
xfeature_mask &= XCNTXT_MASK;
printk("%s: using cntxt_size: 0x%x and states: 0x%"PRIx64"\n",
__func__, xsave_cntxt_size, xfeature_mask);
+
+ /* Check XSAVEOPT feature. */
+ cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
+ cpu_has_xsaveopt = !!(eax & XSAVEOPT);
}
else
{
extern unsigned int xsave_cntxt_size;
extern u64 xfeature_mask;
+extern bool_t cpu_has_xsaveopt;
void xsave_init(void);
int xsave_alloc_save_area(struct vcpu *v);
#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
#define XSTATE_YMM_OFFSET (512 + 64)
#define XSTATE_YMM_SIZE 256
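+/* XSAVEOPT feature flag: CPUID leaf 0xd, sub-leaf 1, EAX bit 0. */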
+#define XSAVEOPT (1 << 0)
struct xsave_struct
{
: "memory");
}
+static inline void xsaveopt(struct vcpu *v)
+{
+ struct xsave_struct *ptr;
+
+ ptr = (struct xsave_struct *)v->arch.xsave_area;
+
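+ /* xsaveopt is "0f ae /6"; ModRM byte 0x37 takes the save area pointer in "D"
+ * as the memory operand, and edx:eax = -1 requests all enabled state components. */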
+ asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x37"
+ :
+ : "a" (-1), "d" (-1), "D"(ptr)
+ : "memory");
+}
+
static inline void xrstor(struct vcpu *v)
{
struct xsave_struct *ptr;