* has ever used lazy states (checking xcr0_accum excluding
* XSTATE_FP_SSE), vcpu_xsave_mask will return XSTATE_ALL. Otherwise
* return XSTATE_NONLAZY.
- * XSTATE_FP_SSE may be excluded, because the offsets of XSTATE_FP_SSE
- * (in the legacy region of xsave area) are fixed, so saving
- * XSTATE_FP_SSE will not cause overwriting problem.
*/
- return (v->arch.xsave_area->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED)
- && (v->arch.xcr0_accum & XSTATE_LAZY & ~XSTATE_FP_SSE)
- ? XSTATE_ALL : XSTATE_NONLAZY;
+ return xstate_all(v) ? XSTATE_ALL : XSTATE_NONLAZY;
}
/* Save x87 extended state */
{
ASSERT(!is_idle_vcpu(v));
- /* save the nonlazy extended state which is not tracked by CR0.TS bit */
- if ( v->arch.nonlazy_xstate_used )
+ /* Restore nonlazy extended state (i.e. parts not tracked by CR0.TS). */
+ if ( !v->arch.nonlazy_xstate_used )
+ return;
+
+ /* Avoid recursion */
+ clts();
+
+ /*
+ * When saving full state even with !v->fpu_dirtied (see vcpu_xsave_mask()
+ * above) we also need to restore full state, to prevent subsequently
+ * saving state belonging to another vCPU.
+ */
+ if ( xstate_all(v) )
+ {
+ fpu_xrstor(v, XSTATE_ALL);
+ v->fpu_initialised = 1;
+ v->fpu_dirtied = 1;
+ }
+ else
{
- /* Avoid recursion */
- clts();
fpu_xrstor(v, XSTATE_NONLAZY);
stts();
}
clts();
if ( curr->fpu_dirtied )
asm ( "stmxcsr %0" : "=m" (curr->arch.xsave_area->fpu_sse.mxcsr) );
+ else if ( xstate_all(curr) )
+ {
+ /* See the comment in i387.c:vcpu_restore_fpu_eager(). */
+ mask |= XSTATE_LAZY;
+ curr->fpu_initialised = 1;
+ curr->fpu_dirtied = 1;
+ cr0 &= ~X86_CR0_TS;
+ }
xrstor(curr, mask);
if ( cr0 & X86_CR0_TS )
write_cr0(cr0);
#ifndef __ASM_XSTATE_H
#define __ASM_XSTATE_H
-#include <xen/types.h>
+#include <xen/sched.h>
#include <asm/cpufeature.h>
#define FCW_DEFAULT 0x037f
void xstate_init(struct cpuinfo_x86 *c);
unsigned int xstate_ctxt_size(u64 xcr0);
+/*
+ * Return whether full (XSTATE_ALL) state handling is required for @v:
+ * true iff the vCPU's xsave area uses the compacted format (XSAVES/XSAVEC,
+ * indicated by XSTATE_COMPACTION_ENABLED in xcomp_bv) AND the vCPU has ever
+ * enabled a lazy xstate component beyond plain FP/SSE (tracked in
+ * xcr0_accum).  In the compacted format, component offsets depend on which
+ * earlier components are present, so saving only a subset could otherwise
+ * corrupt neighbouring components.
+ */
+static inline bool_t xstate_all(const struct vcpu *v)
+{
+ /*
+ * XSTATE_FP_SSE may be excluded, because the offsets of XSTATE_FP_SSE
+ * (in the legacy region of xsave area) are fixed, so saving
+ * XSTATE_FP_SSE will not cause overwriting problem with XSAVES/XSAVEC.
+ */
+ return (v->arch.xsave_area->xsave_hdr.xcomp_bv &
+ XSTATE_COMPACTION_ENABLED) &&
+ (v->arch.xcr0_accum & XSTATE_LAZY & ~XSTATE_FP_SSE);
+}
+
#endif /* __ASM_XSTATE_H */