return -EINVAL;
}
- v->fpu_initialised = !!(flags & VGCF_I387_VALID);
-
v->arch.flags &= ~TF_kernel_mode;
if ( (flags & VGCF_in_kernel) || is_hvm_domain(d)/*???*/ )
v->arch.flags |= TF_kernel_mode;
v->arch.vgc_flags = flags;
- if ( flags & VGCF_I387_VALID )
- {
- memcpy(v->arch.fpu_ctxt, &c.nat->fpu_ctxt, sizeof(c.nat->fpu_ctxt));
- if ( v->arch.xsave_area )
- v->arch.xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
- }
- else if ( v->arch.xsave_area )
- {
- v->arch.xsave_area->xsave_hdr.xstate_bv = 0;
- v->arch.xsave_area->fpu_sse.mxcsr = MXCSR_DEFAULT;
- }
- else
- {
- typeof(v->arch.xsave_area->fpu_sse) *fpu_sse = v->arch.fpu_ctxt;
-
- memset(fpu_sse, 0, sizeof(*fpu_sse));
- fpu_sse->fcw = FCW_DEFAULT;
- fpu_sse->mxcsr = MXCSR_DEFAULT;
- }
- if ( v->arch.xsave_area )
- v->arch.xsave_area->xsave_hdr.xcomp_bv = 0;
+ vcpu_setup_fpu(v, v->arch.xsave_area,
+ flags & VGCF_I387_VALID ? &c.nat->fpu_ctxt : NULL,
+ FCW_DEFAULT);
if ( !compat )
{
struct hvm_hw_cpu ctxt;
struct segment_register seg;
const char *errstr;
- struct xsave_struct *xsave_area;
/* Which vcpu is this? */
vcpuid = hvm_load_instance(h);
hvm_set_segment_register(v, x86_seg_ldtr, &seg);
/* Cover xsave-absent save file restoration on xsave-capable host. */
- xsave_area = xsave_enabled(v) ? NULL : v->arch.xsave_area;
-
- v->fpu_initialised = !!(ctxt.flags & XEN_X86_FPU_INITIALISED);
- if ( v->fpu_initialised )
- {
- memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
- if ( xsave_area )
- xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
- }
- else if ( xsave_area )
- {
- xsave_area->xsave_hdr.xstate_bv = 0;
- xsave_area->fpu_sse.mxcsr = MXCSR_DEFAULT;
- }
- if ( xsave_area )
- xsave_area->xsave_hdr.xcomp_bv = 0;
+ vcpu_setup_fpu(v, xsave_enabled(v) ? NULL : v->arch.xsave_area,
+ ctxt.flags & XEN_X86_FPU_INITIALISED ? ctxt.fpu_regs : NULL,
+ FCW_RESET);
v->arch.user_regs.rax = ctxt.rax;
v->arch.user_regs.rbx = ctxt.rbx;
{
struct domain *d = v->domain;
struct segment_register reg;
- typeof(v->arch.xsave_area->fpu_sse) *fpu_ctxt = v->arch.fpu_ctxt;
domain_lock(d);
v->arch.guest_table = pagetable_null();
}
- memset(fpu_ctxt, 0, sizeof(*fpu_ctxt));
- fpu_ctxt->fcw = FCW_RESET;
- fpu_ctxt->mxcsr = MXCSR_DEFAULT;
if ( v->arch.xsave_area )
- {
- v->arch.xsave_area->xsave_hdr.xstate_bv = X86_XCR0_FP;
- v->arch.xsave_area->xsave_hdr.xcomp_bv = 0;
- }
+ v->arch.xsave_area->xsave_hdr.xstate_bv = 0;
+ vcpu_setup_fpu(v, v->arch.xsave_area, NULL, FCW_RESET);
v->arch.vgc_flags = VGCF_online;
memset(&v->arch.user_regs, 0, sizeof(v->arch.user_regs));
return rc;
}
+/*
+ * Establish a vCPU's FPU/SSE state: either load a supplied FXSAVE image, or
+ * reset the state to defaults.
+ *
+ * @v:           vCPU to act upon.
+ * @xsave_area:  normally v->arch.xsave_area; callers pass NULL to suppress
+ *               XSAVE header updates (e.g. restoring an xsave-absent save
+ *               file on an xsave-capable host).
+ * @data:        FXSAVE-layout image to load, or NULL to reset the state.
+ * @fcw_default: FCW value to establish on reset (FCW_DEFAULT or FCW_RESET).
+ */
+void vcpu_setup_fpu(struct vcpu *v, struct xsave_struct *xsave_area,
+                    const void *data, unsigned int fcw_default)
+{
+    /*
+     * For the entire function please note that vcpu_init_fpu() (above) points
+     * v->arch.fpu_ctxt into v->arch.xsave_area when XSAVE is available. Hence
+     * accesses through both pointers alias one another, and the shorter form
+     * is used here.
+     */
+    typeof(xsave_area->fpu_sse) *fpu_sse = v->arch.fpu_ctxt;
+
+    /* Only NULL or the vCPU's own area may be passed. */
+    ASSERT(!xsave_area || xsave_area == v->arch.xsave_area);
+
+    /* The state counts as initialised exactly when an image was supplied. */
+    v->fpu_initialised = !!data;
+
+    if ( data )
+    {
+        /* Load the image and mark FP/SSE components as carrying state. */
+        memcpy(fpu_sse, data, sizeof(*fpu_sse));
+        if ( xsave_area )
+            xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
+    }
+    else if ( xsave_area && fcw_default == FCW_DEFAULT )
+    {
+        /*
+         * Default-FCW reset on an XSAVE-tracking vCPU: clearing xstate_bv
+         * marks FP/SSE as being in their (architectural) init state, so only
+         * MXCSR needs writing explicitly here.
+         */
+        xsave_area->xsave_hdr.xstate_bv = 0;
+        fpu_sse->mxcsr = MXCSR_DEFAULT;
+    }
+    else
+    {
+        /* Explicit reset of the legacy area (no XSAVE, or non-default FCW). */
+        memset(fpu_sse, 0, sizeof(*fpu_sse));
+        fpu_sse->fcw = fcw_default;
+        fpu_sse->mxcsr = MXCSR_DEFAULT;
+        if ( v->arch.xsave_area )
+        {
+            /*
+             * Keep the header consistent with the legacy area just written:
+             * drop FP/SSE, but a non-default FCW must survive restoring, so
+             * re-mark the x87 component as carrying state in that case.
+             */
+            v->arch.xsave_area->xsave_hdr.xstate_bv &= ~XSTATE_FP_SSE;
+            if ( fcw_default != FCW_DEFAULT )
+                v->arch.xsave_area->xsave_hdr.xstate_bv |= X86_XCR0_FP;
+        }
+    }
+
+    /* Non-compacted format is in use throughout. */
+    if ( xsave_area )
+        xsave_area->xsave_hdr.xcomp_bv = 0;
+}
+
/* Free FPU's context save area */
void vcpu_destroy_fpu(struct vcpu *v)
{
void save_fpu_enable(void);
int vcpu_init_fpu(struct vcpu *v);
+struct xsave_struct;
+void vcpu_setup_fpu(struct vcpu *v, struct xsave_struct *xsave_area,
+ const void *data, unsigned int fcw_default);
void vcpu_destroy_fpu(struct vcpu *v);
#endif /* __ASM_I386_I387_H */