if ( cpu_has_xsaves && is_hvm_vcpu(n) )
set_msr_xss(n->arch.hvm_vcpu.msr_xss);
}
- vcpu_restore_fpu_eager(n);
+ vcpu_restore_fpu_nonlazy(n, false);
nd->arch.ctxt_switch->to(n);
}
* by hvmemul_get_fpu().
*/
if ( curr->arch.fully_eager_fpu )
- vcpu_restore_fpu_eager(curr);
+ vcpu_restore_fpu_nonlazy(curr, false);
else
{
curr->fpu_dirtied = false;
/* VCPU FPU Functions */
/*******************************/
/* Restore FPU state whenever a VCPU is scheduled in. */
-void vcpu_restore_fpu_eager(struct vcpu *v)
+void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts)
{
/* Restore nonlazy extended state (i.e. parts not tracked by CR0.TS). */
if ( !v->arch.fully_eager_fpu && !v->arch.nonlazy_xstate_used )
- return;
+ goto maybe_stts;
ASSERT(!is_idle_vcpu(v));
v->fpu_dirtied = 1;
/* Xen doesn't need TS set, but the guest might. */
- if ( is_pv_vcpu(v) && (v->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS) )
- stts();
+ need_stts = is_pv_vcpu(v) && (v->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS);
}
else
{
fpu_xrstor(v, XSTATE_NONLAZY);
- stts();
+ need_stts = true;
}
+
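+ /* Single exit path: set CR0.TS only if the caller requested it or the restore path above did. */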
+ maybe_stts:
+ if ( need_stts )
+ stts();
}
/*
irq_exit();
efi_rs_on_cpu = NR_CPUS;
spin_unlock(&efi_rs_lock);
- vcpu_restore_fpu_eager(curr);
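+ /* CR0.TS is clear while the EFI runtime call executes; request that it be set again on exit. */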
+ vcpu_restore_fpu_nonlazy(curr, true);
}
bool efi_rs_using_pgtables(void)
uint16_t fds, _res6;
};
-void vcpu_restore_fpu_eager(struct vcpu *v);
+void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts);
void vcpu_restore_fpu_lazy(struct vcpu *v);
void vcpu_save_fpu(struct vcpu *v);
void save_fpu_enable(void);