{
ioreq_t *p;
- if ( !v->fpu_dirtied )
- hvm_funcs.stts(v);
-
pt_restore_timer(v);
/* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
return 0;
}
+static void svm_fpu_enter(struct vcpu *v)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
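+    /* Load the guest FPU state and stop intercepting #NM faults. */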
+ setup_fpu(v);
+ vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
+}
+
+static void svm_fpu_leave(struct vcpu *v)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
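+    /* This vcpu must not hold dirty FPU state, and host CR0.TS must be set. */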
+ ASSERT(!v->fpu_dirtied);
+ ASSERT(read_cr0() & X86_CR0_TS);
+
+ /*
+ * If the guest does not have TS enabled then we must cause and handle an
+ * exception on first use of the FPU. If the guest *does* have TS enabled
+ * then this is not necessary: no FPU activity can occur until the guest
+ * clears CR0.TS, and we will initialise the FPU when that happens.
+ */
+ if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ {
+        vmcb->exception_intercepts |= 1U << TRAP_no_device;
+ vmcb->cr0 |= X86_CR0_TS;
+ }
+}
+
static enum hvm_intblk svm_interrupt_blocked(
struct vcpu *v, struct hvm_intack intack)
{
switch ( cr )
{
- case 0:
- /* TS cleared? Then initialise FPU now. */
- if ( (v == current) && !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) &&
- (vmcb->cr0 & X86_CR0_TS) )
+ case 0: {
+ unsigned long hw_cr0_mask = 0;
+
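+        /*
+         * A descheduled vcpu cannot own the FPU: keep TS set in hardware
+         * CR0 so its first FPU access after being rescheduled faults and
+         * reloads its state.
+         */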
+ if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
{
- setup_fpu(v);
- vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
+ if ( v != current )
+ hw_cr0_mask |= X86_CR0_TS;
+ else if ( vmcb->cr0 & X86_CR0_TS )
+ svm_fpu_enter(v);
}
- vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+ vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
if ( !paging_mode_hap(v->domain) )
vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
break;
+ }
case 2:
vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2];
break;
svm_vmload(vmcb);
}
-/* Make sure that xen intercepts any FP accesses from current */
-static void svm_stts(struct vcpu *v)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- /*
- * If the guest does not have TS enabled then we must cause and handle an
- * exception on first use of the FPU. If the guest *does* have TS enabled
- * then this is not necessary: no FPU activity can occur until the guest
- * clears CR0.TS, and we will initialise the FPU when that happens.
- */
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
- {
- v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
- vmcb->cr0 |= X86_CR0_TS;
- }
-}
-
static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
{
v->arch.hvm_svm.vmcb->tsc_offset = offset;
{
int cpu = smp_processor_id();
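+
+    /* Arrange for the guest's next FPU access to fault and reload its state. */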
+    svm_fpu_leave(v);
+
svm_save_dr(v);
svm_sync_vmcb(v);
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
.flush_guest_tlbs = svm_flush_guest_tlbs,
- .stts = svm_stts,
.set_tsc_offset = svm_set_tsc_offset,
.inject_exception = svm_inject_exception,
.init_hypercall_page = svm_init_hypercall_page,
static void svm_do_no_device_fault(struct vmcb_struct *vmcb)
{
-    struct vcpu *v = current;
+    struct vcpu *curr = current;

-    setup_fpu(v);
-    vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
+    svm_fpu_enter(curr);

-    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
vmcb->cr0 &= ~X86_CR0_TS;
}
break;
case INSTR_CLTS:
- /* TS being cleared means that it's time to restore fpu state. */
- setup_fpu(current);
- vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
- vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
- v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
+ v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
+ svm_update_guest_cr(v, 0);
HVMTRACE_0D(CLTS, current);
break;
vmcb->tr.base = 0;
vmcb->tr.limit = 0xff;
- v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_TS;
+ v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
hvm_update_guest_cr(v, 0);
v->arch.hvm_vcpu.guest_cr[4] = 0;
paging_update_paging_modes(v);
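+    /* Intercept #NM so the FPU can be loaded lazily on first guest use. */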
+ vmcb->exception_intercepts = HVM_TRAP_MASK | (1U << TRAP_no_device);
+
if ( paging_mode_hap(v->domain) )
{
vmcb->np_enable = 1; /* enable nested paging */
vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
- vmcb->exception_intercepts = HVM_TRAP_MASK;
/*
* No point in intercepting CR3 reads, because the hardware will return
}
else
{
- vmcb->exception_intercepts = HVM_TRAP_MASK | (1U << TRAP_page_fault);
+ vmcb->exception_intercepts |= (1U << TRAP_page_fault);
}
return 0;
__vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
#endif
- __vmwrite(EXCEPTION_BITMAP, HVM_TRAP_MASK | (1U << TRAP_page_fault));
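+    /* Intercept #NM so the FPU can be loaded lazily on first guest use. */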
+ __vmwrite(EXCEPTION_BITMAP, (HVM_TRAP_MASK |
+ (1U << TRAP_page_fault) |
+ (1U << TRAP_no_device)));
v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
hvm_update_guest_cr(v, 0);
return 0;
}
-static void vmx_ctxt_switch_from(struct vcpu *v)
+static void vmx_fpu_enter(struct vcpu *v)
+{
+ setup_fpu(v);
+ __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
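+
+    /* The FPU state is now loaded: TS must be clear in the vmexit-time CR0. */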
+ v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
+ __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+}
+
+static void vmx_fpu_leave(struct vcpu *v)
{
+ ASSERT(!v->fpu_dirtied);
ASSERT(read_cr0() & X86_CR0_TS);
+
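+    /* The FPU no longer holds this vcpu's state: set TS in the vmexit CR0. */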
if ( !(v->arch.hvm_vmx.host_cr0 & X86_CR0_TS) )
{
v->arch.hvm_vmx.host_cr0 |= X86_CR0_TS;
__vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
}
+ /*
+ * If the guest does not have TS enabled then we must cause and handle an
+ * exception on first use of the FPU. If the guest *does* have TS enabled
+ * then this is not necessary: no FPU activity can occur until the guest
+ * clears CR0.TS, and we will initialise the FPU when that happens.
+ */
+ if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ {
+ v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
+ __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+ __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
+ }
+}
+
+static void vmx_ctxt_switch_from(struct vcpu *v)
+{
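+    /* Arrange for the guest's next FPU access to fault and reload its state. */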
+ vmx_fpu_leave(v);
vmx_save_guest_msrs(v);
vmx_restore_host_msrs();
vmx_save_dr(v);
vmx_vmcs_exit(v);
}
-/* Make sure that xen intercepts any FP accesses from current */
-static void vmx_stts(struct vcpu *v)
-{
- /* VMX depends on operating on the current vcpu */
- ASSERT(v == current);
-
- /*
- * If the guest does not have TS enabled then we must cause and handle an
- * exception on first use of the FPU. If the guest *does* have TS enabled
- * then this is not necessary: no FPU activity can occur until the guest
- * clears CR0.TS, and we will initialise the FPU when that happens.
- */
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
- {
- v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
- __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
- __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
- }
-}
-
static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
{
vmx_vmcs_enter(v);
switch ( cr )
{
- case 0:
- /* TS cleared? Then initialise FPU now. */
- if ( (v == current) && !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) &&
- (v->arch.hvm_vcpu.hw_cr[0] & X86_CR0_TS) )
+ case 0: {
+ unsigned long hw_cr0_mask =
+ X86_CR0_NE | X86_CR0_PG | X86_CR0_WP | X86_CR0_PE;
+
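+        /*
+         * A descheduled vcpu cannot own the FPU: keep TS set in hardware
+         * CR0 so its first FPU access after being rescheduled faults and
+         * reloads its state.
+         */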
+ if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
{
- setup_fpu(v);
- __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
+ if ( v != current )
+ hw_cr0_mask |= X86_CR0_TS;
+ else if ( v->arch.hvm_vcpu.hw_cr[0] & X86_CR0_TS )
+ vmx_fpu_enter(v);
}
v->arch.hvm_vcpu.hw_cr[0] =
- v->arch.hvm_vcpu.guest_cr[0] |
- X86_CR0_NE | X86_CR0_PG | X86_CR0_WP | X86_CR0_PE;
+ v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
__vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
__vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
break;
+ }
case 2:
/* CR2 is updated in exit stub. */
break;
.update_guest_cr = vmx_update_guest_cr,
.update_guest_efer = vmx_update_guest_efer,
.flush_guest_tlbs = vmx_flush_guest_tlbs,
- .stts = vmx_stts,
.set_tsc_offset = vmx_set_tsc_offset,
.inject_exception = vmx_inject_exception,
.init_hypercall_page = vmx_init_hypercall_page,
void vmx_do_no_device_fault(void)
{
-    struct vcpu *v = current;
+    struct vcpu *curr = current;

-    setup_fpu(current);
-    __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
-
-    ASSERT(v->arch.hvm_vmx.host_cr0 & X86_CR0_TS);
-    v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
-    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+    vmx_fpu_enter(curr);

/* Disable TS in guest CR0 unless the guest wants the exception too. */
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
{
- v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
- __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+ curr->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
+ __vmwrite(GUEST_CR0, curr->arch.hvm_vcpu.hw_cr[0]);
}
}
mov_from_cr(cr, gp, regs);
break;
case TYPE_CLTS:
- /* We initialise the FPU now, to avoid needing another vmexit. */
- setup_fpu(v);
- __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
-
- v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS; /* clear TS */
- __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
-
- v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
- __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+ v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
+ vmx_update_guest_cr(v, 0);
HVMTRACE_0D(CLTS, current);
break;
case TYPE_LMSW:
*/
void (*flush_guest_tlbs)(void);
- /*
- * Update specifics of the guest state:
- * 1) TS bit in guest cr0
- * 2) TSC offset in guest
- */
- void (*stts)(struct vcpu *v);
void (*set_tsc_offset)(struct vcpu *v, u64 offset);
void (*inject_exception)(unsigned int trapnr, int errcode,