};
static DEFINE_PER_CPU(struct foreign_vmcs, foreign_vmcs);
-void vmx_vmcs_enter(struct vcpu *v)
+bool_t vmx_vmcs_try_enter(struct vcpu *v)
{
struct foreign_vmcs *fv;
/*
* NB. We must *always* run an HVM VCPU on its own VMCS, except for
- * vmx_vmcs_enter/exit critical regions.
+ * vmx_vmcs_enter/exit and scheduling tail critical regions.
*/
if ( likely(v == current) )
- return;
+ return v->arch.hvm_vmx.vmcs == this_cpu(current_vmcs);
fv = &this_cpu(foreign_vmcs);
}
fv->count++;
+
+ return 1;
+}
+
+void vmx_vmcs_enter(struct vcpu *v)
+{
+    /*
+     * Unconditional wrapper around vmx_vmcs_try_enter(): callers on this
+     * path are expected to always be able to enter v's VMCS, so a failure
+     * here indicates a caller running in an invalid context (hence the
+     * assertion rather than error propagation).
+     */
+    bool_t okay = vmx_vmcs_try_enter(v);
+
+    ASSERT(okay);
}
void vmx_vmcs_exit(struct vcpu *v)
{
unsigned long attr = 0, sel = 0, limit;
- vmx_vmcs_enter(v);
+ /*
+ * We may get here in the context of dump_execstate(), which may have
+ * interrupted context switching between setting "current" and
+ * vmx_do_resume() reaching the end of vmx_load_vmcs(). That would make
+ * all the VMREADs below fail if we don't bail right away.
+ */
+ if ( unlikely(!vmx_vmcs_try_enter(v)) )
+ {
+ static bool_t warned;
+
+ if ( !warned )
+ {
+ warned = 1;
+ printk(XENLOG_WARNING "Segment register inaccessible for d%dv%d\n"
+ "(If you see this outside of debugging activity,"
+ " please report to xen-devel@lists.xenproject.org)\n",
+ v->domain->domain_id, v->vcpu_id);
+ }
+ memset(reg, 0, sizeof(*reg));
+ return;
+ }
switch ( seg )
{
int vmx_create_vmcs(struct vcpu *v);
void vmx_destroy_vmcs(struct vcpu *v);
void vmx_vmcs_enter(struct vcpu *v);
+bool_t __must_check vmx_vmcs_try_enter(struct vcpu *v);
void vmx_vmcs_exit(struct vcpu *v);
#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004