static void nvmx_purge_vvmcs(struct vcpu *v);
+/*
+ * Return true iff @v has entered VMX operation (i.e. it has executed
+ * VMXON and not yet VMXOFF).  vmxon_region_pa doubles as the in-VMX
+ * flag: it holds INVALID_PADDR whenever the vcpu is outside VMX
+ * operation, since 0 is itself a valid VMXON region address.
+ */
+static bool nvmx_vcpu_in_vmx(const struct vcpu *v)
+{
+ return vcpu_2_nvmx(v).vmxon_region_pa != INVALID_PADDR;
+}
+
#define VMCS_BUF_SIZE 100
int nvmx_cpu_up_prepare(unsigned int cpu)
nvmx->ept.enabled = 0;
nvmx->guest_vpid = 0;
- nvmx->vmxon_region_pa = 0;
+ nvmx->vmxon_region_pa = INVALID_PADDR;
nvcpu->nv_vvmcx = NULL;
nvcpu->nv_vvmcxaddr = VMCX_EADDR;
nvmx->intr.intr_info = 0;
!(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_VMXE) )
goto invalid_op;
}
- else if ( !vcpu_2_nvmx(v).vmxon_region_pa )
+ else if ( !nvmx_vcpu_in_vmx(v) )
goto invalid_op;
if ( vmx_guest_x86_mode(v) < (hvm_long_mode_enabled(v) ? 8 : 2) )
if ( rc != X86EMUL_OKAY )
return rc;
- if ( nvmx->vmxon_region_pa )
+ if ( nvmx_vcpu_in_vmx(v) )
gdprintk(XENLOG_WARNING,
"vmxon again: orig %"PRIpaddr" new %lx\n",
nvmx->vmxon_region_pa, gpa);
return rc;
nvmx_purge_vvmcs(v);
- nvmx->vmxon_region_pa = 0;
+ nvmx->vmxon_region_pa = INVALID_PADDR;
vmreturn(regs, VMSUCCEED);
return X86EMUL_OKAY;
};
struct nestedvmx {
+ /*
+ * vmxon_region_pa is also used to indicate whether a vcpu is in
+ * VMX operation. When a vcpu is not in VMX operation, its
+ * vmxon_region_pa is set to the invalid address INVALID_PADDR. We
+ * cannot use 0 for this purpose, because 0 is itself a valid VMXON
+ * region address.
+ */
paddr_t vmxon_region_pa;
void *iobitmap[2]; /* map (va) of L1 guest I/O bitmap */
void *msrbitmap; /* map (va) of L1 guest MSR bitmap */