struct nestedvcpu *nv = &vcpu_nestedhvm(v);
if ( nv->nv_vvmcx != NULL && nv->nv_vvmcxaddr != vmcbaddr )
{
- ASSERT(nv->nv_vvmcxaddr != INVALID_PADDR);
+ ASSERT(vvmcx_valid(v));
hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
nv->nv_vvmcx = NULL;
nv->nv_vvmcxaddr = INVALID_PADDR;
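
The vvmcx_valid() helper these hunks switch to is not itself shown here. A
minimal sketch, assuming it simply wraps the INVALID_PADDR comparison it
replaces:

    /* Hypothetical definition -- wraps the open-coded check this patch removes. */
    static inline bool vvmcx_valid(const struct vcpu *v)
    {
        return vcpu_nestedhvm(v).nv_vvmcxaddr != INVALID_PADDR;
    }
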
if ( errno == VMX_INSN_SUCCEED )
return;
- if ( vcpu_nestedhvm(current).nv_vvmcxaddr != INVALID_PADDR &&
- errno != VMX_INSN_FAIL_INVALID )
+ if ( vvmcx_valid(current) && errno != VMX_INSN_FAIL_INVALID )
vmfail_valid(regs, errno);
else
vmfail_invalid(regs);
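
For reference, the two failure paths follow the SDM's VMfailValid/VMfailInvalid
conventions: VMfailValid sets ZF and records the error number in the vVMCS's
VM-instruction error field, VMfailInvalid sets CF; both clear the remaining
arithmetic flags. A sketch of the invalid case, assuming Xen's
X86_EFLAGS_ARITH_MASK:

    static void vmfail_invalid(struct cpu_user_regs *regs)
    {
        /* Sketch: VMfailInvalid sets CF and clears the other arithmetic flags. */
        regs->eflags = (regs->eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_CF;
    }

This is why vmfail() can only report a "valid" failure when a vVMCS is actually
loaded: VMfailValid needs a current VMCS to store the error number into.
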
int i;
__clear_current_vvmcs(v);
- if ( nvcpu->nv_vvmcxaddr != INVALID_PADDR )
+ if ( vvmcx_valid(v) )
hvm_unmap_guest_frame(nvcpu->nv_vvmcx, 1);
nvcpu->nv_vvmcx = NULL;
nvcpu->nv_vvmcxaddr = INVALID_PADDR;
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
/* Check the vVMCS is valid and, if the I/O bitmap is enabled, that both bitmaps are mapped. */
- if ( (nvcpu->nv_vvmcxaddr != INVALID_PADDR) &&
+ if ( vvmcx_valid(v) &&
((nvmx->iobitmap[0] && nvmx->iobitmap[1]) ||
!(__n2_exec_control(v) & CPU_BASED_ACTIVATE_IO_BITMAP) ) )
nvcpu->nv_vmentry_pending = 1;
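
Note that nv_vmentry_pending only flags the virtual VM entry here; the actual
switch into the L2 guest is performed later, on the path back out to guest
context.
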
struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
unsigned long intr_shadow;
- if ( vcpu_nestedhvm(v).nv_vvmcxaddr == INVALID_PADDR )
+ if ( !vvmcx_valid(v) )
{
vmfail_invalid(regs);
return X86EMUL_OKAY;
unsigned long intr_shadow;
int rc;
- if ( vcpu_nestedhvm(v).nv_vvmcxaddr == INVALID_PADDR )
+ if ( !vvmcx_valid(v) )
{
vmfail_invalid(regs);
return X86EMUL_OKAY;
if ( nvcpu->nv_vvmcxaddr != gpa )
nvmx_purge_vvmcs(v);
- if ( nvcpu->nv_vvmcxaddr == INVALID_PADDR )
+ if ( !vvmcx_valid(v) )
{
bool_t writable;
void *vvmcx = hvm_map_guest_frame_rw(paddr_to_pfn(gpa), 1, &writable);
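
A plausible continuation once the frame is mapped (hypothetical, mirroring the
purge path above): cache the pointer and the guest physical address, at which
point vvmcx_valid() starts returning true for this vCPU:

    /* Hypothetical: cache the mapping so vvmcx_valid() now holds. */
    if ( vvmcx && writable )
    {
        nvcpu->nv_vvmcx = vvmcx;
        nvcpu->nv_vvmcxaddr = gpa;
    }
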
if ( rc != X86EMUL_OKAY )
return rc;
- if ( vcpu_nestedhvm(v).nv_vvmcxaddr == INVALID_PADDR )
+ if ( !vvmcx_valid(v) )
{
vmfail_invalid(regs);
return X86EMUL_OKAY;
if ( decode_vmx_inst(regs, &decode, &operand) != X86EMUL_OKAY )
return X86EMUL_EXCEPTION;
- if ( vcpu_nestedhvm(v).nv_vvmcxaddr == INVALID_PADDR )
+ if ( !vvmcx_valid(v) )
{
vmfail_invalid(regs);
return X86EMUL_OKAY;