}
if ( a.value > 1 )
rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- rc = -EINVAL;
/* Remove the check below once we have
* shadow-on-shadow.
*/
if ( cpu_has_svm && !paging_mode_hap(d) && a.value )
rc = -EINVAL;
/* Set up NHVM state for any vcpus that are already up */
- if ( !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+ if ( a.value &&
+ !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
for_each_vcpu(d, v)
if ( rc == 0 )
rc = nestedhvm_vcpu_initialise(v);
+ if ( !a.value || rc )
+ for_each_vcpu(d, v)
+ nestedhvm_vcpu_destroy(v);
break;
case HVM_PARAM_BUFIOREQ_EVTCHN:
rc = -EINVAL;
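
The hunk above reworks the HVM_PARAM_NESTEDHVM path: nested-HVM state
is now initialised only when the parameter is actually being enabled
(a.value nonzero) and was not already set, and either an explicit
disable or a failure part-way through the per-vCPU loop tears the
state down again for every vCPU. Dropping the standalone
is_hvm_domain() check looks safe on the assumption that the
surrounding hypercall handler already rejects non-HVM domains. Note
that the new destroy loop may run against vCPUs that were never
initialised at all, which is precisely what the hardening of
nvmx_vcpu_destroy() further down caters for.

A minimal, compilable sketch of the all-or-nothing pattern this hunk
adopts; every name in it (struct unit, unit_init(), set_feature(),
...) is illustrative, not Xen's:

#include <stddef.h>
#include <stdio.h>

struct unit { int ready; };

static int unit_init(struct unit *u) { u->ready = 1; return 0; }
static void unit_destroy(struct unit *u) { u->ready = 0; }

static int set_feature(struct unit *units, size_t n, int enable)
{
    int rc = 0;
    size_t i;

    /* Initialise every unit, stopping at the first failure. */
    if ( enable )
        for ( i = 0; i < n && rc == 0; i++ )
            rc = unit_init(&units[i]);

    /* Disabling, or a partial failure, unwinds all units; destroy
     * must therefore tolerate units never initialised to begin with. */
    if ( !enable || rc )
        for ( i = 0; i < n; i++ )
            unit_destroy(&units[i]);

    return rc;
}

int main(void)
{
    struct unit u[4] = {{ 0 }};

    printf("enable: %d\n", set_feature(u, 4, 1));
    printf("disable: %d\n", set_feature(u, 4, 0));
    return 0;
}

The next hunk applies the matching simplification inside
nvmx_vcpu_initialise() itself.
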
if ( !nvcpu->nv_n2vmcx )
{
gdprintk(XENLOG_ERR, "nest: allocation for shadow vmcs failed\n");
- goto out;
+ return -ENOMEM;
}
/* non-root VMREAD/VMWRITE bitmap. */
if ( !vmread_bitmap )
{
gdprintk(XENLOG_ERR, "nest: allocation for vmread bitmap failed\n");
- goto out1;
+ return -ENOMEM;
}
v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
if ( !vmwrite_bitmap )
{
gdprintk(XENLOG_ERR, "nest: allocation for vmwrite bitmap failed\n");
- goto out2;
+ return -ENOMEM;
}
v->arch.hvm_vmx.vmwrite_bitmap = vmwrite_bitmap;
nvmx->msrbitmap = NULL;
INIT_LIST_HEAD(&nvmx->launched_list);
return 0;
-out2:
- free_domheap_page(v->arch.hvm_vmx.vmread_bitmap);
-out1:
- free_xenheap_page(nvcpu->nv_n2vmcx);
-out:
- return -ENOMEM;
}
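
With the parameter path above guaranteed to invoke the destroy hook on
failure, nvmx_vcpu_initialise() no longer needs local unwinding: each
allocation failure returns -ENOMEM directly, and whatever had already
been allocated is reclaimed by nvmx_vcpu_destroy() in the next hunk.
That is why the out/out1/out2 labels and their individual frees can
simply go.

A hedged sketch of that division of labour, using illustrative names
(struct ctx, ctx_init() and ctx_destroy() are not Xen functions):

#include <stdlib.h>

struct ctx { void *a, *b; };

static int ctx_init(struct ctx *c)
{
    c->a = malloc(64);
    if ( !c->a )
        return -1;           /* no local unwinding here ... */
    c->b = malloc(64);
    if ( !c->b )
        return -1;           /* ... destroy reclaims c->a */
    return 0;
}

static void ctx_destroy(struct ctx *c)
{
    free(c->a);              /* free(NULL) is a no-op */
    c->a = NULL;
    free(c->b);
    c->b = NULL;
}

int main(void)
{
    struct ctx c = { NULL, NULL };
    int rc = ctx_init(&c);

    /* The caller, not the failing init, performs the cleanup. */
    ctx_destroy(&c);
    return rc ? 1 : 0;
}

The one obligation this places on the destroy side is that it must be
safe against a context in any state, including a never- or
half-initialised one; the following hunk makes that hold.
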
void nvmx_vcpu_destroy(struct vcpu *v)
[...]
nvcpu->nv_n2vmcx = NULL;
}
- list_for_each_entry_safe(item, n, &nvmx->launched_list, node)
- {
- list_del(&item->node);
- xfree(item);
- }
+ /* Must also cope with nvmx_vcpu_initialise() not having got called. */
+ if ( nvmx->launched_list.next )
+ list_for_each_entry_safe(item, n, &nvmx->launched_list, node)
+ {
+ list_del(&item->node);
+ xfree(item);
+ }
if ( v->arch.hvm_vmx.vmread_bitmap )
+ {
free_domheap_page(v->arch.hvm_vmx.vmread_bitmap);
+ v->arch.hvm_vmx.vmread_bitmap = NULL;
+ }
if ( v->arch.hvm_vmx.vmwrite_bitmap )
+ {
free_domheap_page(v->arch.hvm_vmx.vmwrite_bitmap);
+ v->arch.hvm_vmx.vmwrite_bitmap = NULL;
+ }
}
void nvmx_domain_relinquish_resources(struct domain *d)
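
Two defensive properties are added to nvmx_vcpu_destroy() above.
First, if initialisation never ran (or bailed out before reaching
INIT_LIST_HEAD()), launched_list.next is still NULL in the
zero-initialised vCPU state, so the list walk has to be skipped rather
than dereference a NULL pointer. Second, the bitmap pointers are
cleared after freeing, so a repeated destroy, e.g. once from the
failure path and once more at normal teardown, cannot free anything
twice. A simplified sketch of both properties; a bare next pointer
stands in for Xen's embedded list head, and struct nstate and
nstate_destroy() are illustrative names:

#include <stdlib.h>

struct list_node { struct list_node *next; };

struct nstate {
    struct list_node *launched;  /* NULL until lazily initialised */
    void *bitmap;
};

static void nstate_destroy(struct nstate *s)
{
    /* Cope with initialise never having run: nothing to walk. */
    while ( s->launched )
    {
        struct list_node *n = s->launched;

        s->launched = n->next;
        free(n);
    }
    if ( s->bitmap )
    {
        free(s->bitmap);
        s->bitmap = NULL;    /* a second destroy now finds NULL */
    }
}

int main(void)
{
    struct nstate s = { NULL, malloc(64) };

    nstate_destroy(&s);      /* frees and clears */
    nstate_destroy(&s);      /* harmless repeat */
    return 0;
}
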