void hvm_domain_relinquish_resources(struct domain *d)
{
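+ /* Give the nested-HVM code a chance to free its per-domain resources. */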
+ if ( hvm_funcs.nhvm_domain_relinquish_resources )
+     hvm_funcs.nhvm_domain_relinquish_resources(d);
+
hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
.nhvm_vcpu_asid = nvmx_vcpu_asid,
.nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
.nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
- .nhvm_intr_blocked = nvmx_intr_blocked
+ .nhvm_intr_blocked = nvmx_intr_blocked,
+ .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources
};
struct hvm_function_table * __init start_vmx(void)
{
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
- nvmx_purge_vvmcs(v);
+ /*
+  * When destroying the vcpu, it may still be running on behalf of the L2
+  * guest, so we need to switch the VMCS pointer back to the L1 VMCS in
+  * order to avoid a double free of the L2 VMCS and a possible memory leak
+  * of the L1 VMCS page.
+  */
+ if ( nvcpu->nv_n1vmcx )
+     v->arch.hvm_vmx.vmcs = nvcpu->nv_n1vmcx;
+
if ( nvcpu->nv_n2vmcx ) {
    __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
    free_xenheap_page(nvcpu->nv_n2vmcx);
}
}
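+/*
+ * Purge each vcpu's virtual VMCS state when the domain relinquishes its
+ * resources; invoked via the nhvm_domain_relinquish_resources hook.
+ */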
+void nvmx_domain_relinquish_resources(struct domain *d)
+{
+ struct vcpu *v;
+
+ for_each_vcpu ( d, v )
+     nvmx_purge_vvmcs(v);
+}
+
int nvmx_vcpu_reset(struct vcpu *v)
{
    return 0;
bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
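+ /* Optional hook: release nested-HVM per-domain state at teardown. */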
+ void (*nhvm_domain_relinquish_resources)(struct domain *d);
};
extern struct hvm_function_table hvm_funcs;
enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
int nvmx_intercepts_exception(struct vcpu *v,
                              unsigned int trap, int error_code);
+void nvmx_domain_relinquish_resources(struct domain *d);
int nvmx_handle_vmxon(struct cpu_user_regs *regs);
int nvmx_handle_vmxoff(struct cpu_user_regs *regs);