x86/nhvm: properly clean up after failure to set up all vCPU-s
authorJan Beulich <jbeulich@suse.com>
Fri, 22 Feb 2013 10:21:38 +0000 (11:21 +0100)
committerJan Beulich <jbeulich@suse.com>
Fri, 22 Feb 2013 10:21:38 +0000 (11:21 +0100)
Otherwise we may leak memory when setting up nHVM fails halfway.

This implies that the individual destroy functions will have to remain
capable (in the VMX case they first need to be made so, following
26486:7648ef657fe7 and 26489:83a3fa9c8434) of being called for a vCPU
that the corresponding init function was never run on.

While at it, also remove a redundant check from the corresponding
parameter validation code.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
Tested-by: Olaf Hering <olaf@aepfle.de>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/nestedhvm.c
xen/arch/x86/hvm/vmx/vvmx.c

index febbffb9821c1f7e3795988b9b1a6fdd24581ab1..ea7adf67805828e287012db312c3855f001d9d98 100644 (file)
@@ -3918,18 +3918,20 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                 }
                 if ( a.value > 1 )
                     rc = -EINVAL;
-                if ( !is_hvm_domain(d) )
-                    rc = -EINVAL;
                 /* Remove the check below once we have
                  * shadow-on-shadow.
                  */
                 if ( cpu_has_svm && !paging_mode_hap(d) && a.value )
                     rc = -EINVAL;
                 /* Set up NHVM state for any vcpus that are already up */
-                if ( !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+                if ( a.value &&
+                     !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
                     for_each_vcpu(d, v)
                         if ( rc == 0 )
                             rc = nestedhvm_vcpu_initialise(v);
+                if ( !a.value || rc )
+                    for_each_vcpu(d, v)
+                        nestedhvm_vcpu_destroy(v);
                 break;
             case HVM_PARAM_BUFIOREQ_EVTCHN:
                 rc = -EINVAL;
index b3cf07dda3750614dae351ea27a0ff24718efe40..964f58f1a5fe1ccf9f3c451354a1e88eaaee47e3 100644 (file)
@@ -87,7 +87,7 @@ nestedhvm_vcpu_initialise(struct vcpu *v)
 void
 nestedhvm_vcpu_destroy(struct vcpu *v)
 {
-    if ( nestedhvm_enabled(v->domain) && hvm_funcs.nhvm_vcpu_destroy )
+    if ( hvm_funcs.nhvm_vcpu_destroy )
         hvm_funcs.nhvm_vcpu_destroy(v);
 }
 
index 4f3f94d3c7eaecaee9317b8ef05d68f3a09dd754..bb7688ff6c4db78985d81f662a9561db96b89414 100644 (file)
@@ -62,7 +62,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     if ( !nvcpu->nv_n2vmcx )
     {
         gdprintk(XENLOG_ERR, "nest: allocation for shadow vmcs failed\n");
-       goto out;
+        return -ENOMEM;
     }
 
     /* non-root VMREAD/VMWRITE bitmap. */
@@ -75,7 +75,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         if ( !vmread_bitmap )
         {
             gdprintk(XENLOG_ERR, "nest: allocation for vmread bitmap failed\n");
-            goto out1;
+            return -ENOMEM;
         }
         v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
 
@@ -83,7 +83,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         if ( !vmwrite_bitmap )
         {
             gdprintk(XENLOG_ERR, "nest: allocation for vmwrite bitmap failed\n");
-            goto out2;
+            return -ENOMEM;
         }
         v->arch.hvm_vmx.vmwrite_bitmap = vmwrite_bitmap;
 
@@ -118,12 +118,6 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     nvmx->msrbitmap = NULL;
     INIT_LIST_HEAD(&nvmx->launched_list);
     return 0;
-out2:
-    free_domheap_page(v->arch.hvm_vmx.vmread_bitmap);
-out1:
-    free_xenheap_page(nvcpu->nv_n2vmcx);
-out:
-    return -ENOMEM;
 }
  
 void nvmx_vcpu_destroy(struct vcpu *v)
@@ -147,16 +141,24 @@ void nvmx_vcpu_destroy(struct vcpu *v)
         nvcpu->nv_n2vmcx = NULL;
     }
 
-    list_for_each_entry_safe(item, n, &nvmx->launched_list, node)
-    {
-        list_del(&item->node);
-        xfree(item);
-    }
+    /* Must also cope with nvmx_vcpu_initialise() not having got called. */
+    if ( nvmx->launched_list.next )
+        list_for_each_entry_safe(item, n, &nvmx->launched_list, node)
+        {
+            list_del(&item->node);
+            xfree(item);
+        }
 
     if ( v->arch.hvm_vmx.vmread_bitmap )
+    {
         free_domheap_page(v->arch.hvm_vmx.vmread_bitmap);
+        v->arch.hvm_vmx.vmread_bitmap = NULL;
+    }
     if ( v->arch.hvm_vmx.vmwrite_bitmap )
+    {
         free_domheap_page(v->arch.hvm_vmx.vmwrite_bitmap);
+        v->arch.hvm_vmx.vmwrite_bitmap = NULL;
+    }
 }
  
 void nvmx_domain_relinquish_resources(struct domain *d)