adjust number of domains in cpupools when destroying domain
author     Juergen Gross <jgross@suse.com>
           Wed, 12 Nov 2014 11:39:58 +0000 (12:39 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Wed, 12 Nov 2014 11:39:58 +0000 (12:39 +0100)
Commit bac6334b51d9bcfe57ecf4a4cb5288348fcf044a (move domain to
cpupool0 before destroying it) introduced an error in the accounting
of cpupools regarding the number of domains. The number of domains
is not adjusted when a domain is moved to cpupool0 in domain_kill().

Correct this by introducing a cpupool function that performs the move
(including the n_dom accounting) instead of open coding it via
sched_move_domain().
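
As a rough, self-contained sketch (not the Xen code itself; the names
move_domain, main and the stubbed sched_move_domain below are
illustrative only), the accounting rule the new helper centralizes
looks like this:

    /* Minimal model: count a domain in its new pool only if the move
     * succeeds, and restore the old pool's count if it fails. */
    #include <stdio.h>

    struct cpupool {
        int n_dom;                   /* number of domains in this pool */
    };

    struct domain {
        struct cpupool *cpupool;     /* pool the domain currently lives in */
    };

    /* Stand-in for the scheduler move; returns 0 on success. */
    static int sched_move_domain(struct domain *d, struct cpupool *c)
    {
        d->cpupool = c;
        return 0;
    }

    /* Mirrors the logic of the new cpupool_move_domain_locked() helper. */
    static int move_domain(struct domain *d, struct cpupool *c)
    {
        int ret;

        d->cpupool->n_dom--;         /* tentatively leave the old pool */
        ret = sched_move_domain(d, c);
        if ( ret )
            d->cpupool->n_dom++;     /* move failed: restore old count */
        else
            c->n_dom++;              /* move succeeded: count in new pool */

        return ret;
    }

    int main(void)
    {
        struct cpupool pool0 = { .n_dom = 0 }, pool1 = { .n_dom = 1 };
        struct domain dom = { .cpupool = &pool1 };

        move_domain(&dom, &pool0);   /* e.g. a dying domain moved to pool0 */
        printf("pool0.n_dom=%d pool1.n_dom=%d\n", pool0.n_dom, pool1.n_dom);
        return 0;
    }

Before this fix, domain_kill() called sched_move_domain() directly and
skipped the n_dom updates, so the source pool's count stayed too high
and cpupool0's stayed too low.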

Reported-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Tested-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Reviewed-by: Andrew Cooper <Andrew.Cooper3@citrix.com>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
xen/common/cpupool.c
xen/common/domain.c
xen/include/xen/sched.h

diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 73249d3a4e7b6e52f2406af8a45f59eac862c498..a758a8bb9b62e0adb017836bd3da16f4125ebc15 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -224,6 +224,35 @@ static int cpupool_destroy(struct cpupool *c)
     return 0;
 }
 
+/*
+ * Move domain to another cpupool
+ */
+static int cpupool_move_domain_locked(struct domain *d, struct cpupool *c)
+{
+    int ret;
+
+    d->cpupool->n_dom--;
+    ret = sched_move_domain(d, c);
+    if ( ret )
+        d->cpupool->n_dom++;
+    else
+        c->n_dom++;
+
+    return ret;
+}
+int cpupool_move_domain(struct domain *d, struct cpupool *c)
+{
+    int ret;
+
+    spin_lock(&cpupool_lock);
+
+    ret = cpupool_move_domain_locked(d, c);
+
+    spin_unlock(&cpupool_lock);
+
+    return ret;
+}
+
 /*
  * assign a specific cpu to a cpupool
  * cpupool_lock must be held
@@ -338,14 +367,9 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
                 ret = -EBUSY;
                 break;
             }
-            c->n_dom--;
-            ret = sched_move_domain(d, cpupool0);
+            ret = cpupool_move_domain_locked(d, cpupool0);
             if ( ret )
-            {
-                c->n_dom++;
                 break;
-            }
-            cpupool0->n_dom++;
         }
         rcu_read_unlock(&domlist_read_lock);
         if ( ret )
@@ -613,16 +637,11 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
                         d->domain_id, op->cpupool_id);
         ret = -ENOENT;
         spin_lock(&cpupool_lock);
+
         c = cpupool_find_by_id(op->cpupool_id);
         if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
-        {
-            d->cpupool->n_dom--;
-            ret = sched_move_domain(d, c);
-            if ( ret )
-                d->cpupool->n_dom++;
-            else
-                c->n_dom++;
-        }
+            ret = cpupool_move_domain_locked(d, c);
+
         spin_unlock(&cpupool_lock);
         cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d ret %d\n",
                         d->domain_id, op->cpupool_id, ret);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index a3f51ec407f7b3f419ecf64d3982ea66855ddd69..4a62c1d1c0bb457db46077840f921e1dc8ff2494 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -621,7 +621,7 @@ int domain_kill(struct domain *d)
                 rc = -EAGAIN;
             break;
         }
-        if ( sched_move_domain(d, cpupool0) )
+        if ( cpupool_move_domain(d, cpupool0) )
             return -EAGAIN;
         for_each_vcpu ( d, v )
             unmap_vcpu_info(v);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index c5157e6396de56c802552662b868184ad08f6b14..46fc6e3eff5bbb3242696f8df3093e3a36d3b55c 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -871,6 +871,7 @@ struct cpupool *cpupool_get_by_id(int poolid);
 void cpupool_put(struct cpupool *pool);
 int cpupool_add_domain(struct domain *d, int poolid);
 void cpupool_rm_domain(struct domain *d);
+int cpupool_move_domain(struct domain *d, struct cpupool *c);
 int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
 void schedule_dump(struct cpupool *c);
 extern void dump_runq(unsigned char key);