cpumask <=> xenctl_cpumap: allocate CPU masks and byte maps dynamically
authorJan Beulich <jbeulich@suse.com>
Fri, 21 Oct 2011 07:45:24 +0000 (09:45 +0200)
committerJan Beulich <jbeulich@suse.com>
Fri, 21 Oct 2011 07:45:24 +0000 (09:45 +0200)
Generally there was a NR_CPUS-bits wide array in these functions and
another (through a cpumask_t) on their callers' stacks, which may get
a little large for big NR_CPUS. As the functions can fail anyway, do
the allocation in there.

For the x86/MCA case this requires a little code restructuring: By using
different CPU mask accessors it was possible to avoid allocating a mask
in the broadcast case. Also, this was the only user that failed to
check the return value of the conversion function (which could have led
to undefined behavior).

Also constify the input parameters of the two functions.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
xen/arch/x86/cpu/mcheck/mce.c
xen/arch/x86/platform_hypercall.c
xen/common/domctl.c
xen/common/trace.c
xen/include/xen/cpumask.h

index 4474358e2ab3db14b4624ef61566ba8f00edb91d..6af93c0a501e33a719a7a5ace43da5748c8ee3e9 100644 (file)
@@ -1531,25 +1531,28 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u_xen_mc)
 
     case XEN_MC_inject_v2:
     {
-        cpumask_t cpumap;
+        const cpumask_t *cpumap;
+        cpumask_var_t cmv;
 
         if (nr_mce_banks == 0)
             return x86_mcerr("do_mca #MC", -ENODEV);
 
         if ( op->u.mc_inject_v2.flags & XEN_MC_INJECT_CPU_BROADCAST )
-            cpumask_copy(&cpumap, &cpu_online_map);
+            cpumap = &cpu_online_map;
         else
         {
-            int gcw;
-
-            xenctl_cpumap_to_cpumask(&cpumap,
-                                     &op->u.mc_inject_v2.cpumap);
-            gcw = cpumask_weight(&cpumap);
-            cpumask_and(&cpumap, &cpu_online_map, &cpumap);
-
-            if ( cpumask_empty(&cpumap) )
-                return x86_mcerr("No online CPU passed\n", -EINVAL);
-            else if ( gcw != cpumask_weight(&cpumap) )
+            ret = xenctl_cpumap_to_cpumask(&cmv,
+                                           &op->u.mc_inject_v2.cpumap);
+            if ( ret )
+                break;
+            cpumap = cmv;
+            if ( !cpumask_intersects(cpumap, &cpu_online_map) )
+            {
+                free_cpumask_var(cmv);
+                ret = x86_mcerr("No online CPU passed\n", -EINVAL);
+                break;
+            }
+            if ( !cpumask_subset(cpumap, &cpu_online_map) )
                 dprintk(XENLOG_INFO,
                         "Not all required CPUs are online\n");
         }
@@ -1558,19 +1561,25 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u_xen_mc)
         {
         case XEN_MC_INJECT_TYPE_MCE:
             if ( mce_broadcast &&
-                 !cpumask_equal(&cpumap, &cpu_online_map) )
+                 !cpumask_equal(cpumap, &cpu_online_map) )
                 printk("Not trigger MCE on all CPUs, may HANG!\n");
-            on_selected_cpus(&cpumap, x86_mc_mceinject, NULL, 1);
+            on_selected_cpus(cpumap, x86_mc_mceinject, NULL, 1);
             break;
         case XEN_MC_INJECT_TYPE_CMCI:
             if ( !cmci_support )
-                return x86_mcerr(
+                ret = x86_mcerr(
                     "No CMCI supported in platform\n", -EINVAL);
-            on_selected_cpus(&cpumap, x86_cmci_inject, NULL, 1);
+            else
+                on_selected_cpus(cpumap, x86_cmci_inject, NULL, 1);
             break;
         default:
-            return x86_mcerr("Wrong mca type\n", -EINVAL);
+            ret = x86_mcerr("Wrong mca type\n", -EINVAL);
+            break;
         }
+
+        if (cpumap != &cpu_online_map)
+            free_cpumask_var(cmv);
+
         break;
     }
 
index 0269e507c9368ece1bfcd3c4437fe40d53824249..c1ad9ef6365fb2ad58dadcb3eb44356ab8c841e0 100644 (file)
@@ -346,7 +346,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
         uint32_t cpu;
         uint64_t idletime, now = NOW();
         struct xenctl_cpumap ctlmap;
-        cpumask_t cpumap;
+        cpumask_var_t cpumap;
         XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
         XEN_GUEST_HANDLE(uint64) idletimes;
 
@@ -366,22 +366,26 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
             goto out;
         guest_from_compat_handle(idletimes, op->u.getidletime.idletime);
 
-        for_each_cpu_mask ( cpu, cpumap )
+        for_each_cpu_mask ( cpu, *cpumap )
         {
             if ( idle_vcpu[cpu] == NULL )
-                cpu_clear(cpu, cpumap);
+                cpumask_clear_cpu(cpu, cpumap);
             idletime = get_cpu_idle_time(cpu);
 
-            ret = -EFAULT;
             if ( copy_to_guest_offset(idletimes, cpu, &idletime, 1) )
-                goto out;
+            {
+                ret = -EFAULT;
+                break;
+            }
         }
 
         op->u.getidletime.now = now;
-        if ( (ret = cpumask_to_xenctl_cpumap(&ctlmap, &cpumap)) != 0 )
-            goto out;
+        if ( ret == 0 )
+            ret = cpumask_to_xenctl_cpumap(&ctlmap, cpumap);
+        free_cpumask_var(cpumap);
 
-        ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
+        if ( ret == 0 && copy_to_guest(u_xenpf_op, op, 1) )
+            ret = -EFAULT;
     }
     break;
 
index c524659d4fd80e3ec20820066319d684230f2c78..74664f4ebc3ddca0e1093d6e30a4f7145ade8783 100644 (file)
 static DEFINE_SPINLOCK(domctl_lock);
 
 int cpumask_to_xenctl_cpumap(
-    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
+    struct xenctl_cpumap *xenctl_cpumap, const cpumask_t *cpumask)
 {
     unsigned int guest_bytes, copy_bytes, i;
     uint8_t zero = 0;
-    uint8_t bytemap[(NR_CPUS + 7) / 8];
+    int err = 0;
+    uint8_t *bytemap = xmalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
+
+    if ( !bytemap )
+        return -ENOMEM;
 
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
     copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
@@ -43,37 +47,48 @@ int cpumask_to_xenctl_cpumap(
 
     if ( copy_bytes != 0 )
         if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
-            return -EFAULT;
+            err = -EFAULT;
 
-    for ( i = copy_bytes; i < guest_bytes; i++ )
+    for ( i = copy_bytes; !err && i < guest_bytes; i++ )
         if ( copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1) )
-            return -EFAULT;
+            err = -EFAULT;
 
-    return 0;
+    xfree(bytemap);
+
+    return err;
 }
 
 int xenctl_cpumap_to_cpumask(
-    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
+    cpumask_var_t *cpumask, const struct xenctl_cpumap *xenctl_cpumap)
 {
     unsigned int guest_bytes, copy_bytes;
-    uint8_t bytemap[(NR_CPUS + 7) / 8];
+    int err = 0;
+    uint8_t *bytemap = xzalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
+
+    if ( !bytemap )
+        return -ENOMEM;
 
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
     copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
 
-    memset(bytemap, 0, sizeof(bytemap));
-
     if ( copy_bytes != 0 )
     {
         if ( copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes) )
-            return -EFAULT;
+            err = -EFAULT;
-        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
+        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes == copy_bytes) )
             bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
     }
 
-    bitmap_byte_to_long(cpumask_bits(cpumask), bytemap, nr_cpu_ids);
+    if ( err )
+        /* nothing */;
+    else if ( alloc_cpumask_var(cpumask) )
+        bitmap_byte_to_long(cpumask_bits(*cpumask), bytemap, nr_cpu_ids);
+    else
+        err = -ENOMEM;
 
-    return 0;
+    xfree(bytemap);
+
+    return err;
 }
 
 static inline int is_free_domid(domid_t dom)
@@ -558,7 +573,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
         domid_t dom = op->domain;
         struct domain *d = rcu_lock_domain_by_id(dom);
         struct vcpu *v;
-        cpumask_t new_affinity;
 
         ret = -ESRCH;
         if ( d == NULL )
@@ -578,10 +592,15 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
 
         if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
         {
+            cpumask_var_t new_affinity;
+
             ret = xenctl_cpumap_to_cpumask(
                 &new_affinity, &op->u.vcpuaffinity.cpumap);
             if ( !ret )
-                ret = vcpu_set_affinity(v, &new_affinity);
+            {
+                ret = vcpu_set_affinity(v, new_affinity);
+                free_cpumask_var(new_affinity);
+            }
         }
         else
         {
index 2253e3114a592712eba3daa069931e13218ccf5c..746bf4b75050c80b7ccbb9f1fae4270f69097a81 100644 (file)
@@ -378,7 +378,16 @@ int tb_control(xen_sysctl_tbuf_op_t *tbc)
         tbc->size = t_info_pages * PAGE_SIZE;
         break;
     case XEN_SYSCTL_TBUFOP_set_cpu_mask:
-        rc = xenctl_cpumap_to_cpumask(&tb_cpu_mask, &tbc->cpu_mask);
+    {
+        cpumask_var_t mask;
+
+        rc = xenctl_cpumap_to_cpumask(&mask, &tbc->cpu_mask);
+        if ( !rc )
+        {
+            cpumask_copy(&tb_cpu_mask, mask);
+            free_cpumask_var(mask);
+        }
+    }
         break;
     case XEN_SYSCTL_TBUFOP_set_evt_mask:
         tb_event_mask = tbc->evt_mask;
index 088b8517bc3dcef9f33df375a16347032b1f975c..d9bef4da40555c8de3bcef6502e4b4e0d032af53 100644 (file)
@@ -488,9 +488,7 @@ extern cpumask_t cpu_present_map;
 
 /* Copy to/from cpumap provided by control tools. */
 struct xenctl_cpumap;
-int cpumask_to_xenctl_cpumap(
-    struct xenctl_cpumap *enctl_cpumap, cpumask_t *cpumask);
-int xenctl_cpumap_to_cpumask(
-    cpumask_t *cpumask, struct xenctl_cpumap *enctl_cpumap);
+int cpumask_to_xenctl_cpumap(struct xenctl_cpumap *, const cpumask_t *);
+int xenctl_cpumap_to_cpumask(cpumask_var_t *, const struct xenctl_cpumap *);
 
 #endif /* __XEN_CPUMASK_H */