case XEN_MC_inject_v2:
{
- cpumask_t cpumap;
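+ /* cpumap points either at cpu_online_map or at the mask allocated into cmv. */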
+ const cpumask_t *cpumap;
+ cpumask_var_t cmv;
if (nr_mce_banks == 0)
return x86_mcerr("do_mca #MC", -ENODEV);
if ( op->u.mc_inject_v2.flags & XEN_MC_INJECT_CPU_BROADCAST )
- cpumask_copy(&cpumap, &cpu_online_map);
+ cpumap = &cpu_online_map;
else
{
- int gcw;
-
- xenctl_cpumap_to_cpumask(&cpumap,
- &op->u.mc_inject_v2.cpumap);
- gcw = cpumask_weight(&cpumap);
- cpumask_and(&cpumap, &cpu_online_map, &cpumap);
-
- if ( cpumask_empty(&cpumap) )
- return x86_mcerr("No online CPU passed\n", -EINVAL);
- else if ( gcw != cpumask_weight(&cpumap) )
+ ret = xenctl_cpumap_to_cpumask(&cmv,
+ &op->u.mc_inject_v2.cpumap);
+ if ( ret )
+ break;
+ cpumap = cmv;
+ if ( !cpumask_intersects(cpumap, &cpu_online_map) )
+ {
+ free_cpumask_var(cmv);
+ ret = x86_mcerr("No online CPU passed\n", -EINVAL);
+ break;
+ }
+ if ( !cpumask_subset(cpumap, &cpu_online_map) )
dprintk(XENLOG_INFO,
"Not all required CPUs are online\n");
}
{
case XEN_MC_INJECT_TYPE_MCE:
if ( mce_broadcast &&
- !cpumask_equal(&cpumap, &cpu_online_map) )
+ !cpumask_equal(cpumap, &cpu_online_map) )
printk("Not trigger MCE on all CPUs, may HANG!\n");
- on_selected_cpus(&cpumap, x86_mc_mceinject, NULL, 1);
+ on_selected_cpus(cpumap, x86_mc_mceinject, NULL, 1);
break;
case XEN_MC_INJECT_TYPE_CMCI:
if ( !cmci_support )
- return x86_mcerr(
+ ret = x86_mcerr(
"No CMCI supported in platform\n", -EINVAL);
- on_selected_cpus(&cpumap, x86_cmci_inject, NULL, 1);
+ else
+ on_selected_cpus(cpumap, x86_cmci_inject, NULL, 1);
break;
default:
- return x86_mcerr("Wrong mca type\n", -EINVAL);
+ ret = x86_mcerr("Wrong mca type\n", -EINVAL);
+ break;
}
+
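+ /* Free the auxiliary mask only if one was allocated (non-broadcast case). */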
+ if ( cpumap != &cpu_online_map )
+ free_cpumask_var(cmv);
+
break;
}
uint32_t cpu;
uint64_t idletime, now = NOW();
struct xenctl_cpumap ctlmap;
- cpumask_t cpumap;
+ cpumask_var_t cpumap;
XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
XEN_GUEST_HANDLE(uint64) idletimes;
goto out;
guest_from_compat_handle(idletimes, op->u.getidletime.idletime);
- for_each_cpu_mask ( cpu, cpumap )
+ for_each_cpu_mask ( cpu, *cpumap )
{
if ( idle_vcpu[cpu] == NULL )
- cpu_clear(cpu, cpumap);
+ cpumask_clear_cpu(cpu, cpumap);
idletime = get_cpu_idle_time(cpu);
- ret = -EFAULT;
if ( copy_to_guest_offset(idletimes, cpu, &idletime, 1) )
- goto out;
+ {
+ ret = -EFAULT;
+ break;
+ }
}
op->u.getidletime.now = now;
- if ( (ret = cpumask_to_xenctl_cpumap(&ctlmap, &cpumap)) != 0 )
- goto out;
+ if ( ret == 0 )
+ ret = cpumask_to_xenctl_cpumap(&ctlmap, cpumap);
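+ /* Release the mask on both the success and error paths. */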
+ free_cpumask_var(cpumap);
- ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
+ if ( ret == 0 && copy_to_guest(u_xenpf_op, op, 1) )
+ ret = -EFAULT;
}
break;
static DEFINE_SPINLOCK(domctl_lock);
int cpumask_to_xenctl_cpumap(
- struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
+ struct xenctl_cpumap *xenctl_cpumap, const cpumask_t *cpumask)
{
unsigned int guest_bytes, copy_bytes, i;
uint8_t zero = 0;
- uint8_t bytemap[(NR_CPUS + 7) / 8];
+ int err = 0;
+ uint8_t *bytemap = xmalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
+
+ if ( !bytemap )
+ return -ENOMEM;
guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
copy_bytes = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
if ( copy_bytes != 0 )
if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
- return -EFAULT;
+ err = -EFAULT;
- for ( i = copy_bytes; i < guest_bytes; i++ )
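+ /* Zero-fill any guest bytes beyond the hypervisor's CPU map size. */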
+ for ( i = copy_bytes; !err && i < guest_bytes; i++ )
if ( copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1) )
- return -EFAULT;
+ err = -EFAULT;
- return 0;
+ xfree(bytemap);
+
+ return err;
}
int xenctl_cpumap_to_cpumask(
- cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
+ cpumask_var_t *cpumask, const struct xenctl_cpumap *xenctl_cpumap)
{
unsigned int guest_bytes, copy_bytes;
- uint8_t bytemap[(NR_CPUS + 7) / 8];
+ int err = 0;
+ uint8_t *bytemap = xzalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
+
+ if ( !bytemap )
+ return -ENOMEM;
guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
copy_bytes = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
- memset(bytemap, 0, sizeof(bytemap));
-
if ( copy_bytes != 0 )
{
if ( copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes) )
- return -EFAULT;
+ err = -EFAULT;
- if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
+ if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes == copy_bytes) )
bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
}
- bitmap_byte_to_long(cpumask_bits(cpumask), bytemap, nr_cpu_ids);
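+ /* Allocate the output mask only once the guest bitmap was read successfully. */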
+ if ( err )
+ /* nothing */;
+ else if ( alloc_cpumask_var(cpumask) )
+ bitmap_byte_to_long(cpumask_bits(*cpumask), bytemap, nr_cpu_ids);
+ else
+ err = -ENOMEM;
- return 0;
+ xfree(bytemap);
+
+ return err;
}
static inline int is_free_domid(domid_t dom)
domid_t dom = op->domain;
struct domain *d = rcu_lock_domain_by_id(dom);
struct vcpu *v;
- cpumask_t new_affinity;
ret = -ESRCH;
if ( d == NULL )
if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
{
+ cpumask_var_t new_affinity;
+
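+ /* xenctl_cpumap_to_cpumask() allocates new_affinity; it must be freed below. */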
ret = xenctl_cpumap_to_cpumask(
&new_affinity, &op->u.vcpuaffinity.cpumap);
if ( !ret )
- ret = vcpu_set_affinity(v, &new_affinity);
+ {
+ ret = vcpu_set_affinity(v, new_affinity);
+ free_cpumask_var(new_affinity);
+ }
}
else
{
tbc->size = t_info_pages * PAGE_SIZE;
break;
case XEN_SYSCTL_TBUFOP_set_cpu_mask:
- rc = xenctl_cpumap_to_cpumask(&tb_cpu_mask, &tbc->cpu_mask);
+ {
+ cpumask_var_t mask;
+
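+ /* The conversion helper allocates the mask; copy it into tb_cpu_mask and free it. */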
+ rc = xenctl_cpumap_to_cpumask(&mask, &tbc->cpu_mask);
+ if ( !rc )
+ {
+ cpumask_copy(&tb_cpu_mask, mask);
+ free_cpumask_var(mask);
+ }
+ }
break;
case XEN_SYSCTL_TBUFOP_set_evt_mask:
tb_event_mask = tbc->evt_mask;
/* Copy to/from cpumap provided by control tools. */
struct xenctl_cpumap;
-int cpumask_to_xenctl_cpumap(
- struct xenctl_cpumap *enctl_cpumap, cpumask_t *cpumask);
-int xenctl_cpumap_to_cpumask(
- cpumask_t *cpumask, struct xenctl_cpumap *enctl_cpumap);
+int cpumask_to_xenctl_cpumap(struct xenctl_cpumap *, const cpumask_t *);
+int xenctl_cpumap_to_cpumask(cpumask_var_t *, const struct xenctl_cpumap *);
#endif /* __XEN_CPUMASK_H */