From: Jan Beulich
Date: Fri, 21 Oct 2011 07:17:42 +0000 (+0200)
Subject: introduce and use nr_cpu_ids and nr_cpumask_bits
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=20307695e5c22a58b4cce35fba5333375da5a7fa;p=xen.git

introduce and use nr_cpu_ids and nr_cpumask_bits

The former is the runtime equivalent of NR_CPUS (and users of NR_CPUS,
where necessary, get adjusted accordingly), while the latter is used
solely for determining the allocation size when dynamically allocating
CPU masks (done later in this series).

Adjust the accessors to bound their bitmap operations by one of the two:
nr_cpu_ids where the exact CPU count matters, and nr_cpumask_bits where
touching the bits in the gap between nr_cpu_ids and nr_cpumask_bits is
benign and the rounded bound is cheaper (see the sketch after the "---"
separator below).

Signed-off-by: Jan Beulich
Acked-by: Keir Fraser
---
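Editor's illustration of the bounding policy described above (not part of
the patch; the xen_* names, the fixed 256-CPU maximum and the example
values are hypothetical stand-ins): search-type accessors stop at
nr_cpu_ids so that bits in the gap are never reported, while whole-mask
writes may use the word-rounded nr_cpumask_bits because clobbering the gap
bits is harmless.

    /* Editor's sketch: hypothetical, self-contained model of the two bounds. */
    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    #define XEN_NR_CPUS       256   /* compile-time maximum (hypothetical) */
    #define XBITS_PER_LONG    ((unsigned int)(CHAR_BIT * sizeof(unsigned long)))
    #define XBITS_TO_LONGS(n) (((n) + XBITS_PER_LONG - 1) / XBITS_PER_LONG)

    typedef struct { unsigned long bits[XBITS_TO_LONGS(XEN_NR_CPUS)]; } xen_cpumask_t;

    static unsigned int xen_nr_cpu_ids = 6;  /* what set_nr_cpu_ids() would derive at boot */
    static unsigned int xen_nr_cpumask_bits; /* nr_cpu_ids rounded up to whole longs */

    /* Search: bounded by nr_cpu_ids so gap bits are never reported. */
    static unsigned int xen_cpumask_first(const xen_cpumask_t *m)
    {
        unsigned int cpu;

        for ( cpu = 0; cpu < xen_nr_cpu_ids; cpu++ )
            if ( m->bits[cpu / XBITS_PER_LONG] & (1UL << (cpu % XBITS_PER_LONG)) )
                return cpu;
        return xen_nr_cpu_ids;               /* "no CPU" sentinel, as in the patch */
    }

    /* Whole-mask write: clearing the gap bits too is benign, so the rounded bound suffices. */
    static void xen_cpumask_clear(xen_cpumask_t *m)
    {
        memset(m->bits, 0, XBITS_TO_LONGS(xen_nr_cpumask_bits) * sizeof(unsigned long));
    }

    int main(void)
    {
        xen_cpumask_t m;

        /* Same rounding as set_nr_cpu_ids() in the patch below. */
        xen_nr_cpumask_bits = (xen_nr_cpu_ids + (XBITS_PER_LONG - 1)) &
                              ~(XBITS_PER_LONG - 1);

        xen_cpumask_clear(&m);
        m.bits[0] |= 1UL << 3;
        printf("nr_cpu_ids=%u nr_cpumask_bits=%u first=%u\n",
               xen_nr_cpu_ids, xen_nr_cpumask_bits, xen_cpumask_first(&m));
        return 0;
    }

In the real header the gap only exists when nr_cpumask_bits is a runtime
variable (NR_CPUS > 4 * BITS_PER_LONG); otherwise nr_cpumask_bits collapses
back to a compile-time constant and the two bounds coincide up to rounding.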
diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index c797b8bb70..3952020956 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -900,7 +900,7 @@ int get_cpu_id(u32 acpi_id)
     if ( apic_id == BAD_APICID )
         return -1;
-    for ( i = 0; i < NR_CPUS; i++ )
+    for ( i = 0; i < nr_cpu_ids; i++ )
     {
         if ( apic_id == x86_cpu_to_apicid[i] )
             return i;
diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c b/xen/arch/x86/acpi/cpufreq/cpufreq.c
index b57c3e325f..680e7fe550 100644
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -209,7 +209,7 @@ static u32 get_cur_val(const cpumask_t *mask)
     if (!cpumask_test_cpu(cpu, mask))
         cpu = cpumask_first(mask);
-    if (cpu >= NR_CPUS || !cpu_online(cpu))
+    if (cpu >= nr_cpu_ids || !cpu_online(cpu))
         return 0;
     policy = per_cpu(cpufreq_cpu_policy, cpu);
diff --git a/xen/arch/x86/acpi/lib.c b/xen/arch/x86/acpi/lib.c
index 4315d12226..e8e69d1cb4 100644
--- a/xen/arch/x86/acpi/lib.c
+++ b/xen/arch/x86/acpi/lib.c
@@ -95,7 +95,7 @@ int arch_acpi_set_pdc_bits(u32 acpi_id, u32 *pdc, u32 mask)
     if (!(acpi_id + 1))
         c = &boot_cpu_data;
-    else if (cpu >= NR_CPUS || !cpu_online(cpu))
+    else if (cpu >= nr_cpu_ids || !cpu_online(cpu))
         return -EINVAL;
     else
         c = cpu_data + cpu;
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index d899860eb2..b1239ad3a9 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -532,7 +532,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
         printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
     } else if (c->x86_num_siblings > 1 ) {
-        if (c->x86_num_siblings > NR_CPUS) {
+        if (c->x86_num_siblings > nr_cpu_ids) {
             printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", c->x86_num_siblings);
             c->x86_num_siblings = 1;
             return;
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 5c15df2076..5318612c7b 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -1487,7 +1487,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u_xen_mc)
         mc_msrinject = &op->u.mc_msrinject;
         target = mc_msrinject->mcinj_cpunr;
-        if (target >= NR_CPUS)
+        if (target >= nr_cpu_ids)
             return x86_mcerr("do_mca inject: bad target", -EINVAL);
         if (!cpu_online(target))
@@ -1514,7 +1514,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u_xen_mc)
         mc_mceinject = &op->u.mc_mceinject;
         target = mc_mceinject->mceinj_cpunr;
-        if (target >= NR_CPUS)
+        if (target >= nr_cpu_ids)
             return x86_mcerr("do_mca #MC: bad target", -EINVAL);
         if (!cpu_online(target))
diff --git a/xen/arch/x86/microcode.c b/xen/arch/x86/microcode.c
index 31e77a6f00..12594b8c23 100644
--- a/xen/arch/x86/microcode.c
+++ b/xen/arch/x86/microcode.c
@@ -126,7 +126,7 @@ static long
 do_microcode_update(void *_info)
     info->error = error;
     info->cpu = next_cpu(info->cpu, cpu_online_map);
-    if ( info->cpu < NR_CPUS )
+    if ( info->cpu < nr_cpu_ids )
         return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);
     error = info->error;
diff --git a/xen/arch/x86/mpparse.c b/xen/arch/x86/mpparse.c
index baec32f1b1..d348fef93c 100644
--- a/xen/arch/x86/mpparse.c
+++ b/xen/arch/x86/mpparse.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -61,10 +62,31 @@ unsigned int __read_mostly boot_cpu_physical_apicid = BAD_APICID;
 /* Internal processor count */
 static unsigned int __devinitdata num_processors;
+static unsigned int __initdata disabled_cpus;
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map;
+void __init set_nr_cpu_ids(unsigned int max_cpus)
+{
+    if (!max_cpus)
+        max_cpus = num_processors + disabled_cpus;
+    if (max_cpus > NR_CPUS)
+        max_cpus = NR_CPUS;
+    else if (!max_cpus)
+        max_cpus = 1;
+    printk(XENLOG_INFO "SMP: Allowing %u CPUs (%d hotplug CPUs)\n",
+           max_cpus, max_t(int, max_cpus - num_processors, 0));
+    nr_cpu_ids = max_cpus;
+
+#ifndef nr_cpumask_bits
+    nr_cpumask_bits = (max_cpus + (BITS_PER_LONG - 1)) &
+                      ~(BITS_PER_LONG - 1);
+    printk(XENLOG_DEBUG "NR_CPUS:%u nr_cpumask_bits:%u\n",
+           NR_CPUS, nr_cpumask_bits);
+#endif
+}
+
 /*
  * Intel MP BIOS table parsing routines:
  */
@@ -90,8 +112,11 @@ static int __devinit MP_processor_info_x(struct mpc_config_processor *m,
 {
     int ver, apicid, cpu = 0;
-    if (!(m->mpc_cpuflag & CPU_ENABLED))
+    if (!(m->mpc_cpuflag & CPU_ENABLED)) {
+        if (!hotplug)
+            ++disabled_cpus;
         return -EINVAL;
+    }
     apicid = mpc_apic_id(m, apicidx);
@@ -115,9 +140,9 @@ static int __devinit MP_processor_info_x(struct mpc_config_processor *m,
     set_apicid(apicid, &phys_cpu_present_map);
-    if (num_processors >= NR_CPUS) {
-        printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-               " Processor ignored.\n", NR_CPUS);
+    if (num_processors >= nr_cpu_ids) {
+        printk(KERN_WARNING "WARNING: NR_CPUS limit of %u reached."
+               " Processor ignored.\n", nr_cpu_ids);
         return -ENOSPC;
     }
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 28b7dd429d..61b5904a2c 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -198,7 +198,7 @@ void __init numa_init_array(void)
        CPUs, as the number of CPUs is not known yet.
        We round robin the existing nodes. */
     rr = first_node(node_online_map);
-    for (i = 0; i < NR_CPUS; i++) {
+    for (i = 0; i < nr_cpu_ids; i++) {
         if (cpu_to_node[i] != NUMA_NO_NODE)
             continue;
         numa_set_node(i, rr);
@@ -280,7 +280,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
     memnodemap = _memnodemap;
     nodes_clear(node_online_map);
     node_set_online(0);
-    for (i = 0; i < NR_CPUS; i++)
+    for (i = 0; i < nr_cpu_ids; i++)
         numa_set_node(i, 0);
     node_to_cpumask[0] = cpumask_of_cpu(0);
     setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT, (u64)end_pfn << PAGE_SHIFT);
@@ -335,7 +335,7 @@ static __init int numa_setup(char *opt)
 void __init init_cpu_to_node(void)
 {
     int i, node;
-    for (i = 0; i < NR_CPUS; i++) {
+    for (i = 0; i < nr_cpu_ids; i++) {
         u32 apicid = x86_cpu_to_apicid[i];
         if (apicid == BAD_APICID)
             continue;
diff --git a/xen/arch/x86/oprofile/nmi_int.c b/xen/arch/x86/oprofile/nmi_int.c
index eba9dec64d..70930b245a 100644
--- a/xen/arch/x86/oprofile/nmi_int.c
+++ b/xen/arch/x86/oprofile/nmi_int.c
@@ -127,7 +127,7 @@ static void nmi_save_registers(void * dummy)
 static void free_msrs(void)
 {
     int i;
-    for (i = 0; i < NR_CPUS; ++i) {
+    for (i = 0; i < nr_cpu_ids; ++i) {
         xfree(cpu_msrs[i].counters);
         cpu_msrs[i].counters = NULL;
         xfree(cpu_msrs[i].controls);
diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
index d10024ae94..0269e507c9 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -442,7 +442,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
             break;
         }
-        if ( (g_info->xen_cpuid >= NR_CPUS) ||
+        if ( (g_info->xen_cpuid >= nr_cpu_ids) ||
              !cpu_present(g_info->xen_cpuid) )
         {
             g_info->flags |= XEN_PCPU_FLAGS_INVALID;
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 5880e9e8b3..8bc77b0fc8 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -51,7 +51,7 @@ static bool_t __initdata opt_nosmp;
 boolean_param("nosmp", opt_nosmp);
 /* maxcpus: maximum number of CPUs to activate. */
-static unsigned int __initdata max_cpus = NR_CPUS;
+static unsigned int __initdata max_cpus;
 integer_param("maxcpus", max_cpus);
 /* opt_watchdog: If true, run a watchdog NMI on each processor. */
@@ -230,7 +230,7 @@ static void __init normalise_cpu_order(void)
          * Among identical longest-prefix matches, pick the smallest APIC ID.
          */
         for ( j = next_cpu(i, cpu_present_map);
-              j < NR_CPUS;
+              j < nr_cpu_ids;
               j = next_cpu(j, cpu_present_map) )
         {
             diff = x86_cpu_to_apicid[j] ^ apicid;
@@ -246,9 +246,9 @@
         }
         /* If no match then there must be no CPUs remaining to consider. */
-        if ( min_cpu >= NR_CPUS )
+        if ( min_cpu >= nr_cpu_ids )
         {
-            BUG_ON(next_cpu(i, cpu_present_map) < NR_CPUS);
+            BUG_ON(next_cpu(i, cpu_present_map) < nr_cpu_ids);
             break;
         }
@@ -1203,6 +1203,17 @@ void __init __start_xen(unsigned long mbi_p)
     if ( smp_found_config )
         get_smp_config();
+    if ( opt_nosmp )
+    {
+        max_cpus = 0;
+        set_nr_cpu_ids(1);
+    }
+    else
+    {
+        set_nr_cpu_ids(max_cpus);
+        max_cpus = nr_cpu_ids;
+    }
+
 #ifdef CONFIG_X86_64
     /* Low mappings were only needed for some BIOS table parsing. */
     zap_low_mappings();
@@ -1254,9 +1265,6 @@ void __init __start_xen(unsigned long mbi_p)
     acpi_mmcfg_init();
 #endif
-    if ( opt_nosmp )
-        max_cpus = 0;
-
     iommu_setup();    /* setup iommu if available */
     smp_prepare_cpus(max_cpus);
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 5feb8a3d25..eb400f2084 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -535,7 +535,7 @@ int alloc_cpu_id(void)
     int cpu;
     cpus_complement(tmp_map, cpu_present_map);
     cpu = first_cpu(tmp_map);
-    return (cpu < NR_CPUS) ? cpu : -ENODEV;
+    return (cpu < nr_cpu_ids) ? cpu : -ENODEV;
 }
 static int do_boot_cpu(int apicid, int cpu)
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index bc857d5b29..20cfedba3c 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -447,7 +447,7 @@ int __init acpi_scan_nodes(u64 start, u64 end)
             continue;
         setup_node_bootmem(i, nodes[i].start, nodes[i].end);
     }
-    for (i = 0; i < NR_CPUS; i++) {
+    for (i = 0; i < nr_cpu_ids; i++) {
         if (cpu_to_node[i] == NUMA_NO_NODE)
             continue;
         if (!node_isset(cpu_to_node[i], nodes_parsed))
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 93f01c23f0..cd2f13cf09 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -82,7 +82,7 @@ long arch_do_sysctl(
         pi->nr_cpus = num_online_cpus();
         pi->nr_nodes = num_online_nodes();
         pi->max_node_id = MAX_NUMNODES-1;
-        pi->max_cpu_id = NR_CPUS-1;
+        pi->max_cpu_id = nr_cpu_ids - 1;
         pi->total_pages = total_pages;
         pi->free_pages = avail_domheap_pages();
         pi->scrub_pages = 0;
diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
index b9aa32d13a..cfbeb9ffd7 100644
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -248,7 +248,7 @@ static int mfn_in_guarded_stack(unsigned long mfn)
     void *p;
     int i;
-    for ( i = 0; i < NR_CPUS; i++ )
+    for ( i = 0; i < nr_cpu_ids; i++ )
     {
         if ( !stack_base[i] )
             continue;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index eecdb4d201..a8bad8f7fc 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3412,7 +3412,7 @@ static void __set_intr_gate(unsigned int n, uint32_t dpl, void *addr)
 {
     int i;
     /* Keep secondary tables in sync with IRQ updates. */
-    for ( i = 1; i < NR_CPUS; i++ )
+    for ( i = 1; i < nr_cpu_ids; i++ )
         if ( idt_tables[i] != NULL )
             _set_gate(&idt_tables[i][n], 14, dpl, addr);
     _set_gate(&idt_table[n], 14, dpl, addr);
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 396eb58ad8..c80d923bec 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -6,6 +6,12 @@
 #include
 #include
+unsigned int __read_mostly nr_cpu_ids = NR_CPUS;
+#ifndef nr_cpumask_bits
+unsigned int __read_mostly nr_cpumask_bits
+    = BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG;
+#endif
+
 /*
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
[...]
-    if ( (cpu >= NR_CPUS) || (cpu == 0) || !cpu_online(cpu) )
+    if ( (cpu >= nr_cpu_ids) || (cpu == 0) || !cpu_online(cpu) )
     {
         cpu_hotplug_done();
         return -EINVAL;
@@ -122,7 +128,7 @@ int cpu_up(unsigned int cpu)
     if ( !cpu_hotplug_begin() )
         return -EBUSY;
-    if ( (cpu >= NR_CPUS) || cpu_online(cpu) || !cpu_present(cpu) )
+    if ( (cpu >= nr_cpu_ids) || cpu_online(cpu) || !cpu_present(cpu) )
     {
         cpu_hotplug_done();
         return -EINVAL;
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index f9880ecce3..b470f621c7 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -489,7 +489,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
         if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
             cpu = first_cpu(cpupool_free_cpus);
         ret = -EINVAL;
-        if ( cpu >= NR_CPUS )
+        if ( cpu >= nr_cpu_ids )
             goto addcpu_out;
         ret = -EBUSY;
         if ( !cpu_isset(cpu, cpupool_free_cpus) )
@@ -517,7 +517,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
         cpu = op->cpu;
         if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
             cpu = last_cpu(c->cpu_valid);
-        ret = (cpu < NR_CPUS) ? cpupool_unassign_cpu(c, cpu) : -EINVAL;
+        ret = (cpu < nr_cpu_ids) ? cpupool_unassign_cpu(c, cpu) : -EINVAL;
         cpupool_put(c);
     }
     break;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 793990e7b6..d0818c5398 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1023,7 +1023,7 @@ int continue_hypercall_on_cpu(
 {
     struct migrate_info *info;
-    if ( (cpu >= NR_CPUS) || !cpu_online(cpu) )
+    if ( (cpu >= nr_cpu_ids) || !cpu_online(cpu) )
         return -EINVAL;
     info = this_cpu(continue_info);
diff --git a/xen/common/kexec.c b/xen/common/kexec.c
index bddf23c535..cd37636cf8 100644
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -296,7 +296,7 @@ static int kexec_get_cpu(xen_kexec_range_t *range)
     int nr = range->nr;
     int nr_bytes = 0;
-    if ( nr < 0 || nr >= NR_CPUS || !cpu_online(nr) )
+    if ( nr < 0 || nr >= nr_cpu_ids || !cpu_online(nr) )
         return -EINVAL;
     nr_bytes += sizeof_note("CORE", sizeof(ELF_Prstatus));
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 3d1c67eeed..db98475f93 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -98,7 +98,7 @@ void dump_execstate(struct cpu_user_regs *regs)
         return;
     cpu = cycle_cpu(cpu, dump_execstate_mask);
-    if ( cpu < NR_CPUS )
+    if ( cpu < nr_cpu_ids )
     {
         smp_send_state_dump(cpu);
         return;
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index e84c8b977e..9e29957e1a 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -374,7 +374,7 @@ csched_alloc_pdata(const struct scheduler *ops, int cpu)
     INIT_LIST_HEAD(&spc->runq);
     spc->runq_sort_last = prv->runq_sort;
-    spc->idle_bias = NR_CPUS - 1;
+    spc->idle_bias = nr_cpu_ids - 1;
     if ( per_cpu(schedule_data, cpu).sched_priv == NULL )
         per_cpu(schedule_data, cpu).sched_priv = spc;
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 089e493faf..398ccad159
100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2071,7 +2071,7 @@ csched_init(struct scheduler *ops)
     INIT_LIST_HEAD(&prv->sdom);
     /* But un-initialize all runqueues */
-    for ( i=0; i<NR_CPUS; i++ )
+    for ( i=0; i<nr_cpu_ids; i++ )
     {
         prv->runq_map[i] = -1;
         prv->rqd[i].id = -1;
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 006f44bcbb..ae93f39420 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -1201,7 +1201,7 @@ static void sedf_wake(const struct scheduler *ops, struct vcpu *d)
        routine. Try to avoid unnecessary runs but:
        Save approximation: Always switch to scheduler!*/
     ASSERT(d->processor >= 0);
-    ASSERT(d->processor < NR_CPUS);
+    ASSERT(d->processor < nr_cpu_ids);
     ASSERT(per_cpu(schedule_data, d->processor).curr);
     if ( should_switch(per_cpu(schedule_data, d->processor).curr, d, now) )
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 95150c3149..e9a8600b8d 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1353,7 +1353,7 @@ void __init scheduler_init(void)
     idle_domain = domain_create(DOMID_IDLE, 0, 0);
     BUG_ON(idle_domain == NULL);
     idle_domain->vcpu = idle_vcpu;
-    idle_domain->max_vcpus = NR_CPUS;
+    idle_domain->max_vcpus = nr_cpu_ids;
     if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
         BUG();
     if ( ops.alloc_pdata &&
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index 5cd5144151..ccfdb220ae 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -183,7 +183,7 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
         uint32_t i, nr_cpus;
         struct xen_sysctl_cpuinfo cpuinfo;
-        nr_cpus = min_t(uint32_t, op->u.getcpuinfo.max_cpus, NR_CPUS);
+        nr_cpus = min(op->u.getcpuinfo.max_cpus, nr_cpu_ids);
         ret = xsm_getcpuinfo();
         if ( ret )
diff --git a/xen/drivers/acpi/pmstat.c b/xen/drivers/acpi/pmstat.c
index 7befee8bd2..7e059623f6 100644
--- a/xen/drivers/acpi/pmstat.c
+++ b/xen/drivers/acpi/pmstat.c
@@ -53,7 +53,7 @@ int do_get_pm_info(struct xen_sysctl_get_pmstat *op)
     int ret = 0;
     const struct processor_pminfo *pmpt;
-    if ( !op || (op->cpuid >= NR_CPUS) || !cpu_online(op->cpuid) )
+    if ( !op || (op->cpuid >= nr_cpu_ids) || !cpu_online(op->cpuid) )
         return -EINVAL;
     pmpt = processor_pminfo[op->cpuid];
diff --git a/xen/include/asm-x86/setup.h b/xen/include/asm-x86/setup.h
index f7d6345eab..874b1a323e 100644
--- a/xen/include/asm-x86/setup.h
+++ b/xen/include/asm-x86/setup.h
@@ -20,6 +20,8 @@ int nsc_init_cpu(void);
 int centaur_init_cpu(void);
 int transmeta_init_cpu(void);
+void set_nr_cpu_ids(unsigned int max_cpus);
+
 void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
 void arch_init_memory(void);
 void subarch_init_memory(void);
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index 031c83752b..f770b2d7c1 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -81,26 +81,43 @@ typedef struct cpumask{ DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+extern unsigned int nr_cpu_ids;
+
+#if NR_CPUS > 4 * BITS_PER_LONG && !defined(__ia64__)
+/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
+ * not all bits may be allocated. */
+extern unsigned int nr_cpumask_bits;
+#else
+# define nr_cpumask_bits (BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG)
+#endif
+
+/* verify cpu argument to cpumask_* operators */
+static inline unsigned int cpumask_check(unsigned int cpu)
+{
+    ASSERT(cpu < nr_cpu_ids);
+    return cpu;
+}
+
 #define cpu_set(cpu, dst) cpumask_set_cpu(cpu, &(dst))
 static inline void cpumask_set_cpu(int cpu, volatile cpumask_t *dstp)
 {
-    set_bit(cpu, dstp->bits);
+    set_bit(cpumask_check(cpu), dstp->bits);
 }
 #define cpu_clear(cpu, dst) cpumask_clear_cpu(cpu, &(dst))
 static inline void cpumask_clear_cpu(int cpu, volatile cpumask_t *dstp)
 {
-    clear_bit(cpu, dstp->bits);
+    clear_bit(cpumask_check(cpu), dstp->bits);
 }
-#define cpumask_setall(dst) __cpus_setall(dst, NR_CPUS)
+#define cpumask_setall(dst) __cpus_setall(dst, nr_cpumask_bits)
 #define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
 static inline void __cpus_setall(cpumask_t *dstp, int nbits)
 {
     bitmap_fill(dstp->bits, nbits);
 }
-#define cpumask_clear(dst) __cpus_clear(dst, NR_CPUS)
+#define cpumask_clear(dst) __cpus_clear(dst, nr_cpumask_bits)
 #define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
 static inline void __cpus_clear(cpumask_t *dstp, int nbits)
 {
@@ -108,24 +125,26 @@ static inline void __cpus_clear(cpumask_t *dstp, int nbits)
 }
 /* No static inline type checking - see Subtlety (1) above. */
-#define cpumask_test_cpu(cpu, cpumask) test_bit(cpu, (cpumask)->bits)
+#define cpumask_test_cpu(cpu, cpumask) \
+    test_bit(cpumask_check(cpu), (cpumask)->bits)
 #define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
 #define cpu_test_and_set(cpu, cpumask) \
     cpumask_test_and_set_cpu(cpu, &(cpumask))
 static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
 {
-    return test_and_set_bit(cpu, addr->bits);
+    return test_and_set_bit(cpumask_check(cpu), addr->bits);
 }
 #define cpu_test_and_clear(cpu, cpumask) \
     cpumask_test_and_clear_cpu(cpu, &(cpumask))
 static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
 {
-    return test_and_clear_bit(cpu, addr->bits);
+    return test_and_clear_bit(cpumask_check(cpu), addr->bits);
 }
-#define cpumask_and(dst, src1, src2) __cpus_and(dst, src1, src2, NR_CPUS)
+#define cpumask_and(dst, src1, src2) \
+    __cpus_and(dst, src1, src2, nr_cpumask_bits)
 #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
 static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
                               const cpumask_t *src2p, int nbits)
@@ -133,7 +152,8 @@ static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
     bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
 }
-#define cpumask_or(dst, src1, src2) __cpus_or(dst, src1, src2, NR_CPUS)
+#define cpumask_or(dst, src1, src2) \
+    __cpus_or(dst, src1, src2, nr_cpumask_bits)
 #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
 static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
                              const cpumask_t *src2p, int nbits)
@@ -141,7 +161,8 @@ static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
     bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
 }
-#define cpumask_xor(dst, src1, src2) __cpus_xor(dst, src1, src2, NR_CPUS)
+#define cpumask_xor(dst, src1, src2) \
+    __cpus_xor(dst, src1, src2, nr_cpumask_bits)
 #define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
 static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
                               const cpumask_t *src2p, int nbits)
@@ -149,7 +170,8 @@ static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
     bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
 }
-#define cpumask_andnot(dst, src1, src2) __cpus_andnot(dst, src1, src2, NR_CPUS)
+#define cpumask_andnot(dst, src1, src2) \
+    __cpus_andnot(dst, src1, src2, nr_cpumask_bits)
 #define cpus_andnot(dst, src1, src2) \
     __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
 static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
@@ -158,7 +180,8 @@ static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
     bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
 }
-#define cpumask_complement(dst, src) __cpus_complement(dst, src, NR_CPUS)
+#define cpumask_complement(dst, src) \
+    __cpus_complement(dst, src, nr_cpumask_bits)
 #define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
 static inline void __cpus_complement(cpumask_t *dstp,
                                      const cpumask_t *srcp, int nbits)
@@ -166,55 +189,62 @@ static inline void __cpus_complement(cpumask_t *dstp,
     bitmap_complement(dstp->bits, srcp->bits, nbits);
 }
-#define cpumask_equal(src1, src2) __cpus_equal(src1, src2, NR_CPUS)
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
+#define cpumask_equal(src1, src2) __cpus_equal(src1, src2, nr_cpu_ids)
+#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), nr_cpu_ids)
 static inline int __cpus_equal(const cpumask_t *src1p,
                                const cpumask_t *src2p, int nbits)
 {
     return bitmap_equal(src1p->bits, src2p->bits, nbits);
 }
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
+#define cpumask_intersects(src1, src2) \
+    __cpus_intersects(src1, src2, nr_cpu_ids)
+#define cpus_intersects(src1, src2) \
+    __cpus_intersects(&(src1), &(src2), nr_cpu_ids)
 static inline int __cpus_intersects(const cpumask_t *src1p,
                                     const cpumask_t *src2p, int nbits)
 {
     return bitmap_intersects(src1p->bits, src2p->bits, nbits);
 }
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
+#define cpumask_subset(src1, src2) __cpus_subset(src1, src2, nr_cpu_ids)
+#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), nr_cpu_ids)
 static inline int __cpus_subset(const cpumask_t *src1p,
                                 const cpumask_t *src2p, int nbits)
 {
     return bitmap_subset(src1p->bits, src2p->bits, nbits);
 }
-#define cpumask_empty(src) __cpus_empty(src, NR_CPUS)
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
+#define cpumask_empty(src) __cpus_empty(src, nr_cpu_ids)
+#define cpus_empty(src) __cpus_empty(&(src), nr_cpu_ids)
 static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
 {
     return bitmap_empty(srcp->bits, nbits);
 }
-#define cpumask_full(cpumask) __cpus_full(cpumask, NR_CPUS)
-#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
+#define cpumask_full(cpumask) __cpus_full(cpumask, nr_cpu_ids)
+#define cpus_full(cpumask) __cpus_full(&(cpumask), nr_cpu_ids)
 static inline int __cpus_full(const cpumask_t *srcp, int nbits)
 {
     return bitmap_full(srcp->bits, nbits);
 }
-#define cpumask_weight(cpumask) __cpus_weight(cpumask, NR_CPUS)
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
+#define cpumask_weight(cpumask) __cpus_weight(cpumask, nr_cpu_ids)
+#define cpus_weight(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
 static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
 {
     return bitmap_weight(srcp->bits, nbits);
 }
-#define cpus_copy(dest, src) cpumask_copy(&(dest), &(src))
-static inline void cpumask_copy(cpumask_t *dstp, const cpumask_t *srcp)
+#define cpumask_copy(dest, src) __cpus_copy(dest, src, nr_cpumask_bits)
+#define cpus_copy(dest, src) __cpus_copy(&(dest), &(src), NR_CPUS)
+static inline void __cpus_copy(cpumask_t *dstp, const cpumask_t *srcp, int nbits)
 {
-    bitmap_copy(dstp->bits, srcp->bits, NR_CPUS);
+    bitmap_copy(dstp->bits, srcp->bits, nbits);
 }
+#define cpumask_shift_right(dst, src, n) \
+    __cpus_shift_right(dst, src, n, nr_cpumask_bits)
 #define cpus_shift_right(dst, src, n) \
     __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
 static inline void __cpus_shift_right(cpumask_t *dstp,
@@ -223,6 +253,8 @@ static inline void __cpus_shift_right(cpumask_t *dstp,
     bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
 }
+#define cpumask_shift_left(dst, src, n) \
+    __cpus_shift_left(dst, src, n, nr_cpumask_bits)
 #define cpus_shift_left(dst, src, n) \
     __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
 static inline void __cpus_shift_left(cpumask_t *dstp,
@@ -231,22 +263,22 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
     bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
 }
-#define cpumask_first(src) __first_cpu(src, NR_CPUS)
-#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
+#define cpumask_first(src) __first_cpu(src, nr_cpu_ids)
+#define first_cpu(src) __first_cpu(&(src), nr_cpu_ids)
 static inline int __first_cpu(const cpumask_t *srcp, int nbits)
 {
     return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
 }
-#define cpumask_next(n, src) __next_cpu(n, src, NR_CPUS)
-#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
+#define cpumask_next(n, src) __next_cpu(n, src, nr_cpu_ids)
+#define next_cpu(n, src) __next_cpu((n), &(src), nr_cpu_ids)
 static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
 {
     return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
 }
-#define cpumask_last(src) __last_cpu(src, NR_CPUS)
-#define last_cpu(src) __last_cpu(&(src), NR_CPUS)
+#define cpumask_last(src) __last_cpu(src, nr_cpu_ids)
+#define last_cpu(src) __last_cpu(&(src), nr_cpu_ids)
 static inline int __last_cpu(const cpumask_t *srcp, int nbits)
 {
     int cpu, pcpu = nbits;
@@ -257,8 +289,8 @@ static inline int __last_cpu(const cpumask_t *srcp, int nbits)
     return pcpu;
 }
-#define cpumask_cycle(n, src) __cycle_cpu(n, src, NR_CPUS)
-#define cycle_cpu(n, src) __cycle_cpu((n), &(src), NR_CPUS)
+#define cpumask_cycle(n, src) __cycle_cpu(n, src, nr_cpu_ids)
+#define cycle_cpu(n, src) __cycle_cpu((n), &(src), nr_cpu_ids)
 static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
 {
     int nxt = __next_cpu(n, srcp, nbits);
@@ -317,7 +349,7 @@ static inline const cpumask_t *cpumask_of(unsigned int cpu)
 #define cpus_addr(src) ((src).bits)
 #define cpumask_scnprintf(buf, len, src) \
-    __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
+    __cpumask_scnprintf((buf), (len), &(src), nr_cpu_ids)
 static inline int __cpumask_scnprintf(char *buf, int len,
                                       const cpumask_t *srcp, int nbits)
 {
@@ -325,7 +357,7 @@ static inline int __cpumask_scnprintf(char *buf, int len,
 }
 #define cpulist_scnprintf(buf, len, src) \
-    __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
+    __cpulist_scnprintf((buf), (len), &(src), nr_cpu_ids)
 static inline int __cpulist_scnprintf(char *buf, int len,
                                       const cpumask_t *srcp, int nbits)
 {
@@ -355,6 +387,11 @@ typedef cpumask_t *cpumask_var_t;
 static inline bool_t alloc_cpumask_var(cpumask_var_t *mask)
 {
+    /*
+     * Once all direct cpumask assignments and all cpus_*() accessors
+     * still referencing NR_CPUS are gone, we could use nr_cpumask_bits
+     * to determine the allocation size here.
+     */
     return (*mask = xmalloc(cpumask_t)) != NULL;
 }
@@ -386,7 +423,7 @@ static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
 #if NR_CPUS > 1
 #define for_each_cpu_mask(cpu, mask)            \
     for ((cpu) = first_cpu(mask);               \
-         (cpu) < NR_CPUS;                       \
+         (cpu) < nr_cpu_ids;                    \
          (cpu) = next_cpu((cpu), (mask)))
 #else /* NR_CPUS == 1 */
 #define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
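Editor's note on the allocation-size remark added to alloc_cpumask_var()
above: a rough, standalone sketch (not part of this series; plain calloc()
stands in for Xen's allocator) of what sizing a dynamically allocated mask
by nr_cpumask_bits rather than by sizeof(cpumask_t) could look like.

    /* Hypothetical sizing helper: one bit per possible CPU id,
     * rounded up to whole unsigned longs. */
    #include <limits.h>
    #include <stdlib.h>

    static unsigned long *alloc_cpumask_bits(unsigned int nr_cpumask_bits)
    {
        size_t bits_per_long = CHAR_BIT * sizeof(unsigned long);
        size_t longs = (nr_cpumask_bits + bits_per_long - 1) / bits_per_long;

        return calloc(longs, sizeof(unsigned long));
    }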