if (unlikely(result))
return -ENODEV;
- online_policy_cpus = policy->cpus;
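+ /* Consider only those CPUs in the policy's mask that are currently online. */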
+ cpumask_and(&online_policy_cpus, &policy->cpus, &cpu_online_map);
next_perf_state = data->freq_table[next_state].index;
if (perf->state == next_perf_state) {
struct set_mtrr_data data;
unsigned long flags;
- allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
- nr_cpus = cpus_weight(allbutself);
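+ /* All online CPUs except ourselves take part in the MTRR update rendezvous. */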
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
+ nr_cpus = cpumask_weight(&allbutself);
data.smp_reg = reg;
data.smp_base = base;
desc->handler->end(desc, 0);
break;
case ACKTYPE_EOI:
- cpu_eoi_map = action->cpu_eoi_map;
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
spin_unlock_irq(&desc->lock);
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
spin_lock_irq(&desc->lock);
ASSERT(action->ack_type == ACKTYPE_EOI);
- cpu_eoi_map = action->cpu_eoi_map;
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
- if ( cpu_test_and_clear(smp_processor_id(), cpu_eoi_map) )
+ if ( cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
{
__set_eoi_ready(desc);
spin_unlock(&desc->lock);
spin_unlock_irq(&desc->lock);
}
- if ( !cpus_empty(cpu_eoi_map) )
+ if ( !cpumask_empty(&cpu_eoi_map) )
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
}
(--action->in_flight == 0) &&
(action->nr_guests != 0) )
{
- cpu_eoi_map = action->cpu_eoi_map;
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
spin_unlock_irq(&desc->lock);
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
spin_lock_irq(&desc->lock);
* would need to flush all ready EOIs before returning as otherwise the
* desc->handler could change and we would call the wrong 'end' hook.
*/
- cpu_eoi_map = action->cpu_eoi_map;
- if ( !cpus_empty(cpu_eoi_map) )
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
+ if ( !cpumask_empty(&cpu_eoi_map) )
{
BUG_ON(action->ack_type != ACKTYPE_EOI);
spin_unlock_irq(&desc->lock);
node_set_online(0);
for (i = 0; i < nr_cpu_ids; i++)
numa_set_node(i, 0);
- node_to_cpumask[0] = cpumask_of_cpu(0);
+ cpumask_copy(&node_to_cpumask[0], cpumask_of(0));
setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT, (u64)end_pfn << PAGE_SHIFT);
}
cpumask_t allbutself;
/* Flush everyone else. We definitely flushed just before entry. */
- allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
flush_mask(&allbutself, FLUSH_TLB);
/* No need for atomicity: we are the only possible updater. */
void *info,
int wait)
{
- cpumask_t allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
+ cpumask_t allbutself;
+
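+ /* All online CPUs except the caller; on_selected_cpus() does the rest. */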
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
on_selected_cpus(&allbutself, func, info, wait);
}
spin_lock(&call_lock);
- call_data.selected = *selected;
+ cpumask_copy(&call_data.selected, selected);
- nr_cpus = cpus_weight(call_data.selected);
+ nr_cpus = cpumask_weight(&call_data.selected);
if ( nr_cpus == 0 )
goto out;
send_IPI_mask(&call_data.selected, CALL_FUNCTION_VECTOR);
- if ( cpu_isset(smp_processor_id(), call_data.selected) )
+ if ( cpumask_test_cpu(smp_processor_id(), &call_data.selected) )
{
local_irq_disable();
__smp_call_function_interrupt();
local_irq_enable();
}
- while ( !cpus_empty(call_data.selected) )
+ while ( !cpumask_empty(&call_data.selected) )
cpu_relax();
out:
void *info = call_data.info;
unsigned int cpu = smp_processor_id();
- if ( !cpu_isset(cpu, call_data.selected) )
+ if ( !cpumask_test_cpu(cpu, &call_data.selected) )
return;
irq_enter();
{
(*func)(info);
mb();
- cpu_clear(cpu, call_data.selected);
+ cpumask_clear_cpu(cpu, &call_data.selected);
}
else
{
mb();
- cpu_clear(cpu, call_data.selected);
+ cpumask_clear_cpu(cpu, &call_data.selected);
(*func)(info);
}
if ( c[cpu].x86_max_cores == 1 )
{
- per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
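+ /* One core per package: the core map is simply the sibling (thread) map. */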
+ cpumask_copy(&per_cpu(cpu_core_map, cpu),
+ &per_cpu(cpu_sibling_map, cpu));
c[cpu].booted_cores = 1;
return;
}
{
unsigned int cpu = smp_processor_id();
local_irq_disable();
- while ( !cpu_isset(cpu, tsc_check_cpumask) )
+ while ( !cpumask_test_cpu(cpu, &tsc_check_cpumask) )
mb();
check_tsc_warp(cpu_khz, &tsc_max_warp);
- cpu_clear(cpu, tsc_check_cpumask);
+ cpumask_clear_cpu(cpu, &tsc_check_cpumask);
local_irq_enable();
}
tsc_check_count++;
smp_call_function(tsc_check_slave, NULL, 0);
- tsc_check_cpumask = cpu_online_map;
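+ /* Exclude ourselves up front; each slave clears its own bit once its warp check is done. */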
+ cpumask_andnot(&tsc_check_cpumask, &cpu_online_map, cpumask_of(cpu));
local_irq_disable();
check_tsc_warp(cpu_khz, &tsc_max_warp);
- cpu_clear(cpu, tsc_check_cpumask);
local_irq_enable();
- while ( !cpus_empty(tsc_check_cpumask) )
+ while ( !cpumask_empty(&tsc_check_cpumask) )
cpu_relax();
spin_unlock(&lock);
int i;
struct cpu_calibration *c = &this_cpu(cpu_calibration);
struct calibration_rendezvous *r = _r;
- unsigned int total_cpus = cpus_weight(r->cpu_calibration_map);
+ unsigned int total_cpus = cpumask_weight(&r->cpu_calibration_map);
/* Loop to get rid of cache effects on TSC skew. */
for ( i = 4; i >= 0; i-- )
{
struct cpu_calibration *c = &this_cpu(cpu_calibration);
struct calibration_rendezvous *r = _r;
- unsigned int total_cpus = cpus_weight(r->cpu_calibration_map);
+ unsigned int total_cpus = cpumask_weight(&r->cpu_calibration_map);
if ( smp_processor_id() == 0 )
{
static void time_calibration(void *unused)
{
struct calibration_rendezvous r = {
- .cpu_calibration_map = cpu_online_map,
.semaphore = ATOMIC_INIT(0)
};
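+ /* Fill the map with cpumask_copy() rather than a direct structure assignment. */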
+ cpumask_copy(&r.cpu_calibration_map, &cpu_online_map);
+
/* @wait=1 because we must wait for all cpus before freeing @r. */
on_selected_cpus(&r.cpu_calibration_map,
time_calibration_rendezvous_fn,
printk("\n");
}
- cpu_clear(cpu, dump_execstate_mask);
+ cpumask_clear_cpu(cpu, &dump_execstate_mask);
if ( !alt_key_handling )
return;
- cpu = cycle_cpu(cpu, dump_execstate_mask);
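+ /* Hand the dump on to the next CPU still set in the mask. */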
+ cpu = cpumask_cycle(cpu, &dump_execstate_mask);
if ( cpu < nr_cpu_ids )
{
smp_send_state_dump(cpu);
printk("'%c' pressed -> dumping registers\n\n", key);
- dump_execstate_mask = cpu_online_map;
+ cpumask_copy(&dump_execstate_mask, &cpu_online_map);
/* Get local execution state out immediately, in case we get stuck. */
dump_execstate(regs);
for_each_cpu_mask ( cpu, dump_execstate_mask )
{
smp_send_state_dump(cpu);
- while ( cpu_isset(cpu, dump_execstate_mask) )
+ while ( cpumask_test_cpu(cpu, &dump_execstate_mask) )
cpu_relax();
}
{
unsigned int cpu = smp_processor_id();
local_irq_disable();
- while ( !cpu_isset(cpu, read_clocks_cpumask) )
+ while ( !cpumask_test_cpu(cpu, &read_clocks_cpumask) )
cpu_relax();
per_cpu(read_clocks_time, cpu) = NOW();
per_cpu(read_cycles_time, cpu) = get_cycles();
- cpu_clear(cpu, read_clocks_cpumask);
+ cpumask_clear_cpu(cpu, &read_clocks_cpumask);
local_irq_enable();
}
smp_call_function(read_clocks_slave, NULL, 0);
local_irq_disable();
- read_clocks_cpumask = cpu_online_map;
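+ /* The local CPU samples its own clocks below, so leave it out of the slave mask. */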
+ cpumask_andnot(&read_clocks_cpumask, &cpu_online_map, cpumask_of(cpu));
per_cpu(read_clocks_time, cpu) = NOW();
per_cpu(read_cycles_time, cpu) = get_cycles();
- cpu_clear(cpu, read_clocks_cpumask);
local_irq_enable();
- while ( !cpus_empty(read_clocks_cpumask) )
+ while ( !cpumask_empty(&read_clocks_cpumask) )
cpu_relax();
min_stime_cpu = max_stime_cpu = min_cycles_cpu = max_cycles_cpu = cpu;
* Don't send IPI to itself. With irqs disabled,
* rdp->cpu is the current cpu.
*/
- cpumask = rcp->cpumask;
- cpu_clear(rdp->cpu, cpumask);
+ cpumask_andnot(&cpumask, &rcp->cpumask, cpumask_of(rdp->cpu));
cpumask_raise_softirq(&cpumask, SCHEDULE_SOFTIRQ);
}
}
smp_wmb();
rcp->cur++;
- rcp->cpumask = cpu_online_map;
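+ /* Every CPU online at the start of the batch must report a quiescent state. */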
+ cpumask_copy(&rcp->cpumask, &cpu_online_map);
}
}
*/
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
- cpu_clear(cpu, rcp->cpumask);
- if (cpus_empty(rcp->cpumask)) {
+ cpumask_clear_cpu(cpu, &rcp->cpumask);
+ if (cpumask_empty(&rcp->cpumask)) {
/* batch completed ! */
rcp->completed = rcp->cur;
rcu_start_batch(rcp);
if ( !get_cpu_maps() )
return -EBUSY;
- allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
- nr_cpus = cpus_weight(allbutself);
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
+ nr_cpus = cpumask_weight(&allbutself);
/* Must not spin here as the holder will expect us to be descheduled. */
if ( !spin_trylock(&stopmachine_lock) )
static inline bool_t alloc_cpumask_var(cpumask_var_t *mask)
{
- /*
- * Once all direct cpumask assignments are gone, we could use
- * nr_cpumask_bits to determine the allocation size here.
- */
- return (*mask = xmalloc(cpumask_t)) != NULL;
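+ /* With direct cpumask assignments gone, nr_cpumask_bits determines the allocation size. */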
+ *(void **)mask = _xmalloc(nr_cpumask_bits / 8, sizeof(long));
+ return *mask != NULL;
+}
+
+static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
+{
+ *(void **)mask = _xzalloc(nr_cpumask_bits / 8, sizeof(long));
+ return *mask != NULL;
}
static inline void free_cpumask_var(cpumask_var_t mask)
return 1;
}
-static inline void free_cpumask_var(cpumask_var_t mask)
-{
-}
-#endif
-
static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
{
- if (!alloc_cpumask_var(mask))
- return 0;
cpumask_clear(*mask);
return 1;
}
+static inline void free_cpumask_var(cpumask_var_t mask)
+{
+}
+#endif
+
#if NR_CPUS > 1
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = first_cpu(mask); \
static inline void set_native_irq_info(unsigned int irq, const cpumask_t *mask)
{
- irq_desc[irq].affinity = *mask;
+ cpumask_copy(&irq_desc[irq].affinity, mask);
}
unsigned int set_desc_affinity(struct irq_desc *, const cpumask_t *);