(pa->apic_id << 8) | (pa->local_sapic_eid);
/* nid should be overridden as logical node id later */
node_cpuid[srat_num_cpus].nid = pxm;
- cpu_set(srat_num_cpus, early_cpu_possible_map);
+ cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
srat_num_cpus++;
}
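/*
 * The pattern throughout this change: the old cpu_set(cpu, mask)
 * accessor took the cpumask_t object itself, whereas the replacement
 * cpumask_set_cpu(cpu, &mask) takes a pointer to it, matching the rest
 * of the cpumask_*() API (cpumask_test_cpu(), cpumask_clear_cpu(), ...).
 * A minimal sketch of the equivalence (the mask name is illustrative,
 * not part of the patch):
 *
 *     cpumask_t example_mask = CPU_MASK_NONE;
 *     cpu_set(2, example_mask);           // old accessor (macro removed below)
 *     cpumask_set_cpu(2, &example_mask);  // new accessor
 */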
possible, max((possible - available_cpus), 0));
for (i = 0; i < possible; i++)
- cpu_set(i, cpu_possible_map);
+ cpumask_set_cpu(i, &cpu_possible_map);
}
#ifndef XEN
{
#ifdef CONFIG_SMP
/* If we register an early console, allow CPU 0 to printk */
- cpu_set(smp_processor_id(), cpu_online_map);
+ cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
#endif
}
#else
lock_ipi_calllock();
#endif
- cpu_set(cpuid, cpu_online_map);
+ cpumask_set_cpu(cpuid, &cpu_online_map);
#ifdef XEN
unlock_ipi_calllock(flags);
#else
/*
* Allow the master to continue.
*/
- cpu_set(cpuid, cpu_callin_map);
+ cpumask_set_cpu(cpuid, &cpu_callin_map);
Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
/*
* We have the boot CPU online for sure.
*/
- cpu_set(0, cpu_online_map);
- cpu_set(0, cpu_callin_map);
+ cpumask_set_cpu(0, &cpu_online_map);
+ cpumask_set_cpu(0, &cpu_callin_map);
local_cpu_data->loops_per_jiffy = loops_per_jiffy;
ia64_cpu_to_sapicid[0] = boot_cpu_id;
void __devinit smp_prepare_boot_cpu(void)
{
- cpu_set(smp_processor_id(), cpu_online_map);
- cpu_set(smp_processor_id(), cpu_callin_map);
+ cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
+ cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}
for_each_possible_cpu(cpu) {
cnode = cpu_to_node(cpu);
if (!node_isset(cnode, nodes_flushed)) {
- cpu_set(cpu, selected_cpus);
+ cpumask_set_cpu(cpu, &selected_cpus);
i++;
}
node_set(cnode, nodes_flushed);
ia64_set_psr(psr);
ia64_srlz_i();
#ifdef XEN
- cpu_set(cpu, percpu_set);
+ cpumask_set_cpu(cpu, &percpu_set);
#endif
/*
found:
BUG_ON(v->processor >= NR_CPUS);
- cpu_set(v->processor, entry->pcpu_dirty_mask);
+ cpumask_set_cpu(v->processor, &entry->pcpu_dirty_mask);
BUG_ON(v->vcpu_id >= NR_CPUS);
vcpu_set(v->vcpu_id, entry->vcpu_dirty_mask);
perfc_incr(tlb_track_iod_dirtied);
*/
if ( expires > NOW() || expires == 0 )
{
- cpu_set(cpu, cpuidle_mwait_flags);
+ cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
__mwait(eax, ecx);
- cpu_clear(cpu, cpuidle_mwait_flags);
+ cpumask_clear_cpu(cpu, &cpuidle_mwait_flags);
}
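/*
 * Note (an inference about the surrounding idle code, not stated in the
 * hunk itself): cpuidle_mwait_flags appears to record which CPUs are
 * currently inside __mwait(), letting a remote CPU wake a sleeper by
 * writing to the monitored address rather than sending an IPI.  Hence
 * the strict set-before/clear-after bracketing:
 *
 *     cpumask_set_cpu(cpu, &cpuidle_mwait_flags);   // visible to wakers
 *     __mwait(eax, ecx);                            // may block here
 *     cpumask_clear_cpu(cpu, &cpuidle_mwait_flags); // IPIs needed again
 */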
if ( expires <= NOW() && expires > 0 )
* (the MSRs are sticky)
*/
if (bs.pcc || !bs.recoverable)
- cpu_set(smp_processor_id(), mce_fatal_cpus);
+ cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
} else {
if (mctc != NULL)
mctelem_commit(mctc);
mce_barrier_enter(&mce_trap_bar);
if ( mctc != NULL && mce_urgent_action(regs, mctc))
- cpu_set(smp_processor_id(), mce_fatal_cpus);
+ cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
mce_barrier_exit(&mce_trap_bar);
/*
* Wait until everybody has processed the trap.
return cpu;
}
x86_cpu_to_apicid[cpu] = apicid;
- cpu_set(cpu, cpu_present_map);
+ cpumask_set_cpu(cpu, &cpu_present_map);
}
if (++num_processors > 8) {
__cpuinit void numa_add_cpu(int cpu)
{
- cpu_set(cpu, node_to_cpumask[cpu_to_node(cpu)]);
+ cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}
void __cpuinit numa_set_node(int cpu, int node)
int i;
struct cpuinfo_x86 *c = cpu_data;
- cpu_set(cpu, cpu_sibling_setup_map);
+ cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
if ( c[cpu].x86_num_siblings > 1 )
{
*/
lock_vector_lock();
__setup_vector_irq(cpu);
- cpu_set(cpu, cpu_online_map);
+ cpumask_set_cpu(cpu, &cpu_online_map);
unlock_vector_lock();
init_percpu_time();
void __init smp_prepare_boot_cpu(void)
{
- cpu_set(smp_processor_id(), cpu_online_map);
- cpu_set(smp_processor_id(), cpu_present_map);
+ cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
+ cpumask_set_cpu(smp_processor_id(), &cpu_present_map);
}
static void
"break assumed cross-CPU TSC coherency.\n"
" ** Consider using boot parameter \"tsc=skewed\" "
"which forces TSC emulation where appropriate.\n", cpu);
- cpu_set(cpu, tsc_sync_cpu_mask);
+ cpumask_set_cpu(cpu, &tsc_sync_cpu_mask);
}
srat_detect_node(cpu);
void pit_broadcast_enter(void)
{
- cpu_set(smp_processor_id(), pit_broadcast_mask);
+ cpumask_set_cpu(smp_processor_id(), &pit_broadcast_mask);
}
void pit_broadcast_exit(void)
spin_lock(&cpupool_lock);
ret = cpu_disable_scheduler(cpu);
- cpu_set(cpu, cpupool_free_cpus);
+ cpumask_set_cpu(cpu, &cpupool_free_cpus);
if ( !ret )
{
ret = schedule_cpu_switch(cpu, NULL);
static void cpupool_cpu_add(unsigned int cpu)
{
spin_lock(&cpupool_lock);
- cpu_clear(cpu, cpupool_locked_cpus);
- cpu_set(cpu, cpupool_free_cpus);
+ cpumask_clear_cpu(cpu, &cpupool_locked_cpus);
+ cpumask_set_cpu(cpu, &cpupool_free_cpus);
cpupool_assign_cpu_locked(cpupool0, cpu);
spin_unlock(&cpupool_lock);
}
if ( !cpumask_test_cpu(cpu, cpupool0->cpu_valid))
ret = -EBUSY;
else
- cpu_set(cpu, cpupool_locked_cpus);
+ cpumask_set_cpu(cpu, &cpupool_locked_cpus);
spin_unlock(&cpupool_lock);
return ret;
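/*
 * Note (inferred from the cpupool hunks above, not spelled out in the
 * patch): a physical CPU moves between three disjoint states -- a pool's
 * cpu_valid mask while assigned, cpupool_free_cpus while unassigned, and
 * cpupool_locked_cpus while an offline operation is in flight, so that
 * no pool can claim it mid-transition.
 */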
{
/* Update the idle mask if necessary */
if ( !cpumask_test_cpu(cpu, &rqd->idle) )
- cpu_set(cpu, rqd->idle);
+ cpumask_set_cpu(cpu, &rqd->idle);
/* Make sure avgload gets updated periodically even
* if there's no activity */
update_load(ops, rqd, NULL, 0, now);
INIT_LIST_HEAD(&rqd->runq);
spin_lock_init(&rqd->lock);
- cpu_set(rqi, prv->active_queues);
+ cpumask_set_cpu(rqi, &prv->active_queues);
}
static void deactivate_runqueue(struct csched_private *prv, int rqi)
/* Set the runqueue map */
prv->runq_map[cpu]=rqi;
- cpu_set(cpu, rqd->idle);
- cpu_set(cpu, rqd->active);
+ cpumask_set_cpu(cpu, &rqd->idle);
+ cpumask_set_cpu(cpu, &rqd->active);
spin_unlock(old_lock);
- cpu_set(cpu, prv->initialized);
+ cpumask_set_cpu(cpu, &prv->initialized);
spin_unlock_irqrestore(&prv->lock, flags);
high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
for (cpu = low_cpu; cpu < high_cpu; cpu++) {
- cpu_set(cpu, early_cpu_possible_map);
+ cpumask_set_cpu(cpu, &early_cpu_possible_map);
if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
node_cpuid[cpu].nid = next_nid;
next_nid++;
return cpu;
}
-#define cpu_set(cpu, dst) cpumask_set_cpu(cpu, &(dst))
static inline void cpumask_set_cpu(int cpu, volatile cpumask_t *dstp)
{
    set_bit(cpumask_check(cpu), dstp->bits);
}
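/*
 * For reference, the clear-side accessor used in the hunks above has the
 * matching shape (a sketch modelled on cpumask_set_cpu() just above;
 * consult the real header for the exact qualifiers):
 */
static inline void cpumask_clear_cpu(int cpu, volatile cpumask_t *dstp)
{
    clear_bit(cpumask_check(cpu), dstp->bits);
}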