//printf("smp_send_event_check_mask called\n");
for (cpu = 0; cpu < NR_CPUS; ++cpu)
- if (cpu_isset(cpu, *mask) && cpu != smp_processor_id())
+ if (cpumask_test_cpu(cpu, mask) && cpu != smp_processor_id())
platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
#endif
*/
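The open-coded NR_CPUS scan above predates the mask iterators; a minimal sketch of the same walk using for_each_cpu(), assuming the pointer-based iterator that accompanies the converted API (the helper name is illustrative; platform_send_ipi() and the IPI constants are the ia64 calls from the hunk above):

#include <xen/cpumask.h>
#include <xen/smp.h>

static void send_event_check_mask_sketch(const cpumask_t *mask)
{
    unsigned int cpu;

    /* Visits only the set bits, so no per-iteration mask test is needed. */
    for_each_cpu ( cpu, mask )
        if ( cpu != smp_processor_id() )
            platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}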
Dprintk("Waiting on callin_map ...");
for (timeout = 0; timeout < 100000; timeout++) {
- if (cpu_isset(cpu, cpu_callin_map))
+ if (cpumask_test_cpu(cpu, &cpu_callin_map))
break; /* It has booted */
udelay(100);
}
Dprintk("\n");
- if (!cpu_isset(cpu, cpu_callin_map)) {
+ if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
ia64_cpu_to_sapicid[cpu] = -1;
cpu_clear(cpu, cpu_online_map); /* was set in smp_callin() */
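The boot-wait above is a poll-with-timeout on a single cpumask bit; a condensed sketch of the pattern as a reusable helper, assuming only the converted test primitive (the helper name is hypothetical; the budget of 100000 x 100us polls is copied from the hunk):

#include <xen/cpumask.h>
#include <xen/delay.h>

/* Returns 1 once 'cpu' appears in *mask, 0 after the timeout expires. */
static int wait_for_cpu_in_mask_sketch(int cpu, const cpumask_t *mask)
{
    int timeout;

    for ( timeout = 0; timeout < 100000; timeout++ )
    {
        if ( cpumask_test_cpu(cpu, mask) )
            return 1;
        udelay(100);
    }
    return 0;
}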
* Already booted cpu? not valid anymore since we don't
* do idle loop tightspin anymore.
*/
- if (cpu_isset(cpu, cpu_callin_map))
+ if (cpumask_test_cpu(cpu, &cpu_callin_map))
return -EINVAL;
if (!per_cpu(cpu_sibling_mask, cpu) &&
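The mechanical rule behind every hunk in this series: cpu_isset() took the cpumask by value, while cpumask_test_cpu() takes it by pointer. So plain cpumask_t objects (cpu_callin_map above) gain a '&', and arguments that were already pointers (the '*mask' in the first hunk) are passed through unchanged. A short sketch covering both cases (names are illustrative):

#include <xen/cpumask.h>

static cpumask_t callin_sketch;                 /* plain object: pass &mask */

static int test_both_sketch(int cpu, const cpumask_t *mask) /* pointer: pass as-is */
{
    return cpumask_test_cpu(cpu, &callin_sketch) &&
           cpumask_test_cpu(cpu, mask);
}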
if (is_idle_vcpu(next) ||
__test_and_clear_bit(cpu, &next->arch.cache_coherent_map)) {
- if (cpu_test_and_clear(cpu, cpu_cache_coherent_map)) {
+ if (cpumask_test_and_clear_cpu(cpu, &cpu_cache_coherent_map)) {
unsigned long flags;
u64 progress = 0;
s64 status;
else {
if (current && VMX_DOMAIN(current))
vpd = __get_cpu_var(inserted_vpd);
- ia64_new_rr7_efi(val, cpu_isset(smp_processor_id(),
- percpu_set), vpd);
+ ia64_new_rr7_efi(val, cpumask_test_cpu(smp_processor_id(),
+ &percpu_set), vpd);
}
return 1;
and struct domain are initialized. */
if (unlikely(current == NULL || current->domain == NULL ||
is_idle_vcpu(current)))
- ia64_new_rr7_efi(val, cpu_isset(smp_processor_id(),
- percpu_set),
+ ia64_new_rr7_efi(val, cpumask_test_cpu(smp_processor_id(),
+ &percpu_set),
0UL);
else if (VMX_DOMAIN(current))
__vmx_switch_rr7_vcpu(current, val);
int cpu;
cpu = smp_processor_id();
- if (cpu_isset(cpu, *mask))
+ if (cpumask_test_cpu(cpu, mask))
flush_tlb_vhpt_all (NULL);
if (cpumask_subset(mask, cpumask_of(cpu)))
.limit = LAST_RESERVED_GDT_BYTE
};
- if (cpu_test_and_set(cpu, cpu_initialized)) {
+ if (cpumask_test_and_set_cpu(cpu, &cpu_initialized)) {
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
for (;;) local_irq_enable();
}
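cpu_init()'s guard above is the standard run-once-per-CPU idiom: the atomic test-and-set claims the bit, and any later caller sees it already set. A minimal sketch, with an illustrative mask name and the printk borrowed from the hunk:

#include <xen/cpumask.h>

static cpumask_t initialized_sketch;

static void init_once_sketch(int cpu)
{
    /* Atomically claim the bit; returns nonzero if it was already set. */
    if ( cpumask_test_and_set_cpu(cpu, &initialized_sketch) )
    {
        printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
        return;
    }
    /* ... one-time per-CPU setup runs exactly once ... */
}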
{
ASSERT(local_irq_is_enabled());
- if ( cpu_isset(smp_processor_id(), *mask) )
+ if ( cpumask_test_cpu(smp_processor_id(), mask) )
flush_area_local(va, flags);
if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
return;
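Both flush paths above follow the same two-step shape: service the local CPU synchronously, then return early when cpumask_subset() shows the mask holds no CPUs beyond our own. A condensed sketch of that skeleton (the IPI step is elided; flush_area_local() is the call from the hunk):

#include <xen/cpumask.h>

static void flush_mask_sketch(const void *va, unsigned int flags,
                              const cpumask_t *mask)
{
    unsigned int cpu = smp_processor_id();

    if ( cpumask_test_cpu(cpu, mask) )
        flush_area_local(va, flags);            /* handle ourselves first */

    if ( cpumask_subset(mask, cpumask_of(cpu)) )
        return;                                 /* no remote CPUs to notify */

    /* ... send flush IPIs to the remaining CPUs in *mask ... */
}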
if ( boot_cpu_has(X86_FEATURE_TSC_RELIABLE) &&
- !cpu_isset(slave, tsc_sync_cpu_mask) )
+ !cpumask_test_cpu(slave, &tsc_sync_cpu_mask) )
return;
for ( i = 1; i <= 5; i++ )
return;
if ( boot_cpu_has(X86_FEATURE_TSC_RELIABLE) &&
- !cpu_isset(slave, tsc_sync_cpu_mask) )
+ !cpumask_test_cpu(slave, &tsc_sync_cpu_mask) )
return;
for ( i = 1; i <= 5; i++ )
return ret;
set_cpu_state(CPU_STATE_ONLINE);
- while ( !cpu_isset(cpu, cpu_online_map) )
+ while ( !cpu_online(cpu) )
{
cpu_relax();
process_pending_softirqs();
{
int cpu = smp_processor_id();
- if ( cpu_test_and_clear(cpu, pit_broadcast_mask) )
+ if ( cpumask_test_and_clear_cpu(cpu, &pit_broadcast_mask) )
reprogram_timer(this_cpu(timer_deadline));
}
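cpumask_test_and_clear_cpu() is the consume-a-pending-flag primitive: the bit is read and cleared in one atomic step, so concurrent callers cannot both act on it. The PIT broadcast exit above is the canonical shape; a minimal sketch with an illustrative mask:

#include <xen/cpumask.h>

static cpumask_t pending_sketch;        /* illustrative pending-work mask */

static void consume_pending_sketch(void)
{
    int cpu = smp_processor_id();

    /* At most one caller observes the bit set and does the work. */
    if ( cpumask_test_and_clear_cpu(cpu, &pending_sketch) )
        ; /* e.g. reprogram_timer(this_cpu(timer_deadline)) in the hunk */
}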
ret = -EBUSY;
if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
goto out;
- if ( cpu_isset(cpu, cpupool_locked_cpus) )
+ if ( cpumask_test_cpu(cpu, &cpupool_locked_cpus) )
goto out;
ret = 0;
if ( cpu >= nr_cpu_ids )
goto addcpu_out;
ret = -EBUSY;
- if ( !cpu_isset(cpu, cpupool_free_cpus) )
+ if ( !cpumask_test_cpu(cpu, &cpupool_free_cpus) )
goto addcpu_out;
c = cpupool_find_by_id(op->cpupool_id);
ret = -ENOENT;
ELF_Prstatus *prstatus;
crash_xen_core_t *xencore;
- if ( cpu_test_and_set(cpu, crash_saved_cpus) )
+ if ( cpumask_test_and_set_cpu(cpu, &crash_saved_cpus) )
return;
prstatus = (ELF_Prstatus *)ELFNOTE_DESC(note);
crash_xen_info_t info;
crash_xen_info_t *out = (crash_xen_info_t *)ELFNOTE_DESC(xen_crash_note);
- BUG_ON(!cpu_test_and_set(cpu, crash_saved_cpus));
+ BUG_ON(!cpumask_test_and_set_cpu(cpu, &crash_saved_cpus));
memset(&info, 0, sizeof(info));
info.xen_major_version = xen_major_version();
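Note that the two crash hunks use the same primitive with opposite polarity: the register-saving path skips a CPU whose bit is already set, while the info-note path asserts the bit must already be set (that CPU saved its registers first). A condensed sketch of the pairing (function and mask names are illustrative):

#include <xen/cpumask.h>
#include <xen/lib.h>

static cpumask_t crash_saved_sketch;

static void save_regs_sketch(int cpu)
{
    if ( cpumask_test_and_set_cpu(cpu, &crash_saved_sketch) )
        return;                 /* already saved once: nothing to do */
    /* ... fill the ELF_Prstatus note ... */
}

static void save_info_sketch(int cpu)
{
    /* Must follow save_regs_sketch() on this CPU, hence the assertion. */
    BUG_ON(!cpumask_test_and_set_cpu(cpu, &crash_saved_sketch));
    /* ... fill the crash_xen_info_t note ... */
}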
struct csched_runqueue_data *trqd;
/* Check if new_cpu is valid */
- BUG_ON(!cpu_isset(new_cpu, CSCHED_PRIV(ops)->initialized));
+ BUG_ON(!cpumask_test_cpu(new_cpu, &CSCHED_PRIV(ops)->initialized));
trqd = RQD(ops, new_cpu);
scurr->vcpu->vcpu_id,
now);
- BUG_ON(!cpu_isset(cpu, CSCHED_PRIV(ops)->initialized));
+ BUG_ON(!cpumask_test_cpu(cpu, &CSCHED_PRIV(ops)->initialized));
rqd = RQD(ops, cpu);
- BUG_ON(!cpu_isset(cpu, rqd->active));
+ BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
/* Protected by runqueue lock */
BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
/* Clear "tickled" bit now that we've been scheduled */
- if ( cpu_isset(cpu, rqd->tickled) )
+ if ( cpumask_test_cpu(cpu, &rqd->tickled) )
cpu_clear(cpu, rqd->tickled);
/* Update credits */
}
/* Clear the idle mask if necessary */
- if ( cpu_isset(cpu, rqd->idle) )
+ if ( cpumask_test_cpu(cpu, &rqd->idle) )
cpu_clear(cpu, rqd->idle);
snext->start_time = now;
else
{
/* Update the idle mask if necessary */
- if ( !cpu_isset(cpu, rqd->idle) )
+ if ( !cpumask_test_cpu(cpu, &rqd->idle) )
cpu_set(cpu, rqd->idle);
/* Make sure avgload gets updated periodically even
* if there's no activity */
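The credit2 scheduling tail above keeps two per-runqueue masks coherent with the decision just made, all under the runqueue lock, so plain test/set/clear suffice. A condensed sketch of that bookkeeping, assuming the pointer-based cpumask_set_cpu()/cpumask_clear_cpu() setters:

#include <xen/cpumask.h>

static void update_rqd_masks_sketch(int cpu, cpumask_t *tickled,
                                    cpumask_t *idle, int going_idle)
{
    /* Entering the scheduler services any pending tickle. */
    if ( cpumask_test_cpu(cpu, tickled) )
        cpumask_clear_cpu(cpu, tickled);

    /* Record whether this CPU ends up running the idle vcpu. */
    if ( going_idle )
    {
        if ( !cpumask_test_cpu(cpu, idle) )
            cpumask_set_cpu(cpu, idle);
    }
    else if ( cpumask_test_cpu(cpu, idle) )
        cpumask_clear_cpu(cpu, idle);
}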
spin_lock_irqsave(&prv->lock, flags);
- if ( cpu_isset(cpu, prv->initialized) )
+ if ( cpumask_test_cpu(cpu, &prv->initialized) )
{
printk("%s: Strange, cpu %d already initialized!\n", __func__, cpu);
spin_unlock_irqrestore(&prv->lock, flags);
rqd=prv->rqd + rqi;
printk("Adding cpu %d to runqueue %d\n", cpu, rqi);
- if ( ! cpu_isset(rqi, prv->active_queues) )
+ if ( ! cpumask_test_cpu(rqi, &prv->active_queues) )
{
printk(" First cpu on runqueue, activating\n");
activate_runqueue(prv, rqi);
spin_lock_irqsave(&prv->lock, flags);
- BUG_ON( !cpu_isset(cpu, prv->initialized));
+ BUG_ON(!cpumask_test_cpu(cpu, &prv->initialized));
/* Find the old runqueue and remove this cpu from it */
rqi = prv->runq_map[cpu];
/* No need to save IRQs here, they're already disabled */
spin_lock(&rqd->lock);
- BUG_ON(!cpu_isset(cpu, rqd->idle));
+ BUG_ON(!cpumask_test_cpu(cpu, &rqd->idle));
printk("Removing cpu %d from runqueue %d\n", cpu, rqi);
/* Tasklet work (which runs in idle VCPU context) overrides all else. */
if ( tasklet_work_scheduled ||
(list_empty(runq) && list_empty(waitq)) ||
- unlikely(!cpu_isset(cpu, *SEDF_CPUONLINE(per_cpu(cpupool, cpu)))) )
+ unlikely(!cpumask_test_cpu(cpu, SEDF_CPUONLINE(per_cpu(cpupool, cpu)))) )
{
ret.task = IDLETASK(cpu);
ret.time = SECONDS(1);
& ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
return 0;
- if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
+ if ( !cpumask_test_cpu(smp_processor_id(), &tb_cpu_mask) )
return 0;
return 1;
& ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
return;
- if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
+ if ( !cpumask_test_cpu(smp_processor_id(), &tb_cpu_mask) )
return;
/* Read tb_init_done /before/ t_bufs. */
* void cpumask_clear(mask) clear all bits
* int cpumask_test_cpu(cpu, mask) true iff bit 'cpu' set in mask
* int cpumask_test_and_set_cpu(cpu, mask) test and set bit 'cpu' in mask
+ * int cpumask_test_and_clear_cpu(cpu, mask) test and clear bit 'cpu' in mask
*
* void cpumask_and(dst, src1, src2) dst = src1 & src2 [intersection]
* void cpumask_or(dst, src1, src2) dst = src1 | src2 [union]
* for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
*
* Subtlety:
- * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
+ * 1) The 'type-checked' form of cpumask_test_cpu() causes gcc (3.3.2, anyway)
* to generate slightly worse code. Note for example the additional
* 40 lines of assembly code compiling the "for each possible cpu"
* loops buried in the disk_stat_read() macros calls when compiling
* drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple
- * one-line #define for cpu_isset(), instead of wrapping an inline
+ * one-line #define for cpumask_test_cpu(), instead of wrapping an inline
* inside a macro, the way we do the other calls.
*/
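/*
 * Hedged usage sketch (illustrative only, not part of the API itself):
 *
 *     cpumask_t mask;
 *     cpumask_clear(&mask);                         // all bits off
 *     if ( !cpumask_test_and_set_cpu(cpu, &mask) )  // was clear, now set
 *         ...first caller through here wins...
 *     if ( cpumask_test_and_clear_cpu(cpu, &mask) ) // consume: set -> clear
 *         ...bit was set; it is clear again now...
 */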
/* No static inline type checking - see Subtlety (1) above. */
#define cpumask_test_cpu(cpu, cpumask) \
test_bit(cpumask_check(cpu), (cpumask)->bits)
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-#define cpu_test_and_set(cpu, cpumask) \
- cpumask_test_and_set_cpu(cpu, &(cpumask))
static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
{
return test_and_set_bit(cpumask_check(cpu), addr->bits);
}
-#define cpu_test_and_clear(cpu, cpumask) \
- cpumask_test_and_clear_cpu(cpu, &(cpumask))
static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
{
return test_and_clear_bit(cpumask_check(cpu), addr->bits);
#define num_online_cpus() cpumask_weight(&cpu_online_map)
#define num_possible_cpus() cpumask_weight(&cpu_possible_map)
#define num_present_cpus() cpumask_weight(&cpu_present_map)
-#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map)
-#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map)
-#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map)
+#define cpu_online(cpu) cpumask_test_cpu(cpu, &cpu_online_map)
+#define cpu_possible(cpu) cpumask_test_cpu(cpu, &cpu_possible_map)
+#define cpu_present(cpu) cpumask_test_cpu(cpu, &cpu_present_map)
#else
#define num_online_cpus() 1
#define num_possible_cpus() 1
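With the by-value forms removed, callers use only the pointer-based accessors, and the UP (#else) branch keeps the same call sites compiling by collapsing the counts to constants. A short usage sketch (the loop bound nr_cpu_ids appears in the cpupool hunk above; everything else is API from this header):

#include <xen/cpumask.h>

static unsigned int count_online_sketch(void)
{
    unsigned int cpu, n = 0;

    for ( cpu = 0; cpu < nr_cpu_ids; cpu++ )
        if ( cpu_online(cpu) )  /* cpumask_test_cpu(cpu, &cpu_online_map) */
            n++;

    return n;                   /* equals num_online_cpus() */
}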