printk("Dom0 has maximum %u VCPUs\n", opt_dom0_max_vcpus);
- cpu = first_cpu(cpupool0->cpu_valid);
+ cpu = cpumask_first(cpupool0->cpu_valid);
for ( i = 1; i < opt_dom0_max_vcpus; i++ )
{
- cpu = cycle_cpu(cpu, cpupool0->cpu_valid);
+ cpu = cpumask_cycle(cpu, cpupool0->cpu_valid);
(void)alloc_vcpu(d, i, cpu);
}
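The pattern here is the one the whole patch follows: the new cpumask_* calls take a const cpumask_t *, and a cpumask_var_t member passes through unchanged where the old wrappers wanted the object itself. cpumask_cycle() returns the next set bit after its first argument and wraps back around to the first set bit, which is what spreads dom0's VCPUs round-robin over the pool. A minimal sketch of the same idiom, with a hypothetical helper name:

    /* Spread items 1..n-1 round-robin over the CPUs set in *mask,
     * item 0 having been placed on cpumask_first(mask) already. */
    static void spread_round_robin(const cpumask_t *mask, unsigned int n)
    {
        unsigned int i, cpu = cpumask_first(mask);  /* first set bit */

        for ( i = 1; i < n; i++ )
        {
            cpu = cpumask_cycle(cpu, mask);  /* next set bit, wrapping */
            /* ... place item i on 'cpu' ... */
        }
    }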
remove_siblinginfo(cpu);
/* It's now safe to remove this processor from the online map */
- cpu_clear(cpu, cpupool0->cpu_valid);
- cpu_clear(cpu, cpu_online_map);
+ cpumask_clear_cpu(cpu, cpupool0->cpu_valid);
+ cpumask_clear_cpu(cpu, &cpu_online_map);
fixup_irqs();
if ( cpu_disable_scheduler(cpu) )
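Note the asymmetry this hunk introduces: cpu_valid is now pointer-valued and is passed as-is, while cpu_online_map remains a plain cpumask_t and gains an explicit '&'. That follows directly from the shape of the two declarations (a sketch mirroring the Linux cpumask_var_t model; the exact Xen definition depends on NR_CPUS):

    cpumask_t cpu_online_map;   /* plain object: pass &cpu_online_map */
    cpumask_var_t cpu_valid;    /* cpumask_t *, or a one-element array
                                 * decaying to one: pass cpu_valid    */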
static struct cpupool *alloc_cpupool_struct(void)
{
- return xzalloc(struct cpupool);
+ struct cpupool *c = xzalloc(struct cpupool);
+
+ if ( c && zalloc_cpumask_var(&c->cpu_valid) )
+ return c;
+ xfree(c);
+ return NULL;
}
static void free_cpupool_struct(struct cpupool *c)
{
+ if ( c )
+ free_cpumask_var(c->cpu_valid);
xfree(c);
}
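The allocator now has two failure points and one error path: xzalloc() returns NULL on failure, short-circuit evaluation keeps zalloc_cpumask_var() from running on a NULL c, and xfree(NULL) is a no-op, so the single xfree(c) covers both cases. In builds where cpumask_var_t is an embedded array rather than a real pointer, zalloc_cpumask_var() presumably just zeroes it and always succeeds, and free_cpumask_var() is a no-op, so the pairing stays correct either way. The same idiom generalizes to any structure embedding such a mask (struct and function names hypothetical):

    struct foo { cpumask_var_t mask; };

    static struct foo *foo_alloc(void)
    {
        struct foo *f = xzalloc(struct foo);

        if ( f && zalloc_cpumask_var(&f->mask) )
            return f;
        xfree(f);   /* safe: xfree(NULL) is a no-op */
        return NULL;
    }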
spin_unlock(&cpupool_lock);
return -ENOENT;
}
- if ( (c->n_dom != 0) || cpus_weight(c->cpu_valid) )
+ if ( (c->n_dom != 0) || cpumask_weight(c->cpu_valid) )
{
spin_unlock(&cpupool_lock);
return -EBUSY;
cpupool_put(cpupool_cpu_moving);
cpupool_cpu_moving = NULL;
}
- cpu_set(cpu, c->cpu_valid);
+ cpumask_set_cpu(cpu, c->cpu_valid);
return 0;
}
goto out;
ret = 0;
- if ( !cpu_isset(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
+ if ( !cpumask_test_cpu(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
goto out;
- if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) &&
+ if ( (c->n_dom > 0) && (cpumask_weight(c->cpu_valid) == 1) &&
(cpu != cpupool_moving_cpu) )
{
for_each_domain(d)
cpupool_moving_cpu = cpu;
atomic_inc(&c->refcnt);
cpupool_cpu_moving = c;
- cpu_clear(cpu, c->cpu_valid);
+ cpumask_clear_cpu(cpu, c->cpu_valid);
spin_unlock(&cpupool_lock);
work_cpu = smp_processor_id();
if ( work_cpu == cpu )
{
- work_cpu = first_cpu(cpupool0->cpu_valid);
+ work_cpu = cpumask_first(cpupool0->cpu_valid);
if ( work_cpu == cpu )
- work_cpu = next_cpu(cpu, cpupool0->cpu_valid);
+ work_cpu = cpumask_next(cpu, cpupool0->cpu_valid);
}
return continue_hypercall_on_cpu(work_cpu, cpupool_unassign_cpu_helper, c);
return 0;
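The hop is needed because the continuation must not run on the CPU that is being unassigned: if the caller already sits on it, the code picks some other CPU of cpupool0 before scheduling cpupool_unassign_cpu_helper there. The selection amounts to (a hedged sketch, helper name hypothetical):

    /* Pick a CPU from *mask other than 'avoid'; assumes the mask
     * contains at least one other set bit. */
    static unsigned int pick_other_cpu(const cpumask_t *mask,
                                       unsigned int avoid)
    {
        unsigned int cpu = cpumask_first(mask);

        return (cpu == avoid) ? cpumask_next(avoid, mask) : cpu;
    }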
spin_lock(&cpupool_lock);
c = cpupool_find_by_id(poolid);
- if ( (c != NULL) && cpus_weight(c->cpu_valid) )
+ if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
{
c->n_dom++;
n_dom = c->n_dom;
int ret = 0;
spin_lock(&cpupool_lock);
- if ( !cpu_isset(cpu, cpupool0->cpu_valid))
+ if ( !cpumask_test_cpu(cpu, cpupool0->cpu_valid) )
ret = -EBUSY;
else
cpu_set(cpu, cpupool_locked_cpus);
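The cpu_set(cpu, cpupool_locked_cpus) line stays on the old accessor on purpose: this patch only converts the per-pool cpu_valid masks to cpumask_var_t, while cpupool_locked_cpus (like cpupool_free_cpus and cpu_online_map) apparently remains a plain cpumask_t, so its accesses are out of scope here.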
op->cpupool_id = c->cpupool_id;
op->sched_id = c->sched->sched_id;
op->n_dom = c->n_dom;
- ret = cpumask_to_xenctl_cpumap(&op->cpumap, &c->cpu_valid);
+ ret = cpumask_to_xenctl_cpumap(&op->cpumap, c->cpu_valid);
cpupool_put(c);
}
break;
break;
cpu = op->cpu;
if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
- cpu = last_cpu(c->cpu_valid);
+ cpu = cpumask_last(c->cpu_valid);
ret = (cpu < nr_cpu_ids) ? cpupool_unassign_cpu(c, cpu) : -EINVAL;
cpupool_put(c);
}
ret = -ENOENT;
spin_lock(&cpupool_lock);
c = cpupool_find_by_id(op->cpupool_id);
- if ( (c != NULL) && cpus_weight(c->cpu_valid) )
+ if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
{
d->cpupool->n_dom--;
ret = sched_move_domain(d, c);
goto maxvcpu_out;
ret = -ENOMEM;
- online = (d->cpupool == NULL) ? &cpu_online_map : &d->cpupool->cpu_valid;
+ online = (d->cpupool == NULL) ? &cpu_online_map : d->cpupool->cpu_valid;
if ( max > d->max_vcpus )
{
struct vcpu **vcpus;
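Both arms of the ternary now have type cpumask_t *: the address of the static cpu_online_map on one side, the pool's pointer-valued cpu_valid on the other. The same reshaping recurs in the schedulers' "online CPUs" macros below.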
#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
#define RUNQ(_cpu) (&(CSCHED_PCPU(_cpu)->runq))
#define CSCHED_CPUONLINE(_pool) \
- (((_pool) == NULL) ? &cpupool_free_cpus : &(_pool)->cpu_valid)
+ (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
/*
#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_priv)
#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
#define CSCHED_CPUONLINE(_pool) \
- (((_pool) == NULL) ? &cpupool_free_cpus : &(_pool)->cpu_valid)
+ (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
/* CPU to runq_id macro */
#define c2r(_ops, _cpu) (CSCHED_PRIV(_ops)->runq_map[(_cpu)])
/* CPU to runqueue struct macro */
} while ( 0 )
#define SEDF_CPUONLINE(_pool) \
- (((_pool) == NULL) ? &cpupool_free_cpus : &(_pool)->cpu_valid)
+ (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
#ifndef NDEBUG
#define SEDF_STATS
#define VCPU2OP(_v) (DOM2OP((_v)->domain))
#define VCPU2ONLINE(_v) \
(((_v)->domain->cpupool == NULL) ? &cpu_online_map \
- : &(_v)->domain->cpupool->cpu_valid)
+ : (_v)->domain->cpupool->cpu_valid)
static inline void trace_runstate_change(struct vcpu *v, int new_state)
{
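The scheduler-side macros (the two CSCHED_CPUONLINE definitions, SEDF_CPUONLINE, and VCPU2ONLINE) all encode the same fallback: a NULL pool means "use the free or online mask" instead of a pool's cpu_valid. In each case only the pool arm changes, dropping its '&' now that cpu_valid is pointer-valued, while the plain cpumask_t fallbacks keep theirs.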
domain_pause(d);
- new_p = first_cpu(c->cpu_valid);
+ new_p = cpumask_first(c->cpu_valid);
for_each_vcpu ( d, v )
{
migrate_timer(&v->periodic_timer, new_p);
v->sched_priv = vcpu_priv[v->vcpu_id];
evtchn_move_pirqs(v);
- new_p = cycle_cpu(new_p, c->cpu_valid);
+ new_p = cpumask_cycle(new_p, c->cpu_valid);
SCHED_OP(VCPU2OP(v), insert_vcpu, v);
}
if ( pick_called &&
(new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
cpumask_test_cpu(new_cpu, v->cpu_affinity) &&
- cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
+ cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
break;
/* Select a new CPU. */
new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
- cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
+ cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
break;
pick_called = 1;
}
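The re-check after retaking the scheduler lock is deliberate: between pick_cpu and reacquiring the lock, the VCPU's affinity or the pool's membership can change, so new_cpu is validated against cpu_valid (and, on the fast path, against the hard affinity too) before the migration commits; otherwise the loop picks again. The conversion merely swaps cpu_isset() for cpumask_test_cpu() inside that check.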
{
vcpu_schedule_lock_irq(v);
- cpumask_and(&online_affinity, v->cpu_affinity, &c->cpu_valid);
+ cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
if ( cpus_empty(online_affinity) &&
cpumask_test_cpu(cpu, v->cpu_affinity) )
{
cpumask_t *cpus;
sched = (c == NULL) ? &ops : c->sched;
- cpus = (c == NULL) ? &cpupool_free_cpus : &c->cpu_valid;
+ cpus = (c == NULL) ? &cpupool_free_cpus : c->cpu_valid;
printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
SCHED_OP(sched, dump_settings);
struct cpupool
{
int cpupool_id;
- cpumask_t cpu_valid; /* all cpus assigned to pool */
+ cpumask_var_t cpu_valid; /* all cpus assigned to pool */
struct cpupool *next;
unsigned int n_dom;
struct scheduler *sched;
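Turning the embedded cpumask_t into a cpumask_var_t is what motivates everything above: struct cpupool stays small when NR_CPUS is large, with the mask allocated separately in alloc_cpupool_struct(), at the cost of the explicit alloc/free pairing. On small-NR_CPUS builds the member presumably stays an embedded array and no allocation happens at all.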
void schedule_dump(struct cpupool *c);
extern void dump_runq(unsigned char key);
-#define num_cpupool_cpus(c) (cpus_weight((c)->cpu_valid))
+#define num_cpupool_cpus(c) cpumask_weight((c)->cpu_valid)
#endif /* __SCHED_H__ */