goto maxvcpu_out;
ret = -ENOMEM;
- online = (d->cpupool == NULL) ? &cpu_online_map : d->cpupool->cpu_valid;
+ online = cpupool_online_cpumask(d->cpupool);
if ( max > d->max_vcpus )
{
struct vcpu **vcpus;
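The domctl hunk above wants the *online* variant: when raising a domain's
VCPU count, placement has to consider every CPU the domain's pool has
available, and a pool-less domain may land on any online CPU. A minimal
sketch of such a consumer, assuming the usual Xen cpumask API
(pick_initial_cpu is a hypothetical helper, not part of this patch):

/* Hypothetical helper: spread the i-th new VCPU across the CPUs the
 * domain's pool has online (all online CPUs when d->cpupool == NULL). */
static unsigned int pick_initial_cpu(const struct domain *d, unsigned int i)
{
    cpumask_t *online = cpupool_online_cpumask(d->cpupool);

    /* cpumask_cycle() returns the next set CPU after its first argument,
     * wrapping around the mask. */
    return cpumask_cycle(i % nr_cpu_ids, online);
}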
#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_priv)
#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
#define RUNQ(_cpu) (&(CSCHED_PCPU(_cpu)->runq))
-#define CSCHED_CPUONLINE(_pool) \
- (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
/*
* Pick from online CPUs in the VCPU's affinity mask, giving a
* preference to its current processor if it is in the mask.
*/
- online = CSCHED_CPUONLINE(vc->domain->cpupool);
+ online = cpupool_scheduler_cpumask(vc->domain->cpupool);
cpumask_and(&cpus, online, vc->cpu_affinity);
cpu = cpumask_test_cpu(vc->processor, &cpus)
? vc->processor
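The pick hunk is cut off mid-statement above; in context, the assignment
falls back to the next eligible CPU when the current one is no longer in
the intersection. A sketch of the complete idiom, reusing the hunk's own
names (vc, cpus, online):

/* Intersect the pool's CPUs with the VCPU's hard affinity, then keep the
 * VCPU where it is if possible, else take the next eligible CPU. */
cpumask_and(&cpus, online, vc->cpu_affinity);
cpu = cpumask_test_cpu(vc->processor, &cpus)
        ? vc->processor
        : cpumask_cycle(vc->processor, &cpus);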
int peer_cpu;
BUG_ON( cpu != snext->vcpu->processor );
- online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
+ online = cpupool_scheduler_cpumask(per_cpu(cpupool, cpu));
/* If this CPU is going offline, we shouldn't steal work. */
if ( unlikely(!cpumask_test_cpu(cpu, online)) )
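The guard keeps a CPU that has dropped out of its pool's scheduler mask
from pulling work it may no longer run; the peers it would steal from are
confined to the same mask. Roughly, the loop that follows has this shape
(the actual stealing step is elided):

/* Sketch: only peers inside the same pool's scheduler mask are
 * work-stealing candidates (peer_cpu and online as in the hunk above). */
for_each_cpu ( peer_cpu, online )
{
    if ( peer_cpu == cpu )
        continue;
    /* ... try to steal a runnable VCPU from peer_cpu's runqueue ... */
}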
((struct csched_private *)((_ops)->sched_data))
#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_priv)
#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
-#define CSCHED_CPUONLINE(_pool) \
- (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
/* CPU to runq_id macro */
#define c2r(_ops, _cpu) (CSCHED_PRIV(_ops)->runq_map[(_cpu)])
/* CPU to runqueue struct macro */
#include <xen/time.h>
#include <xen/errno.h>
-#define SEDF_CPUONLINE(_pool) \
- (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
-
#ifndef NDEBUG
#define SEDF_STATS
#define CHECK(_p) \
cpumask_t online_affinity;
cpumask_t *online;
- online = SEDF_CPUONLINE(v->domain->cpupool);
+ online = cpupool_scheduler_cpumask(v->domain->cpupool);
cpumask_and(&online_affinity, v->cpu_affinity, online);
return cpumask_first(&online_affinity);
}
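Worth noting: sEDF's picker has no stickiness; it simply takes the first
CPU of pool ∩ affinity, where the credit picker above prefers
vc->processor. With an empty intersection, cpumask_first() returns
nr_cpu_ids, so callers are expected to keep affinity and pool membership
consistent.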
*/
if ( tasklet_work_scheduled ||
(list_empty(runq) && list_empty(waitq)) ||
- unlikely(!cpumask_test_cpu(cpu, SEDF_CPUONLINE(per_cpu(cpupool, cpu)))) )
+ unlikely(!cpumask_test_cpu(cpu,
+ cpupool_scheduler_cpumask(per_cpu(cpupool, cpu)))) )
{
ret.task = IDLETASK(cpu);
ret.time = SECONDS(1);
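The effect: a CPU that has left its pool's scheduler mask parks on the
idle task in one-second slices, picking up no sEDF work while it is being
moved between pools; this is the sEDF counterpart of the credit
scheduler's no-stealing check above.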
#define DOM2OP(_d) (((_d)->cpupool == NULL) ? &ops : ((_d)->cpupool->sched))
#define VCPU2OP(_v) (DOM2OP((_v)->domain))
-#define VCPU2ONLINE(_v) \
- (((_v)->domain->cpupool == NULL) ? &cpu_online_map \
- : (_v)->domain->cpupool->cpu_valid)
+#define VCPU2ONLINE(_v) cpupool_online_cpumask((_v)->domain->cpupool)
static inline void trace_runstate_change(struct vcpu *v, int new_state)
{
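VCPU2ONLINE() is the domain-facing mask, so it maps to the online variant.
A typical consumer (a sketch modeled on affinity validation; the
surrounding function is not part of this hunk):

/* Sketch: reject an affinity mask sharing no CPU with the domain's pool
 * (for a pool-less domain, VCPU2ONLINE() is the whole online map). */
if ( !cpumask_intersects(VCPU2ONLINE(v), affinity) )
    return -EINVAL;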
cpumask_t *cpus;
sched = (c == NULL) ? &ops : c->sched;
- cpus = (c == NULL) ? &cpupool_free_cpus : c->cpu_valid;
+ cpus = cpupool_scheduler_cpumask(c);
printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
SCHED_OP(sched, dump_settings);
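Here `cpus` drives the per-CPU part of the dump: for a NULL pool, the
default scheduler's free CPUs are walked. Assuming this interface's
per-CPU dump hook, the continuation is roughly:

/* Sketch: dump state for each CPU the pool's scheduler manages
 * (i is an int; dump_cpu_state is the per-CPU dump op). */
for_each_cpu ( i, cpus )
{
    printk("CPU[%02d] ", i);
    SCHED_OP(sched, dump_cpu_state, i);
}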
atomic_t refcnt;
};
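+/*
+ * cpupool_scheduler_cpumask(): CPUs governed by the pool's scheduler.
+ * For CPUs in no pool (NULL) this is the set of free CPUs, which the
+ * default scheduler (&ops) manages.
+ * cpupool_online_cpumask(): CPUs a domain in the pool may run on; with
+ * no pool this is every online CPU in the system.
+ */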
+#define cpupool_scheduler_cpumask(_pool) \
+ (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
+#define cpupool_online_cpumask(_pool) \
+ (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
+
#endif /* __XEN_SCHED_IF_H__ */
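Taken together, the two macros differ only for CPUs outside any pool: the
default scheduler owns exactly the free CPUs, while a pool-less domain may
be placed on any online CPU. A small demo of that distinction (the
function is hypothetical, not part of this patch):

/* Hypothetical illustration of the NULL-pool cases. */
static void cpupool_mask_demo(void)
{
    cpumask_t *sched_cpus = cpupool_scheduler_cpumask(NULL); /* free CPUs */
    cpumask_t *dom_cpus   = cpupool_online_cpumask(NULL);    /* online map */

    printk("free: %u, online: %u\n",
           cpumask_weight(sched_cpus), cpumask_weight(dom_cpus));
}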