#define SCHED_OP(opsptr, fn, ...) \
(( (opsptr)->fn != NULL ) ? (opsptr)->fn(opsptr, ##__VA_ARGS__ ) \
: (typeof((opsptr)->fn(opsptr, ##__VA_ARGS__)))0 )
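
For reference: when a hook is unset, SCHED_OP() collapses to a zero of
the hook's return type, so callers never need per-hook NULL checks. A
freestanding sketch of the same trick, with toy names rather than the
Xen types, relying on GCC's typeof over an unevaluated call expression:

#include <stdio.h>
#include <stddef.h>

struct toy_ops {
    int (*score)(struct toy_ops *ops, int x);
};

/* Same shape as SCHED_OP: call the hook if present, else yield a typed
 * zero. typeof() does not evaluate its operand, so the call expression
 * inside it is never actually executed. */
#define TOY_OP(opsptr, fn, ...)                                     \
    (( (opsptr)->fn != NULL ) ? (opsptr)->fn(opsptr, ##__VA_ARGS__) \
     : (typeof((opsptr)->fn(opsptr, ##__VA_ARGS__)))0 )

static int score_impl(struct toy_ops *ops, int x) { return x * 2; }

int main(void)
{
    struct toy_ops full  = { .score = score_impl };
    struct toy_ops empty = { .score = NULL };

    printf("%d\n", TOY_OP(&full, score, 21));  /* hook runs: 42 */
    printf("%d\n", TOY_OP(&empty, score, 21)); /* typed zero: 0 */
    return 0;
}
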
-#define DOM2OP(_d) (((_d)->cpupool == NULL) ? &ops : ((_d)->cpupool->sched))
-static inline struct scheduler *VCPU2OP(const struct vcpu *v)
+static inline struct scheduler *dom_scheduler(const struct domain *d)
+{
+ if ( likely(d->cpupool != NULL) )
+ return d->cpupool->sched;
+
+ /*
+ * If d->cpupool is NULL, this is the idle domain. This is special
+ * because the idle domain does not really belong to any cpupool, and,
+ * hence, does not really have a scheduler.
+ *
+ * This is (should be!) only called like this when allocating the idle
+ * vCPUs for the first time, during boot, in which case what we want
+ * is the default scheduler that was chosen at boot.
+ */
+ ASSERT(is_idle_domain(d));
+ return &ops;
+}
+
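
As a side note, dom_scheduler()'s control flow is easy to model in
isolation. A minimal sketch with made-up structures (the toy_* names
are illustrative, not the Xen types):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_scheduler { const char *name; };
struct toy_cpupool   { struct toy_scheduler *sched; };
struct toy_domain    { struct toy_cpupool *cpupool; bool is_idle; };

/* Stands in for the global 'ops', i.e. the scheduler chosen at boot. */
static struct toy_scheduler boot_scheduler = { "boot-default" };

static struct toy_scheduler *toy_dom_scheduler(const struct toy_domain *d)
{
    /* A normal domain always lives in a cpupool and uses its scheduler. */
    if ( d->cpupool != NULL )
        return d->cpupool->sched;

    /* Only the idle domain is allowed to be pool-less: it falls back
     * to the boot-time default. */
    assert(d->is_idle);
    return &boot_scheduler;
}
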
+static inline struct scheduler *vcpu_scheduler(const struct vcpu *v)
{
struct domain *d = v->domain;
init_timer(&v->poll_timer, poll_timer_fn,
v, v->processor);
- v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
+ v->sched_priv = SCHED_OP(dom_scheduler(d), alloc_vdata, v,
+ d->sched_priv);
if ( v->sched_priv == NULL )
return 1;
}
else
{
- SCHED_OP(DOM2OP(d), insert_vcpu, v);
+ SCHED_OP(dom_scheduler(d), insert_vcpu, v);
}
return 0;
domain_pause(d);
- old_ops = DOM2OP(d);
+ old_ops = dom_scheduler(d);
old_domdata = d->sched_priv;
for_each_vcpu ( d, v )
kill_timer(&v->poll_timer);
if ( test_and_clear_bool(v->is_urgent) )
atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
- SCHED_OP(VCPU2OP(v), remove_vcpu, v);
- SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
+ SCHED_OP(vcpu_scheduler(v), remove_vcpu, v);
+ SCHED_OP(vcpu_scheduler(v), free_vdata, v->sched_priv);
}
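
Note the pairing: the private data a scheduler hands out from its
alloc_vdata hook in sched_init_vcpu() is what gets passed back to
free_vdata here. A hypothetical scheduler would implement the pair
roughly like this (illustrative only; real schedulers also maintain
runqueues, credits, etc.):

#include <stdlib.h>

struct toy_vcpu;
struct toy_priv { struct toy_vcpu *vcpu; unsigned int weight; };

/* alloc_vdata hook: build the scheduler's per-vCPU bookkeeping.
 * (dom_priv would carry per-domain defaults; unused in this toy.) */
static void *toy_alloc_vdata(struct toy_vcpu *v, void *dom_priv)
{
    struct toy_priv *p = calloc(1, sizeof(*p));

    (void)dom_priv;
    if ( p == NULL )
        return NULL;            /* the caller treats NULL as failure */
    p->vcpu = v;
    p->weight = 256;            /* a made-up default */
    return p;
}

/* free_vdata hook: must accept exactly what alloc_vdata returned. */
static void toy_free_vdata(void *priv)
{
    free(priv);
}
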
int sched_init_domain(struct domain *d, int poolid)
SCHED_STAT_CRANK(dom_init);
TRACE_1D(TRC_SCHED_DOM_ADD, d->domain_id);
- return SCHED_OP(DOM2OP(d), init_domain, d);
+ return SCHED_OP(dom_scheduler(d), init_domain, d);
}
void sched_destroy_domain(struct domain *d)
SCHED_STAT_CRANK(dom_destroy);
TRACE_1D(TRC_SCHED_DOM_REM, d->domain_id);
- SCHED_OP(DOM2OP(d), destroy_domain, d);
+ SCHED_OP(dom_scheduler(d), destroy_domain, d);
cpupool_rm_domain(d);
}
if ( v->runstate.state == RUNSTATE_runnable )
vcpu_runstate_change(v, RUNSTATE_offline, NOW());
- SCHED_OP(VCPU2OP(v), sleep, v);
+ SCHED_OP(vcpu_scheduler(v), sleep, v);
}
vcpu_schedule_unlock_irqrestore(lock, flags, v);
{
if ( v->runstate.state >= RUNSTATE_blocked )
vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
- SCHED_OP(VCPU2OP(v), wake, v);
+ SCHED_OP(vcpu_scheduler(v), wake, v);
}
else if ( !(v->pause_flags & VPF_blocked) )
{
* Actual CPU switch to new CPU. This is safe because the lock
* pointer can't change while the current lock is held.
*/
- if ( VCPU2OP(v)->migrate )
- SCHED_OP(VCPU2OP(v), migrate, v, new_cpu);
+ if ( vcpu_scheduler(v)->migrate )
+ SCHED_OP(vcpu_scheduler(v), migrate, v, new_cpu);
else
v->processor = new_cpu;
}
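
The presence check on the migrate hook, instead of a plain SCHED_OP()
call, is needed because the fallback here is an action rather than a
value: when a scheduler has no per-CPU state to fix up, the generic
code must still retarget the vCPU itself. The idiom, reduced to a toy
(hypothetical names):

#include <stddef.h>

struct toy_vcpu { unsigned int processor; };

struct toy_scheduler {
    /* Optional: schedulers with per-CPU private state implement this
     * so they can move that state along with the vCPU. */
    void (*migrate)(struct toy_vcpu *v, unsigned int new_cpu);
};

static void toy_vcpu_move(struct toy_scheduler *s, struct toy_vcpu *v,
                          unsigned int new_cpu)
{
    if ( s->migrate )
        s->migrate(v, new_cpu);   /* scheduler retargets v itself */
    else
        v->processor = new_cpu;   /* generic fallback: just retarget */
}
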
break;
/* Select a new CPU. */
- new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
+ new_cpu = SCHED_OP(vcpu_scheduler(v), pick_cpu, v);
if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
break;
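
This is the usual "pick, then re-validate" dance: pick_cpu runs under
the lock currently held, and its answer is only accepted if that same
lock also covers the chosen CPU (several CPUs may share one schedule
lock) and the CPU is still valid in the domain's cpupool; otherwise
the caller re-locks and picks again. A much-simplified standalone
sketch of the retry structure, with POSIX spinlocks and a made-up
policy standing in for Xen's per-CPU schedule locks and pick_cpu:

#include <pthread.h>
#include <stdlib.h>

#define TOY_NR_CPUS 4

/* One lock per CPU; in Xen several CPUs may share one lock, which is
 * exactly why the pointer comparison above can succeed. */
static pthread_spinlock_t toy_lock[TOY_NR_CPUS];

/* Hypothetical placement policy. */
static unsigned int toy_pick_cpu(void)
{
    return (unsigned int)(rand() % TOY_NR_CPUS);
}

/* Keep re-picking until the answer was computed under the very lock
 * that protects it, so nobody can have invalidated it meanwhile. */
static unsigned int toy_pick_cpu_stable(unsigned int cpu)
{
    unsigned int new_cpu;

    pthread_spin_lock(&toy_lock[cpu]);
    for ( ; ; )
    {
        new_cpu = toy_pick_cpu();
        if ( new_cpu == cpu )               /* covered by held lock */
            break;
        pthread_spin_unlock(&toy_lock[cpu]);
        cpu = new_cpu;                      /* chase and re-validate */
        pthread_spin_lock(&toy_lock[cpu]);
    }
    pthread_spin_unlock(&toy_lock[cpu]);
    return new_cpu;
}

int main(void)
{
    for ( unsigned int i = 0; i < TOY_NR_CPUS; i++ )
        pthread_spin_init(&toy_lock[i], PTHREAD_PROCESS_PRIVATE);
    return (int)toy_pick_cpu_stable(0);
}
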
spin_unlock_irq(lock);
lock = vcpu_schedule_lock_irq(v);
- v->processor = SCHED_OP(VCPU2OP(v), pick_cpu, v);
+ v->processor = SCHED_OP(vcpu_scheduler(v), pick_cpu, v);
spin_unlock_irq(lock);
}
struct vcpu *v = current;
spinlock_t *lock = vcpu_schedule_lock_irq(v);
- SCHED_OP(VCPU2OP(v), yield, v);
+ SCHED_OP(vcpu_scheduler(v), yield, v);
vcpu_schedule_unlock_irq(lock, v);
SCHED_STAT_CRANK(vcpu_yield);
if ( ret )
return ret;
- if ( op->sched_id != DOM2OP(d)->sched_id )
+ if ( op->sched_id != dom_scheduler(d)->sched_id )
return -EINVAL;
switch ( op->cmd )
/* NB: the pluggable scheduler code needs to take care
* of locking by itself. */
- if ( (ret = SCHED_OP(DOM2OP(d), adjust, d, op)) == 0 )
+ if ( (ret = SCHED_OP(dom_scheduler(d), adjust, d, op)) == 0 )
TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);
return ret;
/* Check for migration request /after/ clearing running flag. */
smp_mb();
- SCHED_OP(VCPU2OP(prev), context_saved, prev);
+ SCHED_OP(vcpu_scheduler(prev), context_saved, prev);
if ( unlikely(prev->pause_flags & VPF_migrating) )
vcpu_migrate(prev);