csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
{
struct csched_vcpu *svc = vc->sched_priv;
+ spinlock_t *lock;
+ unsigned long flags;
+
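+ /* The runq check and the insertion below must hold the vcpu's scheduler lock. */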
+ lock = vcpu_schedule_lock_irqsave(vc, &flags);
if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
__runq_insert(svc);
+ vcpu_schedule_unlock_irqrestore(lock, flags, vc);
+
SCHED_STAT_CRANK(vcpu_insert);
}

rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
{
struct rt_vcpu *svc = rt_vcpu(vc);
s_time_t now = NOW();
+ spinlock_t *lock;
/* do not add the idle vcpu to the dom's vcpu list */
if ( is_idle_vcpu(vc) )
return;
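+ /* Updating the deadline and queueing the vcpu both require its scheduler lock. */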
+ lock = vcpu_schedule_lock_irq(vc);
if ( now >= svc->cur_deadline )
rt_update_deadline(now, svc);
if ( !__vcpu_on_q(svc) && vcpu_runnable(vc) && !vc->is_running )
__runq_insert(ops, svc);
+ vcpu_schedule_unlock_irq(lock, vc);
/* add rt_vcpu svc to scheduler-specific vcpu list of the dom */
list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);

int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
{
- unsigned long flags;
struct vcpu *idle;
spinlock_t *lock;
void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
return -ENOMEM;
}
- lock = pcpu_schedule_lock_irqsave(cpu, &flags);
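+ /* Interrupts are enabled here, so the plain _irq lock variants suffice. */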
+ lock = pcpu_schedule_lock_irq(cpu);
SCHED_OP(old_ops, tick_suspend, cpu);
vpriv_old = idle->sched_priv;
ppriv_old = per_cpu(schedule_data, cpu).sched_priv;
per_cpu(schedule_data, cpu).sched_priv = ppriv;
SCHED_OP(new_ops, tick_resume, cpu);
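+ /* Idle vcpus are never put on a runqueue, so there is nothing to insert here. */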
- SCHED_OP(new_ops, insert_vcpu, idle);
- pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
+ pcpu_schedule_unlock_irq(lock, cpu);
SCHED_OP(old_ops, free_vdata, vpriv_old);
SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);