From ae2f41e3d7e7798537b7ea6dbb9a0c6aeb1179e3 Mon Sep 17 00:00:00 2001
From: Dario Faggioli
Date: Tue, 24 Nov 2015 14:48:34 +0100
Subject: [PATCH] sched: fix locking for insert_vcpu() in credit1 and RTDS

The insert_vcpu() hook is handled with inconsistent locking.
In fact, schedule_cpu_switch() calls the hook with the runqueue
lock held, while sched_move_domain() relies on the hook
implementations to take the lock themselves (and, since that is
not done in Credit1 and RTDS, such an operation is not safe in
those cases).

This is fixed as follows:
 - take the lock in the hook implementations, in the specific
   schedulers' code;
 - avoid calling insert_vcpu(), for the idle vCPU, in
   schedule_cpu_switch(). In fact, idle vCPUs are set to run
   immediately, and the various schedulers won't insert them
   into their runqueues anyway, even when explicitly asked to.

While there, still in schedule_cpu_switch(), locking with
_irq() is enough (there is no need for *_irqsave()).

Signed-off-by: Dario Faggioli
Reviewed-by: Meng Xu
Reviewed-by: George Dunlap
---
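For reference, a minimal, self-contained C sketch of the locking
discipline this patch adopts (a pthread mutex stands in for the
per-vCPU scheduler lock; all names below are hypothetical and for
illustration only, not Xen code): the insert_vcpu() implementations
now take and drop the lock themselves, so a caller such as
sched_move_domain() is safe without holding it.

/* Illustration only: the "callee takes the lock" discipline. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t runq_lock = PTHREAD_MUTEX_INITIALIZER;
static int runq_len;                  /* stand-in for a runqueue */

/* Like the fixed csched_vcpu_insert()/rt_vcpu_insert(): the hook
 * serializes against the runqueue itself, so every caller is safe. */
static void vcpu_insert(int vcpu_id)
{
    pthread_mutex_lock(&runq_lock);    /* cf. vcpu_schedule_lock_irqsave() */
    runq_len++;                        /* cf. __runq_insert() */
    pthread_mutex_unlock(&runq_lock);  /* cf. vcpu_schedule_unlock_irqrestore() */
    printf("vCPU %d inserted, runq_len=%d\n", vcpu_id, runq_len);
}

int main(void)
{
    /* A caller no longer needs to hold (or know about) the lock. */
    vcpu_insert(0);
    vcpu_insert(1);
    return 0;
}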
 xen/common/sched_credit.c | 6 ++++++
 xen/common/sched_rt.c     | 3 +++
 xen/common/schedule.c     | 6 ++----
 3 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 72a210911a..496cc45cb7 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -910,10 +910,16 @@ static void
 csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched_vcpu *svc = vc->sched_priv;
+    spinlock_t *lock;
+    unsigned long flags;
+
+    lock = vcpu_schedule_lock_irqsave(vc, &flags);
 
     if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
         __runq_insert(svc);
 
+    vcpu_schedule_unlock_irqrestore(lock, flags, vc);
+
     SCHED_STAT_CRANK(vcpu_insert);
 }
 
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 822f23c07c..3a66c9afc8 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -622,16 +622,19 @@ rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
 {
     struct rt_vcpu *svc = rt_vcpu(vc);
     s_time_t now = NOW();
+    spinlock_t *lock;
 
     /* not addlocate idle vcpu to dom vcpu list */
     if ( is_idle_vcpu(vc) )
         return;
 
+    lock = vcpu_schedule_lock_irq(vc);
     if ( now >= svc->cur_deadline )
         rt_update_deadline(now, svc);
 
     if ( !__vcpu_on_q(svc) && vcpu_runnable(vc) && !vc->is_running )
         __runq_insert(ops, svc);
+    vcpu_schedule_unlock_irq(lock, vc);
 
     /* add rt_vcpu svc to scheduler-specific vcpu list of the dom */
     list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 20f5f5617b..f38df71d57 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1487,7 +1487,6 @@ void __init scheduler_init(void)
 
 int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
-    unsigned long flags;
     struct vcpu *idle;
     spinlock_t *lock;
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
@@ -1508,7 +1507,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
         return -ENOMEM;
     }
 
-    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
+    lock = pcpu_schedule_lock_irq(cpu);
 
     SCHED_OP(old_ops, tick_suspend, cpu);
     vpriv_old = idle->sched_priv;
@@ -1517,9 +1516,8 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     ppriv_old = per_cpu(schedule_data, cpu).sched_priv;
     per_cpu(schedule_data, cpu).sched_priv = ppriv;
     SCHED_OP(new_ops, tick_resume, cpu);
-    SCHED_OP(new_ops, insert_vcpu, idle);
 
-    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
+    pcpu_schedule_unlock_irq(lock, cpu);
 
     SCHED_OP(old_ops, free_vdata, vpriv_old);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
-- 
2.30.2