sched: fix locking for insert_vcpu() in credit1 and RTDS
author     Dario Faggioli <dario.faggioli@citrix.com>
           Tue, 24 Nov 2015 13:48:34 +0000 (14:48 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Tue, 24 Nov 2015 13:48:34 +0000 (14:48 +0100)
The insert_vcpu() hook is handled with inconsistent locking.
In fact, schedule_cpu_switch() calls the hook with the
runqueue lock held, while sched_move_domain() relies on the
hook implementations to take the lock themselves. Since
Credit1 and RTDS do not do that, the operation is not safe
in those cases.
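
In simplified form (a sketch distilled from xen/common/schedule.c
as it stood before this patch; the loop shape in sched_move_domain()
is assumed from context, not quoted verbatim):

    /* schedule_cpu_switch(): hook called with the runqueue lock held. */
    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
    ...
    SCHED_OP(new_ops, insert_vcpu, idle);
    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);

    /* sched_move_domain(): hook called without the runqueue lock,
     * which is only safe if the hook takes the lock itself -- and
     * Credit1's and RTDS's hooks did not. */
    for_each_vcpu ( d, v )
        SCHED_OP(c->sched, insert_vcpu, v);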

This is fixed as follows:
 - take the lock in the hook implementations, in specific
   schedulers' code;
 - avoid calling insert_vcpu() for the idle vCPU in
   schedule_cpu_switch(). Idle vCPUs are set to run
   immediately, and the schedulers won't insert them in
   their runqueues anyway, even when explicitly asked
   to (see the sketch after this list).
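
The guards that make the idle case safe are visible in the hunks
below; distilled (a sketch, not verbatim code):

    /* RTDS (rt_vcpu_insert): bail out explicitly for idle vCPUs. */
    if ( is_idle_vcpu(vc) )
        return;

    /* Credit1 (csched_vcpu_insert): the idle vCPU is already running
     * on its pCPU, so the !vc->is_running check keeps it off the
     * runqueue. */
    if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
        __runq_insert(svc);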

While there, still in schedule_cpu_switch(), locking with
_irq() is enough: the function always runs with interrupts
enabled, so there is no need for *_irqsave().

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Reviewed-by: Meng Xu <mengxu@cis.upenn.edu>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
xen/common/sched_credit.c
xen/common/sched_rt.c
xen/common/schedule.c

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 72a210911a3edb7146906d778941b36fecef06a1..496cc45cb7f8fc7da9e7d91d5f63bd47e6b12d67 100644
@@ -910,10 +910,16 @@ static void
 csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched_vcpu *svc = vc->sched_priv;
+    spinlock_t *lock;
+    unsigned long flags;
+
+    lock = vcpu_schedule_lock_irqsave(vc, &flags);
 
     if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
         __runq_insert(svc);
 
+    vcpu_schedule_unlock_irqrestore(lock, flags, vc);
+
     SCHED_STAT_CRANK(vcpu_insert);
 }
 
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 822f23c07c744f5e55ddaf868cbf25d14c7a8070..3a66c9afc88fd93fd3cc6c16103807f29c6d9ba9 100644
@@ -622,16 +622,19 @@ rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
 {
     struct rt_vcpu *svc = rt_vcpu(vc);
     s_time_t now = NOW();
+    spinlock_t *lock;
 
     /* Do not add the idle vCPU to the domain's vCPU list. */
     if ( is_idle_vcpu(vc) )
         return;
 
+    lock = vcpu_schedule_lock_irq(vc);
     if ( now >= svc->cur_deadline )
         rt_update_deadline(now, svc);
 
     if ( !__vcpu_on_q(svc) && vcpu_runnable(vc) && !vc->is_running )
         __runq_insert(ops, svc);
+    vcpu_schedule_unlock_irq(lock, vc);
 
     /* add rt_vcpu svc to scheduler-specific vcpu list of the dom */
     list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 20f5f5617b4d3832ae9c3279b8f6758c4af9c105..f38df71d57b5f945f865bc818c65fba92ed6c89c 100644
@@ -1487,7 +1487,6 @@ void __init scheduler_init(void)
 
 int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
-    unsigned long flags;
     struct vcpu *idle;
     spinlock_t *lock;
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
@@ -1508,7 +1507,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
         return -ENOMEM;
     }
 
-    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
+    lock = pcpu_schedule_lock_irq(cpu);
 
     SCHED_OP(old_ops, tick_suspend, cpu);
     vpriv_old = idle->sched_priv;
@@ -1517,9 +1516,8 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     ppriv_old = per_cpu(schedule_data, cpu).sched_priv;
     per_cpu(schedule_data, cpu).sched_priv = ppriv;
     SCHED_OP(new_ops, tick_resume, cpu);
-    SCHED_OP(new_ops, insert_vcpu, idle);
 
-    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
+    pcpu_schedule_unlock_irq(lock, cpu);
 
     SCHED_OP(old_ops, free_vdata, vpriv_old);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);