xen: sched: avoid spuriously re-enabling IRQs in csched2_switch_sched()
author Dario Faggioli <dario.faggioli@citrix.com>
Tue, 26 Apr 2016 16:56:56 +0000 (18:56 +0200)
committer George Dunlap <george.dunlap@citrix.com>
Mon, 9 May 2016 14:41:25 +0000 (15:41 +0100)
Interrupts are already disabled when the hook is called
(from schedule_cpu_switch()), so we must use spin_lock()
and spin_unlock() rather than the _irq variants, which
would spuriously re-enable interrupts on unlock.

Add an ASSERT() so we will notice if this code and its
caller ever get out of sync with respect to disabling
interrupts, and add one at the exact same occurrence of
this pattern in Credit1 too.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
Release-acked-by: Wei Liu <wei.liu2@citrix.com>
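
For illustration only (not part of the patch): below is a
minimal, stand-alone C sketch of the failure mode, assuming a
caller that has already disabled interrupts. The irqs_enabled
flag and all helper names are user-space stand-ins for the
real Xen primitives (local_irq_disable(), spin_lock_irq(),
ASSERT(), ...), not the hypervisor implementations.

/*
 * Why spin_unlock_irq() is wrong here: the caller (in Xen,
 * schedule_cpu_switch()) has already disabled interrupts, and
 * the *_irq unlock unconditionally re-enables them, leaving the
 * caller running with IRQs on when it expects them off.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;   /* stand-in for the CPU IRQ flag */

static void local_irq_disable(void) { irqs_enabled = false; }
static void local_irq_enable(void)  { irqs_enabled = true; }

/* *_irq helpers: disable on lock, unconditionally enable on unlock. */
static void spin_lock_irq(void)   { local_irq_disable(); /* take lock */ }
static void spin_unlock_irq(void) { /* drop lock */ local_irq_enable(); }

/* Plain helpers: leave the IRQ state alone. */
static void spin_lock(void)   { /* take lock */ }
static void spin_unlock(void) { /* drop lock */ }

/* Buggy hook: re-enables IRQs behind the caller's back. */
static void switch_sched_buggy(void)
{
    spin_lock_irq();
    /* ... update per-cpu scheduler state ... */
    spin_unlock_irq();          /* IRQs now on, caller doesn't know */
}

/* Fixed hook: asserts the caller's IRQ state and preserves it. */
static void switch_sched_fixed(void)
{
    assert(!irqs_enabled);      /* mirrors ASSERT(!local_irq_is_enabled()) */
    spin_lock();
    /* ... update per-cpu scheduler state ... */
    spin_unlock();              /* IRQ state untouched */
}

int main(void)
{
    local_irq_disable();        /* what the caller does before the hook */
    switch_sched_buggy();
    printf("after buggy hook: irqs_enabled=%d (spuriously re-enabled)\n",
           irqs_enabled);

    local_irq_disable();
    switch_sched_fixed();
    printf("after fixed hook: irqs_enabled=%d (still disabled)\n",
           irqs_enabled);
    return 0;
}

Running the sketch shows the _irq unlock turning interrupts
back on behind the caller's back, while the plain lock/unlock
pair preserves the caller's IRQ state, which is exactly what
the ASSERT() in the patch is there to guard.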
xen/common/sched_credit.c
xen/common/sched_credit2.c

index db4d42ae3e8958a904271493e8a41ecb0232d0aa..a38a63d9aa16f596d43853cf9f6f801bdbc8d47d 100644 (file)
@@ -615,6 +615,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * schedule_cpu_switch()). It actually may or may not be the 'right'
      * one for this cpu, but that is ok for preventing races.
      */
+    ASSERT(!local_irq_is_enabled());
     spin_lock(&prv->lock);
     init_pdata(prv, pdata, cpu);
     spin_unlock(&prv->lock);
index f3b62acfca7c5ff384c27130b0a6472cfa983250..f95e50969aa80e9bc3dbd5de737b12434f0f0bcf 100644 (file)
@@ -2238,7 +2238,8 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * And owning exactly that one (the lock of the old scheduler of this
      * cpu) is what is necessary to prevent races.
      */
-    spin_lock_irq(&prv->lock);
+    ASSERT(!local_irq_is_enabled());
+    spin_lock(&prv->lock);
 
     idle_vcpu[cpu]->sched_priv = vdata;
 
@@ -2263,7 +2264,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     smp_mb();
     per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
 
-    spin_unlock_irq(&prv->lock);
+    spin_unlock(&prv->lock);
 }
 
 static void