fix locking in cpu_disable_scheduler()
author     Jan Beulich <jbeulich@suse.com>
           Tue, 29 Oct 2013 08:57:14 +0000 (09:57 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Tue, 29 Oct 2013 08:57:14 +0000 (09:57 +0100)
Commit eedd6039 ("scheduler: adjust internal locking interface"), by now
using proper spin lock constructs, uncovered a bug after all: when
bringing down a CPU, cpu_disable_scheduler() gets called with interrupts
disabled, and hence the use of vcpu_schedule_lock_irq() was never really
correct - its unlock path left the caller with interrupts enabled despite
having disabled them explicitly.
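
For illustration only, here is a minimal, self-contained C sketch of that
difference, using mock stand-ins rather than Xen's real primitives or lock
names: an _irq style unlock unconditionally re-enables interrupts, while an
_irqsave/_irqrestore pair restores whatever state the caller was in.

    /* Toy model (not Xen's actual code): why an _irq lock variant breaks a
     * caller that already runs with interrupts disabled. */
    #include <stdio.h>
    #include <stdbool.h>

    static bool irqs_enabled = true;            /* mock CPU interrupt flag */

    static void local_irq_disable(void) { irqs_enabled = false; }
    static void local_irq_enable(void)  { irqs_enabled = true; }
    static unsigned long local_irq_save(void)
    {
        unsigned long flags = irqs_enabled;     /* remember previous state */
        irqs_enabled = false;
        return flags;
    }
    static void local_irq_restore(unsigned long flags) { irqs_enabled = flags; }

    /* _irq variant: disables on lock, unconditionally enables on unlock. */
    static void lock_irq(void)   { local_irq_disable(); /* take lock */ }
    static void unlock_irq(void) { /* drop lock */ local_irq_enable(); }

    /* _irqsave variant: saves and later restores the caller's state. */
    static void lock_irqsave(unsigned long *flags) { *flags = local_irq_save(); }
    static void unlock_irqrestore(unsigned long flags) { local_irq_restore(flags); }

    int main(void)
    {
        unsigned long flags;

        local_irq_disable();                    /* caller already has irqs off,
                                                 * like cpu_disable_scheduler() */
        lock_irq();
        unlock_irq();
        printf("after _irq pair:     irqs %s (wrong)\n",
               irqs_enabled ? "on" : "off");

        local_irq_disable();
        lock_irqsave(&flags);
        unlock_irqrestore(flags);
        printf("after _irqsave pair: irqs %s (correct)\n",
               irqs_enabled ? "on" : "off");
        return 0;
    }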

Fixing this, however, surfaced another problem: the call path
vcpu_migrate() -> evtchn_move_pirqs() wants to acquire the event lock,
which is a non-IRQ-safe one, and hence check_lock() doesn't like it being
acquired when interrupts are already off. As we're in stop-machine
context here, getting the interrupt state management wrong during lock
acquire/release is out of the question, so the simple solution is to
suppress spin lock debugging while the stop-machine callback gets run.
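
Again for illustration only, a rough model (not Xen's actual check_lock()
logic; mock_lock, spin_debug and the checks below are simplified stand-ins)
of why the debug check would fire here and why bracketing the stop-machine
window with spin_debug_disable()/spin_debug_enable() silences it:

    /* Simplified model: a lock not marked IRQ-safe must not be taken with
     * interrupts already off - unless spin lock debugging is suspended. */
    #include <assert.h>
    #include <stdbool.h>

    static bool irqs_enabled;      /* mock CPU interrupt flag */
    static bool spin_debug = true; /* mirrors spin_debug_disable()/enable() */

    struct mock_lock {
        bool irq_safe;             /* lock intended to be taken with irqs off? */
    };

    static void spin_debug_disable(void) { spin_debug = false; }
    static void spin_debug_enable(void)  { spin_debug = true; }

    static void check_lock(const struct mock_lock *l)
    {
        /* Complain about a non-IRQ-safe lock taken with interrupts off. */
        if ( spin_debug )
            assert(l->irq_safe || irqs_enabled);
    }

    int main(void)
    {
        struct mock_lock event_lock = { .irq_safe = false };

        irqs_enabled = false;      /* stop-machine context: irqs are off */

        spin_debug_disable();      /* as added to stop_machine_run() */
        check_lock(&event_lock);   /* would trip the assert without it */
        spin_debug_enable();

        return 0;
    }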

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
xen/common/schedule.c
xen/common/stop_machine.c

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index bfa6bee283a4893f40cc5cf0e84aec9f2a99e400..0f45f07fb5f28db128406d7daea7da94ec33763b 100644
@@ -601,7 +601,8 @@ int cpu_disable_scheduler(unsigned int cpu)
     {
         for_each_vcpu ( d, v )
         {
-            spinlock_t *lock = vcpu_schedule_lock_irq(v);
+            unsigned long flags;
+            spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
 
             cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
             if ( cpumask_empty(&online_affinity) &&
@@ -622,14 +623,12 @@ int cpu_disable_scheduler(unsigned int cpu)
             if ( v->processor == cpu )
             {
                 set_bit(_VPF_migrating, &v->pause_flags);
-                vcpu_schedule_unlock_irq(lock, v);
+                vcpu_schedule_unlock_irqrestore(lock, flags, v);
                 vcpu_sleep_nosync(v);
                 vcpu_migrate(v);
             }
             else
-            {
-                vcpu_schedule_unlock_irq(lock, v);
-            }
+                vcpu_schedule_unlock_irqrestore(lock, flags, v);
 
             /*
              * A vcpu active in the hypervisor will not be migratable.
diff --git a/xen/common/stop_machine.c b/xen/common/stop_machine.c
index 05905047723b090629baa7b3b1329f83e29c25bb..932e5a7c69b2b9718af2d8ac97d8d837b64b331f 100644
@@ -110,6 +110,7 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
     local_irq_disable();
     stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
     stopmachine_wait_state();
+    spin_debug_disable();
 
     stopmachine_set_state(STOPMACHINE_INVOKE);
     if ( (cpu == smp_processor_id()) || (cpu == NR_CPUS) )
@@ -117,6 +118,7 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
     stopmachine_wait_state();
     ret = stopmachine_data.fn_result;
 
+    spin_debug_enable();
     stopmachine_set_state(STOPMACHINE_EXIT);
     stopmachine_wait_state();
     local_irq_enable();