From 41a0cc9e26160a89245c9ba3233e3f70bf9cd4b4 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 29 Oct 2013 09:57:14 +0100 Subject: [PATCH] fix locking in cpu_disable_scheduler() So commit eedd6039 ("scheduler: adjust internal locking interface") uncovered - by now using proper spin lock constructs - a bug after all: When bringing down a CPU, cpu_disable_scheduler() gets called with interrupts disabled, and hence the use of vcpu_schedule_lock_irq() was never really correct (i.e. the caller ended up with interrupts enabled despite having disabled them explicitly). Fixing this however surfaced another problem: The call path vcpu_migrate() -> evtchn_move_pirqs() wants to acquire the event lock, which however is a non-IRQ-safe one, and hence check_lock() doesn't like this lock to be acquired when interrupts are already off. As we're in stop-machine context here, getting things wrong wrt interrupt state management during lock acquire/release is out of the question though, so the simple solution to this appears to be to just suppress spin lock debugging for the period of time while the stop machine callback gets run. 
Signed-off-by: Jan Beulich Reviewed-by: Andrew Cooper Acked-by: Keir Fraser --- xen/common/schedule.c | 9 ++++----- xen/common/stop_machine.c | 2 ++ 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/xen/common/schedule.c b/xen/common/schedule.c index bfa6bee283..0f45f07fb5 100644 --- a/xen/common/schedule.c +++ b/xen/common/schedule.c @@ -601,7 +601,8 @@ int cpu_disable_scheduler(unsigned int cpu) { for_each_vcpu ( d, v ) { - spinlock_t *lock = vcpu_schedule_lock_irq(v); + unsigned long flags; + spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags); cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid); if ( cpumask_empty(&online_affinity) && @@ -622,14 +623,12 @@ int cpu_disable_scheduler(unsigned int cpu) if ( v->processor == cpu ) { set_bit(_VPF_migrating, &v->pause_flags); - vcpu_schedule_unlock_irq(lock, v); + vcpu_schedule_unlock_irqrestore(lock, flags, v); vcpu_sleep_nosync(v); vcpu_migrate(v); } else - { - vcpu_schedule_unlock_irq(lock, v); - } + vcpu_schedule_unlock_irqrestore(lock, flags, v); /* * A vcpu active in the hypervisor will not be migratable. diff --git a/xen/common/stop_machine.c b/xen/common/stop_machine.c index 0590504772..932e5a7c69 100644 --- a/xen/common/stop_machine.c +++ b/xen/common/stop_machine.c @@ -110,6 +110,7 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) local_irq_disable(); stopmachine_set_state(STOPMACHINE_DISABLE_IRQ); stopmachine_wait_state(); + spin_debug_disable(); stopmachine_set_state(STOPMACHINE_INVOKE); if ( (cpu == smp_processor_id()) || (cpu == NR_CPUS) ) @@ -117,6 +118,7 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) stopmachine_wait_state(); ret = stopmachine_data.fn_result; + spin_debug_enable(); stopmachine_set_state(STOPMACHINE_EXIT); stopmachine_wait_state(); local_irq_enable(); -- 2.30.2