From 27c31d386746bea3daa5b3733c9f986a9c3cccc3 Mon Sep 17 00:00:00 2001
From: Keir Fraser <keir.fraser@citrix.com>
Date: Fri, 14 May 2010 17:07:52 +0100
Subject: [PATCH] Move cpu hotplug routines into common cpu.c file.

Also simplify the locking (reverting to use of spin_trylock, as
returning EBUSY/EAGAIN seems unavoidable after all). In particular this
should continue to ensure that stop_machine_run() does not have
cpu_online_map change under its feet.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
---
 xen/arch/ia64/xen/xensetup.c      |   1 +
 xen/arch/x86/acpi/power.c         |   9 +-
 xen/arch/x86/platform_hypercall.c |   8 +-
 xen/arch/x86/setup.c              |   1 +
 xen/arch/x86/smpboot.c            | 232 +++---------------------
 xen/arch/x86/sysctl.c             |   1 +
 xen/common/cpu.c                  | 189 ++++++++++++++++++++++--
 xen/common/spinlock.c             |  13 +-
 xen/common/stop_machine.c         |  20 ++-
 xen/include/asm-x86/smp.h         |   4 -
 xen/include/xen/cpu.h             |  21 ++-
 xen/include/xen/spinlock.h        |   2 +
 12 files changed, 252 insertions(+), 249 deletions(-)
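Note on the new locking pattern: code that only needs cpu_online_map and
cpu_present_map to stay stable now takes the maps with the non-blocking
get_cpu_maps() and backs off on contention, instead of blocking on
cpu_add_remove_lock. A minimal sketch of the intended calling pattern,
using only interfaces introduced by this patch (the function name and
loop body are illustrative, not part of the patch):

    #include <xen/cpu.h>
    #include <xen/cpumask.h>
    #include <xen/errno.h>

    static int do_work_with_stable_maps(void)
    {
        int cpu;

        if ( !get_cpu_maps() )  /* trylock: fails if hotplug is in flight */
            return -EBUSY;      /* caller is expected to retry later */

        for_each_online_cpu ( cpu )
        {
            /* cpu_online_map cannot change while the maps are held. */
        }

        put_cpu_maps();
        return 0;
    }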
diff --git a/xen/arch/ia64/xen/xensetup.c b/xen/arch/ia64/xen/xensetup.c
index a7e4791771..e3d9d36011 100644
--- a/xen/arch/ia64/xen/xensetup.c
+++ b/xen/arch/ia64/xen/xensetup.c
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include <xen/cpu.h>

 unsigned long total_pages;

diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index 9efae905c2..c62f122882 100644
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <xen/cpu.h>
 #include
 #include

@@ -138,12 +139,8 @@ static int enter_state(u32 state)

     freeze_domains();

-    disable_nonboot_cpus();
-    if ( num_online_cpus() != 1 )
-    {
-        error = -EBUSY;
+    if ( (error = disable_nonboot_cpus()) )
         goto enable_cpu;
-    }

     cpufreq_del_cpu(0);

@@ -207,7 +204,9 @@ static int enter_state(u32 state)
  enable_cpu:
     cpufreq_add_cpu(0);
     microcode_resume_cpu(0);
+    mtrr_aps_sync_begin();
     enable_nonboot_cpus();
+    mtrr_aps_sync_end();
     thaw_domains();
     spin_unlock(&pm_lock);
     return error;
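Note: disable_nonboot_cpus() now reports failure directly, so
enter_state() above no longer infers it from num_online_cpus(); it
simply unwinds via the enable_cpu path. Because get_cpu_maps() and
cpu_hotplug_begin() are trylocks, callers of cpu_down()/cpu_up()
elsewhere must likewise be prepared for -EBUSY. One possible retry
pattern (sketch only; example_offline() is illustrative, and whether to
spin or back off is the caller's policy):

    #include <xen/cpu.h>
    #include <xen/errno.h>

    static int example_offline(unsigned int cpu)
    {
        int err;

        /* Another map holder or hotplug operation may be active. */
        while ( (err = cpu_down(cpu)) == -EBUSY )
            cpu_relax();

        return err;
    }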
diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
index b3ab4b7446..2ab7617a86 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -410,7 +410,11 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)

         g_info = &op->u.pcpu_info;

-        spin_lock(&cpu_add_remove_lock);
+        if ( !get_cpu_maps() )
+        {
+            ret = -EBUSY;
+            break;
+        }

         if ( (g_info->xen_cpuid >= NR_CPUS) ||
              (g_info->xen_cpuid < 0) ||
@@ -429,7 +433,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)

         g_info->max_present = last_cpu(cpu_present_map);

-        spin_unlock(&cpu_add_remove_lock);
+        put_cpu_maps();

         ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
     }

diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index d6c07f0895..9d6036f300 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -43,6 +43,7 @@
 #include /* for bzimage_headroom */
 #include /* for generic_apic_probe */
 #include
+#include <xen/cpu.h>

 #if defined(CONFIG_X86_64)
 #define BOOTSTRAP_DIRECTMAP_END (1UL << 32) /* 4GB */

diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 8b857b3809..50ac661926 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -46,7 +46,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -58,7 +57,6 @@
 #include
 #include
 #include
-#include
 #include

 #define setup_trampoline() (bootsym_phys(trampoline_realmode_entry))
@@ -1310,169 +1308,9 @@ void __cpu_die(unsigned int cpu)
     }
 }

-static int take_cpu_down(void *unused)
-{
-    void *hcpu = (void *)(long)smp_processor_id();
-    int rc;
-
-    spin_lock(&cpu_add_remove_lock);
-
-    if (cpu_notifier_call_chain(CPU_DYING, hcpu) != NOTIFY_DONE)
-        BUG();
-
-    rc = __cpu_disable();
-
-    spin_unlock(&cpu_add_remove_lock);
-
-    return rc;
-}
-
-/*
- * Protects against concurrent offline/online requests for a single CPU.
- * We need this extra protection because cpu_down() cannot continuously hold
- * the cpu_add_remove_lock, as it cannot be held across stop_machine_run().
- */
-static cpumask_t cpu_offlining;
-
-int cpu_down(unsigned int cpu)
-{
-    int err, notifier_rc, nr_calls;
-    void *hcpu = (void *)(long)cpu;
-
-    spin_lock(&cpu_add_remove_lock);
-
-    if ((cpu == 0) || !cpu_online(cpu) || cpu_isset(cpu, cpu_offlining)) {
-        spin_unlock(&cpu_add_remove_lock);
-        return -EINVAL;
-    }
-
-    cpu_set(cpu, cpu_offlining);
-
-    printk("Prepare to bring CPU%d down...\n", cpu);
-
-    notifier_rc = __cpu_notifier_call_chain(
-        CPU_DOWN_PREPARE, hcpu, -1, &nr_calls);
-    if (notifier_rc != NOTIFY_DONE) {
-        err = notifier_to_errno(notifier_rc);
-        nr_calls--;
-        notifier_rc = __cpu_notifier_call_chain(
-            CPU_DOWN_FAILED, hcpu, nr_calls, NULL);
-        BUG_ON(notifier_rc != NOTIFY_DONE);
-        goto out;
-    }
-
-    spin_unlock(&cpu_add_remove_lock);
-    err = stop_machine_run(take_cpu_down, NULL, cpu);
-    spin_lock(&cpu_add_remove_lock);
-
-    if (err < 0) {
-        notifier_rc = cpu_notifier_call_chain(CPU_DOWN_FAILED, hcpu);
-        BUG_ON(notifier_rc != NOTIFY_DONE);
-        goto out;
-    }
-
-    __cpu_die(cpu);
-    BUG_ON(cpu_online(cpu));
-
-    notifier_rc = cpu_notifier_call_chain(CPU_DEAD, hcpu);
-    BUG_ON(notifier_rc != NOTIFY_DONE);
-
-out:
-    if (!err) {
-        printk("CPU %u is now offline\n", cpu);
-        send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
-    } else {
-        printk("Failed to take down CPU %u (error %d)\n", cpu, err);
-    }
-    cpu_clear(cpu, cpu_offlining);
-    spin_unlock(&cpu_add_remove_lock);
-    return err;
-}
-
-int cpu_up(unsigned int cpu)
-{
-    int err = 0;
-
-    spin_lock(&cpu_add_remove_lock);
-
-    if (cpu_online(cpu) || cpu_isset(cpu, cpu_offlining)) {
-        err = -EINVAL;
-        goto out;
-    }
-
-    err = __cpu_up(cpu);
-    if (err < 0)
-        goto out;
-
-out:
-    if (!err)
-        send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
-    spin_unlock(&cpu_add_remove_lock);
-    return err;
-}
-
-/* From kernel/power/main.c */
-/* This is protected by pm_sem semaphore */
-static cpumask_t frozen_cpus;
-
-void disable_nonboot_cpus(void)
-{
-    int cpu, error;
-
-    error = 0;
-    cpus_clear(frozen_cpus);
-    printk("Freezing cpus ...\n");
-    for_each_online_cpu(cpu) {
-        if (cpu == 0)
-            continue;
-        error = cpu_down(cpu);
-        /* No need to check EBUSY here */
-        ASSERT(error != -EBUSY);
-        if (!error) {
-            cpu_set(cpu, frozen_cpus);
-            printk("CPU%d is down\n", cpu);
-            continue;
-        }
-        printk("Error taking cpu %d down: %d\n", cpu, error);
-    }
-    BUG_ON(raw_smp_processor_id() != 0);
-    if (error)
-        panic("cpus not sleeping");
-}
-
-void enable_nonboot_cpus(void)
-{
-    int cpu, error;
-
-    printk("Thawing cpus ...\n");
-    mtrr_aps_sync_begin();
-    for_each_cpu_mask(cpu, frozen_cpus) {
-        error = cpu_up(cpu);
-        /* No conflict will happen here */
-        ASSERT(error != -EBUSY);
-        if (!error) {
-            printk("CPU%d is up\n", cpu);
-            continue;
-        }
-        printk("Error taking cpu %d up: %d\n", cpu, error);
-        panic("Not enough cpus");
-    }
-    mtrr_aps_sync_end();
-    cpus_clear(frozen_cpus);
-
-    /*
-     * Cleanup possible dangling ends after sleep...
-     */
-    smpboot_restore_warm_reset_vector();
-}
-
 int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm)
 {
-    int cpu = -1;
-
-#ifndef CONFIG_ACPI
-    return -ENOSYS;
-#endif
+    int node, cpu = -1;

     dprintk(XENLOG_DEBUG, "cpu_add apic_id %x acpi_id %x pxm %x\n",
             apic_id, acpi_id, pxm);

@@ -1480,68 +1318,53 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm)
     if ( acpi_id > MAX_MADT_ENTRIES || apic_id > MAX_APICS || pxm > 256 )
         return -EINVAL;

+    if ( !cpu_hotplug_begin() )
+        return -EBUSY;
+
     /* Detect if the cpu has been added before */
-    if ( x86_acpiid_to_apicid[acpi_id] != 0xff)
+    if ( x86_acpiid_to_apicid[acpi_id] != 0xff )
     {
-        if (x86_acpiid_to_apicid[acpi_id] != apic_id)
-            return -EINVAL;
-        else
-            return -EEXIST;
+        cpu = (x86_acpiid_to_apicid[acpi_id] != apic_id)
+            ? -EINVAL : -EEXIST;
+        goto out;
     }

     if ( physid_isset(apic_id, phys_cpu_present_map) )
-        return -EEXIST;
-
-    spin_lock(&cpu_add_remove_lock);
-
-    cpu = mp_register_lapic(apic_id, 1);
-
-    if (cpu < 0) {
-        spin_unlock(&cpu_add_remove_lock);
-        return cpu;
+    {
+        cpu = -EEXIST;
+        goto out;
     }

+    if ( (cpu = mp_register_lapic(apic_id, 1)) < 0 )
+        goto out;
+
     x86_acpiid_to_apicid[acpi_id] = apic_id;

     if ( !srat_disabled() )
     {
-        int node;
-
-        node = setup_node(pxm);
-        if (node < 0)
+        if ( (node = setup_node(pxm)) < 0 )
         {
             dprintk(XENLOG_WARNING,
                     "Setup node failed for pxm %x\n", pxm);
             x86_acpiid_to_apicid[acpi_id] = 0xff;
             mp_unregister_lapic(apic_id, cpu);
-            spin_unlock(&cpu_add_remove_lock);
-            return node;
+            cpu = node;
+            goto out;
         }
         apicid_to_node[apic_id] = node;
     }

     srat_detect_node(cpu);
     numa_add_cpu(cpu);
-    spin_unlock(&cpu_add_remove_lock);

     dprintk(XENLOG_INFO, "Add CPU %x with index %x\n", apic_id, cpu);

+ out:
+    cpu_hotplug_done();
     return cpu;
 }

 int __devinit __cpu_up(unsigned int cpu)
 {
-    int notifier_rc, ret = 0, nr_calls;
-    void *hcpu = (void *)(long)cpu;
-
-    notifier_rc = __cpu_notifier_call_chain(
-        CPU_UP_PREPARE, hcpu, -1, &nr_calls);
-    if (notifier_rc != NOTIFY_DONE) {
-        ret = notifier_to_errno(notifier_rc);
-        nr_calls--;
-        goto fail;
-    }
-
     /*
      * We do warm boot only on cpus that had booted earlier
      * Otherwise cold boot is all handled from smp_boot_cpus().
@@ -1549,20 +1372,15 @@ int __devinit __cpu_up(unsigned int cpu)
      * when a cpu is taken offline from cpu_exit_clear().
      */
     if (!cpu_isset(cpu, cpu_callin_map)) {
-        ret = __smp_prepare_cpu(cpu);
+        if (__smp_prepare_cpu(cpu))
+            return -EIO;
         smpboot_restore_warm_reset_vector();
     }

-    if (ret) {
-        ret = -EIO;
-        goto fail;
-    }
-
     /* In case one didn't come up */
     if (!cpu_isset(cpu, cpu_callin_map)) {
         printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
-        ret = -EIO;
-        goto fail;
+        return -EIO;
     }

     /* Unleash the CPU! */
@@ -1572,15 +1390,7 @@
         process_pending_softirqs();
     }

-    notifier_rc = cpu_notifier_call_chain(CPU_ONLINE, hcpu);
-    BUG_ON(notifier_rc != NOTIFY_DONE);
     return 0;
-
- fail:
-    notifier_rc = __cpu_notifier_call_chain(
-        CPU_UP_CANCELED, hcpu, nr_calls, NULL);
-    BUG_ON(notifier_rc != NOTIFY_DONE);
-    return ret;
 }
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 4ffc76b145..5d22c5acb5 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <xen/cpu.h>
 #include

 #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)

diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 8a04dd449e..82a111de98 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -1,6 +1,9 @@
 #include
 #include
 #include
+#include <xen/event.h>
+#include <xen/sched.h>
+#include <xen/stop_machine.h>

 /*
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
@@ -26,35 +29,195 @@ const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
 #endif
 };

-DEFINE_SPINLOCK(cpu_add_remove_lock);
+static DEFINE_SPINLOCK(cpu_add_remove_lock);
+
+bool_t get_cpu_maps(void)
+{
+    return spin_trylock_recursive(&cpu_add_remove_lock);
+}
+
+void put_cpu_maps(void)
+{
+    spin_unlock_recursive(&cpu_add_remove_lock);
+}
+
+bool_t cpu_hotplug_begin(void)
+{
+    return get_cpu_maps();
+}
+
+void cpu_hotplug_done(void)
+{
+    put_cpu_maps();
+}

 static RAW_NOTIFIER_HEAD(cpu_chain);

 int register_cpu_notifier(struct notifier_block *nb)
 {
     int ret;
-    spin_lock(&cpu_add_remove_lock);
+    if ( !spin_trylock(&cpu_add_remove_lock) )
+        BUG(); /* Should never fail as we are called only during boot. */
     ret = raw_notifier_chain_register(&cpu_chain, nb);
     spin_unlock(&cpu_add_remove_lock);
     return ret;
 }

-void unregister_cpu_notifier(struct notifier_block *nb)
+static int take_cpu_down(void *unused)
 {
-    spin_lock(&cpu_add_remove_lock);
-    raw_notifier_chain_unregister(&cpu_chain, nb);
-    spin_unlock(&cpu_add_remove_lock);
+    void *hcpu = (void *)(long)smp_processor_id();
+    if ( raw_notifier_call_chain(&cpu_chain, CPU_DYING, hcpu) != NOTIFY_DONE )
+        BUG();
+    return __cpu_disable();
+}
+
+int cpu_down(unsigned int cpu)
+{
+    int err, notifier_rc, nr_calls;
+    void *hcpu = (void *)(long)cpu;
+
+    if ( !cpu_hotplug_begin() )
+        return -EBUSY;
+
+    if ( (cpu == 0) || !cpu_online(cpu) )
+    {
+        cpu_hotplug_done();
+        return -EINVAL;
+    }
+
+    printk("Prepare to bring CPU%d down...\n", cpu);
+
+    notifier_rc = __raw_notifier_call_chain(
+        &cpu_chain, CPU_DOWN_PREPARE, hcpu, -1, &nr_calls);
+    if ( notifier_rc != NOTIFY_DONE )
+    {
+        err = notifier_to_errno(notifier_rc);
+        nr_calls--;
+        notifier_rc = __raw_notifier_call_chain(
+            &cpu_chain, CPU_DOWN_FAILED, hcpu, nr_calls, NULL);
+        BUG_ON(notifier_rc != NOTIFY_DONE);
+        goto out;
+    }
+
+    if ( (err = stop_machine_run(take_cpu_down, NULL, cpu)) < 0 )
+    {
+        notifier_rc = raw_notifier_call_chain(
+            &cpu_chain, CPU_DOWN_FAILED, hcpu);
+        BUG_ON(notifier_rc != NOTIFY_DONE);
+        goto out;
+    }
+
+    __cpu_die(cpu);
+    BUG_ON(cpu_online(cpu));
+
+    notifier_rc = raw_notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu);
+    BUG_ON(notifier_rc != NOTIFY_DONE);
+
+ out:
+    if ( !err )
+    {
+        printk("CPU %u is now offline\n", cpu);
+        send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+    }
+    else
+    {
+        printk("Failed to take down CPU %u (error %d)\n", cpu, err);
+    }
+    cpu_hotplug_done();
+    return err;
 }

-int cpu_notifier_call_chain(unsigned long val, void *v)
+int cpu_up(unsigned int cpu)
 {
-    BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
-    return raw_notifier_call_chain(&cpu_chain, val, v);
+    int notifier_rc, nr_calls, err = 0;
+    void *hcpu = (void *)(long)cpu;
+
+    if ( !cpu_hotplug_begin() )
+        return -EBUSY;
+
+    if ( cpu_online(cpu) || !cpu_present(cpu) )
+    {
+        cpu_hotplug_done();
+        return -EINVAL;
+    }
+
+    notifier_rc = __raw_notifier_call_chain(
+        &cpu_chain, CPU_UP_PREPARE, hcpu, -1, &nr_calls);
+    if ( notifier_rc != NOTIFY_DONE )
+    {
+        err = notifier_to_errno(notifier_rc);
+        nr_calls--;
+        goto fail;
+    }
+
+    err = __cpu_up(cpu);
+    if ( err < 0 )
+        goto fail;
+
+    notifier_rc = raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+    BUG_ON(notifier_rc != NOTIFY_DONE);
+
+    send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+
+    cpu_hotplug_done();
+    return 0;
+
+ fail:
+    notifier_rc = __raw_notifier_call_chain(
+        &cpu_chain, CPU_UP_CANCELED, hcpu, nr_calls, NULL);
+    BUG_ON(notifier_rc != NOTIFY_DONE);
+    cpu_hotplug_done();
+    return err;
 }

-int __cpu_notifier_call_chain(
-    unsigned long val, void *v, int nr_to_call, int *nr_calls)
+static cpumask_t frozen_cpus;
+
+int disable_nonboot_cpus(void)
 {
-    BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
-    return __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call, nr_calls);
+    int cpu, error = 0;
+
+    BUG_ON(raw_smp_processor_id() != 0);
+
+    cpus_clear(frozen_cpus);
+
+    printk("Disabling non-boot CPUs ...\n");
+
+    for_each_online_cpu ( cpu )
+    {
+        if ( cpu == 0 )
+            continue;
+
+        if ( (error = cpu_down(cpu)) )
+        {
+            BUG_ON(error == -EBUSY);
+            printk("Error taking CPU%d down: %d\n", cpu, error);
+            break;
+        }
+
+        cpu_set(cpu, frozen_cpus);
+        printk("CPU%d is down\n", cpu);
+    }
+
+    BUG_ON(!error && (num_online_cpus() != 1));
+    return error;
+}
+
+void enable_nonboot_cpus(void)
+{
+    int cpu, error;
+
+    printk("Enabling non-boot CPUs ...\n");
+
+    for_each_cpu_mask ( cpu, frozen_cpus )
+    {
+        if ( (error = cpu_up(cpu)) )
+        {
+            BUG_ON(error == -EBUSY);
+            printk("Error taking CPU%d up: %d\n", cpu, error);
+            continue;
+        }
+        printk("CPU%d is up\n", cpu);
+    }
+
+    cpus_clear(frozen_cpus);
 }
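Note: subsystems still hook hotplug through register_cpu_notifier(), but
registration must now happen during boot (the trylock in
register_cpu_notifier() BUG()s if it fails, and unregister_cpu_notifier()
is gone). A sketch of a typical subscriber; the example_* names and the
initcall placement are illustrative, not part of this patch:

    #include <xen/cpu.h>
    #include <xen/init.h>
    #include <xen/lib.h>
    #include <xen/notifier.h>

    static int example_cpu_callback(
        struct notifier_block *nfb, unsigned long action, void *hcpu)
    {
        unsigned int cpu = (unsigned long)hcpu;

        switch ( action )
        {
        case CPU_UP_PREPARE:
            /* Allocate per-CPU state here. Only the *_PREPARE
             * notifications may fail (i.e. return != NOTIFY_DONE)
             * to veto the operation. */
            break;
        case CPU_DEAD:
            /* CPU is offline: release its per-CPU state. */
            printk("CPU%u is gone\n", cpu);
            break;
        }

        return NOTIFY_DONE;
    }

    static struct notifier_block example_cpu_nfb = {
        .notifier_call = example_cpu_callback
    };

    /* Boot-time registration, before further CPUs come online. */
    static int __init example_register(void)
    {
        register_cpu_notifier(&example_cpu_nfb);
        return 0;
    }
    __initcall(example_register);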
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index caca8d5c87..b3d4b3ba91 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -186,7 +186,7 @@ void _spin_barrier_irq(spinlock_t *lock)
     local_irq_restore(flags);
 }

-void _spin_lock_recursive(spinlock_t *lock)
+int _spin_trylock_recursive(spinlock_t *lock)
 {
     int cpu = smp_processor_id();

@@ -197,13 +197,22 @@
     if ( likely(lock->recurse_cpu != cpu) )
     {
-        spin_lock(lock);
+        if ( !spin_trylock(lock) )
+            return 0;
         lock->recurse_cpu = cpu;
     }

     /* We support only fairly shallow recursion, else the counter overflows. */
     ASSERT(lock->recurse_cnt < 0xfu);
     lock->recurse_cnt++;
+
+    return 1;
+}
+
+void _spin_lock_recursive(spinlock_t *lock)
+{
+    while ( !spin_trylock_recursive(lock) )
+        cpu_relax();
 }

 void _spin_unlock_recursive(spinlock_t *lock)
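Note: the recursive trylock above nests only on the CPU that already
owns the lock; a different CPU fails immediately instead of spinning,
and _spin_lock_recursive() is now just a relax loop around the trylock.
Illustrative semantics (sketch only; example_lock and example() are not
part of the patch):

    #include <xen/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    void example(void)
    {
        spin_lock_recursive(&example_lock);    /* this CPU becomes owner */

        /* The owning CPU may re-enter without deadlocking: */
        if ( spin_trylock_recursive(&example_lock) )
            spin_unlock_recursive(&example_lock);

        /* Any other CPU calling spin_trylock_recursive() here gets 0. */
        spin_unlock_recursive(&example_lock);  /* count drops to zero */
    }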
diff --git a/xen/common/stop_machine.c b/xen/common/stop_machine.c
index 9f5dd1e799..70856505e3 100644
--- a/xen/common/stop_machine.c
+++ b/xen/common/stop_machine.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include <xen/cpu.h>
 #include
 #include

@@ -72,19 +73,20 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)

     BUG_ON(!local_irq_is_enabled());

+    /* cpu_online_map must not change. */
+    if ( !get_cpu_maps() )
+        return -EBUSY;
+
     allbutself = cpu_online_map;
     cpu_clear(smp_processor_id(), allbutself);
     nr_cpus = cpus_weight(allbutself);

-    if ( nr_cpus == 0 )
-    {
-        BUG_ON(cpu != smp_processor_id());
-        return (*fn)(data);
-    }
-
     /* Must not spin here as the holder will expect us to be descheduled. */
     if ( !spin_trylock(&stopmachine_lock) )
+    {
+        put_cpu_maps();
         return -EBUSY;
+    }

     stopmachine_data.fn = fn;
     stopmachine_data.fn_data = data;
@@ -113,13 +115,17 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)

     spin_unlock(&stopmachine_lock);

+    put_cpu_maps();
+
     return ret;
 }

-static void stopmachine_action(unsigned long unused)
+static void stopmachine_action(unsigned long cpu)
 {
     enum stopmachine_state state = STOPMACHINE_START;

+    BUG_ON(cpu != smp_processor_id());
+
     smp_mb();

     while ( state != STOPMACHINE_EXIT )

diff --git a/xen/include/asm-x86/smp.h b/xen/include/asm-x86/smp.h
index d67165493a..3c85b87956 100644
--- a/xen/include/asm-x86/smp.h
+++ b/xen/include/asm-x86/smp.h
@@ -56,12 +56,8 @@ extern u32 cpu_2_logical_apicid[];
 DECLARE_PER_CPU(int, cpu_state);

 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
-extern int cpu_down(unsigned int cpu);
-extern int cpu_up(unsigned int cpu);
 extern void cpu_exit_clear(void);
 extern void cpu_uninit(void);
-extern void disable_nonboot_cpus(void);
-extern void enable_nonboot_cpus(void);
 int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm);

 /*

diff --git a/xen/include/xen/cpu.h b/xen/include/xen/cpu.h
index 521559e6bb..115dec7896 100644
--- a/xen/include/xen/cpu.h
+++ b/xen/include/xen/cpu.h
@@ -5,13 +5,16 @@
 #include
 #include

-extern spinlock_t cpu_add_remove_lock;
+/* Safely access cpu_online_map, cpu_present_map, etc. */
+bool_t get_cpu_maps(void);
+void put_cpu_maps(void);
+
+/* Safely perform CPU hotplug and update cpu_online_map, etc. */
+bool_t cpu_hotplug_begin(void);
+void cpu_hotplug_done(void);
+
+/* Receive notification of CPU hotplug events. */
 int register_cpu_notifier(struct notifier_block *nb);
-void unregister_cpu_notifier(struct notifier_block *nb);
-int cpu_notifier_call_chain(unsigned long val, void *v);
-int __cpu_notifier_call_chain(
-    unsigned long val, void *v, int nr_to_call, int *nr_calls);

 /*
  * Notification actions: note that only CPU_{UP,DOWN}_PREPARE may fail ---
@@ -25,4 +28,12 @@
 #define CPU_DYING 0x0007 /* CPU is nearly dead (in stop_machine ctxt) */
 #define CPU_DEAD  0x0008 /* CPU is dead */

+/* Perform CPU hotplug. May return -EAGAIN. */
+int cpu_down(unsigned int cpu);
+int cpu_up(unsigned int cpu);
+
+/* Power management. */
+int disable_nonboot_cpus(void);
+void enable_nonboot_cpus(void);
+
 #endif /* __XEN_CPU_H__ */

diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index e1f500c4ec..f6f737d756 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -146,6 +146,7 @@ int _spin_trylock(spinlock_t *lock);
 void _spin_barrier(spinlock_t *lock);
 void _spin_barrier_irq(spinlock_t *lock);

+int _spin_trylock_recursive(spinlock_t *lock);
 void _spin_lock_recursive(spinlock_t *lock);
 void _spin_unlock_recursive(spinlock_t *lock);

@@ -191,6 +192,7 @@ int _rw_is_write_locked(rwlock_t *lock);
  * are any critical regions that cannot form part of such a set, they can use
  * standard spin_[un]lock().
  */
+#define spin_trylock_recursive(l) _spin_trylock_recursive(l)
 #define spin_lock_recursive(l) _spin_lock_recursive(l)
 #define spin_unlock_recursive(l) _spin_unlock_recursive(l)

-- 
2.30.2