.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
{
__initcall_start = .;
+ *(.initcallpresmp.init)
+ __presmp_initcall_end = .;
*(.initcall1.init)
*(.initcall2.init)
*(.initcall3.init)
return 0;
}
-static void __init do_initcalls(void)
-{
- initcall_t *call;
- for ( call = &__initcall_start; call < &__initcall_end; call++ )
- (*call)();
-}
-
/*
* IPF loader only supports one command line currently, for
* both xen and guest kernel. This function provides pre-parse
/* Enable IRQ to receive IPI (needed for ITC sync). */
local_irq_enable();
+ do_presmp_initcalls();
+
printk("num_online_cpus=%d, max_cpus=%d\n",num_online_cpus(),max_cpus);
for_each_present_cpu ( i )
{
if ( num_online_cpus() >= max_cpus )
break;
- if ( !cpu_online(i) ) {
- rcu_online_cpu(i);
- __cpu_up(i);
- }
+ if ( !cpu_online(i) )
+ cpu_up(i);
}
local_irq_disable();
smp_cpus_done(max_cpus);
#endif
- initialise_gdb(); /* could be moved earlier */
-
iommu_setup(); /* setup iommu if available */
do_initcalls();
#include <xen/delay.h>
#include <xen/smp.h>
#include <xen/mm.h>
+#include <xen/cpu.h>
#include <asm/processor.h>
#include <public/sysctl.h>
#include <asm/system.h>
cmci_discover();
}
-void cpu_mcheck_distribute_cmci(void)
+static void cpu_mcheck_distribute_cmci(void)
{
if (cmci_support && !mce_disabled)
on_each_cpu(__cpu_mcheck_distribute_cmci, NULL, 0);
}
}
-void cpu_mcheck_disable(void)
+static void cpu_mcheck_disable(void)
{
clear_in_cr4(X86_CR4_MCE);
return ret;
}
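+/* Quiesce machine checks on a dying CPU; redistribute CMCI ownership once it is dead. */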
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ switch ( action )
+ {
+ case CPU_DYING:
+ cpu_mcheck_disable();
+ break;
+ case CPU_DEAD:
+ cpu_mcheck_distribute_cmci();
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
+static int __init intel_mce_initcall(void)
+{
+ register_cpu_notifier(&cpu_nfb);
+ return 0;
+}
+presmp_initcall(intel_mce_initcall);
#include <xen/acpi.h>
#include <xen/pci.h>
#include <xen/paging.h>
+#include <xen/cpu.h>
#include <public/sysctl.h>
#include <asm/regs.h>
#include <asm/mc146818rtc.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <xen/paging.h>
+#include <xen/cpu.h>
#include <asm/shadow.h>
#include <asm/hap.h>
#include <asm/current.h>
unsigned long __attribute__ ((__section__ (".bss.page_aligned")))
hvm_io_bitmap[3*PAGE_SIZE/BYTES_PER_LONG];
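+/* Let the HVM vendor code prepare per-CPU state before a new CPU is brought up. */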
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ int rc = 0;
+
+ switch ( action )
+ {
+ case CPU_UP_PREPARE:
+ rc = hvm_funcs.cpu_prepare(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
void hvm_enable(struct hvm_function_table *fns)
{
extern int hvm_port80_allowed;
if ( hvm_funcs.hap_supported )
printk("HVM: Hardware Assisted Paging detected.\n");
+
+ register_cpu_notifier(&cpu_nfb);
}
/*
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <xen/acpi.h>
+#include <xen/cpu.h>
#include <asm/current.h>
#include <public/platform.h>
#include <acpi/cpufreq/processor_perf.h>
}
}
-static void __init do_initcalls(void)
-{
- initcall_t *call;
- for ( call = &__initcall_start; call < &__initcall_end; call++ )
- (*call)();
-}
-
#define EARLY_FAIL(f, a...) do { \
printk( f , ## a ); \
for ( ; ; ) halt(); \
console_init_postirq();
+ do_presmp_initcalls();
+
for_each_present_cpu ( i )
{
if ( num_online_cpus() >= max_cpus )
break;
if ( !cpu_online(i) )
- {
- rcu_online_cpu(i);
- __cpu_up(i);
- }
+ cpu_up(i);
/* Set up cpu_to_node[]. */
srat_detect_node(i);
printk("Brought up %ld CPUs\n", (long)num_online_cpus());
smp_cpus_done(max_cpus);
- initialise_gdb(); /* could be moved earlier */
-
do_initcalls();
if ( opt_watchdog )
if ( !tboot_protect_mem_regions() )
panic("Could not protect TXT memory regions\n");
- /* Create initial cpupool 0. */
- cpupool0 = cpupool_create(0, NULL);
- if ( (cpupool0 == NULL) || cpupool0_cpu_assign(cpupool0) )
- panic("Error creating cpupool 0\n");
-
/* Create initial domain 0. */
dom0 = domain_create(0, DOMCRF_s3_integrity, DOM0_SSIDREF);
if ( (dom0 == NULL) || (alloc_dom0_vcpu0() == NULL) )
#include <xen/serial.h>
#include <xen/numa.h>
#include <xen/event.h>
+#include <xen/cpu.h>
#include <asm/current.h>
#include <asm/mc146818rtc.h>
#include <asm/desc.h>
DEFINE_PER_CPU(int, cpu_state) = { 0 };
void *stack_base[NR_CPUS];
-DEFINE_SPINLOCK(cpu_add_remove_lock);
/*
* The bootstrap kernel entry code has set these up. Save them for
{
int cpu = smp_processor_id();
- /*
- * Perhaps use cpufreq to drop frequency, but that could go
- * into generic code.
- *
- * We won't take down the boot processor on i386 due to some
- * interrupts only being able to be serviced by the BSP.
- * Especially so if we're not using an IOAPIC -zwane
- */
- if (cpu == 0)
- return -EBUSY;
-
local_irq_disable();
clear_local_APIC();
/* Allow any queued timer interrupts to get serviced */
time_suspend();
- cpu_mcheck_disable();
-
remove_siblinginfo(cpu);
/* It's now safe to remove this processor from the online map */
for (;;) {
/* They ack this in play_dead by setting CPU_DEAD */
- if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
- printk ("CPU %u is now offline\n", cpu);
+ if (per_cpu(cpu_state, cpu) == CPU_DEAD)
return;
- }
mdelay(100);
mb();
process_pending_softirqs();
static int take_cpu_down(void *unused)
{
- return __cpu_disable();
+ void *hcpu = (void *)(long)smp_processor_id();
+ int rc;
+
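+ /* cpu_notifier_call_chain() asserts that cpu_add_remove_lock is held. */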
+ spin_lock(&cpu_add_remove_lock);
+
+ if (cpu_notifier_call_chain(CPU_DYING, hcpu) != NOTIFY_DONE)
+ BUG();
+
+ rc = __cpu_disable();
+
+ spin_unlock(&cpu_add_remove_lock);
+
+ return rc;
}
/*
int cpu_down(unsigned int cpu)
{
- int err = 0;
+ int err, notifier_rc, nr_calls;
+ void *hcpu = (void *)(long)cpu;
spin_lock(&cpu_add_remove_lock);
cpu_set(cpu, cpu_offlining);
- err = cpupool_cpu_remove(cpu);
- if (err)
- goto out;
-
printk("Prepare to bring CPU%d down...\n", cpu);
- cpufreq_del_cpu(cpu);
+ notifier_rc = __cpu_notifier_call_chain(
+ CPU_DOWN_PREPARE, hcpu, -1, &nr_calls);
+ if (notifier_rc != NOTIFY_DONE) {
+ err = notifier_to_errno(notifier_rc);
+ nr_calls--;
+ notifier_rc = __cpu_notifier_call_chain(
+ CPU_DOWN_FAILED, hcpu, nr_calls, NULL);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+ goto out;
+ }
spin_unlock(&cpu_add_remove_lock);
err = stop_machine_run(take_cpu_down, NULL, cpu);
spin_lock(&cpu_add_remove_lock);
if (err < 0) {
- cpupool_cpu_add(cpu);
+ notifier_rc = cpu_notifier_call_chain(CPU_DOWN_FAILED, hcpu);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
goto out;
}
__cpu_die(cpu);
BUG_ON(cpu_online(cpu));
- migrate_tasklets_from_cpu(cpu);
- cpu_mcheck_distribute_cmci();
+ notifier_rc = cpu_notifier_call_chain(CPU_DEAD, hcpu);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
out:
- if (!err)
+ if (!err) {
+ printk("CPU %u is now offline\n", cpu);
send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+ } else {
+ printk("Failed to take down CPU %u (error %d)\n", cpu, err);
+ }
cpu_clear(cpu, cpu_offlining);
spin_unlock(&cpu_add_remove_lock);
return err;
goto out;
}
- rcu_online_cpu(cpu);
-
err = __cpu_up(cpu);
if (err < 0)
goto out;
int __devinit __cpu_up(unsigned int cpu)
{
- int ret;
-
- ret = hvm_cpu_prepare(cpu);
- if (ret)
- return ret;
+ int notifier_rc, ret = 0, nr_calls;
+ void *hcpu = (void *)(long)cpu;
+
+ notifier_rc = __cpu_notifier_call_chain(
+ CPU_UP_PREPARE, hcpu, -1, &nr_calls);
+ if (notifier_rc != NOTIFY_DONE) {
+ ret = notifier_to_errno(notifier_rc);
+ nr_calls--;
+ goto fail;
+ }
/*
* We do warm boot only on cpus that had booted earlier
smpboot_restore_warm_reset_vector();
}
- if (ret)
- return -EIO;
+ if (ret) {
+ ret = -EIO;
+ goto fail;
+ }
/* In case one didn't come up */
if (!cpu_isset(cpu, cpu_callin_map)) {
printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
- local_irq_enable();
- return -EIO;
+ ret = -EIO;
+ goto fail;
}
- local_irq_enable();
- /*per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;*/
/* Unleash the CPU! */
cpu_set(cpu, smp_commenced_mask);
while (!cpu_isset(cpu, cpu_online_map)) {
process_pending_softirqs();
}
- cpupool_cpu_add(cpu);
- cpufreq_add_cpu(cpu);
+ notifier_rc = cpu_notifier_call_chain(CPU_ONLINE, hcpu);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
return 0;
+
+ fail:
+ notifier_rc = __cpu_notifier_call_chain(
+ CPU_UP_CANCELED, hcpu, nr_calls, NULL);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+ return ret;
}
} :text
.initcall.init : {
__initcall_start = .;
+ *(.initcallpresmp.init)
+ __presmp_initcall_end = .;
*(.initcall1.init)
__initcall_end = .;
} :text
#include <xen/config.h>
#include <xen/cpumask.h>
+#include <xen/cpu.h>
/*
* cpu_bit_bitmap[] is a special, "compressed" data structure that
MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};
+
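+/* Serialises CPU hotplug operations and protects the notifier chain below. */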
+DEFINE_SPINLOCK(cpu_add_remove_lock);
+
+static RAW_NOTIFIER_HEAD(cpu_chain);
+
+int register_cpu_notifier(struct notifier_block *nb)
+{
+ int ret;
+ spin_lock(&cpu_add_remove_lock);
+ ret = raw_notifier_chain_register(&cpu_chain, nb);
+ spin_unlock(&cpu_add_remove_lock);
+ return ret;
+}
+
+void unregister_cpu_notifier(struct notifier_block *nb)
+{
+ spin_lock(&cpu_add_remove_lock);
+ raw_notifier_chain_unregister(&cpu_chain, nb);
+ spin_unlock(&cpu_add_remove_lock);
+}
+
+int cpu_notifier_call_chain(unsigned long val, void *v)
+{
+ BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
+ return raw_notifier_call_chain(&cpu_chain, val, v);
+}
+
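+/*
+ * Call at most nr_to_call notifiers; report how many were actually called via
+ * nr_calls, so a failed CPU_*_PREPARE can be unwound precisely.
+ */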
+int __cpu_notifier_call_chain(
+ unsigned long val, void *v, int nr_to_call, int *nr_calls)
+{
+ BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
+ return __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call, nr_calls);
+}
#include <xen/percpu.h>
#include <xen/sched.h>
#include <xen/sched-if.h>
+#include <xen/cpu.h>
#define for_each_cpupool(ptr) \
for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
*/
int cpupool_assign_ncpu(struct cpupool *c, int ncpu)
{
- int i;
- int n;
+ int i, n = 0;
- n = 0;
spin_lock(&cpupool_lock);
for_each_cpu_mask(i, cpupool_free_cpus)
{
return ret;
}
-/*
- * assign cpus to the default cpupool
- * default are all cpus, less cpus may be specified as boot parameter
- * possible failures:
- * - no cpu assigned
- */
-int __init cpupool0_cpu_assign(struct cpupool *c)
-{
- if ( (cpupool0_max_cpus == 0) || (cpupool0_max_cpus > num_online_cpus()) )
- cpupool0_max_cpus = num_online_cpus();
- if ( !cpupool_assign_ncpu(cpupool0, cpupool0_max_cpus) )
- return 1;
- return 0;
-}
-
/*
* add a new domain to a cpupool
* possible failures:
* called to add a new cpu to pool admin
* we add a hotplugged cpu to the cpupool0 to be able to add it to dom0
*/
-void cpupool_cpu_add(unsigned int cpu)
+static void cpupool_cpu_add(unsigned int cpu)
{
- if ( cpupool0 == NULL )
- return;
spin_lock(&cpupool_lock);
cpu_clear(cpu, cpupool_locked_cpus);
cpu_set(cpu, cpupool_free_cpus);
- cpupool_assign_cpu_locked(cpupool0, cpu);
+ if ( cpupool0 != NULL )
+ cpupool_assign_cpu_locked(cpupool0, cpu);
spin_unlock(&cpupool_lock);
- return;
}
/*
* the cpu to be removed is locked to avoid removing it from dom0
* returns failure if not in pool0
*/
-int cpupool_cpu_remove(unsigned int cpu)
+static int cpupool_cpu_remove(unsigned int cpu)
{
int ret = 0;
spin_unlock(&cpupool_lock);
}
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ int rc = 0;
+
+ switch ( action )
+ {
+ case CPU_DOWN_FAILED:
+ case CPU_ONLINE:
+ cpupool_cpu_add(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ rc = cpupool_cpu_remove(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
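+/*
+ * The boot CPU never goes through cpu_up(), so run the callback for it by
+ * hand before registering for hotplug notifications.
+ */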
+static int __init cpupool_presmp_init(void)
+{
+ void *cpu = (void *)(long)smp_processor_id();
+ cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
+ register_cpu_notifier(&cpu_nfb);
+ return 0;
+}
+presmp_initcall(cpupool_presmp_init);
+
static int __init cpupool_init(void)
{
- cpupool_free_cpus = cpu_online_map;
- cpupool_list = NULL;
+ cpupool0 = cpupool_create(0, NULL);
+ BUG_ON(cpupool0 == NULL);
+
+ if ( (cpupool0_max_cpus == 0) || (cpupool0_max_cpus > num_online_cpus()) )
+ cpupool0_max_cpus = num_online_cpus();
+
+ if ( !cpupool_assign_ncpu(cpupool0, cpupool0_max_cpus) )
+ BUG();
+
return 0;
}
__initcall(cpupool_init);
#include <xen/console.h>
#include <xen/errno.h>
#include <xen/delay.h>
+#include <xen/init.h>
#include <asm/byteorder.h>
/* Printk isn't particularly safe just after we've trapped to the
return rc;
}
-void __init
-initialise_gdb(void)
+static int __init initialise_gdb(void)
{
if ( *opt_gdb == '\0' )
- return;
+ return 0;
gdb_ctx->serhnd = serial_parse_handle(opt_gdb);
if ( gdb_ctx->serhnd == -1 )
{
printk("Bad gdb= option '%s'\n", opt_gdb);
- return;
+ return 0;
}
serial_start_sync(gdb_ctx->serhnd);
printk("GDB stub initialised.\n");
+
+ return 0;
}
+presmp_initcall(initialise_gdb);
static void gdb_pause_this_cpu(void *unused)
{
tainted |= flag;
}
+extern initcall_t __initcall_start, __presmp_initcall_end, __initcall_end;
+
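+/*
+ * Pre-SMP initcalls occupy [__initcall_start, __presmp_initcall_end); the
+ * remaining initcalls run up to __initcall_end (see the linker scripts).
+ */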
+void __init do_presmp_initcalls(void)
+{
+ initcall_t *call;
+ for ( call = &__initcall_start; call < &__presmp_initcall_end; call++ )
+ (*call)();
+}
+
+void __init do_initcalls(void)
+{
+ initcall_t *call;
+ for ( call = &__presmp_initcall_end; call < &__initcall_end; call++ )
+ (*call)();
+}
+
# define DO(fn) long do_##fn
#endif
int ret = NOTIFY_DONE;
struct notifier_block *nb, *next_nb;
+ if ( nr_calls )
+ *nr_calls = 0;
+
nb = rcu_dereference(*nl);
while ( nb && nr_to_call )
#include <xen/bitops.h>
#include <xen/percpu.h>
#include <xen/softirq.h>
+#include <xen/cpu.h>
/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk = {
rdp->blimit = blimit;
}
-void __devinit rcu_online_cpu(int cpu)
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch ( action )
+ {
+ case CPU_UP_PREPARE: {
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
+ break;
+ }
+ default:
+ break;
+ }
- rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
+ return NOTIFY_DONE;
}
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
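+/* Initialise RCU state for the boot CPU directly; later CPUs go through the notifier. */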
void __init rcu_init(void)
{
- rcu_online_cpu(smp_processor_id());
+ void *cpu = (void *)(long)smp_processor_id();
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
+ register_cpu_notifier(&cpu_nfb);
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/tasklet.h>
+#include <xen/cpu.h>
/* Some subsystems call into us before we are initialised. We ignore them. */
-static bool_t tasklets_initialised;
+static cpumask_t tasklets_initialised;
DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);
spin_lock_irqsave(&tasklet_lock, flags);
- if ( tasklets_initialised && !t->is_dead )
+ if ( cpu_isset(cpu, tasklets_initialised) && !t->is_dead )
{
t->scheduled_on = cpu;
if ( !t->is_running )
spin_unlock_irqrestore(&tasklet_lock, flags);
}
-void migrate_tasklets_from_cpu(unsigned int cpu)
+static void migrate_tasklets_from_cpu(unsigned int cpu)
{
struct list_head *list = &per_cpu(tasklet_list, cpu);
unsigned long flags;
t->data = data;
}
-void __init tasklet_subsys_init(void)
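+/* Per-CPU tasklet lists are set up at CPU_UP_PREPARE; pending tasklets are migrated off a CPU at CPU_DEAD. */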
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
{
- unsigned int cpu;
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch ( action )
+ {
+ case CPU_UP_PREPARE:
+ if ( !cpu_test_and_set(cpu, tasklets_initialised) )
+ INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
+ break;
+ case CPU_DEAD:
+ migrate_tasklets_from_cpu(cpu);
+ break;
+ default:
+ break;
+ }
- for_each_possible_cpu ( cpu )
- INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
- tasklets_initialised = 1;
+void __init tasklet_subsys_init(void)
+{
+ void *hcpu = (void *)(long)smp_processor_id();
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
+ register_cpu_notifier(&cpu_nfb);
}
/*
#include <xen/xmalloc.h>
#include <xen/guest_access.h>
#include <xen/domain.h>
+#include <xen/cpu.h>
#include <asm/bug.h>
#include <asm/io.h>
#include <asm/config.h>
str = end;
} while (str);
}
+
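+/* Keep cpufreq in step with CPU hotplug: add a CPU when it comes online (or a down fails), remove it before it goes down. */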
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch ( action )
+ {
+ case CPU_DOWN_FAILED:
+ case CPU_ONLINE:
+ (void)cpufreq_add_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ (void)cpufreq_del_cpu(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
+static int __init cpufreq_presmp_init(void)
+{
+ void *cpu = (void *)(long)smp_processor_id();
+ cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
+ register_cpu_notifier(&cpu_nfb);
+ return 0;
+}
+presmp_initcall(cpufreq_presmp_init);
+
void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);
-static inline int
-hvm_cpu_prepare(unsigned int cpu)
-{
- return (hvm_funcs.cpu_prepare ? hvm_funcs.cpu_prepare(cpu) : 0);
-}
-
static inline int hvm_cpu_up(void)
{
return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 1);
void mcheck_init(struct cpuinfo_x86 *c);
asmlinkage void do_machine_check(struct cpu_user_regs *regs);
-void cpu_mcheck_distribute_cmci(void);
-void cpu_mcheck_disable(void);
int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
/* State of each CPU. */
-#define CPU_ONLINE 0x0002 /* CPU is up */
-#define CPU_DEAD 0x0004 /* CPU is dead */
DECLARE_PER_CPU(int, cpu_state);
-extern spinlock_t(cpu_add_remove_lock);
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
extern int cpu_down(unsigned int cpu);
--- /dev/null
+#ifndef __XEN_CPU_H__
+#define __XEN_CPU_H__
+
+#include <xen/types.h>
+#include <xen/spinlock.h>
+#include <xen/notifier.h>
+
+extern spinlock_t cpu_add_remove_lock;
+
+int register_cpu_notifier(struct notifier_block *nb);
+void unregister_cpu_notifier(struct notifier_block *nb);
+int cpu_notifier_call_chain(unsigned long val, void *v);
+int __cpu_notifier_call_chain(
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+
+/*
+ * Notification actions: only CPU_{UP,DOWN}_PREPARE may fail; handlers must
+ * return NOTIFY_DONE for every other notification.
+ */
+#define CPU_UP_PREPARE 0x0002 /* CPU is coming up */
+#define CPU_UP_CANCELED 0x0003 /* CPU is no longer coming up */
+#define CPU_ONLINE 0x0004 /* CPU is up */
+#define CPU_DOWN_PREPARE 0x0005 /* CPU is going down */
+#define CPU_DOWN_FAILED 0x0006 /* CPU is no longer going down */
+#define CPU_DYING 0x0007 /* CPU is nearly dead (in stop_machine ctxt) */
+#define CPU_DEAD 0x0008 /* CPU is dead */
+
+#endif /* __XEN_CPU_H__ */
#define SIGALRM 14
#define SIGTERM 15
-void initialise_gdb(void);
-
-#else
-
-#define initialise_gdb() ((void)0)
-
#endif
#endif /* __XEN_GDBSTUB_H__ */
__attribute_used__ __attribute__ ((__section__ (".exit.data")))
#define __initsetup \
__attribute_used__ __attribute__ ((__section__ (".init.setup")))
-#define __init_call \
- __attribute_used__ __attribute__ ((__section__ (".initcall1.init")))
+#define __init_call(lvl) \
+ __attribute_used__ __attribute__ ((__section__ (".initcall" lvl ".init")))
#define __exit_call \
__attribute_used__ __attribute__ ((__section__ (".exitcall.exit")))
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
-extern initcall_t __initcall_start, __initcall_end;
-
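+/* presmp_initcall()s run from do_presmp_initcalls(), before secondary CPUs are brought up. */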
+#define presmp_initcall(fn) \
+ static initcall_t __initcall_##fn __init_call("presmp") = fn
#define __initcall(fn) \
- static initcall_t __initcall_##fn __init_call = fn
+ static initcall_t __initcall_##fn __init_call("1") = fn
#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
+void do_presmp_initcalls(void);
+void do_initcalls(void);
+
/*
* Used for kernel command line parameter setup
*/
struct raw_notifier_head *nh, unsigned long val, void *v,
int nr_to_call, int *nr_calls);
-#define NOTIFY_DONE 0x0000 /* Don't care */
-#define NOTIFY_OK 0x0001 /* Suits me */
-#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
-#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
-/* Bad/Veto action */
-/*
- * Clean way to return from the notifier and stop further calls.
- */
-#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK)
+#define NOTIFY_DONE 0x0000
+#define NOTIFY_STOP_MASK 0x8000
+#define NOTIFY_STOP (NOTIFY_STOP_MASK|NOTIFY_DONE)
+#define NOTIFY_BAD (NOTIFY_STOP_MASK|EINVAL)
-/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */
+/* Encapsulate (negative) errno value. */
static inline int notifier_from_errno(int err)
{
- return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
+ return NOTIFY_STOP_MASK | -err;
}
/* Restore (negative) errno value from notify return value. */
static inline int notifier_to_errno(int ret)
{
- ret &= ~NOTIFY_STOP_MASK;
- return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
+ return -(ret & ~NOTIFY_STOP_MASK);
}
-#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
-#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
-#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
-#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
-#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
-#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
-#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
- * not handling interrupts, soon dead */
-#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
- * lock is dropped */
-
#endif /* __XEN_NOTIFIER_H__ */
#define rcu_assign_pointer(p, v) ({ smp_wmb(); (p) = (v); })
void rcu_init(void);
-void __devinit rcu_online_cpu(int cpu);
void rcu_check_callbacks(int cpu);
/* Exported interfaces */
struct cpupool *cpupool_create(int poolid, char *sched);
int cpupool_destroy(struct cpupool *c);
-int cpupool0_cpu_assign(struct cpupool *c);
int cpupool_assign_ncpu(struct cpupool *c, int ncpu);
-void cpupool_cpu_add(unsigned int cpu);
-int cpupool_cpu_remove(unsigned int cpu);
int cpupool_add_domain(struct domain *d, int poolid);
void cpupool_rm_domain(struct domain *d);
int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
void tasklet_schedule(struct tasklet *t);
void do_tasklet(void);
void tasklet_kill(struct tasklet *t);
-void migrate_tasklets_from_cpu(unsigned int cpu);
void tasklet_init(
struct tasklet *t, void (*func)(unsigned long), unsigned long data);
void tasklet_subsys_init(void);