extern void fixup_irqs(void);
/* must be called with cpucontrol mutex held */
-int __cpu_disable(void)
+void __cpu_disable(void)
{
int cpu = smp_processor_id();
- /*
- * dont permit boot processor for now
- */
- if (cpu == 0)
- return -EBUSY;
-
remove_siblinginfo(cpu);
cpu_clear(cpu, cpu_online_map);
#ifndef XEN
fixup_irqs();
#endif
local_flush_tlb_all();
cpu_clear(cpu, cpu_callin_map);
- return 0;
}
#else /* !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
+void __cpu_disable(void)
{
- return -ENOSYS;
+ BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
static void play_dead(void)
{
- /*
- * Flush pending softirqs if any. They can be queued up before this CPU
- * was taken out of cpu_online_map in __cpu_disable().
- */
- do_softirq();
-
/* This must be done before dead CPU ack */
cpu_exit_clear();
- hvm_cpu_down();
wbinvd();
mb();
/* Ack it */
switch ( action )
{
case CPU_UP_PREPARE:
- rc = hvm_funcs.cpu_prepare(cpu);
+ rc = hvm_funcs.cpu_up_prepare(cpu);
+ break;
+ case CPU_DYING:
+ hvm_cpu_down();
+ break;
+ case CPU_DEAD:
+ hvm_funcs.cpu_dead(cpu);
break;
default:
break;
return vpmu_do_interrupt(regs);
}
-static int svm_cpu_prepare(unsigned int cpu)
+static void svm_cpu_dead(unsigned int cpu)
+{
+ free_xenheap_page(hsa[cpu]);
+ hsa[cpu] = NULL;
+ free_vmcb(root_vmcb[cpu]);
+ root_vmcb[cpu] = NULL;
+}
+
+static int svm_cpu_up_prepare(unsigned int cpu)
{
if ( ((hsa[cpu] == NULL) &&
((hsa[cpu] = alloc_host_save_area()) == NULL)) ||
((root_vmcb[cpu] == NULL) &&
((root_vmcb[cpu] = alloc_vmcb()) == NULL)) )
+ {
+ svm_cpu_dead(cpu);
return -ENOMEM;
+ }
+
return 0;
}
return 0;
}
- if ( svm_cpu_prepare(cpu) != 0 )
+ if ( svm_cpu_up_prepare(cpu) != 0 )
return 0;
write_efer(read_efer() | EFER_SVME);
static struct hvm_function_table __read_mostly svm_function_table = {
.name = "SVM",
- .cpu_prepare = svm_cpu_prepare,
+ .cpu_up_prepare = svm_cpu_up_prepare,
+ .cpu_dead = svm_cpu_dead,
.cpu_down = svm_cpu_down,
.domain_initialise = svm_domain_initialise,
.domain_destroy = svm_domain_destroy,
local_irq_restore(flags);
}
-int vmx_cpu_prepare(unsigned int cpu)
+int vmx_cpu_up_prepare(unsigned int cpu)
{
if ( per_cpu(host_vmcs, cpu) != NULL )
return 0;
return -ENOMEM;
}
+void vmx_cpu_dead(unsigned int cpu)
+{
+ vmx_free_vmcs(per_cpu(host_vmcs, cpu));
+ per_cpu(host_vmcs, cpu) = NULL;
+}
+
int vmx_cpu_up(void)
{
u32 eax, edx;
INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
- if ( vmx_cpu_prepare(cpu) != 0 )
+ if ( vmx_cpu_up_prepare(cpu) != 0 )
return 0;
switch ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
static struct hvm_function_table __read_mostly vmx_function_table = {
.name = "VMX",
- .cpu_prepare = vmx_cpu_prepare,
+ .cpu_up_prepare = vmx_cpu_up_prepare,
+ .cpu_dead = vmx_cpu_dead,
.domain_initialise = vmx_domain_initialise,
.domain_destroy = vmx_domain_destroy,
.vcpu_initialise = vmx_vcpu_initialise,
cpu_clear(cpu, cpu_sibling_setup_map);
}
-extern void fixup_irqs(void);
-int __cpu_disable(void)
+void __cpu_disable(void)
{
+ extern void fixup_irqs(void);
int cpu = smp_processor_id();
local_irq_disable();
fixup_irqs();
cpu_disable_scheduler(cpu);
-
- return 0;
}
void __cpu_die(unsigned int cpu)
void *hcpu = (void *)(long)smp_processor_id();
if ( raw_notifier_call_chain(&cpu_chain, CPU_DYING, hcpu) != NOTIFY_DONE )
BUG();
- return __cpu_disable();
+ __cpu_disable();
+ return 0;
}
int cpu_down(unsigned int cpu)
if ( rcu_pending(cpu) )
rcu_check_callbacks(cpu);
- if ( (pending = (softirq_pending(cpu) & ~ignore_mask)) == 0 )
+ if ( ((pending = (softirq_pending(cpu) & ~ignore_mask)) == 0)
+ || cpu_is_offline(cpu) )
break;
i = find_first_set_bit(pending);
spin_lock_irq(&tasklet_lock);
- if ( unlikely(list_empty(list)) )
+ if ( unlikely(list_empty(list) || cpu_is_offline(cpu)) )
goto out;
t = list_entry(list->next, struct tasklet, list);
#define hard_smp_processor_id() ia64_get_lid()
/* Upping and downing of CPUs */
-extern int __cpu_disable (void);
-extern void __cpu_die (unsigned int cpu);
extern void cpu_die (void) __attribute__ ((noreturn));
-extern int __cpu_up (unsigned int cpu);
extern void __init smp_build_cpu_map(void);
extern void __init init_smp_config (void);
int (*event_pending)(struct vcpu *v);
int (*do_pmu_interrupt)(struct cpu_user_regs *regs);
- int (*cpu_prepare)(unsigned int cpu);
+ int (*cpu_up_prepare)(unsigned int cpu);
+ void (*cpu_dead)(unsigned int cpu);
+
int (*cpu_up)(void);
void (*cpu_down)(void);
extern void start_vmx(void);
extern void vmcs_dump_vcpu(struct vcpu *v);
extern void setup_vmcs_dump(void);
-extern int vmx_cpu_prepare(unsigned int cpu);
+extern int vmx_cpu_up_prepare(unsigned int cpu);
+extern void vmx_cpu_dead(unsigned int cpu);
extern int vmx_cpu_up(void);
extern void vmx_cpu_down(void);
#endif
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
#endif /* !__ASSEMBLY__ */
#else /* CONFIG_SMP */
int register_cpu_notifier(struct notifier_block *nb);
/*
- * Notification actions: note that only CPU_{UP,DOWN}_PREPARE may fail ---
- * all other handlers *must* return NOTIFY_DONE.
+ * Possible event sequences for a given CPU:
+ * CPU_UP_PREPARE -> CPU_UP_CANCELED -- failed CPU up
+ * CPU_UP_PREPARE -> CPU_ONLINE -- successful CPU up
+ * CPU_DOWN_PREPARE -> CPU_DOWN_FAILED -- failed CPU down
+ * CPU_DOWN_PREPARE -> CPU_DYING -> CPU_DEAD -- successful CPU down
+ *
+ * Hence note that only CPU_*_PREPARE handlers are allowed to fail. Also note
+ * that once CPU_DYING is delivered, an offline action can no longer fail.
*/
#define CPU_UP_PREPARE 0x0002 /* CPU is coming up */
#define CPU_UP_CANCELED 0x0003 /* CPU is no longer coming up */
int disable_nonboot_cpus(void);
void enable_nonboot_cpus(void);
+/* Private arch-dependent helpers for CPU hotplug. */
+int __cpu_up(unsigned int cpunum);
+void __cpu_disable(void);
+void __cpu_die(unsigned int cpu);
+
#endif /* __XEN_CPU_H__ */
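For illustration only (not part of this patch): a minimal sketch of how a subsystem might consume the notification sequence documented in the comment above, assuming the notifier_block / register_cpu_notifier() interface declared in this header. The callback name, the per-CPU buffer, and the use of notifier_from_errno() to report a CPU_UP_PREPARE failure are hypothetical.

static void *percpu_buf[NR_CPUS];

static int example_cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    int rc = 0;

    switch ( action )
    {
    case CPU_UP_PREPARE:
        /* Only the *_PREPARE notifications may fail (see comment above). */
        percpu_buf[cpu] = xmalloc_bytes(PAGE_SIZE);
        if ( percpu_buf[cpu] == NULL )
            rc = -ENOMEM;
        break;
    case CPU_UP_CANCELED:
    case CPU_DEAD:
        /* Undo the CPU_UP_PREPARE work; these handlers must not fail. */
        xfree(percpu_buf[cpu]);
        percpu_buf[cpu] = NULL;
        break;
    default:
        break;
    }

    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
}

static struct notifier_block example_cpu_nfb = {
    .notifier_call = example_cpu_callback
};

/* Registered once at boot, e.g.: register_cpu_notifier(&example_cpu_nfb); */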
*/
extern void smp_prepare_cpus(unsigned int max_cpus);
-/*
- * Bring a CPU up
- */
-extern int __cpu_up(unsigned int cpunum);
-
/*
* Final polishing of CPUs
*/