#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
+#define IPI_STATE_DUMP 2
/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
stop_this_cpu();
break;
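+ /* Requested via smp_send_state_dump(): dump this CPU's execution state. */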
+ case IPI_STATE_DUMP:
+ dump_execstate(regs);
+ break;
+
default:
printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
break;
send_IPI_allbutself(IPI_CPU_STOP);
}
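+/* Ask the given CPU to dump its execution state (handled via IPI_STATE_DUMP above). */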
+void
+smp_send_state_dump (unsigned int cpu)
+{
+ send_IPI_single(cpu, IPI_STATE_DUMP);
+}
+
int __init
setup_profiling_timer (unsigned int multiplier)
{
set_irq_regs(old_regs);
}
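+/* Set to request a state dump of the given CPU; consumed by smp_spurious_interrupt(). */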
+static DEFINE_PER_CPU(bool_t, state_dump_pending);
+
+void smp_send_state_dump(unsigned int cpu)
+{
+ /* We overload the spurious interrupt handler to deliver the dump request. */
+ per_cpu(state_dump_pending, cpu) = 1;
+ send_IPI_mask(cpumask_of(cpu), SPURIOUS_APIC_VECTOR);
+}
+
/*
- * This interrupt should _never_ happen with our APIC/SMP architecture
+ * Spurious interrupts should _never_ happen with our APIC/SMP architecture.
*/
fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs)
{
struct cpu_user_regs *old_regs = set_irq_regs(regs);
irq_enter();
+
/*
- * Check if this really is a spurious interrupt and ACK it
- * if it is a vectored one. Just in case...
- * Spurious interrupts should not be ACKed.
+ * Check if this is a vectored interrupt (most likely a request to dump
+ * local CPU state). Vectored interrupts are ACKed; spurious interrupts
+ * are not.
*/
v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
- if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
+ if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) {
ack_APIC_irq();
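+ /* A pending request means this "spurious" interrupt is really our overloaded dump IPI. */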
+ if (this_cpu(state_dump_pending)) {
+ this_cpu(state_dump_pending) = 0;
+ dump_execstate(regs);
+ goto out;
+ }
+ }
/* see sw-dev-man vol 3, chapter 7.4.13.5 */
- printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n",
- smp_processor_id());
+ printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should "
+ "never happen.\n", smp_processor_id());
+
+ out:
irq_exit();
set_irq_regs(old_regs);
}
.desc = "show this message"
};
-static void __dump_execstate(void *unused)
+static cpumask_t dump_execstate_mask;
+
+void dump_execstate(struct cpu_user_regs *regs)
{
- dump_execution_state();
- printk("*** Dumping CPU%d guest state: ***\n", smp_processor_id());
- if ( is_idle_vcpu(current) )
- printk("No guest context (CPU is idle).\n");
- else
+ unsigned int cpu = smp_processor_id();
+
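+ /* If the dump request interrupted the hypervisor itself, show that context as host state. */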
+ if ( !guest_mode(regs) )
+ {
+ printk("*** Dumping CPU%u host state: ***\n", cpu);
+ show_execution_state(regs);
+ }
+
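+ /* Unless this CPU is idle, also show the guest context of the vCPU currently running here. */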
+ if ( !is_idle_vcpu(current) )
+ {
+ printk("*** Dumping CPU%u guest state (d%d:v%d): ***\n",
+ smp_processor_id(), current->domain->domain_id,
+ current->vcpu_id);
show_execution_state(guest_cpu_user_regs());
+ printk("\n");
+ }
+
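+ /* Mark this CPU as done: dump_registers() polls this bit in the synchronous case, and the cycle below only visits CPUs not yet dumped. */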
+ cpu_clear(cpu, dump_execstate_mask);
+ if ( !alt_key_handling )
+ return;
+
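+ /* Alternative handling: forward the dump request to the next CPU still
+ * pending; the last CPU to finish restores the console and watchdog. */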
+ cpu = cycle_cpu(cpu, dump_execstate_mask);
+ if ( cpu < NR_CPUS )
+ {
+ smp_send_state_dump(cpu);
+ return;
+ }
+
+ console_end_sync();
+ watchdog_enable();
}
static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
watchdog_disable();
console_start_sync();
- printk("'%c' pressed -> dumping registers\n", key);
+ printk("'%c' pressed -> dumping registers\n\n", key);
+
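+ /* Every online CPU, including the local one, still needs to be dumped. */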
+ dump_execstate_mask = cpu_online_map;
/* Get local execution state out immediately, in case we get stuck. */
- printk("\n*** Dumping CPU%d host state: ***\n", smp_processor_id());
- __dump_execstate(NULL);
+ dump_execstate(regs);
- for_each_online_cpu ( cpu )
+ /* Alt. handling: remaining CPUs are dumped asynchronously one-by-one. */
+ if ( alt_key_handling )
+ return;
+
+ /* Normal handling: synchronously dump the remaining CPUs' states. */
+ for_each_cpu_mask ( cpu, dump_execstate_mask )
{
- if ( cpu == smp_processor_id() )
- continue;
- printk("\n*** Dumping CPU%d host state: ***\n", cpu);
- on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1);
+ smp_send_state_dump(cpu);
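+ /* Wait for the target CPU to finish so per-CPU output does not interleave. */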
+ while ( cpu_isset(cpu, dump_execstate_mask) )
+ cpu_relax();
}
- printk("\n");
-
console_end_sync();
watchdog_enable();
}
# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
#ifdef XEN
-# define guest_mode(regs) (ia64_psr(regs)->cpl != 0)
+# define guest_mode(regs) (ia64_psr(regs)->cpl && !ia64_psr(regs)->vm)
# define guest_kernel_mode(regs) (ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL)
# define vmx_guest_kernel_mode(regs) (ia64_psr(regs)->cpl == 0)
# define regs_increment_iip(regs) \
extern char *print_tainted(char *str);
extern void add_taint(unsigned);
+struct cpu_user_regs;
+void dump_execstate(struct cpu_user_regs *);
+
#endif /* __LIB_H__ */
#define smp_send_event_check_cpu(cpu) \
smp_send_event_check_mask(cpumask_of(cpu))
+extern void smp_send_state_dump(unsigned int cpu);
+
/*
* Prepare machine for booting other CPUs.
*/