DEFINE_PER_CPU(uint64_t, efer);
static DEFINE_PER_CPU(unsigned long, last_extable_addr);
-DEFINE_PER_CPU_READ_MOSTLY(u32, ler_msr);
-
DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, gdt_table);
DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, compat_gdt_table);
+/* "ler" command line option: opt in to Last Exception Record reporting. */
static bool opt_ler;
boolean_param("ler", opt_ler);
+/*
+ * MSR index of the LastExceptionFromIP record on this hardware ('to' is at
+ * index + 1).  Zero if LER is not in use.  Only written by
+ * percpu_traps_init(), and only while still zero, so it is identical on
+ * every CPU once set.
+ */
+unsigned int __read_mostly ler_msr;
+
#define stack_words_per_line 4
#define ESP_BEFORE_EXCEPTION(regs) ((unsigned long *)regs->rsp)
return;
}
-static void ler_enable(void)
-{
- u64 debugctl;
-
- if ( !this_cpu(ler_msr) )
- return;
-
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl | IA32_DEBUGCTLMSR_LBR);
-}
-
void do_debug(struct cpu_user_regs *regs)
{
unsigned long dr6;
*/
write_debugreg(6, X86_DR6_DEFAULT);
+ /* #DB automatically disabled LBR. Reinstate it if debugging Xen. */
+ if ( cpu_has_xen_lbr )
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, IA32_DEBUGCTLMSR_LBR);
+
if ( !guest_mode(regs) )
{
/*
{
if ( regs->rip == (unsigned long)sysenter_eflags_saved )
regs->eflags &= ~X86_EFLAGS_TF;
- goto out;
+ return;
}
if ( !debugger_trap_fatal(TRAP_debug, regs) )
{
regs->cs, _p(regs->rip), _p(regs->rip),
regs->ss, _p(regs->rsp), dr6);
- goto out;
+ return;
}
/* Save debug status register where guest OS can peek at it */
v->arch.debugreg[6] |= (dr6 & ~X86_DR6_DEFAULT);
v->arch.debugreg[6] &= (dr6 | ~X86_DR6_DEFAULT);
- ler_enable();
pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
- return;
-
- out:
- ler_enable();
- return;
}
static void __init noinline __set_intr_gate(unsigned int n,
: "=m" (old_gdt) : "rm" (TSS_ENTRY << 3), "m" (tss_gdt) : "memory" );
}
-void percpu_traps_init(void)
+/*
+ * Work out which MSR, if any, holds the LastExceptionFromIP record for
+ * this CPU vendor/family.  Returns the MSR index of the 'from' register
+ * (the 'to' register lives at index + 1, as read by show_registers()),
+ * or 0 when LER is not known to be available on this hardware.
+ */
+static unsigned int calc_ler_msr(void)
{
- subarch_percpu_traps_init();
-
- if ( !opt_ler )
- return;
-
switch ( boot_cpu_data.x86_vendor )
{
case X86_VENDOR_INTEL:
switch ( boot_cpu_data.x86 )
{
case 6:
- this_cpu(ler_msr) = MSR_IA32_LASTINTFROMIP;
- break;
+ return MSR_IA32_LASTINTFROMIP;
+
case 15:
- this_cpu(ler_msr) = MSR_P4_LER_FROM_LIP;
- break;
+ return MSR_P4_LER_FROM_LIP;
}
break;
+
case X86_VENDOR_AMD:
switch ( boot_cpu_data.x86 )
{
case 6:
case 0xf ... 0x17:
- this_cpu(ler_msr) = MSR_IA32_LASTINTFROMIP;
- break;
+ return MSR_IA32_LASTINTFROMIP;
}
break;
}
- ler_enable();
+
+ /* Unrecognised vendor or family: LER stays disabled. */
+ return 0;
+}
+
+/*
+ * Per-CPU trap state initialisation.  LER is opt-in via the "ler"
+ * command line parameter; the first CPU through here computes ler_msr
+ * and latches availability in the synthetic XEN_LBR feature flag, which
+ * later CPUs (and the #DB handler) then key off.
+ */
+void percpu_traps_init(void)
+{
+ subarch_percpu_traps_init();
+
+ if ( !opt_ler )
+ return;
+
+ /* Calculated once; subsequent CPUs reuse the cached value. */
+ if ( !ler_msr && (ler_msr = calc_ler_msr()) )
+ setup_force_cpu_cap(X86_FEATURE_XEN_LBR);
+
+ /*
+  * NOTE(review): this writes DEBUGCTL outright rather than RMW, so any
+  * other DEBUGCTL bits are cleared - presumably none are in use at this
+  * point; confirm.
+  */
+ if ( cpu_has_xen_lbr )
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, IA32_DEBUGCTLMSR_LBR);
+}
void __init init_idt_traps(void)
printk("CPU: %d\n", smp_processor_id());
_show_registers(&fault_regs, fault_crs, context, v);
- if ( this_cpu(ler_msr) && !guest_mode(regs) )
+ if ( ler_msr && !guest_mode(regs) )
{
u64 from, to;
- rdmsrl(this_cpu(ler_msr), from);
- rdmsrl(this_cpu(ler_msr) + 1, to);
+
+ rdmsrl(ler_msr, from);
+ rdmsrl(ler_msr + 1, to);
printk("ler: %016lx -> %016lx\n", from, to);
}
}