DF_MASK = 0x00000400
NT_MASK = 0x00004000
VM_MASK = 0x00020000
-
+/* pseudo-eflags */
+NMI_MASK = 0x80000000
+
/* Offsets into shared_info_t. */
#define evtchn_upcall_pending /* 0 */
#define evtchn_upcall_mask 1
je ldt_ss # returning to user-space with LDT SS
#endif /* XEN */
restore_nocheck:
- testl $VM_MASK, EFLAGS(%esp)
- jnz resume_vm86
+ testl $(VM_MASK|NMI_MASK), EFLAGS(%esp)	# VM86 frame or NMI return?
+ jnz hypervisor_iret
movb EVENT_MASK(%esp), %al
notb %al # %al == ~saved_mask
XEN_GET_VCPU_INFO(%esi)
.long 1b,iret_exc
.previous
-resume_vm86:
- XEN_UNBLOCK_EVENTS(%esi)
+hypervisor_iret:
+ andl $~NMI_MASK, EFLAGS(%esp)	# strip pseudo-flag; Xen does the real iret
RESTORE_REGS
movl %eax,(%esp)
- movl $__HYPERVISOR_switch_vm86,%eax
+ movl $__HYPERVISOR_iret,%eax
int $0x82
ud2
call do_debug
jmp ret_from_exception
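+/*
+ * NMI entry point, registered with Xen via XENNMI_register_callback (see
+ * setup.c).  Build a pt_regs frame, run the C handler, then tag the saved
+ * EFLAGS with the NMI_MASK pseudo-flag so restore_all returns through
+ * hypervisor_iret instead of the normal event-check/iret path.
+ */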
+ENTRY(nmi)
+ pushl %eax
+ SAVE_ALL
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
+ orl $NMI_MASK, EFLAGS(%esp)	# mark frame as an NMI return for restore_all
+ jmp restore_all
+
#if 0 /* XEN */
/*
* NMI is doubly nasty. It can happen _while_ we're handling
#include <asm-xen/xen-public/xen.h>
#include <asm-xen/xen-public/sched.h>
+#include <asm-xen/xen-public/nmi.h>
#define _hypercall0(type, name) \
({ \
SHUTDOWN_suspend, srec);
}
+static inline int
+HYPERVISOR_nmi_op(
+ unsigned long op,
+ unsigned long arg)
+{
+ return _hypercall2(int, nmi_op, op, arg);
+}
+
#endif /* __HYPERCALL_H__ */
/*
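A minimal sketch of how the new HYPERVISOR_nmi_op() wrapper might be called, assuming only the XENNMI_register_callback command from the nmi.h header included above; my_nmi_entry and example_register_nmi are illustrative placeholders rather than symbols from this patch (the real registration, using the nmi entry point from entry.S, appears in the setup.c hunk below):

	/* Sketch only -- assumes <linux/kernel.h> for printk() and the
	 * hypercall/nmi.h headers patched above. */
	extern void my_nmi_entry(void);		/* hypothetical asm entry point */

	static int example_register_nmi(void)
	{
		int rc;

		/* Ask Xen to deliver NMIs to my_nmi_entry from now on. */
		rc = HYPERVISOR_nmi_op(XENNMI_register_callback,
				       (unsigned long)&my_nmi_entry);
		if (rc)
			printk("Xen NMI callback registration failed: %d\n", rc);
		return rc;
	}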
--- /dev/null
+/*
+ * include/asm-xen/asm-i386/mach-xen/mach_traps.h
+ *
+ * Machine specific NMI handling for Xen
+ */
+#ifndef _MACH_TRAPS_H
+#define _MACH_TRAPS_H
+
+#include <linux/bitops.h>
+#include <asm-xen/xen-public/nmi.h>
+
+static inline void clear_mem_error(unsigned char reason) {}
+static inline void clear_io_check_error(unsigned char reason) {}
+
+static inline unsigned char get_nmi_reason(void)
+{
+ shared_info_t *s = HYPERVISOR_shared_info;
+ unsigned char reason = 0;
+
+ /* construct a value which looks like it came from
+ * port 0x61.
+ */
+ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
+ reason |= 0x40;
+ if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
+ reason |= 0x80;
+
+ return reason;
+}
+
+static inline void reassert_nmi(void) {}
+
+#endif /* !_MACH_TRAPS_H */
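For context, a simplified sketch of how a caller could decode the value that get_nmi_reason() synthesizes; the bit positions mirror the native port-0x61 layout (bit 7 = memory/parity error, bit 6 = I/O check error), and the two handler functions here are placeholders, not the kernel's actual traps.c routines:

	/* Sketch only -- placeholder handlers, not the real NMI path. */
	static void handle_parity_error(void)   { /* e.g. log and carry on */ }
	static void handle_io_check_error(void) { /* e.g. log and carry on */ }

	static void example_decode_nmi_reason(void)
	{
		unsigned char reason = get_nmi_reason();

		if (reason & 0x80)		/* memory/parity error (SERR#) */
			handle_parity_error();
		if (reason & 0x40)		/* I/O check error (IOCHK#) */
			handle_io_check_error();
	}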
extern void hypervisor_callback(void);
extern void failsafe_callback(void);
+extern void nmi(void);
static void __init machine_specific_arch_setup(void)
{
__KERNEL_CS, (unsigned long)hypervisor_callback,
__KERNEL_CS, (unsigned long)failsafe_callback);
+ HYPERVISOR_nmi_op(XENNMI_register_callback, (unsigned long)&nmi);
+
machine_specific_modify_cpu_capabilities(&boot_cpu_data);
}
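Taken together: Xen now delivers NMIs to the nmi entry point registered here. That stub (added to entry.S above) builds a pt_regs frame, calls do_nmi(), and sets the NMI_MASK pseudo-flag in the saved EFLAGS before jumping to restore_all; the NMI_MASK test added at restore_nocheck then diverts the return through hypervisor_iret, which strips the pseudo-flag and hands the final return to Xen via the __HYPERVISOR_iret hypercall instead of a native iret.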