specified as not disabling event delivery, just like any other vector.
Signed-off-by: Keir Fraser <keir@xensource.com>
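With the -EINVAL check below removed from do_set_trap_table(), a PV guest can register its NMI callback like any other trap vector, including with event delivery left enabled. A minimal guest-side sketch of such a registration, assuming the public trap_info layout and hypercall name, with nmi_callback a hypothetical guest handler (illustrative only, not any kernel's actual code):

    /*
     * Sketch: register an NMI callback that leaves event delivery
     * enabled (flags bit 2, the TI "IF" bit, left clear).  The
     * zero-address entry terminates the table, matching the
     * 'if ( cur.address == 0 ) break;' loop in do_set_trap_table().
     */
    static struct trap_info nmi_table[] = {
        { 2 /* TRAP_nmi */, 0 /* DPL 0, IF bit clear */,
          FLAT_KERNEL_CS, (unsigned long)nmi_callback },
        { 0, 0, 0, 0 }
    };

    void guest_register_nmi(void)
    {
        HYPERVISOR_set_trap_table(nmi_table);
    }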
return 0;
}
+/*
+ * Called from asm to set up the NMI trapbounce info.
+ * Returns 0 if no callback is set up, else 1.
+ */
+asmlinkage int set_guest_nmi_trapbounce(void)
+{
+ struct vcpu *v = current;
+ struct trap_bounce *tb = &v->arch.trap_bounce;
+ do_guest_trap(TRAP_nmi, guest_cpu_user_regs(), 0);
+ tb->flags &= ~TBF_EXCEPTION; /* not needed for NMI delivery path */
+ return !null_trap_bounce(v, tb);
+}
+
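The return value above comes from null_trap_bounce(). A minimal sketch of that predicate, assuming a bounce with neither CS nor EIP set means the guest registered no NMI callback (the real definition in the hypervisor may differ, e.g. the CS check may not apply to all guest types):

    /* Sketch (assumption): the bounce is "null" when do_guest_trap()
     * found no registered handler, so there is nothing to deliver. */
    static inline int null_trap_bounce(struct vcpu *v, struct trap_bounce *tb)
    {
        return (tb->cs == 0) && (tb->eip == 0);
    }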
static inline int do_trap(
int trapnr, struct cpu_user_regs *regs, int use_error_code)
{
if ( cur.address == 0 )
break;
- if ( (cur.vector == TRAP_nmi) && !TI_GET_IF(&cur) )
- {
- rc = -EINVAL;
- break;
- }
-
fixup_guest_code_selector(current->domain, cur.cs);
memcpy(&dst[cur.vector], &cur, sizeof(cur));
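The deleted check used TI_GET_IF(), which reads bit 2 of the trap_info flags: the bit that marks a vector as disabling event delivery on entry. For reference, the flag macros as they appear in the public headers (quoted from memory; verify against xen/include/public/arch-x86/xen.h):

    #define TI_GET_IF(_ti)        ((_ti)->flags & 4)
    #define TI_SET_IF(_ti, _if)   ((_ti)->flags |= ((!!(_if)) << 2))

Dropping the -EINVAL path is what makes NMI behave like any other vector here: do_guest_trap() already honours TI_GET_IF() when it builds the trap bounce.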
OFFSET(VCPU_kernel_sp, struct vcpu,
arch.guest_context.kernel_sp);
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
- OFFSET(VCPU_nmi_cs, struct vcpu,
- arch.guest_context.trap_ctxt[TRAP_nmi].cs);
- OFFSET(VCPU_nmi_addr, struct vcpu,
- arch.guest_context.trap_ctxt[TRAP_nmi].address);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
process_nmi:
testb $1,VCPU_nmi_masked(%ebx)
jnz test_guest_events
+ sti
movb $0,VCPU_nmi_pending(%ebx)
- movzwl VCPU_nmi_cs(%ebx),%eax
- movl VCPU_nmi_addr(%ebx),%ecx
+ call set_guest_nmi_trapbounce
test %eax,%eax
- jz test_guest_events
+ jz test_all_events
movb $1,VCPU_nmi_masked(%ebx)
- sti
leal VCPU_trap_bounce(%ebx),%edx
- movw %ax,TRAPBOUNCE_cs(%edx)
- movl %ecx,TRAPBOUNCE_eip(%edx)
- movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
call create_bounce_frame
jmp test_all_events
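In C terms the rewritten fast path amounts to roughly the following; this is a readability sketch only (the real code stays in assembly, and create_bounce_frame takes its trap_bounce pointer in %edx). Note that sti is hoisted above the call so that set_guest_nmi_trapbounce() runs with interrupts enabled, and that the no-callback path now rejoins test_all_events rather than test_guest_events:

    /* Readability sketch of the process_nmi asm path above. */
    if ( v->nmi_masked )
        goto test_guest_events;
    local_irq_enable();                  /* sti */
    v->nmi_pending = 0;
    if ( !set_guest_nmi_trapbounce() )   /* no callback registered */
        goto test_all_events;
    v->nmi_masked = 1;                   /* stays masked until the guest unmasks */
    create_bounce_frame(&v->arch.trap_bounce);
    goto test_all_events;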
OFFSET(VCPU_kernel_sp, struct vcpu, arch.guest_context.kernel_sp);
OFFSET(VCPU_kernel_ss, struct vcpu, arch.guest_context.kernel_ss);
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
- OFFSET(VCPU_nmi_cs, struct vcpu,
- arch.guest_context.trap_ctxt[TRAP_nmi].cs);
- OFFSET(VCPU_nmi_addr, struct vcpu,
- arch.guest_context.trap_ctxt[TRAP_nmi].address);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
compat_process_nmi:
testb $1,VCPU_nmi_masked(%rbx)
jnz compat_test_guest_events
+ sti
movb $0,VCPU_nmi_pending(%rbx)
- movzwl VCPU_nmi_cs(%rbx),%eax
- movl VCPU_nmi_addr(%rbx),%ecx
+ call set_guest_nmi_trapbounce
testl %eax,%eax
- jz compat_test_guest_events
+ jz compat_test_all_events
movb $1,VCPU_nmi_masked(%rbx)
- sti
leaq VCPU_trap_bounce(%rbx),%rdx
- movw %ax,TRAPBOUNCE_cs(%rdx)
- movl %ecx,TRAPBOUNCE_eip(%rdx)
- movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
call compat_create_bounce_frame
jmp compat_test_all_events
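The hard-coded FLAT_COMPAT_KERNEL_CS store is gone because do_guest_trap() now supplies the CS from the guest's registered trap_ctxt entry. A simplified sketch of what it does on the NMI path (from memory; the real function also handles error codes and other bookkeeping):

    /* Sketch: do_guest_trap() filling the bounce from the trap table. */
    const struct trap_info *ti = &v->arch.guest_context.trap_ctxt[TRAP_nmi];
    tb->flags = TBF_EXCEPTION;         /* the caller clears this again   */
    tb->cs    = ti->cs;                /* guest-registered CS, no longer */
    tb->eip   = ti->address;           /* a hard-coded flat selector     */
    if ( TI_GET_IF(ti) )
        tb->flags |= TBF_INTERRUPT;    /* vector disables event delivery */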
if ( cur.address == 0 )
break;
- if ( (cur.vector == TRAP_nmi) && !TI_GET_IF(&cur) )
- {
- rc = -EINVAL;
- break;
- }
-
fixup_guest_code_selector(current->domain, cur.cs);
XLAT_trap_info(dst + cur.vector, &cur);
process_nmi:
testb $1,VCPU_nmi_masked(%rbx)
jnz test_guest_events
+ sti
movb $0,VCPU_nmi_pending(%rbx)
- movq VCPU_nmi_addr(%rbx),%rax
- test %rax,%rax
- jz test_guest_events
+ call set_guest_nmi_trapbounce
+ test %eax,%eax
+ jz test_all_events
movb $1,VCPU_nmi_masked(%rbx)
- sti
leaq VCPU_trap_bounce(%rbx),%rdx
- movq %rax,TRAPBOUNCE_eip(%rdx)
- movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
call create_bounce_frame
jmp test_all_events
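For completeness, the TRAPBOUNCE_* offsets used throughout index a structure along these lines (a sketch; the error_code field and exact types are assumptions, so check asm-x86/domain.h for the authoritative layout). Native 64-bit guests run on a flat code segment, which is why this path never stored a CS and the old code loaded only VCPU_nmi_addr:

    /* Sketch (assumption) of the structure behind the TRAPBOUNCE_* offsets. */
    struct trap_bounce {
        uint32_t      error_code;   /* assumption                        */
        uint8_t       flags;        /* TBF_EXCEPTION, TBF_INTERRUPT, ... */
        uint16_t      cs;           /* unused for native 64-bit guests   */
        unsigned long eip;
    };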