if ( unlikely(!all_segs_okay) )
{
struct cpu_user_regs *regs = guest_cpu_user_regs();
- unsigned long *rsp =
+ unsigned long *rsp =
(n->arch.flags & TF_kernel_mode) ?
(unsigned long *)regs->rsp :
(unsigned long *)nctxt->kernel_sp;
+ unsigned long cs_and_mask, rflags;
if ( !(n->arch.flags & TF_kernel_mode) )
toggle_guest_mode(n);
else
regs->cs &= ~3;
+ /* CS longword also contains full evtchn_upcall_mask. */
+ cs_and_mask = (unsigned long)regs->cs |
+ ((unsigned long)n->vcpu_info->evtchn_upcall_mask << 32);
+
+ /* Fold upcall mask into RFLAGS.IF. */
+ rflags = regs->rflags & ~X86_EFLAGS_IF;
+ rflags |= !n->vcpu_info->evtchn_upcall_mask << 9;
+
if ( put_user(regs->ss, rsp- 1) |
put_user(regs->rsp, rsp- 2) |
- put_user(regs->rflags, rsp- 3) |
- put_user(regs->cs, rsp- 4) |
+ put_user(rflags, rsp- 3) |
+ put_user(cs_and_mask, rsp- 4) |
put_user(regs->rip, rsp- 5) |
put_user(nctxt->user_regs.gs, rsp- 6) |
put_user(nctxt->user_regs.fs, rsp- 7) |
put_user(nctxt->user_regs.es, rsp- 8) |
put_user(nctxt->user_regs.ds, rsp- 9) )
{
domain_crash(n->domain);
}
+ if ( test_bit(_VGCF_failsafe_disables_events,
+ &n->arch.guest_context.flags) )
+ n->vcpu_info->evtchn_upcall_mask = 1;
+
regs->entry_vector = TRAP_syscall;
regs->rflags &= 0xFFFCBEFFUL;
regs->ss = __GUEST_SS;
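
For reference, a minimal guest-side sketch (not part of the patch) of the failsafe frame built above: the CS slot at rsp-4 now carries the selector in its low 32 bits and the saved evtchn_upcall_mask in its high 32 bits, while the saved RFLAGS.IF already reflects that mask. The struct and helper names below are hypothetical.

/* Hypothetical guest-side view of the nine-word failsafe frame (rsp-9 up). */
struct failsafe_frame {
    unsigned long ds, es, fs, gs;     /* rsp-9 .. rsp-6 */
    unsigned long rip;                /* rsp-5 */
    unsigned long cs_and_mask;        /* rsp-4: CS | (upcall_mask << 32) */
    unsigned long rflags, rsp, ss;    /* rsp-3 .. rsp-1 */
};

static inline unsigned int frame_cs(const struct failsafe_frame *f)
{
    return (unsigned int)f->cs_and_mask;           /* low half: selector */
}

static inline unsigned int frame_upcall_mask(const struct failsafe_frame *f)
{
    return (unsigned int)(f->cs_and_mask >> 32);   /* high half: saved mask */
}
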
arch.guest_context.kernel_ss);
OFFSET(VCPU_kernel_sp, struct vcpu,
arch.guest_context.kernel_sp);
+ OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
DEFINE(_VCPUF_nmi_pending, _VCPUF_nmi_pending);
DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
+ DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
BLANK();
OFFSET(TSS_ss0, struct tss_struct, ss0);
movl VCPU_failsafe_sel(%ebx),%eax
movw %ax,TRAPBOUNCE_cs(%edx)
movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
- call create_bounce_frame
+ bt $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
+ jnc 1f
+ orw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
+1: call create_bounce_frame
xorl %eax,%eax
movl %eax,UREGS_ds(%esp)
movl %eax,UREGS_es(%esp)
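
A rough C rendering of the branch added above, to make the new behaviour explicit; the trap_bounce field names are assumed from the TRAPBOUNCE_* offsets used by the assembly, so treat this as an illustration rather than part of the patch.

/* Illustrative C form of the failsafe bounce setup (%edx in the asm). */
static void set_failsafe_bounce(struct vcpu *v, struct trap_bounce *tb)
{
    tb->cs    = v->arch.guest_context.failsafe_callback_cs;
    tb->flags = TBF_FAILSAFE;
    if ( test_bit(_VGCF_failsafe_disables_events,
                  &v->arch.guest_context.flags) )
        tb->flags |= TBF_INTERRUPT;  /* create_bounce_frame masks events */
}
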
case CALLBACKTYPE_failsafe:
v->arch.guest_context.failsafe_callback_cs = reg->address.cs;
v->arch.guest_context.failsafe_callback_eip = reg->address.eip;
+ if ( reg->flags & CALLBACKF_mask_events )
+ set_bit(_VGCF_failsafe_disables_events,
+ &v->arch.guest_context.flags);
+ else
+ clear_bit(_VGCF_failsafe_disables_events,
+ &v->arch.guest_context.flags);
break;
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
arch.guest_context.syscall_callback_eip);
OFFSET(VCPU_kernel_sp, struct vcpu,
arch.guest_context.kernel_sp);
+ OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
DEFINE(_VCPUF_nmi_pending, _VCPUF_nmi_pending);
DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
+ DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
+ DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events);
BLANK();
OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
movq VCPU_syscall_addr(%rbx),%rax
movq %rax,TRAPBOUNCE_eip(%rdx)
movw $0,TRAPBOUNCE_flags(%rdx)
- call create_bounce_frame
+ bt $_VGCF_syscall_disables_events,VCPU_guest_context_flags(%rbx)
+ jnc 1f
+ orw $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
+1: call create_bounce_frame
jmp test_all_events
/* %rbx: struct vcpu, interrupts disabled */
movq VCPU_failsafe_addr(%rbx),%rax
movq %rax,TRAPBOUNCE_eip(%rdx)
movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
- call create_bounce_frame
+ bt $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
+ jnc 1f
+ orw $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
+1: call create_bounce_frame
jmp test_all_events
.previous
.section __pre_ex_table,"a"
case CALLBACKTYPE_failsafe:
v->arch.guest_context.failsafe_callback_eip = reg->address;
+ if ( reg->flags & CALLBACKF_mask_events )
+ set_bit(_VGCF_failsafe_disables_events,
+ &v->arch.guest_context.flags);
+ else
+ clear_bit(_VGCF_failsafe_disables_events,
+ &v->arch.guest_context.flags);
break;
case CALLBACKTYPE_syscall:
v->arch.guest_context.syscall_callback_eip = reg->address;
+ if ( reg->flags & CALLBACKF_mask_events )
+ set_bit(_VGCF_syscall_disables_events,
+ &v->arch.guest_context.flags);
+ else
+ clear_bit(_VGCF_syscall_disables_events,
+ &v->arch.guest_context.flags);
break;
case CALLBACKTYPE_nmi:
struct vcpu_guest_context {
/* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
-#define VGCF_I387_VALID (1<<0)
-#define VGCF_HVM_GUEST (1<<1)
-#define VGCF_IN_KERNEL (1<<2)
+#define VGCF_I387_VALID                (1<<0)
+#define VGCF_HVM_GUEST                 (1<<1)
+#define VGCF_IN_KERNEL                 (1<<2)
+#define _VGCF_i387_valid               0
+#define VGCF_i387_valid                (1<<_VGCF_i387_valid)
+#define _VGCF_hvm_guest                1
+#define VGCF_hvm_guest                 (1<<_VGCF_hvm_guest)
+#define _VGCF_in_kernel                2
+#define VGCF_in_kernel                 (1<<_VGCF_in_kernel)
+#define _VGCF_failsafe_disables_events 3
+#define VGCF_failsafe_disables_events  (1<<_VGCF_failsafe_disables_events)
unsigned long flags; /* VGCF_* flags */
struct cpu_user_regs user_regs; /* User-level CPU registers */
struct trap_info trap_ctxt[256]; /* Virtual IDT */
struct vcpu_guest_context {
/* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
-#define VGCF_I387_VALID (1<<0)
-#define VGCF_HVM_GUEST (1<<1)
-#define VGCF_IN_KERNEL (1<<2)
+#define VGCF_I387_VALID                (1<<0)
+#define VGCF_HVM_GUEST                 (1<<1)
+#define VGCF_IN_KERNEL                 (1<<2)
+#define _VGCF_i387_valid               0
+#define VGCF_i387_valid                (1<<_VGCF_i387_valid)
+#define _VGCF_hvm_guest                1
+#define VGCF_hvm_guest                 (1<<_VGCF_hvm_guest)
+#define _VGCF_in_kernel                2
+#define VGCF_in_kernel                 (1<<_VGCF_in_kernel)
+#define _VGCF_failsafe_disables_events 3
+#define VGCF_failsafe_disables_events  (1<<_VGCF_failsafe_disables_events)
+#define _VGCF_syscall_disables_events  4
+#define VGCF_syscall_disables_events   (1<<_VGCF_syscall_disables_events)
unsigned long flags; /* VGCF_* flags */
struct cpu_user_regs user_regs; /* User-level CPU registers */
struct trap_info trap_ctxt[256]; /* Virtual IDT */
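
As a usage sketch, a guest or toolstack preparing an initial vcpu_guest_context can request the new behaviour directly through these flags. Only the two flag macros come from the header above; the surrounding setup routine is an assumption for illustration.

#include <string.h>   /* memset */

/* Sketch: populate an initial context with masked-event failsafe delivery. */
static void build_initial_context(struct vcpu_guest_context *ctxt)
{
    memset(ctxt, 0, sizeof(*ctxt));
    /* Start in kernel mode; run the failsafe callback with events masked. */
    ctxt->flags = VGCF_in_kernel | VGCF_failsafe_disables_events;
    /* ... user_regs, trap_ctxt, kernel_ss/sp, callback addresses, etc. ... */
}
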
#define CALLBACKTYPE_sysenter 3
#define CALLBACKTYPE_nmi 4
+/*
+ * Disable event delivery during callback? This flag is ignored for event and
+ * NMI callbacks: event delivery is unconditionally disabled.
+ */
+#define _CALLBACKF_mask_events 0
+#define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events)
+
/*
* Register a callback.
*/
#define CALLBACKOP_register 0
struct callback_register {
- int type;
- xen_callback_t address;
+ uint16_t type;
+ uint16_t flags;
+ xen_callback_t address;
};
typedef struct callback_register callback_register_t;
DEFINE_XEN_GUEST_HANDLE(callback_register_t);
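
A hedged guest-side example of the widened structure: registering a failsafe callback that should run with event delivery disabled. The HYPERVISOR_callback_op wrapper and the flat 64-bit xen_callback_t layout are assumptions about the guest environment, not part of this header.

/* Sketch: register a failsafe callback with CALLBACKF_mask_events set. */
static int register_failsafe_callback(unsigned long handler)
{
    struct callback_register cb = {
        .type    = CALLBACKTYPE_failsafe,
        .flags   = CALLBACKF_mask_events,
        .address = handler,   /* x86_64: xen_callback_t is a plain address */
    };

    return HYPERVISOR_callback_op(CALLBACKOP_register, &cb);
}
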
*/
#define CALLBACKOP_unregister 1
struct callback_unregister {
- int type;
+ uint16_t type;
+ uint16_t _unused;
};
typedef struct callback_unregister callback_unregister_t;
DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);