From: kaf24@firebug.cl.cam.ac.uk
Date: Mon, 5 Jun 2006 13:35:22 +0000 (+0100)
Subject: Extend callback-registration hypercall to take a flags argument.
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~15972^2~49^2~11
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=d3a5ffe775160fb42bee1be875b5d0c3da11129d;p=xen.git

Extend callback-registration hypercall to take a flags argument.

This can currently be used to request that event delivery be disabled
during callback processing.

Signed-off-by: Keir Fraser
---

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 7c6297b917..1890956916 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -524,20 +524,29 @@ static void load_segments(struct vcpu *n)
     if ( unlikely(!all_segs_okay) )
     {
         struct cpu_user_regs *regs = guest_cpu_user_regs();
-        unsigned long *rsp =
+        unsigned long *rsp =
             (n->arch.flags & TF_kernel_mode) ?
             (unsigned long *)regs->rsp :
             (unsigned long *)nctxt->kernel_sp;
+        unsigned long cs_and_mask, rflags;
 
         if ( !(n->arch.flags & TF_kernel_mode) )
             toggle_guest_mode(n);
         else
             regs->cs &= ~3;
 
+        /* CS longword also contains full evtchn_upcall_mask. */
+        cs_and_mask = (unsigned long)regs->cs |
+            ((unsigned long)n->vcpu_info->evtchn_upcall_mask << 32);
+
+        /* Fold upcall mask into RFLAGS.IF. */
+        rflags  = regs->rflags & ~X86_EFLAGS_IF;
+        rflags |= !n->vcpu_info->evtchn_upcall_mask << 9;
+
         if ( put_user(regs->ss,            rsp- 1) |
              put_user(regs->rsp,           rsp- 2) |
-             put_user(regs->rflags,        rsp- 3) |
-             put_user(regs->cs,            rsp- 4) |
+             put_user(rflags,              rsp- 3) |
+             put_user(cs_and_mask,         rsp- 4) |
              put_user(regs->rip,           rsp- 5) |
              put_user(nctxt->user_regs.gs, rsp- 6) |
              put_user(nctxt->user_regs.fs, rsp- 7) |
@@ -550,6 +559,10 @@ static void load_segments(struct vcpu *n)
             domain_crash(n->domain);
         }
 
+        if ( test_bit(_VGCF_failsafe_disables_events,
+                      &n->arch.guest_context.flags) )
+            n->vcpu_info->evtchn_upcall_mask = 1;
+
         regs->entry_vector = TRAP_syscall;
         regs->rflags      &= 0xFFFCBEFFUL;
         regs->ss           = __GUEST_SS;
diff --git a/xen/arch/x86/x86_32/asm-offsets.c b/xen/arch/x86/x86_32/asm-offsets.c
index 07830746be..f03556ef8d 100644
--- a/xen/arch/x86/x86_32/asm-offsets.c
+++ b/xen/arch/x86/x86_32/asm-offsets.c
@@ -64,11 +64,13 @@ void __dummy__(void)
            arch.guest_context.kernel_ss);
     OFFSET(VCPU_kernel_sp, struct vcpu,
            arch.guest_context.kernel_sp);
+    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
     OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
     OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
     OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
     DEFINE(_VCPUF_nmi_pending, _VCPUF_nmi_pending);
     DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
+    DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
     BLANK();
 
     OFFSET(TSS_ss0, struct tss_struct, ss0);
diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index 1a4ac5367e..c0bc7e4e01 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -130,7 +130,10 @@ failsafe_callback:
         movl  VCPU_failsafe_sel(%ebx),%eax
         movw  %ax,TRAPBOUNCE_cs(%edx)
         movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
-        call  create_bounce_frame
+        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
+        jnc   1f
+        orw   $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
+1:      call  create_bounce_frame
         xorl  %eax,%eax
         movl  %eax,UREGS_ds(%esp)
         movl  %eax,UREGS_es(%esp)
diff --git a/xen/arch/x86/x86_32/traps.c b/xen/arch/x86/x86_32/traps.c
index 64c038c4ee..6fce183a17 100644
--- a/xen/arch/x86/x86_32/traps.c
+++ b/xen/arch/x86/x86_32/traps.c
@@ -346,6 +346,12 @@ static long register_guest_callback(struct callback_register *reg)
     case CALLBACKTYPE_failsafe:
         v->arch.guest_context.failsafe_callback_cs  = reg->address.cs;
         v->arch.guest_context.failsafe_callback_eip = reg->address.eip;
+        if ( reg->flags & CALLBACKF_mask_events )
+            set_bit(_VGCF_failsafe_disables_events,
+                    &v->arch.guest_context.flags);
+        else
+            clear_bit(_VGCF_failsafe_disables_events,
+                      &v->arch.guest_context.flags);
         break;
 
 #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index ebc3059a9c..1719c50a05 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -64,11 +64,14 @@ void __dummy__(void)
            arch.guest_context.syscall_callback_eip);
     OFFSET(VCPU_kernel_sp, struct vcpu,
            arch.guest_context.kernel_sp);
+    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
     OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
     OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
     OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
     DEFINE(_VCPUF_nmi_pending, _VCPUF_nmi_pending);
     DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
+    DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
+    DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events);
     BLANK();
 
     OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 110c34a417..62bb86510b 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -30,7 +30,10 @@ switch_to_kernel:
         movq  VCPU_syscall_addr(%rbx),%rax
         movq  %rax,TRAPBOUNCE_eip(%rdx)
         movw  $0,TRAPBOUNCE_flags(%rdx)
-        call  create_bounce_frame
+        bt    $_VGCF_syscall_disables_events,VCPU_guest_context_flags(%rbx)
+        jnc   1f
+        orw   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
+1:      call  create_bounce_frame
         jmp   test_all_events
 
 /* %rbx: struct vcpu, interrupts disabled */
@@ -77,7 +80,10 @@ failsafe_callback:
         movq  VCPU_failsafe_addr(%rbx),%rax
         movq  %rax,TRAPBOUNCE_eip(%rdx)
         movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
-        call  create_bounce_frame
+        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
+        jnc   1f
+        orw   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
+1:      call  create_bounce_frame
         jmp   test_all_events
 .previous
 .section __pre_ex_table,"a"
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index ad7c347182..8c11a5ef4f 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -334,10 +334,22 @@ static long register_guest_callback(struct callback_register *reg)
 
     case CALLBACKTYPE_failsafe:
         v->arch.guest_context.failsafe_callback_eip = reg->address;
+        if ( reg->flags & CALLBACKF_mask_events )
+            set_bit(_VGCF_failsafe_disables_events,
+                    &v->arch.guest_context.flags);
+        else
+            clear_bit(_VGCF_failsafe_disables_events,
+                      &v->arch.guest_context.flags);
         break;
 
     case CALLBACKTYPE_syscall:
         v->arch.guest_context.syscall_callback_eip = reg->address;
+        if ( reg->flags & CALLBACKF_mask_events )
+            set_bit(_VGCF_syscall_disables_events,
+                    &v->arch.guest_context.flags);
+        else
+            clear_bit(_VGCF_syscall_disables_events,
+                      &v->arch.guest_context.flags);
         break;
 
     case CALLBACKTYPE_nmi:
diff --git a/xen/include/public/arch-x86_32.h b/xen/include/public/arch-x86_32.h
index 2a43ae53ac..c5237ca5c8 100644
--- a/xen/include/public/arch-x86_32.h
+++ b/xen/include/public/arch-x86_32.h
@@ -138,9 +138,17 @@ typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
 struct vcpu_guest_context {
     /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
     struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers   */
-#define VGCF_I387_VALID (1<<0)
-#define VGCF_HVM_GUEST  (1<<1)
-#define VGCF_IN_KERNEL  (1<<2)
+#define VGCF_I387_VALID                (1<<0)
+#define VGCF_HVM_GUEST                 (1<<1)
+#define VGCF_IN_KERNEL                 (1<<2)
+#define _VGCF_i387_valid               0
+#define VGCF_i387_valid                (1<<_VGCF_i387_valid)
+#define _VGCF_hvm_guest                1
+#define VGCF_hvm_guest                 (1<<_VGCF_hvm_guest)
+#define _VGCF_in_kernel                2
+#define VGCF_in_kernel                 (1<<_VGCF_in_kernel)
+#define _VGCF_failsafe_disables_events 3
+#define VGCF_failsafe_disables_events  (1<<_VGCF_failsafe_disables_events)
     unsigned long flags;                    /* VGCF_* flags               */
     struct cpu_user_regs user_regs;         /* User-level CPU registers   */
     struct trap_info trap_ctxt[256];        /* Virtual IDT                */
diff --git a/xen/include/public/arch-x86_64.h b/xen/include/public/arch-x86_64.h
index 0b0ca809ca..bea67d4b0c 100644
--- a/xen/include/public/arch-x86_64.h
+++ b/xen/include/public/arch-x86_64.h
@@ -211,9 +211,19 @@ typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
 struct vcpu_guest_context {
     /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
     struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers   */
-#define VGCF_I387_VALID (1<<0)
-#define VGCF_HVM_GUEST  (1<<1)
-#define VGCF_IN_KERNEL  (1<<2)
+#define VGCF_I387_VALID                (1<<0)
+#define VGCF_HVM_GUEST                 (1<<1)
+#define VGCF_IN_KERNEL                 (1<<2)
+#define _VGCF_i387_valid               0
+#define VGCF_i387_valid                (1<<_VGCF_i387_valid)
+#define _VGCF_hvm_guest                1
+#define VGCF_hvm_guest                 (1<<_VGCF_hvm_guest)
+#define _VGCF_in_kernel                2
+#define VGCF_in_kernel                 (1<<_VGCF_in_kernel)
+#define _VGCF_failsafe_disables_events 3
+#define VGCF_failsafe_disables_events  (1<<_VGCF_failsafe_disables_events)
+#define _VGCF_syscall_disables_events  4
+#define VGCF_syscall_disables_events   (1<<_VGCF_syscall_disables_events)
     unsigned long flags;                    /* VGCF_* flags               */
     struct cpu_user_regs user_regs;         /* User-level CPU registers   */
     struct trap_info trap_ctxt[256];        /* Virtual IDT                */
diff --git a/xen/include/public/callback.h b/xen/include/public/callback.h
index a9f4168dda..b497d99317 100644
--- a/xen/include/public/callback.h
+++ b/xen/include/public/callback.h
@@ -28,13 +28,21 @@
 #define CALLBACKTYPE_sysenter              3
 #define CALLBACKTYPE_nmi                   4
 
+/*
+ * Disable event delivery during callback? This flag is ignored for event and
+ * NMI callbacks: event delivery is unconditionally disabled.
+ */
+#define _CALLBACKF_mask_events             0
+#define CALLBACKF_mask_events              (1U << _CALLBACKF_mask_events)
+
 /*
  * Register a callback.
  */
 #define CALLBACKOP_register                0
 struct callback_register {
-    int type;
-    xen_callback_t address;
+    uint16_t type;
+    uint16_t flags;
+    xen_callback_t address;
 };
 typedef struct callback_register callback_register_t;
 DEFINE_XEN_GUEST_HANDLE(callback_register_t);
@@ -47,7 +55,8 @@ DEFINE_XEN_GUEST_HANDLE(callback_register_t);
  */
 #define CALLBACKOP_unregister              1
 struct callback_unregister {
-    int type;
+    uint16_t type;
+    uint16_t _unused;
 };
 typedef struct callback_unregister callback_unregister_t;
 DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
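
For reference, the sketch below shows how a guest might use the extended
interface. It is not part of the patch: it assumes the callback_register
layout and CALLBACKF_mask_events flag from xen/include/public/callback.h as
modified above, the x86_64 convention that xen_callback_t is a plain code
address, and a HYPERVISOR_callback_op() hypercall wrapper supplied by the
guest kernel; failsafe_handler is a hypothetical guest entry point.

/* Hypothetical guest entry point reached via the failsafe callback. */
extern void failsafe_handler(void);

static int register_failsafe_callback(void)
{
    struct callback_register cb = {
        .type    = CALLBACKTYPE_failsafe,
        .flags   = CALLBACKF_mask_events, /* keep events masked while it runs */
        .address = (unsigned long)failsafe_handler,
    };
    int rc = HYPERVISOR_callback_op(CALLBACKOP_register, &cb);

    if ( rc != 0 )
    {
        /*
         * An older hypervisor still reads the first four bytes as a single
         * 'int type', so a non-zero flags value shows up as an unknown type
         * and the call should fail; retry with flags clear so the callback
         * is still registered, just without event masking.
         */
        cb.flags = 0;
        rc = HYPERVISOR_callback_op(CALLBACKOP_register, &cb);
    }
    return rc;
}

With this patch the flag is honoured for the failsafe callback on x86_32 and
for the failsafe and syscall callbacks on x86_64; per the comment added to
callback.h, it is ignored for event and NMI callbacks because their event
delivery is disabled unconditionally.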