From 0e47f92b072548800223f9a21ea051a017173915 Mon Sep 17 00:00:00 2001
From: Jan Beulich <jbeulich@suse.com>
Date: Wed, 21 Dec 2016 16:46:13 +0100
Subject: [PATCH] x86: force EFLAGS.IF on when exiting to PV guests

Guest kernels modifying instructions in the process of being emulated
for another of their vCPU-s may cause EFLAGS.IF to be cleared upon the
next exit to guest context, by converting the instruction being
emulated to CLI (at the right point in time). Prevent any such bad
effects by always forcing EFLAGS.IF on. To cover other hypothetical
similar issues, also force EFLAGS.{IOPL,NT,VM} to zero.

This is CVE-2016-10024 / XSA-202.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/x86_64/compat/entry.S |  4 ++++
 xen/arch/x86/x86_64/entry.S        | 15 ++++++++-------
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 3bb6b61646..474ffbc951 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -109,6 +109,8 @@ compat_process_trap:
 /* %rbx: struct vcpu, interrupts disabled */
 ENTRY(compat_restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
+        mov   $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11d
+        and   UREGS_eflags(%rsp),%r11d
 .Lcr4_orig:
         .skip .Lcr4_alt_end - .Lcr4_alt, 0x90
 .Lcr4_orig_end:
@@ -144,6 +146,8 @@ ENTRY(compat_restore_all_guest)
                              (.Lcr4_orig_end - .Lcr4_orig), \
                              (.Lcr4_alt_end - .Lcr4_alt)
         .popsection
+        or    $X86_EFLAGS_IF,%r11
+        mov   %r11d,UREGS_eflags(%rsp)
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 66aefaa781..85f1a4b0ba 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -40,28 +40,29 @@ restore_all_guest:
         testw $TRAP_syscall,4(%rsp)
         jz    iret_exit_to_guest
 
+        movq  24(%rsp),%r11           # RFLAGS
+        andq  $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11
+        orq   $X86_EFLAGS_IF,%r11
+
         /* Don't use SYSRET path if the return address is not canonical. */
         movq  8(%rsp),%rcx
         sarq  $47,%rcx
         incl  %ecx
         cmpl  $1,%ecx
-        ja    .Lforce_iret
+        movq  8(%rsp),%rcx            # RIP
+        ja    iret_exit_to_guest
 
         cmpw  $FLAT_USER_CS32,16(%rsp)# CS
-        movq  8(%rsp),%rcx            # RIP
-        movq  24(%rsp),%r11           # RFLAGS
         movq  32(%rsp),%rsp           # RSP
         je    1f
         sysretq
 1:      sysretl
 
-.Lforce_iret:
-        /* Mimic SYSRET behavior. */
-        movq  8(%rsp),%rcx            # RIP
-        movq  24(%rsp),%r11           # RFLAGS
         ALIGN
 /* No special register assumptions. */
 iret_exit_to_guest:
+        andl  $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),24(%rsp)
+        orl   $X86_EFLAGS_IF,24(%rsp)
         addq  $8,%rsp
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
-- 
2.30.2
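
For reference, the net effect of the new masking sequences on the saved
guest RFLAGS is the same on all three exit paths (compat IRET, SYSRET,
and 64-bit IRET). Below is a minimal C sketch of that computation; it is
illustrative only, not Xen code: the X86_EFLAGS_* values are the
architectural bit positions behind the names used in the patch, and
sanitize_guest_rflags() is a hypothetical helper name.

#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_IF   (1u << 9)    /* interrupt enable */
#define X86_EFLAGS_IOPL (3u << 12)   /* I/O privilege level (2 bits) */
#define X86_EFLAGS_NT   (1u << 14)   /* nested task */
#define X86_EFLAGS_VM   (1u << 17)   /* virtual-8086 mode */

/* Mirrors the patch's and/or pairs: clear IOPL, NT and VM, force IF on. */
static uint64_t sanitize_guest_rflags(uint64_t rflags)
{
    rflags &= ~(uint64_t)(X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_VM);
    rflags |= X86_EFLAGS_IF;
    return rflags;
}

int main(void)
{
    /* A frame a malicious guest might have produced: IF clear, IOPL=3, VM set. */
    uint64_t hostile = 0x23002;
    printf("%#llx -> %#llx\n",
           (unsigned long long)hostile,
           (unsigned long long)sanitize_guest_rflags(hostile));
    /* Prints: 0x23002 -> 0x202 */
    return 0;
}

With IF guaranteed to be set, a guest that races instruction emulation
into a CLI can no longer re-enter guest context with interrupts
disabled, and zeroing IOPL/NT/VM closes the analogous hypothetical
channels mentioned in the description.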