wrmsrl(MSR_LSTAR, saved_lstar);
wrmsrl(MSR_CSTAR, saved_cstar);
wrmsr(MSR_STAR, 0, (FLAT_RING3_CS32<<16) | __HYPERVISOR_CS);
- wrmsr(MSR_SYSCALL_MASK,
- X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT|
- X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF,
- 0U);
+ wrmsr(MSR_SYSCALL_MASK, XEN_SYSCALL_MASK, 0U);
wrfsbase(saved_fs_base);
wrgsbase(saved_gs_base);
/* This is the default interrupt handler. */
ignore_int:
- SAVE_ALL
+ SAVE_ALL CLAC
movq %cr2,%rsi
leaq int_msg(%rip),%rdi
xorl %eax,%eax
{
static DEFINE_PER_CPU(char, depth);
+ /* Set AC to reduce chance of further SMAP faults */
+ stac();
+
/*
* In some cases, we can end up in a vicious cycle of fatal_trap()s
* within fatal_trap()s. We give the problem a couple of iterations to
#include <irq_vectors.h>
ENTRY(compat_hypercall)
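+ /* Clear EFLAGS.AC on entry from the guest so SMAP stays enforced inside Xen. */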
+ ASM_CLAC
pushq $0
SAVE_VOLATILE type=TRAP_syscall compat=1
pushq $0
pushfq
GLOBAL(sysenter_eflags_saved)
+ ASM_CLAC
pushq $3 /* ring 3 null cs */
pushq $0 /* null rip */
pushq $0
jmp .Lbounce_exception
ENTRY(int80_direct_trap)
+ ASM_CLAC
pushq $0
SAVE_VOLATILE 0x80
jmp asm_domain_crash_synchronous /* Does not return */
ENTRY(common_interrupt)
- SAVE_ALL
+ SAVE_ALL CLAC
movq %rsp,%rdi
callq do_IRQ
jmp ret_from_intr
movl $TRAP_page_fault,4(%rsp)
/* No special register assumptions. */
GLOBAL(handle_exception)
- SAVE_ALL
+ SAVE_ALL CLAC
handle_exception_saved:
testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
jz exception_with_ints_disabled
ENTRY(double_fault)
movl $TRAP_double_fault,4(%rsp)
- SAVE_ALL
+ /* Set AC to reduce chance of further SMAP faults */
+ SAVE_ALL STAC
movq %rsp,%rdi
call do_double_fault
ud2
pushq $0
movl $TRAP_nmi,4(%rsp)
handle_ist_exception:
- SAVE_ALL
+ SAVE_ALL CLAC
testb $3,UREGS_cs(%rsp)
jz 1f
/* Interrupted guest context. Copy the context to stack bottom. */
ENTRY(nmi_crash)
pushq $0
movl $TRAP_nmi,4(%rsp)
- SAVE_ALL
+ /* Set AC to reduce chance of further SMAP faults */
+ SAVE_ALL STAC
movq %rsp,%rdi
callq do_nmi_crash /* Does not return */
ud2
/* Common SYSCALL parameters. */
wrmsr(MSR_STAR, 0, (FLAT_RING3_CS32<<16) | __HYPERVISOR_CS);
- wrmsr(MSR_SYSCALL_MASK,
- X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT|
- X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF,
- 0U);
+ wrmsr(MSR_SYSCALL_MASK, XEN_SYSCALL_MASK, 0U);
}
void init_int80_direct_trap(struct vcpu *v)
#endif
#ifdef __ASSEMBLY__
-.macro SAVE_ALL
+.macro SAVE_ALL op
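+ /* Optional \op: CLAC or STAC emits ASM_CLAC/ASM_STAC before the registers are saved; any other operand is a build-time error. */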
+.ifeqs "\op", "CLAC"
+ ASM_CLAC
+.else
+.ifeqs "\op", "STAC"
+ ASM_STAC
+.else
+.ifnb \op
+ .err
+.endif
+.endif
+.endif
addq $-(UREGS_error_code-UREGS_r15), %rsp
cld
movq %rdi,UREGS_rdi(%rsp)
#define PFEC_page_paged (1U<<5)
#define PFEC_page_shared (1U<<6)
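+/*
+ * RFLAGS bits cleared by SYSCALL (loaded into MSR_SYSCALL_MASK).  AC is
+ * included so that SMAP is enforced as soon as we enter the hypervisor.
+ */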
+#define XEN_SYSCALL_MASK (X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF| \
+ X86_EFLAGS_NT|X86_EFLAGS_DF|X86_EFLAGS_IF| \
+ X86_EFLAGS_TF)
+
#ifndef __ASSEMBLY__
struct domain;