GLOBAL(trampoline_misc_enable_off)
.quad 0
-/* EFER OR-mask for boot paths. This gets adjusted with NX when available. */
+/* EFER OR-mask for boot paths. SCE is included only when PV support is compiled in; NX is OR-ed in at runtime when available. */
GLOBAL(trampoline_efer)
- .long EFER_LME | EFER_SCE
+ .long EFER_LME | (EFER_SCE * IS_ENABLED(CONFIG_PV))
GLOBAL(trampoline_xen_phys_start)
.long 0
/* Set system registers and transfer control. */
asm volatile("pushq $0\n\tpopfq");
rdmsrl(MSR_EFER, efer);
- efer |= EFER_SCE;
- if ( cpu_has_nx )
- efer |= EFER_NX;
+ efer |= trampoline_efer;
wrmsrl(MSR_EFER, efer);
wrmsrl(MSR_IA32_CR_PAT, XEN_MSR_PAT);
write_cr0(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP |
*/
static void vmx_restore_host_msrs(void)
{
+ /* No PV guests? No need to restore host SYSCALL infrastructure. */
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return;
+
/* Relies on the SYSCALL trampoline being at the start of the stubs. */
wrmsrl(MSR_STAR, XEN_MSR_STAR);
wrmsrl(MSR_LSTAR, this_cpu(stubs.addr));
DEFINE_PER_CPU(struct stubs, stubs);
-#ifdef CONFIG_PV
void lstar_enter(void);
void cstar_enter(void);
-#else
-static void __cold star_enter(void)
-{
- panic("lstar/cstar\n");
-}
-#define lstar_enter star_enter
-#define cstar_enter star_enter
-#endif /* CONFIG_PV */
void subarch_percpu_traps_init(void)
{
/* IST_MAX IST pages + at least 1 guard page + primary stack. */
BUILD_BUG_ON((IST_MAX + 1) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);
+ /* No PV guests? No need to set up SYSCALL/SYSENTER infrastructure. */
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return;
+
stub_page = map_domain_page(_mfn(this_cpu(stubs.mfn)));
/*
{
/* SYSENTER entry. */
wrmsrl(MSR_IA32_SYSENTER_ESP, stack_bottom);
- wrmsrl(MSR_IA32_SYSENTER_EIP,
- IS_ENABLED(CONFIG_PV) ? (unsigned long)sysenter_entry : 0);
- wrmsr(MSR_IA32_SYSENTER_CS,
- IS_ENABLED(CONFIG_PV) ? __HYPERVISOR_CS : 0, 0);
+ wrmsrl(MSR_IA32_SYSENTER_EIP, (unsigned long)sysenter_entry);
+ wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0);
}
/* Trampoline for SYSCALL entry from compatibility mode. */