jz .L_bsp
/* APs. Set up shadow stacks before entering C. */
-
+#ifdef CONFIG_XEN_SHSTK
/*
 * Skip shadow stack setup unless the XEN_SHSTK feature bit is set in
 * boot_cpu_data, i.e. unless the BSP decided shadow stacks are in use.
 * NOTE(review): X86_FEATURE_XEN_SHSTK is presumably a Xen-synthesised
 * feature flag rather than a raw CPUID bit -- confirm against the
 * feature definitions.
 */
testl $cpufeat_mask(X86_FEATURE_XEN_SHSTK), \
CPUINFO_FEATURE_OFFSET(X86_FEATURE_XEN_SHSTK) + boot_cpu_data(%rip)
je .L_ap_shstk_done
/*
 * Enable CET in CR4.  The 32-bit write to %ecx zero-extends into %rcx,
 * so the subsequent 64-bit move into %cr4 carries no stale upper bits.
 */
mov $XEN_MINIMAL_CR4 | X86_CR4_CET, %ecx
mov %rcx, %cr4
/*
 * SETSSBSY marks the supervisor shadow stack token busy, activating the
 * shadow stack addressed via MSR_PL0_SSP (set up elsewhere -- not
 * visible in this hunk).
 */
setssbsy
+#endif
.L_ap_shstk_done:
call start_secondary
stack_base[0] = stack;
memguard_guard_stack(stack);
/*
 * Gate on IS_ENABLED() as well as the runtime flag so the compiler can
 * discard the wrmsrl() entirely when shadow stack support is compiled
 * out.  NOTE(review): assumes cpu_has_xen_shstk remains defined (and
 * false) when !CONFIG_XEN_SHSTK -- confirm.
 */
- if ( cpu_has_xen_shstk )
+ if ( IS_ENABLED(CONFIG_XEN_SHSTK) && cpu_has_xen_shstk )
{
/*
 * Point the PL0 shadow stack pointer at the top of the primary shadow
 * stack slot within this CPU's stack area.  The final -8 presumably
 * leaves room for the supervisor shadow stack token at the very top --
 * confirm against the shadow stack layout documentation.
 */
wrmsrl(MSR_PL0_SSP,
(unsigned long)stack + (PRIMARY_SHSTK_SLOT + 1) * PAGE_SIZE - 8);
/* See lstar_enter for entry register state. */
ENTRY(cstar_enter)
+#ifdef CONFIG_XEN_SHSTK
/*
 * ALTERNATIVE patches SETSSBSY in at boot when XEN_SHSTK is active;
 * otherwise this site stays a no-op.  SETSSBSY marks the supervisor
 * shadow stack token busy, activating the shadow stack for this entry.
 */
ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
+#endif
/* sti could live here when we don't switch page tables below. */
CR4_PV32_RESTORE
movq 8(%rsp),%rax /* Restore %rax. */
cmpl $1,%ecx
ja .Lrestore_rcx_iret_exit_to_guest
+#ifdef CONFIG_XEN_SHSTK
/* Clear the supervisor shadow stack token busy bit. */
.macro rag_clrssbsy
/*
 * RDSSPQ reads the current shadow stack pointer into %rcx; CLRSSBSY
 * then clears the busy bit in the supervisor token at that address.
 * Clobbering %rcx is fine here: it is reloaded from the stack
 * immediately below.
 */
rdsspq %rcx
clrssbsy (%rcx)
.endm
/* Patched in at boot only when shadow stacks are in use. */
ALTERNATIVE "", rag_clrssbsy, X86_FEATURE_XEN_SHSTK
+#endif
movq 8(%rsp), %rcx # RIP
cmpw $FLAT_USER_CS32,16(%rsp)# CS
* %ss must be saved into the space left by the trampoline.
*/
ENTRY(lstar_enter)
+#ifdef CONFIG_XEN_SHSTK
/*
 * Activate the supervisor shadow stack (token busy bit) on entry from
 * guest context.  ALTERNATIVE leaves this as a no-op unless the
 * XEN_SHSTK feature was established at boot.
 */
ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
+#endif
/* sti could live here when we don't switch page tables below. */
movq 8(%rsp),%rax /* Restore %rax. */
movq $FLAT_KERNEL_SS,8(%rsp)
jmp test_all_events
ENTRY(sysenter_entry)
+#ifdef CONFIG_XEN_SHSTK
/*
 * Same pattern as lstar_enter/cstar_enter: SETSSBSY is patched in at
 * boot when shadow stacks are in use, marking the supervisor shadow
 * stack token busy on entry.
 */
ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
+#endif
/* sti could live here when we don't switch page tables below. */
pushq $FLAT_USER_SS
pushq $0
rep movsq
+#ifdef CONFIG_XEN_SHSTK
/* Switch Shadow Stacks */
.macro ist_switch_shstk
rdsspq %rdi
setssbsy
.endm
ALTERNATIVE "", ist_switch_shstk, X86_FEATURE_XEN_SHSTK
+#endif
1:
#else
ASSERT_CONTEXT_IS_XEN