GET_CURRENT(bx)
- /* SPEC_CTRL_ENTRY_FROM_SVM Req: %rsp=regs/cpuinfo Clob: acd */
+ /* SPEC_CTRL_ENTRY_FROM_SVM Req: %rsp=regs/cpuinfo, %rdx=0 Clob: acd */
+
+ .macro svm_vmexit_cond_ibpb
+ testb $SCF_entry_ibpb, CPUINFO_spec_ctrl_flags(%rsp)
+ jz .L_skip_ibpb
+
+ mov $MSR_PRED_CMD, %ecx
+ mov $PRED_CMD_IBPB, %eax
+ wrmsr
+.L_skip_ibpb:
+ .endm
+ ALTERNATIVE "", svm_vmexit_cond_ibpb, X86_FEATURE_IBPB_ENTRY_HVM
+
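For clarity, a rough C rendering of what svm_vmexit_cond_ibpb amounts to (a sketch assuming a generic wrmsr() helper, not Xen code): the flags byte in the cpuinfo block at the stack top is tested, and if SCF_entry_ibpb is set, PRED_CMD_IBPB is written to MSR_PRED_CMD. No speculation barrier is needed on the skip path because the unconditional STGI below is serialising.

    /*
     * Sketch only (not Xen code): the logic of svm_vmexit_cond_ibpb in C.
     * wrmsr() is an assumed stand-in for the real instruction.
     */
    #include <stdint.h>

    #define MSR_PRED_CMD    0x00000049u
    #define PRED_CMD_IBPB   (1u << 0)
    #define SCF_entry_ibpb  (1u << 5)

    extern void wrmsr(uint32_t msr, uint64_t val); /* assumed helper */

    static void svm_vmexit_cond_ibpb(uint8_t spec_ctrl_flags)
    {
        /* Flush branch prediction state before Xen runs any indirect branches. */
        if ( spec_ctrl_flags & SCF_entry_ibpb )
            wrmsr(MSR_PRED_CMD, PRED_CMD_IBPB);

        /* No lfence on the skip path: the unconditional STGI below serialises. */
    }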
ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM
.macro svm_vmexit_spec_ctrl
ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+ /*
+ * STGI is executed unconditionally, and is sufficiently serialising
+ * to safely resolve any Spectre-v1 concerns in the above logic.
+ */
stgi
GLOBAL(svm_stgi_label)
mov %rsp,%rdi
rc = vmx_add_msr(v, MSR_FLUSH_CMD, FLUSH_CMD_L1D,
VMX_MSR_GUEST_LOADONLY);
+ if ( !rc && (d->arch.spec_ctrl_flags & SCF_entry_ibpb) )
+ rc = vmx_add_msr(v, MSR_PRED_CMD, PRED_CMD_IBPB,
+ VMX_MSR_HOST);
+
out:
vmx_vmcs_exit(v);
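On the VMX side no software write is added to the exit path; when SCF_entry_ibpb is set for the domain, MSR_PRED_CMD is appended to the VMCS host MSR-load list (VMX_MSR_HOST), so the processor performs the IBPB itself as part of the VM exit transition. A conceptual sketch of that mechanism (architectural behaviour, not Xen code; wrmsr() is an assumed stand-in):

    /*
     * Sketch only: what the CPU does with the host MSR-load area on VM exit.
     * Entry layout follows the architectural format: 32-bit MSR index,
     * 32-bit reserved, 64-bit value.
     */
    #include <stdint.h>

    struct msr_load_entry {
        uint32_t index;      /* e.g. MSR_PRED_CMD */
        uint32_t reserved;   /* must be zero */
        uint64_t data;       /* e.g. PRED_CMD_IBPB */
    };

    extern void wrmsr(uint32_t msr, uint64_t val); /* assumed helper */

    static void load_host_msrs(const struct msr_load_entry *area, unsigned int count)
    {
        for ( unsigned int i = 0; i < count; i++ )
            wrmsr(area[i].index, area[i].data);
    }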
XEN_CPUFEATURE(SC_VERW_IDLE, X86_SYNTH(25)) /* VERW used by Xen for idle */
XEN_CPUFEATURE(XEN_SHSTK, X86_SYNTH(26)) /* Xen uses CET Shadow Stacks */
XEN_CPUFEATURE(XEN_IBT, X86_SYNTH(27)) /* Xen uses CET Indirect Branch Tracking */
+XEN_CPUFEATURE(IBPB_ENTRY_PV, X86_SYNTH(28)) /* MSR_PRED_CMD used by Xen for PV */
+XEN_CPUFEATURE(IBPB_ENTRY_HVM, X86_SYNTH(29)) /* MSR_PRED_CMD used by Xen for HVM */
/* Bug words follow the synthetic words. */
#define X86_NR_BUG 1
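The ALTERNATIVE sites in the entry paths key off these synthetic bits, staying as NOPs unless the bit is forced at boot. The selection logic lives in spec_ctrl.c and is not part of this excerpt; a plausible sketch, assuming Xen's boot_cpu_has()/setup_force_cpu_cap() helpers and hypothetical opt_ibpb_entry_{pv,hvm} controls:

    /*
     * Sketch only, not from this patch: how init_speculation_mitigations()
     * might force the new synthetic bits.  opt_ibpb_entry_pv/hvm are assumed
     * names for illustration.
     */
    static bool opt_ibpb_entry_pv = true, opt_ibpb_entry_hvm = true;

    static void __init ibpb_entry_select(void)
    {
        if ( !boot_cpu_has(X86_FEATURE_IBPB) )
            return; /* No architectural IBPB - nothing to force. */

        if ( opt_ibpb_entry_pv )
            setup_force_cpu_cap(X86_FEATURE_IBPB_ENTRY_PV);
        if ( opt_ibpb_entry_hvm )
            setup_force_cpu_cap(X86_FEATURE_IBPB_ENTRY_HVM);
    }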
#define SCF_ist_sc_msr (1 << 1)
#define SCF_ist_rsb (1 << 2)
#define SCF_verw (1 << 3)
+#define SCF_ist_ibpb (1 << 4)
+#define SCF_entry_ibpb (1 << 5)
/*
* The IST paths (NMI/#MC) can interrupt any arbitrary context.  Some
* functionality requires updated microcode to work.
*
* These are the controls to inhibit on the S3 resume path until microcode has
* been reloaded.
*/
-#define SCF_IST_MASK (SCF_ist_sc_msr)
+#define SCF_IST_MASK (SCF_ist_sc_msr | SCF_ist_ibpb)
/*
* Some speculative protections are per-domain. These settings are merged
* into the top-of-stack block in the context switch path.
*/
-#define SCF_DOM_MASK (SCF_verw)
+#define SCF_DOM_MASK (SCF_verw | SCF_entry_ibpb)
#ifndef __ASSEMBLY__
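The two masks partition the new bits by consumer: SCF_ist_ibpb joins the IST set, which is inhibited on the S3 resume path until microcode has been reloaded, while SCF_entry_ibpb joins the per-domain set merged into the top-of-stack block at context switch. A minimal sketch of that merge, assuming a cpuinfo-style flags byte and the incoming domain's flags byte (illustrative, not verbatim Xen code):

    /*
     * Minimal sketch (not verbatim Xen code) of the context-switch merge that
     * SCF_DOM_MASK enables: per-domain controls from the incoming domain
     * replace the previous domain's bits in the per-CPU flags byte.
     */
    #include <stdint.h>

    #define SCF_verw        (1u << 3)
    #define SCF_entry_ibpb  (1u << 5)
    #define SCF_DOM_MASK    (SCF_verw | SCF_entry_ibpb)

    static inline uint8_t merge_dom_flags(uint8_t cpu_flags, uint8_t dom_flags)
    {
        return (cpu_flags & ~SCF_DOM_MASK) | (dom_flags & SCF_DOM_MASK);
    }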
* - SPEC_CTRL_EXIT_TO_{SVM,VMX}
*/
+.macro DO_SPEC_CTRL_COND_IBPB maybexen:req
+/*
+ * Requires %rsp=regs (also cpuinfo if !maybexen)
+ * Requires %r14=stack_end (if maybexen), %rdx=0
+ * Clobbers %rax, %rcx, %rdx
+ *
+ * Conditionally issue IBPB if SCF_entry_ibpb is active. In the maybexen
+ * case, we can safely look at UREGS_cs to skip taking the hit when
+ * interrupting Xen.
+ */
+ .if \maybexen
+ testb $SCF_entry_ibpb, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ jz .L\@_skip
+ testb $3, UREGS_cs(%rsp)
+ .else
+ testb $SCF_entry_ibpb, CPUINFO_spec_ctrl_flags(%rsp)
+ .endif
+ jz .L\@_skip
+
+ mov $MSR_PRED_CMD, %ecx
+ mov $PRED_CMD_IBPB, %eax
+ wrmsr
+ jmp .L\@_done
+
+.L\@_skip:
+ lfence
+.L\@_done:
+.endm
+
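The skip logic is the subtle part: with maybexen=1 the flags are fetched via %r14 from the per-CPU stack block, and a frame whose UREGS_cs has RPL 0 means Xen itself was interrupted, so the comparatively expensive IBPB is skipped; both not-taken paths land on the lfence so the conditional branches cannot be abused speculatively (Spectre v1). In C terms (a sketch with illustrative names, not Xen code):

    /* Sketch only: the decision logic of DO_SPEC_CTRL_COND_IBPB in C form. */
    #include <stdint.h>
    #include <stdbool.h>

    #define SCF_entry_ibpb  (1u << 5)

    static bool want_entry_ibpb(uint8_t spec_ctrl_flags, bool maybexen, uint16_t regs_cs)
    {
        if ( !(spec_ctrl_flags & SCF_entry_ibpb) )
            return false;

        /* Possibly interrupted Xen?  Skip taking the hit for Xen (RPL 0) frames. */
        if ( maybexen && (regs_cs & 3) == 0 )
            return false;

        return true;
    }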
.macro DO_OVERWRITE_RSB tmp=rax
/*
* Requires nothing
/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
#define SPEC_CTRL_ENTRY_FROM_PV \
+ ALTERNATIVE "", __stringify(DO_SPEC_CTRL_COND_IBPB maybexen=0), \
+ X86_FEATURE_IBPB_ENTRY_PV; \
ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
ALTERNATIVE "", __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), \
X86_FEATURE_SC_MSR_PV
/* Use in interrupt/exception context. May interrupt Xen or PV context. */
#define SPEC_CTRL_ENTRY_FROM_INTR \
+ ALTERNATIVE "", __stringify(DO_SPEC_CTRL_COND_IBPB maybexen=1), \
+ X86_FEATURE_IBPB_ENTRY_PV; \
ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
ALTERNATIVE "", __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), \
X86_FEATURE_SC_MSR_PV
* Requires %rsp=regs, %r14=stack_end, %rdx=0
* Clobbers %rax, %rbx, %rcx, %rdx
*
- * This is logical merge of DO_OVERWRITE_RSB and DO_SPEC_CTRL_ENTRY
- * maybexen=1, but with conditionals rather than alternatives.
+ * This is a logical merge of:
+ * DO_SPEC_CTRL_COND_IBPB maybexen=0
+ * DO_OVERWRITE_RSB
+ * DO_SPEC_CTRL_ENTRY maybexen=1
+ * but with conditionals rather than alternatives.
*/
movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %ebx
+ test $SCF_ist_ibpb, %bl
+ jz .L\@_skip_ibpb
+
+ mov $MSR_PRED_CMD, %ecx
+ mov $PRED_CMD_IBPB, %eax
+ wrmsr
+
+.L\@_skip_ibpb:
+
test $SCF_ist_rsb, %bl
jz .L\@_skip_rsb
movl $HYPERCALL_VECTOR, 4(%rsp)
SAVE_ALL compat=1 /* DPL1 gate, restricted to 32bit PV guests only. */
- SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
CR4_PV32_RESTORE
movl $TRAP_syscall, 4(%rsp)
SAVE_ALL
- SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
GET_STACK_END(bx)
movl $TRAP_syscall, 4(%rsp)
SAVE_ALL
- SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
GET_STACK_END(bx)
movl $TRAP_syscall, 4(%rsp)
SAVE_ALL
- SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
GET_STACK_END(bx)
movl $0x80, 4(%rsp)
SAVE_ALL
- SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
GET_STACK_END(bx)
GET_STACK_END(14)
- SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, %rdx=0, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
GET_STACK_END(14)
- SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, %rdx=0, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx