bpf: Introduce BPF nospec instruction for mitigating Spectre v4
author Sasha Levin <sashal@kernel.org>
Tue, 13 Jul 2021 08:18:31 +0000 (08:18 +0000)
committer Salvatore Bonaccorso <carnil@debian.org>
Thu, 23 Sep 2021 20:35:21 +0000 (21:35 +0100)
[ Upstream commit f5e81d1117501546b7be050c5fbafa6efd2c722c ]

In case of JITs, each of the JIT backends compiles the BPF nospec instruction
/either/ to a machine instruction which emits a speculation barrier /or/ to
/no/ machine instruction in case the underlying architecture is not affected
by Speculative Store Bypass or has different mitigations in place already.
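
The instruction the backends dispatch on is deliberately trivial: it reuses
the BPF_ST class with the otherwise unused, non-UAPI mode 0xc0 and carries
no operands. Purely as an illustration (not part of this patch), a small
insn sequence built with the BPF_ST_NOSPEC() helper that the
include/linux/filter.h hunk below adds:

  #include <linux/filter.h>

  /* Illustrative only: all register, offset and immediate fields of the
   * barrier insn are zero, so JIT backends only key off the opcode.
   */
  static const struct bpf_insn sample[] = {
          BPF_MOV64_IMM(BPF_REG_0, 0),    /* r0 = 0                   */
          BPF_ST_NOSPEC(),                /* speculation barrier (v4) */
          BPF_EXIT_INSN(),                /* return r0                */
  };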

This covers both x86 and (implicitly) arm64: In case of x86, we use the
'lfence' instruction for mitigation. In case of arm64, we rely on the
firmware mitigation as controlled via the ssbd kernel parameter. Whenever
the mitigation is enabled, it works for all of the kernel code with no need
to provide any additional instructions here (hence only a comment in the
arm64 JIT). Other archs can follow as needed. The BPF nospec instruction
specifically targets Spectre v4 since i) we don't use a serialization
barrier for the Spectre v1 case, and ii) mitigation instructions for v1 and
v4 might be different on some archs.
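
Condensed from the x86-64 do_jit() hunk below, the whole per-backend change
is a single new case in the instruction switch; backends that emit no
barrier of their own (e.g. arm and arm64 below) simply leave the case body
empty so that no machine code is generated for it:

  case BPF_ST | BPF_NOSPEC:
          /* 'lfence' is an SSE2 instruction, hence the X86_FEATURE_XMM2
           * check; on CPUs without SSE2, nothing is emitted.
           */
          if (boot_cpu_has(X86_FEATURE_XMM2))
                  EMIT3(0x0F, 0xAE, 0xE8); /* lfence */
          break;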

The BPF nospec instruction is required by a future commit, in which the BPF
verifier will annotate intermediate BPF programs with speculation barriers.
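
Since the verifier side is not part of this patch, the following is only a
hypothetical sketch of how such an annotation could look once the verifier
decides that a store must not be bypassed speculatively; bpf_patch_insn_data(),
env and insn_idx are existing verifier-internal names used here purely for
illustration:

  /* Hypothetical sketch -- the actual verifier change lands in the
   * follow-up commit and may be structured differently: replace the
   * store at insn_idx with the store plus a trailing nospec barrier.
   */
  static const struct bpf_insn patch[] = {
          BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), /* guarded stack store */
          BPF_ST_NOSPEC(),                      /* fence its speculative bypass */
  };

  /* new_prog = bpf_patch_insn_data(env, insn_idx, patch, ARRAY_SIZE(patch)); */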

Co-developed-by: Piotr Krysiuk <piotras@gmail.com>
Co-developed-by: Benedict Schlueter <benedict.schlueter@rub.de>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Piotr Krysiuk <piotras@gmail.com>
Signed-off-by: Benedict Schlueter <benedict.schlueter@rub.de>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Gbp-Pq: Topic bugfix/all
Gbp-Pq: Name bpf-introduce-bpf-nospec-instruction-for-mitigating-.patch

13 files changed:
arch/arm/net/bpf_jit_32.c
arch/arm64/net/bpf_jit_comp.c
arch/mips/net/ebpf_jit.c
arch/powerpc/net/bpf_jit_comp64.c
arch/riscv/net/bpf_jit_comp32.c
arch/riscv/net/bpf_jit_comp64.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/net/bpf_jit_comp_64.c
arch/x86/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp32.c
include/linux/filter.h
kernel/bpf/core.c
kernel/bpf/disasm.c

arch/arm/net/bpf_jit_32.c
index 0207b6ea6e8a0b3833f6b5376cc7c1fe75740358..ce8b043263521b059974ada9260dd177f87b8066 100644
@@ -1602,6 +1602,9 @@ exit:
                rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
                emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
                break;
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
arch/arm64/net/bpf_jit_comp.c
index ef9f1d5e989d062a589e286d3dac30693d1e43c0..345066b8e9fc8ed65add25ea8066caefe35944cf 100644
@@ -829,6 +829,19 @@ emit_cond_jmp:
                        return ret;
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               /*
+                * Nothing required here.
+                *
+                * In case of arm64, we rely on the firmware mitigation of
+                * Speculative Store Bypass as controlled via the ssbd kernel
+                * parameter. Whenever the mitigation is enabled, it works
+                * for all of the kernel code with no need to provide any
+                * additional instructions.
+                */
+               break;
+
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
arch/mips/net/ebpf_jit.c
index 561154cbcc401eb8e5eee381af4873f638583fc1..b31b91e57c34128d363a8076290d081edbe3b3b6 100644
@@ -1355,6 +1355,9 @@ jeq_common:
                }
                break;
 
+       case BPF_ST | BPF_NOSPEC: /* speculation barrier */
+               break;
+
        case BPF_ST | BPF_B | BPF_MEM:
        case BPF_ST | BPF_H | BPF_MEM:
        case BPF_ST | BPF_W | BPF_MEM:
arch/powerpc/net/bpf_jit_comp64.c
index 022103c6a201aa5386c112d9fa9bfbdcb1cc6b1e..658ca2bab13cc56bbf7c4bc4e7fe4d92712e4bd6 100644
@@ -646,6 +646,12 @@ emit_clear:
                        }
                        break;
 
+               /*
+                * BPF_ST NOSPEC (speculation barrier)
+                */
+               case BPF_ST | BPF_NOSPEC:
+                       break;
+
                /*
                 * BPF_ST(X)
                 */
arch/riscv/net/bpf_jit_comp32.c
index 579575f9cdae0873057ceb878fc6e9dce46e21c8..f300f93ba64563bc4bcf7ef106d1d5ba7af994e6 100644
@@ -1251,6 +1251,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                        return -1;
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
+
        case BPF_ST | BPF_MEM | BPF_B:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_W:
arch/riscv/net/bpf_jit_comp64.c
index 8a56b52931170ac0ebc78872c8352837bf998278..c113ae818b14ed6932debddf3f70a54af57f7bff 100644
@@ -939,6 +939,10 @@ out_be:
                emit_ld(rd, 0, RV_REG_T1, ctx);
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
+
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_B:
                emit_imm(RV_REG_T1, imm, ctx);
arch/s390/net/bpf_jit_comp.c
index 0a418279287691b5ae4553134859b19ae06bc37b..44530fe5d6fafd4ddb7056d8d0fd4fbe4746f108 100644
@@ -1153,6 +1153,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                        break;
                }
                break;
+       /*
+        * BPF_NOSPEC (speculation barrier)
+        */
+       case BPF_ST | BPF_NOSPEC:
+               break;
        /*
         * BPF_ST(X)
         */
arch/sparc/net/bpf_jit_comp_64.c
index 3364e2a009899c39860e902627b62ee2d07ad2b9..fef734473c0f38f3bac21e61900880f1a34de5ef 100644
@@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                        return 1;
                break;
        }
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
arch/x86/net/bpf_jit_comp.c
index a11796bbb9cee50facef2eb09d61c64394f44d7d..40b47f98c0330fae66549c8149c74fd46852512b 100644
@@ -1138,6 +1138,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        }
                        break;
 
+                       /* speculation barrier */
+               case BPF_ST | BPF_NOSPEC:
+                       if (boot_cpu_has(X86_FEATURE_XMM2))
+                               /* Emit 'lfence' */
+                               EMIT3(0x0F, 0xAE, 0xE8);
+                       break;
+
                        /* ST: *(u8*)(dst_reg + off) = imm */
                case BPF_ST | BPF_MEM | BPF_B:
                        if (is_ereg(dst_reg))
arch/x86/net/bpf_jit_comp32.c
index 2cf4d217840d8207651bfe05ae5c037cf79016fc..4bd0f98df700cf0cd96ff74fa1932cc378830947 100644
@@ -1705,6 +1705,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        i++;
                        break;
                }
+               /* speculation barrier */
+               case BPF_ST | BPF_NOSPEC:
+                       if (boot_cpu_has(X86_FEATURE_XMM2))
+                               /* Emit 'lfence' */
+                               EMIT3(0x0F, 0xAE, 0xE8);
+                       break;
                /* ST: *(u8*)(dst_reg + off) = imm */
                case BPF_ST | BPF_MEM | BPF_H:
                case BPF_ST | BPF_MEM | BPF_B:
include/linux/filter.h
index e2ffa02f9067a2f0fa5472451a694b07824ed59a..822b701c803d1118554ec7e7509dc49089b56479 100644
@@ -72,6 +72,11 @@ struct ctl_table_header;
 /* unused opcode to mark call to interpreter with arguments */
 #define BPF_CALL_ARGS  0xe0
 
+/* unused opcode to mark speculation barrier for mitigating
+ * Speculative Store Bypass
+ */
+#define BPF_NOSPEC     0xc0
+
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
  * addresses.
@@ -372,6 +377,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
                .off   = 0,                                     \
                .imm   = 0 })
 
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC()                                                \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ST | BPF_NOSPEC,                   \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
 /* Internal classic blocks for direct assignment */
 
 #define __BPF_STMT(CODE, K)                                    \
kernel/bpf/core.c
index 182e162f8fd0b6c66c7c5ec724b6c78dc6a7f40c..93f8b9ddcf3e6867449e9f28cd78556e10a535ca 100644
@@ -32,6 +32,8 @@
 #include <linux/perf_event.h>
 #include <linux/extable.h>
 #include <linux/log2.h>
+
+#include <asm/barrier.h>
 #include <asm/unaligned.h>
 
 /* Registers */
@@ -1380,6 +1382,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
                /* Non-UAPI available opcodes. */
                [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
                [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
+               [BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
                [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
                [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
                [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
@@ -1599,7 +1602,21 @@ out:
        COND_JMP(s, JSGE, >=)
        COND_JMP(s, JSLE, <=)
 #undef COND_JMP
-       /* STX and ST and LDX*/
+       /* ST, STX and LDX*/
+       ST_NOSPEC:
+               /* Speculation barrier for mitigating Speculative Store Bypass.
+                * In case of arm64, we rely on the firmware mitigation as
+                * controlled via the ssbd kernel parameter. Whenever the
+                * mitigation is enabled, it works for all of the kernel code
+                * with no need to provide any additional instructions here.
+                * In case of x86, we use 'lfence' insn for mitigation. We
+                * reuse preexisting logic from Spectre v1 mitigation that
+                * happens to produce the required code on x86 for v4 as well.
+                */
+#ifdef CONFIG_X86
+               barrier_nospec();
+#endif
+               CONT;
 #define LDST(SIZEOP, SIZE)                                             \
        STX_MEM_##SIZEOP:                                               \
                *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
kernel/bpf/disasm.c
index b44d8c447afd1d699b27e152e5bf06a24dff80f0..ff1dd7d45b58ab00d0f3ea5b317ed3b9bd6e6e8f 100644
@@ -162,15 +162,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
                else
                        verbose(cbs->private_data, "BUG_%02x\n", insn->code);
        } else if (class == BPF_ST) {
-               if (BPF_MODE(insn->code) != BPF_MEM) {
+               if (BPF_MODE(insn->code) == BPF_MEM) {
+                       verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
+                               insn->code,
+                               bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+                               insn->dst_reg,
+                               insn->off, insn->imm);
+               } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
+                       verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
+               } else {
                        verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
-                       return;
                }
-               verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
-                       insn->code,
-                       bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
-                       insn->dst_reg,
-                       insn->off, insn->imm);
        } else if (class == BPF_LDX) {
                if (BPF_MODE(insn->code) != BPF_MEM) {
                        verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);