bpf: get rid of pure_initcall dependency to enable jits
authorDaniel Borkmann <daniel@iogearbox.net>
Fri, 16 Aug 2019 22:59:20 +0000 (23:59 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 25 Aug 2019 08:51:40 +0000 (10:51 +0200)
commit fa9dd599b4dae841924b022768354cfde9affecb upstream.

Having a pure_initcall() callback just to permanently enable BPF
JITs under CONFIG_BPF_JIT_ALWAYS_ON is unnecessary and could leave
a small race window in the future where the JIT is still disabled on boot.
Since we know about the setting at compilation time anyway, just
initialize it properly there. Also consolidate all the individual
bpf_jit_enable variables into a single one and move it to one
location. Moreover, don't allow setting unspecified garbage
values on them.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
[bwh: Backported to 4.9 as dependency of commit 2e4a30983b0f
 "bpf: restrict access to core bpf sysctls":
 - Drop change in arch/mips/net/ebpf_jit.c
 - Drop change to bpf_jit_kallsyms
 - Adjust filenames, context]
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm/net/bpf_jit_32.c
arch/arm64/net/bpf_jit_comp.c
arch/mips/net/bpf_jit.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/net/bpf_jit_comp64.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp.c
kernel/bpf/core.c
net/core/sysctl_net_core.c
net/socket.c

index 93d0b6d0b63eede5f36de91428bb181bab2e0971..7fd448b23b9468c4940c399818dcac44d5c5d0ba 100644 (file)
@@ -72,8 +72,6 @@ struct jit_ctx {
 #endif
 };
 
-int bpf_jit_enable __read_mostly;
-
 static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
                      unsigned int size)
 {
index b47a26f4290cfc804e97f15d90d98b709cde2551..939c607b1376d4f3cd8d900a2d3002d02ab4d527 100644 (file)
@@ -30,8 +30,6 @@
 
 #include "bpf_jit.h"
 
-int bpf_jit_enable __read_mostly;
-
 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
index 2486037391980738895587141c539996ada69f43..bb9f779326d03e95c821b4708695c036a965006c 100644 (file)
@@ -1194,8 +1194,6 @@ jmp_cmp:
        return 0;
 }
 
-int bpf_jit_enable __read_mostly;
-
 void bpf_jit_compile(struct bpf_prog *fp)
 {
        struct jit_ctx ctx;
index 9c58194c7ea579c273d2817a4d788e9f0e804dc3..158f4300831465979599ebb1efd0291371a5132c 100644 (file)
@@ -18,8 +18,6 @@
 
 #include "bpf_jit32.h"
 
-int bpf_jit_enable __read_mostly;
-
 static inline void bpf_flush_icache(void *start, void *end)
 {
        smp_wmb();
index 9f0810cfe5f309bf1d2b4dc295cef54c8896288b..888ee95340da27b15ba7021e88b538638220f655 100644 (file)
@@ -21,8 +21,6 @@
 
 #include "bpf_jit64.h"
 
-int bpf_jit_enable __read_mostly;
-
 static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
 {
        int *p = area;
index 8bd25aebf488eb240727a48346ecedcae417b117..896344b6e0363357d9386b55557955c4c5782685 100644 (file)
@@ -28,8 +28,6 @@
 #include <asm/nospec-branch.h>
 #include "bpf_jit.h"
 
-int bpf_jit_enable __read_mostly;
-
 struct bpf_jit {
        u32 seen;               /* Flags to remember seen eBPF instructions */
        u32 seen_reg[16];       /* Array to remember which registers are used */
index a6d9204a6a0bd352ec6fa4e3c89450a81dee8a37..98a4da3012e31d35ed6337501ac1bb6b7e6be3af 100644 (file)
@@ -10,8 +10,6 @@
 
 #include "bpf_jit.h"
 
-int bpf_jit_enable __read_mostly;
-
 static inline bool is_simm13(unsigned int value)
 {
        return value + 0x1000 < 0x2000;
index cd97645208515e5ba8c685d374eca6d0adf8f0e0..d9dabd0c31fc4bb0a92956d01f1f7d826eaf4adc 100644 (file)
@@ -15,8 +15,6 @@
 #include <asm/nospec-branch.h>
 #include <linux/bpf.h>
 
-int bpf_jit_enable __read_mostly;
-
 /*
  * assembly code in arch/x86/net/bpf_jit.S
  */
index 879ca844ba1d3371617ef4a16bf103dad61d69ee..da03ab4ec57838b48f12e3af9c5991c93968ed42 100644 (file)
@@ -208,6 +208,10 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 }
 
 #ifdef CONFIG_BPF_JIT
+/* All BPF JIT sysctl knobs here. */
+int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
+int bpf_jit_harden   __read_mostly;
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                     unsigned int alignment,
@@ -244,8 +248,6 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
        module_memfree(hdr);
 }
 
-int bpf_jit_harden __read_mostly;
-
 static int bpf_jit_blind_insn(const struct bpf_insn *from,
                              const struct bpf_insn *aux,
                              struct bpf_insn *to_buff)
@@ -925,8 +927,13 @@ load_byte:
 STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */
 
 #else
-static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn)
+static unsigned int __bpf_prog_ret0_warn(void *ctx,
+                                        const struct bpf_insn *insn)
 {
+       /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
+        * is not working properly, so warn about it!
+        */
+       WARN_ON_ONCE(1);
        return 0;
 }
 #endif
@@ -981,7 +988,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
        fp->bpf_func = (void *) __bpf_prog_run;
 #else
-       fp->bpf_func = (void *) __bpf_prog_ret0;
+       fp->bpf_func = (void *) __bpf_prog_ret0_warn;
 #endif
 
        /* eBPF JITs can rewrite the program in case constant
index 546ba76b35a5ab07631fb02bd2c3d5034eab15c3..7b7d26a4f8c827600e6559eafc4966d8d113e42f 100644 (file)
@@ -24,6 +24,7 @@
 
 static int zero = 0;
 static int one = 1;
+static int two __maybe_unused = 2;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
 static int max_skb_frags = MAX_SKB_FRAGS;
@@ -292,13 +293,14 @@ static struct ctl_table net_core_table[] = {
                .data           = &bpf_jit_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-               .proc_handler   = proc_dointvec
-#else
                .proc_handler   = proc_dointvec_minmax,
+# ifdef CONFIG_BPF_JIT_ALWAYS_ON
                .extra1         = &one,
                .extra2         = &one,
-#endif
+# else
+               .extra1         = &zero,
+               .extra2         = &two,
+# endif
        },
 # ifdef CONFIG_HAVE_EBPF_JIT
        {
@@ -306,7 +308,9 @@ static struct ctl_table net_core_table[] = {
                .data           = &bpf_jit_harden,
                .maxlen         = sizeof(int),
                .mode           = 0600,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &two,
        },
 # endif
 #endif
index d9e2989c10c4c6bf9e37ee8e8268845a8d6fe88f..bf99bc1fab2c49a4c991bcc8f7f67b28113673c0 100644 (file)
@@ -2550,15 +2550,6 @@ out_fs:
 
 core_initcall(sock_init);      /* early initcall */
 
-static int __init jit_init(void)
-{
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
-       bpf_jit_enable = 1;
-#endif
-       return 0;
-}
-pure_initcall(jit_init);
-
 #ifdef CONFIG_PROC_FS
 void socket_seq_show(struct seq_file *seq)
 {