bpf: free up BPF_JMP | BPF_CALL | BPF_X opcode
author     Alexei Starovoitov <ast@fb.com>
           Tue, 30 May 2017 20:31:27 +0000 (13:31 -0700)
committer  David S. Miller <davem@davemloft.net>
           Wed, 31 May 2017 23:29:47 +0000 (19:29 -0400)
Free up the BPF_JMP | BPF_CALL | BPF_X opcode so it can later be used
for an actual indirect call by register, and use a kernel-internal
opcode (BPF_JMP | BPF_TAIL_CALL) to mark the call instruction into the
bpf_tail_call() helper instead.
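
For illustration, a minimal user-space sketch, not part of the patch,
that recomputes the opcode bytes involved (constant values taken from
uapi/linux/bpf_common.h):

#include <stdio.h>

#define BPF_JMP        0x05
#define BPF_CALL       0x80
#define BPF_X          0x08
#define BPF_TAIL_CALL  0xf0	/* added by this patch, kernel-internal */

int main(void)
{
	/* old encoding, now freed for a future indirect call by register */
	printf("BPF_JMP|BPF_CALL|BPF_X = 0x%02x\n",
	       BPF_JMP | BPF_CALL | BPF_X);	/* 0x8d */
	/* new kernel-internal marker for calls into bpf_tail_call() */
	printf("BPF_JMP|BPF_TAIL_CALL  = 0x%02x\n",
	       BPF_JMP | BPF_TAIL_CALL);	/* 0xf5 */
	return 0;
}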

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
arch/arm64/net/bpf_jit_comp.c
arch/powerpc/net/bpf_jit_comp64.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/net/bpf_jit_comp_64.c
arch/x86/net/bpf_jit_comp.c
include/linux/filter.h
kernel/bpf/core.c
kernel/bpf/verifier.c

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 71f930501ade7cec2d1f230aa638ad3fc9112ee8..b1d38eeb24f6f6423ea91c33aa8c55fd1fe9e98e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -586,7 +586,7 @@ emit_cond_jmp:
                break;
        }
        /* tail call */
-       case BPF_JMP | BPF_CALL | BPF_X:
+       case BPF_JMP | BPF_TAIL_CALL:
                if (emit_bpf_tail_call(ctx))
                        return -EFAULT;
                break;
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index aee2bb817ac68c4a8dfa7a28b521ac73c0f77b3f..a01366584a4b7efb157bbbce480a0c425e705519 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -938,7 +938,7 @@ common_load:
                /*
                 * Tail call
                 */
-               case BPF_JMP | BPF_CALL | BPF_X:
+               case BPF_JMP | BPF_TAIL_CALL:
                        ctx->seen |= SEEN_TAILCALL;
                        bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
                        break;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 6e97a2e3fd8d1fc5b5a2e89c67b52d1c322b1da3..42ad3832586ce4cb480710e0b5acb8ae9af431f2 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -991,7 +991,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                }
                break;
        }
-       case BPF_JMP | BPF_CALL | BPF_X:
+       case BPF_JMP | BPF_TAIL_CALL:
                /*
                 * Implicit input:
                 *  B1: pointer to ctx
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 21de77419f484b16939d4ce4bf6abbf734ccf325..4a52d34facf95329aeff85c2a02b856afbdded20 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1217,7 +1217,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        }
 
        /* tail call */
-       case BPF_JMP | BPF_CALL |BPF_X:
+       case BPF_JMP | BPF_TAIL_CALL:
                emit_tail_call(ctx);
                break;
 
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index f58939393eefe2b07cf12179f7f8ccbab33b54fc..fec12eaa0dec9cb80a0fc34c2ec998d89109d84f 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -877,7 +877,7 @@ xadd:                       if (is_imm8(insn->off))
                        }
                        break;
 
-               case BPF_JMP | BPF_CALL | BPF_X:
+               case BPF_JMP | BPF_TAIL_CALL:
                        emit_bpf_tail_call(&prog);
                        break;
 
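The five JIT hunks above are interchangeable because every per-arch JIT
dispatches on the full insn->code byte in a switch, so retiring the old
opcode is a one-line case change per arch. A runnable toy of that shape
(stub emitters, no real code generation; the function names here are
made up, not any arch's actual helpers):

#include <stdio.h>

#define BPF_JMP        0x05
#define BPF_CALL       0x80
#define BPF_TAIL_CALL  0xf0

static void emit_helper_call(void) { puts("emit: helper call"); }
static void emit_tail_call(void)   { puts("emit: tail call"); }

static int build_insn(unsigned char code)
{
	switch (code) {
	case BPF_JMP | BPF_CALL:	/* ordinary helper call */
		emit_helper_call();
		break;
	case BPF_JMP | BPF_TAIL_CALL:	/* kernel-internal marker */
		emit_tail_call();
		break;
	default:
		return -1;		/* opcode not handled here */
	}
	return 0;
}

int main(void)
{
	return build_insn(BPF_JMP | BPF_TAIL_CALL);
}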
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 62d948f80730fdd94c587b4f0235c72cf5d3399a..a20ba40fcb73f901dd643581579629f4e2430d46 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -57,6 +57,9 @@ struct bpf_prog_aux;
 #define BPF_REG_AX             MAX_BPF_REG
 #define MAX_BPF_JIT_REG                (MAX_BPF_REG + 1)
 
+/* unused opcode to mark special call to bpf_tail_call() helper */
+#define BPF_TAIL_CALL  0xf0
+
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
  * addresses.
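
Why 0xf0 cannot collide: the operation code occupies the top four bits
of the opcode byte, and no BPF_JMP-class instruction defines op 0xf0,
so BPF_JMP | BPF_TAIL_CALL (0xf5) stays invisible to userspace. A small
self-contained check using the standard decode macros (values from
uapi/linux/bpf_common.h):

#include <assert.h>

#define BPF_CLASS(code) ((code) & 0x07)
#define BPF_OP(code)    ((code) & 0xf0)
#define BPF_JMP         0x05
#define BPF_TAIL_CALL   0xf0

int main(void)
{
	unsigned char code = BPF_JMP | BPF_TAIL_CALL;	/* 0xf5 */

	assert(BPF_CLASS(code) == BPF_JMP);	/* still a jump-class insn */
	assert(BPF_OP(code) == BPF_TAIL_CALL);	/* op nibble is the marker */
	return 0;
}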
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index dedf367f59bba529ded5603d61782bccb80782cb..339289402b961229fcb5b03cc090121704ef33d2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -824,7 +824,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
                [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
                /* Call instruction */
                [BPF_JMP | BPF_CALL] = &&JMP_CALL,
-               [BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
+               [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
                /* Jumps */
                [BPF_JMP | BPF_JA] = &&JMP_JA,
                [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
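
The interpreter's jump table is indexed directly by the raw insn->code
byte, which is why the internal marker only has to be unique within
those 8 bits. A toy model of the computed-goto dispatch pattern that
__bpf_prog_run() uses (GCC labels-as-values extension; not the kernel
code itself):

#include <stdio.h>

#define BPF_JMP        0x05
#define BPF_CALL       0x80
#define BPF_TAIL_CALL  0xf0

static void dispatch(unsigned char code)
{
	static const void *jumptable[256] = {
		[BPF_JMP | BPF_CALL]      = &&JMP_CALL,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
	};

	goto *jumptable[code];	/* only call with codes set above */
JMP_CALL:
	puts("JMP_CALL: call a BPF helper");
	return;
JMP_TAIL_CALL:
	puts("JMP_TAIL_CALL: jump into another BPF program");
	return;
}

int main(void)
{
	dispatch(BPF_JMP | BPF_TAIL_CALL);
	return 0;
}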
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 339c8a1371de0201df0f7ac799280168ea4d22e3..28113d0e8e92cb6693f2bee3fb92af82f750e3cd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3469,7 +3469,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                         * that doesn't support bpf_tail_call yet
                         */
                        insn->imm = 0;
-                       insn->code |= BPF_X;
+                       insn->code = BPF_JMP | BPF_TAIL_CALL;
                        continue;
                }