Merge branch 'bpf-new-branches'
author    David S. Miller <davem@davemloft.net>
          Wed, 9 Aug 2017 23:53:57 +0000 (16:53 -0700)
committer David S. Miller <davem@davemloft.net>
          Wed, 9 Aug 2017 23:53:57 +0000 (16:53 -0700)
Daniel Borkmann says:

====================
bpf: Add BPF_J{LT,LE,SLT,SLE} instructions

This set adds BPF_J{LT,LE,SLT,SLE} instructions to the BPF
insn set; the interpreter, JIT hardening code and all JITs
are updated to support the new instructions. The basic idea
is to reduce register pressure by avoiding rewrites into
BPF_J{GT,GE,SGT,SGE} with swapped operands. Once the
workaround for those rewrites is removed from LLVM, this
can result in shorter BPF programs, less stack usage and
less verification complexity. The first patch provides more
details on rationale and integration.
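
To illustrate (pseudo-asm, illustrative only): without
BPF_JLT, a test such as 'if (r1 < 10)' had to be rewritten
with swapped operands, forcing the constant into a register
first:

  r2 = 10              /* extra register just for the constant */
  if r2 > r1 goto L    /* BPF_JMP | BPF_JGT | BPF_X */

With the new instructions the same test is a single insn
against the immediate:

  if r1 < 10 goto L    /* BPF_JMP | BPF_JLT | BPF_K */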

Thanks a lot!

v1 -> v2:
  - Reworded commit msg in patch 1
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
16 files changed:
Documentation/networking/filter.txt
arch/arm64/net/bpf_jit.h
arch/arm64/net/bpf_jit_comp.c
arch/powerpc/net/bpf_jit.h
arch/powerpc/net/bpf_jit_comp64.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/net/bpf_jit_comp_64.c
arch/x86/net/bpf_jit_comp.c
drivers/net/ethernet/netronome/nfp/bpf/jit.c
include/uapi/linux/bpf.h
kernel/bpf/core.c
kernel/bpf/verifier.c
lib/test_bpf.c
net/core/filter.c
tools/include/uapi/linux/bpf.h
tools/testing/selftests/bpf/test_verifier.c

diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index d0fdba7d66e23564bcbb97d9945fbdf9c5066afa..6a0df8df6c43e8e17b38fe5a7cbb22209477bfcc 100644
@@ -906,6 +906,10 @@ If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
   BPF_JSGE  0x70  /* eBPF only: signed '>=' */
   BPF_CALL  0x80  /* eBPF only: function call */
   BPF_EXIT  0x90  /* eBPF only: function return */
+  BPF_JLT   0xa0  /* eBPF only: unsigned '<' */
+  BPF_JLE   0xb0  /* eBPF only: unsigned '<=' */
+  BPF_JSLT  0xc0  /* eBPF only: signed '<' */
+  BPF_JSLE  0xd0  /* eBPF only: signed '<=' */
 
 So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF
 and eBPF. There are only two registers in classic BPF, so it means A += X.
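
As a quick illustration of the new opcodes (a minimal sketch, not part
of the patch itself), a jump taken when R1 is unsigned-less-than 10 can
be constructed as:

  #include <linux/bpf.h>

  /* if (R1 < 10) skip one insn; the opcode byte is 0xa5:
   * BPF_JMP (0x05) | BPF_JLT (0xa0) | BPF_K (0x00)
   */
  struct bpf_insn jlt_example = {
          .code    = BPF_JMP | BPF_JLT | BPF_K,
          .dst_reg = BPF_REG_1,
          .off     = 1,   /* forward jump, in insns, when true */
          .imm     = 10,
  };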
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index b02a9268dfbf1093016dc3609e79681fa6da9dba..783de51a6c4e5b33725d4904be6b09011dd48605 100644
 #define A64_COND_NE    AARCH64_INSN_COND_NE /* != */
 #define A64_COND_CS    AARCH64_INSN_COND_CS /* unsigned >= */
 #define A64_COND_HI    AARCH64_INSN_COND_HI /* unsigned > */
+#define A64_COND_LS    AARCH64_INSN_COND_LS /* unsigned <= */
+#define A64_COND_CC    AARCH64_INSN_COND_CC /* unsigned < */
 #define A64_COND_GE    AARCH64_INSN_COND_GE /* signed >= */
 #define A64_COND_GT    AARCH64_INSN_COND_GT /* signed > */
+#define A64_COND_LE    AARCH64_INSN_COND_LE /* signed <= */
+#define A64_COND_LT    AARCH64_INSN_COND_LT /* signed < */
 #define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
 
 /* Unconditional branch (immediate) */
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index f32144b2e07fc13e39baa929bfb9f9c29796905f..ba38d403abb2fc92d8ea6ae9a6c3c38e70979062 100644
@@ -527,10 +527,14 @@ emit_bswap_uxt:
        /* IF (dst COND src) JUMP off */
        case BPF_JMP | BPF_JEQ | BPF_X:
        case BPF_JMP | BPF_JGT | BPF_X:
+       case BPF_JMP | BPF_JLT | BPF_X:
        case BPF_JMP | BPF_JGE | BPF_X:
+       case BPF_JMP | BPF_JLE | BPF_X:
        case BPF_JMP | BPF_JNE | BPF_X:
        case BPF_JMP | BPF_JSGT | BPF_X:
+       case BPF_JMP | BPF_JSLT | BPF_X:
        case BPF_JMP | BPF_JSGE | BPF_X:
+       case BPF_JMP | BPF_JSLE | BPF_X:
                emit(A64_CMP(1, dst, src), ctx);
 emit_cond_jmp:
                jmp_offset = bpf2a64_offset(i + off, i, ctx);
@@ -542,9 +546,15 @@ emit_cond_jmp:
                case BPF_JGT:
                        jmp_cond = A64_COND_HI;
                        break;
+               case BPF_JLT:
+                       jmp_cond = A64_COND_CC;
+                       break;
                case BPF_JGE:
                        jmp_cond = A64_COND_CS;
                        break;
+               case BPF_JLE:
+                       jmp_cond = A64_COND_LS;
+                       break;
                case BPF_JSET:
                case BPF_JNE:
                        jmp_cond = A64_COND_NE;
@@ -552,9 +562,15 @@ emit_cond_jmp:
                case BPF_JSGT:
                        jmp_cond = A64_COND_GT;
                        break;
+               case BPF_JSLT:
+                       jmp_cond = A64_COND_LT;
+                       break;
                case BPF_JSGE:
                        jmp_cond = A64_COND_GE;
                        break;
+               case BPF_JSLE:
+                       jmp_cond = A64_COND_LE;
+                       break;
                default:
                        return -EFAULT;
                }
@@ -566,10 +582,14 @@ emit_cond_jmp:
        /* IF (dst COND imm) JUMP off */
        case BPF_JMP | BPF_JEQ | BPF_K:
        case BPF_JMP | BPF_JGT | BPF_K:
+       case BPF_JMP | BPF_JLT | BPF_K:
        case BPF_JMP | BPF_JGE | BPF_K:
+       case BPF_JMP | BPF_JLE | BPF_K:
        case BPF_JMP | BPF_JNE | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_K:
+       case BPF_JMP | BPF_JSLT | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_K:
+       case BPF_JMP | BPF_JSLE | BPF_K:
                emit_a64_mov_i(1, tmp, imm, ctx);
                emit(A64_CMP(1, dst, tmp), ctx);
                goto emit_cond_jmp;
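
For reference, a BPF_JMP | BPF_JLT | BPF_X insn thus becomes a
flag-setting compare plus a conditional branch on arm64 (sketch of the
emitted sequence; actual registers depend on the JIT's mapping):

  cmp  x1, x2          // A64_CMP(1, dst, src)
  b.cc <target>        // A64_COND_CC: unsigned '<' (carry clear)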
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 30cf03f534280b9c956f5c78aab02ae4223c1061..47fc6660845d3735545efc718ae013ee3c1609a8 100644
@@ -263,6 +263,7 @@ static inline bool is_nearbranch(int offset)
 #define COND_EQ                (CR0_EQ | COND_CMP_TRUE)
 #define COND_NE                (CR0_EQ | COND_CMP_FALSE)
 #define COND_LT                (CR0_LT | COND_CMP_TRUE)
+#define COND_LE                (CR0_GT | COND_CMP_FALSE)
 
 #endif
 
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 861c5af1c9c4bdd85dba552b2e41a9587162e3a7..faf20163bd4cd36c28d9ab60e7ca58f2b741798f 100644
@@ -795,12 +795,24 @@ emit_clear:
                case BPF_JMP | BPF_JSGT | BPF_X:
                        true_cond = COND_GT;
                        goto cond_branch;
+               case BPF_JMP | BPF_JLT | BPF_K:
+               case BPF_JMP | BPF_JLT | BPF_X:
+               case BPF_JMP | BPF_JSLT | BPF_K:
+               case BPF_JMP | BPF_JSLT | BPF_X:
+                       true_cond = COND_LT;
+                       goto cond_branch;
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JSGE | BPF_K:
                case BPF_JMP | BPF_JSGE | BPF_X:
                        true_cond = COND_GE;
                        goto cond_branch;
+               case BPF_JMP | BPF_JLE | BPF_K:
+               case BPF_JMP | BPF_JLE | BPF_X:
+               case BPF_JMP | BPF_JSLE | BPF_K:
+               case BPF_JMP | BPF_JSLE | BPF_X:
+                       true_cond = COND_LE;
+                       goto cond_branch;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                        true_cond = COND_EQ;
@@ -817,14 +829,18 @@ emit_clear:
 cond_branch:
                        switch (code) {
                        case BPF_JMP | BPF_JGT | BPF_X:
+                       case BPF_JMP | BPF_JLT | BPF_X:
                        case BPF_JMP | BPF_JGE | BPF_X:
+                       case BPF_JMP | BPF_JLE | BPF_X:
                        case BPF_JMP | BPF_JEQ | BPF_X:
                        case BPF_JMP | BPF_JNE | BPF_X:
                                /* unsigned comparison */
                                PPC_CMPLD(dst_reg, src_reg);
                                break;
                        case BPF_JMP | BPF_JSGT | BPF_X:
+                       case BPF_JMP | BPF_JSLT | BPF_X:
                        case BPF_JMP | BPF_JSGE | BPF_X:
+                       case BPF_JMP | BPF_JSLE | BPF_X:
                                /* signed comparison */
                                PPC_CMPD(dst_reg, src_reg);
                                break;
@@ -834,7 +850,9 @@ cond_branch:
                        case BPF_JMP | BPF_JNE | BPF_K:
                        case BPF_JMP | BPF_JEQ | BPF_K:
                        case BPF_JMP | BPF_JGT | BPF_K:
+                       case BPF_JMP | BPF_JLT | BPF_K:
                        case BPF_JMP | BPF_JGE | BPF_K:
+                       case BPF_JMP | BPF_JLE | BPF_K:
                                /*
                                 * Need sign-extended load, so only positive
                                 * values can be used as imm in cmpldi
@@ -849,7 +867,9 @@ cond_branch:
                                }
                                break;
                        case BPF_JMP | BPF_JSGT | BPF_K:
+                       case BPF_JMP | BPF_JSLT | BPF_K:
                        case BPF_JMP | BPF_JSGE | BPF_K:
+                       case BPF_JMP | BPF_JSLE | BPF_K:
                                /*
                                 * signed comparison, so any 16-bit value
                                 * can be used in cmpdi
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 1803797fc885cf799337b5d61f30ae726628b8d6..8ec88497a28dace3e25e371e4347693c053678eb 100644
@@ -1093,15 +1093,27 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
        case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
                mask = 0x2000; /* jh */
                goto branch_ks;
+       case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
+               mask = 0x4000; /* jl */
+               goto branch_ks;
        case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
                mask = 0xa000; /* jhe */
                goto branch_ks;
+       case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
+               mask = 0xc000; /* jle */
+               goto branch_ks;
        case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
                mask = 0x2000; /* jh */
                goto branch_ku;
+       case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
+               mask = 0x4000; /* jl */
+               goto branch_ku;
        case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
                mask = 0xa000; /* jhe */
                goto branch_ku;
+       case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
+               mask = 0xc000; /* jle */
+               goto branch_ku;
        case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
                mask = 0x7000; /* jne */
                goto branch_ku;
@@ -1119,15 +1131,27 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
        case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
                mask = 0x2000; /* jh */
                goto branch_xs;
+       case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
+               mask = 0x4000; /* jl */
+               goto branch_xs;
        case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
                mask = 0xa000; /* jhe */
                goto branch_xs;
+       case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
+               mask = 0xc000; /* jle */
+               goto branch_xs;
        case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
                mask = 0x2000; /* jh */
                goto branch_xu;
+       case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
+               mask = 0x4000; /* jl */
+               goto branch_xu;
        case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
                mask = 0xa000; /* jhe */
                goto branch_xu;
+       case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
+               mask = 0xc000; /* jle */
+               goto branch_xu;
        case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
                mask = 0x7000; /* jne */
                goto branch_xu;
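
The s390 masks above are branch-on-condition masks over the condition
code set by the preceding compare (CC0 = operands equal, CC1 = first
operand low, CC2 = first operand high). Summarized (my annotation, not
from the patch):

  0x2000  jh   (CC2)            '>'
  0x4000  jl   (CC1)            '<'
  0xa000  jhe  (CC0|CC2)        '>='
  0xc000  jle  (CC0|CC1)        '<='
  0x7000  jne  (CC1|CC2|CC3)    '!='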
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 8799ae9a8788f6fd8628c35974ec172d6aab61cc..c340af7b13717e7fa63039ec27d863a106b3fd45 100644
@@ -128,6 +128,8 @@ static u32 WDISP10(u32 off)
 
 #define BA             (BRANCH | CONDA)
 #define BG             (BRANCH | CONDG)
+#define BL             (BRANCH | CONDL)
+#define BLE            (BRANCH | CONDLE)
 #define BGU            (BRANCH | CONDGU)
 #define BLEU           (BRANCH | CONDLEU)
 #define BGE            (BRANCH | CONDGE)
@@ -715,9 +717,15 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
                case BPF_JGT:
                        br_opcode = BGU;
                        break;
+               case BPF_JLT:
+                       br_opcode = BLU;
+                       break;
                case BPF_JGE:
                        br_opcode = BGEU;
                        break;
+               case BPF_JLE:
+                       br_opcode = BLEU;
+                       break;
                case BPF_JSET:
                case BPF_JNE:
                        br_opcode = BNE;
@@ -725,9 +733,15 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
                case BPF_JSGT:
                        br_opcode = BG;
                        break;
+               case BPF_JSLT:
+                       br_opcode = BL;
+                       break;
                case BPF_JSGE:
                        br_opcode = BGE;
                        break;
+               case BPF_JSLE:
+                       br_opcode = BLE;
+                       break;
                default:
                        /* Make sure we dont leak kernel information to the
                         * user.
@@ -746,18 +760,30 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
                case BPF_JGT:
                        cbcond_opcode = CBCONDGU;
                        break;
+               case BPF_JLT:
+                       cbcond_opcode = CBCONDLU;
+                       break;
                case BPF_JGE:
                        cbcond_opcode = CBCONDGEU;
                        break;
+               case BPF_JLE:
+                       cbcond_opcode = CBCONDLEU;
+                       break;
                case BPF_JNE:
                        cbcond_opcode = CBCONDNE;
                        break;
                case BPF_JSGT:
                        cbcond_opcode = CBCONDG;
                        break;
+               case BPF_JSLT:
+                       cbcond_opcode = CBCONDL;
+                       break;
                case BPF_JSGE:
                        cbcond_opcode = CBCONDGE;
                        break;
+               case BPF_JSLE:
+                       cbcond_opcode = CBCONDLE;
+                       break;
                default:
                        /* Make sure we dont leak kernel information to the
                         * user.
@@ -1176,10 +1202,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        /* IF (dst COND src) JUMP off */
        case BPF_JMP | BPF_JEQ | BPF_X:
        case BPF_JMP | BPF_JGT | BPF_X:
+       case BPF_JMP | BPF_JLT | BPF_X:
        case BPF_JMP | BPF_JGE | BPF_X:
+       case BPF_JMP | BPF_JLE | BPF_X:
        case BPF_JMP | BPF_JNE | BPF_X:
        case BPF_JMP | BPF_JSGT | BPF_X:
+       case BPF_JMP | BPF_JSLT | BPF_X:
        case BPF_JMP | BPF_JSGE | BPF_X:
+       case BPF_JMP | BPF_JSLE | BPF_X:
        case BPF_JMP | BPF_JSET | BPF_X: {
                int err;
 
@@ -1191,10 +1221,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        /* IF (dst COND imm) JUMP off */
        case BPF_JMP | BPF_JEQ | BPF_K:
        case BPF_JMP | BPF_JGT | BPF_K:
+       case BPF_JMP | BPF_JLT | BPF_K:
        case BPF_JMP | BPF_JGE | BPF_K:
+       case BPF_JMP | BPF_JLE | BPF_K:
        case BPF_JMP | BPF_JNE | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_K:
+       case BPF_JMP | BPF_JSLT | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_K:
+       case BPF_JMP | BPF_JSLE | BPF_K:
        case BPF_JMP | BPF_JSET | BPF_K: {
                int err;
 
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index e1324f280e06eb86c9aa9c2249cdba6b6c036c4b..8194696e2805852a8246bf1db2a5862d5758f76b 100644
@@ -94,7 +94,9 @@ static int bpf_size_to_x86_bytes(int bpf_size)
 #define X86_JNE 0x75
 #define X86_JBE 0x76
 #define X86_JA  0x77
+#define X86_JL  0x7C
 #define X86_JGE 0x7D
+#define X86_JLE 0x7E
 #define X86_JG  0x7F
 
 static void bpf_flush_icache(void *start, void *end)
@@ -888,9 +890,13 @@ xadd:                      if (is_imm8(insn->off))
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JNE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_X:
+               case BPF_JMP | BPF_JLT | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_X:
+               case BPF_JMP | BPF_JLE | BPF_X:
                case BPF_JMP | BPF_JSGT | BPF_X:
+               case BPF_JMP | BPF_JSLT | BPF_X:
                case BPF_JMP | BPF_JSGE | BPF_X:
+               case BPF_JMP | BPF_JSLE | BPF_X:
                        /* cmp dst_reg, src_reg */
                        EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
                              add_2reg(0xC0, dst_reg, src_reg));
@@ -911,9 +917,13 @@ xadd:                      if (is_imm8(insn->off))
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JNE | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_K:
+               case BPF_JMP | BPF_JLT | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_K:
+               case BPF_JMP | BPF_JLE | BPF_K:
                case BPF_JMP | BPF_JSGT | BPF_K:
+               case BPF_JMP | BPF_JSLT | BPF_K:
                case BPF_JMP | BPF_JSGE | BPF_K:
+               case BPF_JMP | BPF_JSLE | BPF_K:
                        /* cmp dst_reg, imm8/32 */
                        EMIT1(add_1mod(0x48, dst_reg));
 
@@ -935,18 +945,34 @@ emit_cond_jmp:            /* convert BPF opcode to x86 */
                                /* GT is unsigned '>', JA in x86 */
                                jmp_cond = X86_JA;
                                break;
+                       case BPF_JLT:
+                               /* LT is unsigned '<', JB in x86 */
+                               jmp_cond = X86_JB;
+                               break;
                        case BPF_JGE:
                                /* GE is unsigned '>=', JAE in x86 */
                                jmp_cond = X86_JAE;
                                break;
+                       case BPF_JLE:
+                               /* LE is unsigned '<=', JBE in x86 */
+                               jmp_cond = X86_JBE;
+                               break;
                        case BPF_JSGT:
                                /* signed '>', GT in x86 */
                                jmp_cond = X86_JG;
                                break;
+                       case BPF_JSLT:
+                               /* signed '<', LT in x86 */
+                               jmp_cond = X86_JL;
+                               break;
                        case BPF_JSGE:
                                /* signed '>=', GE in x86 */
                                jmp_cond = X86_JGE;
                                break;
+                       case BPF_JSLE:
+                               /* signed '<=', LE in x86 */
+                               jmp_cond = X86_JLE;
+                               break;
                        default: /* to silence gcc warning */
                                return -EFAULT;
                        }
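
Concretely, BPF_JMP | BPF_JLT | BPF_X ends up as a cmp plus jb
(illustrative; rdi/rsi assume the JIT's usual R1/R2 mapping):

  cmp rdi, rsi         /* EMIT3(add_2mod(0x48, ...), 0x39, ...) */
  jb  <target>         /* X86_JB: 0x72 short form, 0x0f 0x82 near */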
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 8e57fda6b8b54135729fc0a4a356a0411adfd304..239dfbe8a0a13099791d32d23fb12b347d8f882c 100644
@@ -1238,6 +1238,16 @@ static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
 }
 
+static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
+}
+
+static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
+}
+
 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
        const struct bpf_insn *insn = &meta->insn;
@@ -1325,6 +1335,16 @@ static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
 }
 
+static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
+}
+
+static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
+}
+
 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
        return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -1383,11 +1403,15 @@ static const instr_cb_t instr_cb[256] = {
        [BPF_JMP | BPF_JEQ | BPF_K] =   jeq_imm,
        [BPF_JMP | BPF_JGT | BPF_K] =   jgt_imm,
        [BPF_JMP | BPF_JGE | BPF_K] =   jge_imm,
+       [BPF_JMP | BPF_JLT | BPF_K] =   jlt_imm,
+       [BPF_JMP | BPF_JLE | BPF_K] =   jle_imm,
        [BPF_JMP | BPF_JSET | BPF_K] =  jset_imm,
        [BPF_JMP | BPF_JNE | BPF_K] =   jne_imm,
        [BPF_JMP | BPF_JEQ | BPF_X] =   jeq_reg,
        [BPF_JMP | BPF_JGT | BPF_X] =   jgt_reg,
        [BPF_JMP | BPF_JGE | BPF_X] =   jge_reg,
+       [BPF_JMP | BPF_JLT | BPF_X] =   jlt_reg,
+       [BPF_JMP | BPF_JLE | BPF_X] =   jle_reg,
        [BPF_JMP | BPF_JSET | BPF_X] =  jset_reg,
        [BPF_JMP | BPF_JNE | BPF_X] =   jne_reg,
        [BPF_JMP | BPF_EXIT] =          goto_out,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 1d06be1569b16aacdcf725c19e07853c1025f8b0..91da8371a2d05a030951e21381bcd85c4881d062 100644
 #define BPF_FROM_LE    BPF_TO_LE
 #define BPF_FROM_BE    BPF_TO_BE
 
+/* jmp encodings */
 #define BPF_JNE                0x50    /* jump != */
+#define BPF_JLT                0xa0    /* LT is unsigned, '<' */
+#define BPF_JLE                0xb0    /* LE is unsigned, '<=' */
 #define BPF_JSGT       0x60    /* SGT is signed '>', GT in x86 */
 #define BPF_JSGE       0x70    /* SGE is signed '>=', GE in x86 */
+#define BPF_JSLT       0xc0    /* SLT is signed, '<' */
+#define BPF_JSLE       0xd0    /* SLE is signed, '<=' */
 #define BPF_CALL       0x80    /* function call */
 #define BPF_EXIT       0x90    /* function return */
 
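
The opcode byte of a jump insn is op | src | class, so the new
encodings take the previously unused 0xa0-0xd0 slots of BPF_JMP. Two
worked examples using the values above:

  BPF_JMP | BPF_JLT  | BPF_K  =  0x05 | 0xa0 | 0x00  =  0xa5
  BPF_JMP | BPF_JSLE | BPF_X  =  0x05 | 0xd0 | 0x08  =  0xdd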
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ad5f55922a136f19af951992e5a973dfa5de9bb6..c69e7f5bfde715db4afd6ebb68190691d2efae8a 100644
@@ -595,9 +595,13 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
        case BPF_JMP | BPF_JEQ  | BPF_K:
        case BPF_JMP | BPF_JNE  | BPF_K:
        case BPF_JMP | BPF_JGT  | BPF_K:
+       case BPF_JMP | BPF_JLT  | BPF_K:
        case BPF_JMP | BPF_JGE  | BPF_K:
+       case BPF_JMP | BPF_JLE  | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_K:
+       case BPF_JMP | BPF_JSLT | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_K:
+       case BPF_JMP | BPF_JSLE | BPF_K:
        case BPF_JMP | BPF_JSET | BPF_K:
                /* Accommodate for extra offset in case of a backjump. */
                off = from->off;
@@ -833,12 +837,20 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
                [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
                [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
                [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
+               [BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
+               [BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
                [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
                [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
+               [BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
+               [BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
                [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
                [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
+               [BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
+               [BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
                [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
                [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
+               [BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
+               [BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
                [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
                [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
                /* Program return */
@@ -1073,6 +1085,18 @@ out:
                        CONT_JMP;
                }
                CONT;
+       JMP_JLT_X:
+               if (DST < SRC) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JLT_K:
+               if (DST < IMM) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
        JMP_JGE_X:
                if (DST >= SRC) {
                        insn += insn->off;
@@ -1085,6 +1109,18 @@ out:
                        CONT_JMP;
                }
                CONT;
+       JMP_JLE_X:
+               if (DST <= SRC) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JLE_K:
+               if (DST <= IMM) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
        JMP_JSGT_X:
                if (((s64) DST) > ((s64) SRC)) {
                        insn += insn->off;
@@ -1097,6 +1133,18 @@ out:
                        CONT_JMP;
                }
                CONT;
+       JMP_JSLT_X:
+               if (((s64) DST) < ((s64) SRC)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JSLT_K:
+               if (((s64) DST) < ((s64) IMM)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
        JMP_JSGE_X:
                if (((s64) DST) >= ((s64) SRC)) {
                        insn += insn->off;
@@ -1109,6 +1157,18 @@ out:
                        CONT_JMP;
                }
                CONT;
+       JMP_JSLE_X:
+               if (((s64) DST) <= ((s64) SRC)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JSLE_K:
+               if (((s64) DST) <= ((s64) IMM)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
        JMP_JSET_X:
                if (DST & SRC) {
                        insn += insn->off;
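
The JMP_* labels above feed the interpreter's computed-goto dispatch
table. A minimal standalone sketch of that pattern (GCC/clang &&label
extension; not kernel code):

  static int run(const int *ops)
  {
          static const void *jt[] = { &&OP_INC, &&OP_HALT };
          int pc = 0, acc = 0;

          goto *jt[ops[pc]];
  OP_INC:                 /* opcode 0: count and dispatch next insn */
          acc++;
          goto *jt[ops[++pc]];
  OP_HALT:                /* opcode 1: stop */
          return acc;
  }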
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8160a81a40bfa007491b4d0c1073f9db5a7f946c..ecc590e01a1dfb354de175bac435bb1e4a89ac8a 100644
@@ -312,11 +312,15 @@ static const char *const bpf_jmp_string[16] = {
        [BPF_JA >> 4]   = "jmp",
        [BPF_JEQ >> 4]  = "==",
        [BPF_JGT >> 4]  = ">",
+       [BPF_JLT >> 4]  = "<",
        [BPF_JGE >> 4]  = ">=",
+       [BPF_JLE >> 4]  = "<=",
        [BPF_JSET >> 4] = "&",
        [BPF_JNE >> 4]  = "!=",
        [BPF_JSGT >> 4] = "s>",
+       [BPF_JSLT >> 4] = "s<",
        [BPF_JSGE >> 4] = "s>=",
+       [BPF_JSLE >> 4] = "s<=",
        [BPF_CALL >> 4] = "call",
        [BPF_EXIT >> 4] = "exit",
 };
@@ -2383,27 +2387,37 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
                 */
                return;
 
-       /* LLVM can generate two kind of checks:
+       /* LLVM can generate four kinds of checks:
         *
-        * Type 1:
+        * Type 1/2:
         *
         *   r2 = r3;
         *   r2 += 8;
         *   if (r2 > pkt_end) goto <handle exception>
         *   <access okay>
         *
+        *   r2 = r3;
+        *   r2 += 8;
+        *   if (r2 < pkt_end) goto <access okay>
+        *   <handle exception>
+        *
         *   Where:
         *     r2 == dst_reg, pkt_end == src_reg
         *     r2=pkt(id=n,off=8,r=0)
         *     r3=pkt(id=n,off=0,r=0)
         *
-        * Type 2:
+        * Type 3/4:
         *
         *   r2 = r3;
         *   r2 += 8;
         *   if (pkt_end >= r2) goto <access okay>
         *   <handle exception>
         *
+        *   r2 = r3;
+        *   r2 += 8;
+        *   if (pkt_end <= r2) goto <handle exception>
+        *   <access okay>
+        *
         *   Where:
         *     pkt_end == dst_reg, r2 == src_reg
         *     r2=pkt(id=n,off=8,r=0)
@@ -2471,6 +2485,14 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
                false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
                true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
                break;
+       case BPF_JLT:
+               false_reg->umin_value = max(false_reg->umin_value, val);
+               true_reg->umax_value = min(true_reg->umax_value, val - 1);
+               break;
+       case BPF_JSLT:
+               false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
+               true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
+               break;
        case BPF_JGE:
                false_reg->umax_value = min(false_reg->umax_value, val - 1);
                true_reg->umin_value = max(true_reg->umin_value, val);
@@ -2479,6 +2501,14 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
                false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
                true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
                break;
+       case BPF_JLE:
+               false_reg->umin_value = max(false_reg->umin_value, val + 1);
+               true_reg->umax_value = min(true_reg->umax_value, val);
+               break;
+       case BPF_JSLE:
+               false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
+               true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
+               break;
        default:
                break;
        }
@@ -2527,6 +2557,14 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
                true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
                false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
                break;
+       case BPF_JLT:
+               true_reg->umin_value = max(true_reg->umin_value, val + 1);
+               false_reg->umax_value = min(false_reg->umax_value, val);
+               break;
+       case BPF_JSLT:
+               true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
+               false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
+               break;
        case BPF_JGE:
                true_reg->umax_value = min(true_reg->umax_value, val);
                false_reg->umin_value = max(false_reg->umin_value, val + 1);
@@ -2535,6 +2573,14 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
                true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
                false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
                break;
+       case BPF_JLE:
+               true_reg->umin_value = max(true_reg->umin_value, val);
+               false_reg->umax_value = min(false_reg->umax_value, val - 1);
+               break;
+       case BPF_JSLE:
+               true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
+               false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
+               break;
        default:
                break;
        }
@@ -2659,7 +2705,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
        u8 opcode = BPF_OP(insn->code);
        int err;
 
-       if (opcode > BPF_EXIT) {
+       if (opcode > BPF_JSLE) {
                verbose("invalid BPF_JMP opcode %x\n", opcode);
                return -EINVAL;
        }
@@ -2761,10 +2807,18 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
                   dst_reg->type == PTR_TO_PACKET &&
                   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
                find_good_pkt_pointers(this_branch, dst_reg);
+       } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
+                  dst_reg->type == PTR_TO_PACKET &&
+                  regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+               find_good_pkt_pointers(other_branch, dst_reg);
        } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
                   dst_reg->type == PTR_TO_PACKET_END &&
                   regs[insn->src_reg].type == PTR_TO_PACKET) {
                find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
+       } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
+                  dst_reg->type == PTR_TO_PACKET_END &&
+                  regs[insn->src_reg].type == PTR_TO_PACKET) {
+               find_good_pkt_pointers(this_branch, &regs[insn->src_reg]);
        } else if (is_pointer_value(env, insn->dst_reg)) {
                verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
                return -EACCES;
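
To see why the new reg_set_min_max() cases look the way they do, take
'if (r3 < 32)' (BPF_JLT | BPF_K, val = 32): on the true branch r3 is
unsigned-less-than 32, so its umax can be clamped to val - 1 = 31; on
the false branch r3 >= 32, so its umin can be raised to val = 32. In
the patch's terms:

  /* BPF_JLT, unsigned 'reg < val' */
  true_reg->umax_value  = min(true_reg->umax_value, val - 1);
  false_reg->umin_value = max(false_reg->umin_value, val);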
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index d9d5a410955c948b4e923d729417bc9dbb91e514..aa8812ae6776ee31712fe88c58da4048ff9c31e4 100644
@@ -951,6 +951,32 @@ static struct bpf_test tests[] = {
                { 4, 4, 4, 3, 3 },
                { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
        },
+       {
+               "JGE (jt 0), test 1",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+                       BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_RET | BPF_K, MAX_K)
+               },
+               CLASSIC,
+               { 4, 4, 4, 3, 3 },
+               { { 2, 0 }, { 3, 1 }, { 4, 1 } },
+       },
+       {
+               "JGE (jt 0), test 2",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+                       BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_RET | BPF_K, MAX_K)
+               },
+               CLASSIC,
+               { 4, 4, 5, 3, 3 },
+               { { 4, 1 }, { 5, 1 }, { 6, MAX_K } },
+       },
        {
                "JGE",
                .u.insns = {
@@ -4492,6 +4518,35 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       /* BPF_JMP | BPF_JSLT | BPF_K */
+       {
+               "JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
+                       BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSLT_K: Signed jump: if (-1 < -1) return 0",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+                       BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JSGT | BPF_K */
        {
                "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
@@ -4521,6 +4576,73 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       /* BPF_JMP | BPF_JSLE | BPF_K */
+       {
+               "JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
+                       BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+                       BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSLE_K: Signed jump: value walk 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_JMP_IMM(BPF_JSLE, R1, 0, 6),
+                       BPF_ALU64_IMM(BPF_SUB, R1, 1),
+                       BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
+                       BPF_ALU64_IMM(BPF_SUB, R1, 1),
+                       BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
+                       BPF_ALU64_IMM(BPF_SUB, R1, 1),
+                       BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
+                       BPF_EXIT_INSN(),                /* bad exit */
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),  /* good exit */
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSLE_K: Signed jump: value walk 2",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
+                       BPF_ALU64_IMM(BPF_SUB, R1, 2),
+                       BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
+                       BPF_ALU64_IMM(BPF_SUB, R1, 2),
+                       BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
+                       BPF_EXIT_INSN(),                /* bad exit */
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),  /* good exit */
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JSGE | BPF_K */
        {
                "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
@@ -4617,6 +4739,35 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       /* BPF_JMP | BPF_JLT | BPF_K */
+       {
+               "JMP_JLT_K: if (2 < 3) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 2),
+                       BPF_JMP_IMM(BPF_JLT, R1, 3, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JGT_K: Unsigned jump: if (1 < -1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 1),
+                       BPF_JMP_IMM(BPF_JLT, R1, -1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JGE | BPF_K */
        {
                "JMP_JGE_K: if (3 >= 2) return 1",
@@ -4632,6 +4783,21 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       /* BPF_JMP | BPF_JLE | BPF_K */
+       {
+               "JMP_JLE_K: if (2 <= 3) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 2),
+                       BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JGT | BPF_K jump backwards */
        {
                "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
@@ -4662,6 +4828,36 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       /* BPF_JMP | BPF_JLT | BPF_K jump backwards */
+       {
+               "JMP_JGT_K: if (2 < 3) return 1 (jump backwards)",
+               .u.insns_int = {
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
+                       BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */
+                       BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JLE_K: if (3 <= 3) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JNE | BPF_K */
        {
                "JMP_JNE_K: if (3 != 2) return 1",
@@ -4752,6 +4948,37 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       /* BPF_JMP | BPF_JSLT | BPF_X */
+       {
+               "JMP_JSLT_X: Signed jump: if (-2 < -1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -1),
+                       BPF_LD_IMM64(R2, -2),
+                       BPF_JMP_REG(BPF_JSLT, R2, R1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSLT_X: Signed jump: if (-1 < -1) return 0",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_LD_IMM64(R1, -1),
+                       BPF_LD_IMM64(R2, -1),
+                       BPF_JMP_REG(BPF_JSLT, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JSGE | BPF_X */
        {
                "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
@@ -4783,6 +5010,37 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       /* BPF_JMP | BPF_JSLE | BPF_X */
+       {
+               "JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -1),
+                       BPF_LD_IMM64(R2, -2),
+                       BPF_JMP_REG(BPF_JSLE, R2, R1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -1),
+                       BPF_LD_IMM64(R2, -1),
+                       BPF_JMP_REG(BPF_JSLE, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JGT | BPF_X */
        {
                "JMP_JGT_X: if (3 > 2) return 1",
@@ -4814,6 +5072,37 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       /* BPF_JMP | BPF_JLT | BPF_X */
+       {
+               "JMP_JLT_X: if (2 < 3) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 2),
+                       BPF_JMP_REG(BPF_JLT, R2, R1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JLT_X: Unsigned jump: if (1 < -1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -1),
+                       BPF_LD_IMM64(R2, 1),
+                       BPF_JMP_REG(BPF_JLT, R2, R1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JGE | BPF_X */
        {
                "JMP_JGE_X: if (3 >= 2) return 1",
@@ -4845,6 +5134,37 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       /* BPF_JMP | BPF_JLE | BPF_X */
+       {
+               "JMP_JLE_X: if (2 <= 3) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 2),
+                       BPF_JMP_REG(BPF_JLE, R2, R1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JLE_X: if (3 <= 3) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 3),
+                       BPF_JMP_REG(BPF_JLE, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        {
                /* Mainly testing JIT + imm64 here. */
                "JMP_JGE_X: ldimm64 test 1",
@@ -4890,6 +5210,50 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       {
+               "JMP_JLE_X: ldimm64 test 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 2),
+                       BPF_JMP_REG(BPF_JLE, R2, R1, 2),
+                       BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+                       BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xeeeeeeeeU } },
+       },
+       {
+               "JMP_JLE_X: ldimm64 test 2",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 2),
+                       BPF_JMP_REG(BPF_JLE, R2, R1, 0),
+                       BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffffU } },
+       },
+       {
+               "JMP_JLE_X: ldimm64 test 3",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 2),
+                       BPF_JMP_REG(BPF_JLE, R2, R1, 4),
+                       BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+                       BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JNE | BPF_X */
        {
                "JMP_JNE_X: if (3 != 2) return 1",
diff --git a/net/core/filter.c b/net/core/filter.c
index 78d00933dbe76f503ca3f892649da47272819977..5afe3ac191ecd4f5f7de2106f22f7901746eb354 100644
@@ -514,14 +514,27 @@ do_pass:
                                break;
                        }
 
-                       /* Convert JEQ into JNE when 'jump_true' is next insn. */
-                       if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
-                               insn->code = BPF_JMP | BPF_JNE | bpf_src;
+                       /* Convert some jumps when 'jump_true' is next insn. */
+                       if (fp->jt == 0) {
+                               switch (BPF_OP(fp->code)) {
+                               case BPF_JEQ:
+                                       insn->code = BPF_JMP | BPF_JNE | bpf_src;
+                                       break;
+                               case BPF_JGT:
+                                       insn->code = BPF_JMP | BPF_JLE | bpf_src;
+                                       break;
+                               case BPF_JGE:
+                                       insn->code = BPF_JMP | BPF_JLT | bpf_src;
+                                       break;
+                               default:
+                                       goto jmp_rest;
+                               }
+
                                target = i + fp->jf + 1;
                                BPF_EMIT_JMP;
                                break;
                        }
-
+jmp_rest:
                        /* Other jumps are mapped into two insns: Jxx and JA. */
                        target = i + fp->jt + 1;
                        insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
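
A worked example of the new conversion (illustrative cBPF, not from the
patch): 'jgt #5, jt 0, jf 3' has its true branch fall through, so
instead of the usual Jxx + JA pair it is flipped into one eBPF insn
that jumps on the inverted condition:

  /* cBPF: if (A > 5) fall through, else skip 3 insns */
  /* eBPF: if (A <= 5) goto +3  -- BPF_JMP | BPF_JLE | BPF_K */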
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 8d9bfcca3fe4831a08f377777e644c74ebabae63..bf3b2e23045569da71ad152df3949febe2864da2 100644
 #define BPF_FROM_LE    BPF_TO_LE
 #define BPF_FROM_BE    BPF_TO_BE
 
+/* jmp encodings */
 #define BPF_JNE                0x50    /* jump != */
+#define BPF_JLT                0xa0    /* LT is unsigned, '<' */
+#define BPF_JLE                0xb0    /* LE is unsigned, '<=' */
 #define BPF_JSGT       0x60    /* SGT is signed '>', GT in x86 */
 #define BPF_JSGE       0x70    /* SGE is signed '>=', GE in x86 */
+#define BPF_JSLT       0xc0    /* SLT is signed, '<' */
+#define BPF_JSLE       0xd0    /* SLE is signed, '<=' */
 #define BPF_CALL       0x80    /* function call */
 #define BPF_EXIT       0x90    /* function return */
 
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index ab0cd11983265d6f4b3fea380e6cbce6dea055da..1b767127e141fae711992fa7d01c3262def662fc 100644
@@ -2830,6 +2830,79 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
        },
+       {
+               "direct packet access: test25 (marking on <, good access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "direct packet access: test26 (marking on <, bad access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+               },
+               .result = REJECT,
+               .errstr = "invalid access to packet",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "direct packet access: test27 (marking on <=, good access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "direct packet access: test28 (marking on <=, bad access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+               },
+               .result = REJECT,
+               .errstr = "invalid access to packet",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
        {
                "helper access to packet: test1, valid packet_ptr range",
                .insns = {
@@ -4488,6 +4561,246 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
+       {
+               "helper access to map: bounds check using <, good access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using <, bad access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = REJECT,
+               .errstr = "R1 unbounded memory access",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using <=, good access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using <=, bad access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = REJECT,
+               .errstr = "R1 unbounded memory access",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
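
The four map tests above check that a single unsigned '<' or '<=' against an
immediate is enough to put an upper bound on an index read out of a map
value. A rough C equivalent, where the name use_elem and the assumption that
'val' points to a sufficiently large map element are illustrative:

	/* Illustrative sketch, not from this commit. */
	static int use_elem(void *val)
	{
		unsigned int idx = *(unsigned int *)val;

		if (idx < 32)			/* BPF_JLT against imm 32 */
			((char *)val)[idx] = 0;	/* idx proven in [0, 31] */
		/* on the unchecked path idx stays unbounded, and a store
		 * through it fails with "R1 unbounded memory access" */
		return 0;
	}
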
+       {
+               "helper access to map: bounds check using s<, good access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using s<, good access 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using s<, bad access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = REJECT,
+               .errstr = "R1 min value is negative",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
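
The signed s< tests need one extra step: 's< 32' alone bounds only the
maximum, so a second, backward JSLT branch supplies the lower bound before
the value is used as an offset. The "good access 2" variant gets away with a
-3 lower bound only because its 32-bit (BPF_W) load is zero-extended and
hence already non-negative; the "bad access" variant loads a full 64 bits
(BPF_DW), so [-3, 31] still admits negative offsets and fails with "R1 min
value is negative". A hedged sketch with hypothetical names:

	/* Illustrative sketch, not from this commit. */
	static int use_elem_signed(void *val)
	{
		long idx = *(long *)val;	/* 64-bit load: min unbounded */

		if (idx < 32 && idx >= 0)	/* JSLT 32, then a lower bound */
			((char *)val)[idx] = 0;	/* idx in [0, 31]: accepted */
		/* pinning the minimum at -3 instead of 0 leaves a possibly
		 * negative offset, which the verifier rejects */
		return 0;
	}
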
+       {
+               "helper access to map: bounds check using s<=, good access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using s<=, good access 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: bounds check using s<=, bad access",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = REJECT,
+               .errstr = "R1 min value is negative",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
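
The s<= group mirrors the s< group with an inclusive upper bound, and fails
for the same reason when the lower bound is -3 on a 64-bit load. A sketch
under the same illustrative assumptions as above:

	/* Illustrative sketch, not from this commit. */
	static int use_elem_sle(void *val)
	{
		long idx = *(long *)val;

		if (idx >= 0 && idx <= 32)	/* JSLE imm 32: idx in [0, 32] */
			((char *)val)[idx] = 0;
		return 0;
	}
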
        {
                "map element value is preserved across register spilling",
                .insns = {