/* Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
12 #include <asm/types.h>
13 #include <linux/types.h>
24 #include <sys/capability.h>
25 #include <sys/resource.h>
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
35 # include "autoconf.h"
37 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
38 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 #include "../../../include/linux/filter.h"
/* Number of elements in a statically-sized array. Only valid on true
 * arrays (e.g. the tests[] table below), never on a pointer an array
 * has decayed to. */
45 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
/* Per-test flag bits carried in struct bpf_test (.flags).
 * NOTE(review): the driver that consumes these is outside this chunk;
 * semantics below are inferred from the names — confirm against the
 * test runner. F_NEEDS_EFFICIENT_UNALIGNED_ACCESS appears to gate a
 * test on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, and
 * F_LOAD_WITH_STRICT_ALIGNMENT appears to load the program with
 * strict-alignment checking enabled. */
52 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
53 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
57 struct bpf_insn insns
[MAX_INSNS
];
58 int fixup_map1
[MAX_FIXUPS
];
59 int fixup_map2
[MAX_FIXUPS
];
60 int fixup_prog
[MAX_FIXUPS
];
61 int fixup_map_in_map
[MAX_FIXUPS
];
63 const char *errstr_unpriv
;
68 } result
, result_unpriv
;
69 enum bpf_prog_type prog_type
;
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
/* NOTE(review): the structure this constant sizes is not visible in
 * this chunk — per the comment above it is meant to make the end of
 * the array coincide with the end of the structure; confirm against
 * the full file before changing the value. */
76 #define MAX_ENTRIES 11
83 static struct bpf_test tests
[] = {
87 BPF_MOV64_IMM(BPF_REG_1
, 1),
88 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 2),
89 BPF_MOV64_IMM(BPF_REG_2
, 3),
90 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_2
),
91 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -1),
92 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_1
, 3),
93 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
104 .errstr
= "unreachable",
110 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
111 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
114 .errstr
= "unreachable",
120 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
123 .errstr
= "jump out of range",
127 "out of range jump2",
129 BPF_JMP_IMM(BPF_JA
, 0, 0, -2),
132 .errstr
= "jump out of range",
138 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
139 BPF_LD_IMM64(BPF_REG_0
, 0),
140 BPF_LD_IMM64(BPF_REG_0
, 0),
141 BPF_LD_IMM64(BPF_REG_0
, 1),
142 BPF_LD_IMM64(BPF_REG_0
, 1),
143 BPF_MOV64_IMM(BPF_REG_0
, 2),
146 .errstr
= "invalid BPF_LD_IMM insn",
147 .errstr_unpriv
= "R1 pointer comparison",
153 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
154 BPF_LD_IMM64(BPF_REG_0
, 0),
155 BPF_LD_IMM64(BPF_REG_0
, 0),
156 BPF_LD_IMM64(BPF_REG_0
, 1),
157 BPF_LD_IMM64(BPF_REG_0
, 1),
160 .errstr
= "invalid BPF_LD_IMM insn",
161 .errstr_unpriv
= "R1 pointer comparison",
167 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
168 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
169 BPF_LD_IMM64(BPF_REG_0
, 0),
170 BPF_LD_IMM64(BPF_REG_0
, 0),
171 BPF_LD_IMM64(BPF_REG_0
, 1),
172 BPF_LD_IMM64(BPF_REG_0
, 1),
175 .errstr
= "invalid bpf_ld_imm64 insn",
181 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
184 .errstr
= "invalid bpf_ld_imm64 insn",
190 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
192 .errstr
= "invalid bpf_ld_imm64 insn",
198 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
199 BPF_RAW_INSN(0, 0, 0, 0, 0),
207 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
208 BPF_RAW_INSN(0, 0, 0, 0, 1),
216 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 1, 1),
217 BPF_RAW_INSN(0, 0, 0, 0, 1),
220 .errstr
= "uses reserved fields",
226 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
227 BPF_RAW_INSN(0, 0, 0, 1, 1),
230 .errstr
= "invalid bpf_ld_imm64 insn",
236 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
237 BPF_RAW_INSN(0, BPF_REG_1
, 0, 0, 1),
240 .errstr
= "invalid bpf_ld_imm64 insn",
246 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
247 BPF_RAW_INSN(0, 0, BPF_REG_1
, 0, 1),
250 .errstr
= "invalid bpf_ld_imm64 insn",
256 BPF_MOV64_IMM(BPF_REG_1
, 0),
257 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, BPF_REG_1
, 0, 1),
258 BPF_RAW_INSN(0, 0, 0, 0, 1),
261 .errstr
= "not pointing to valid bpf_map",
267 BPF_MOV64_IMM(BPF_REG_1
, 0),
268 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, BPF_REG_1
, 0, 1),
269 BPF_RAW_INSN(0, 0, BPF_REG_1
, 0, 1),
272 .errstr
= "invalid bpf_ld_imm64 insn",
278 BPF_ALU64_REG(BPF_MOV
, BPF_REG_0
, BPF_REG_2
),
280 .errstr
= "jump out of range",
286 BPF_JMP_IMM(BPF_JA
, 0, 0, -1),
289 .errstr
= "back-edge",
295 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
296 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
297 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
298 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
301 .errstr
= "back-edge",
307 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
308 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
309 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
310 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, -3),
313 .errstr
= "back-edge",
317 "read uninitialized register",
319 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
322 .errstr
= "R2 !read_ok",
326 "read invalid register",
328 BPF_MOV64_REG(BPF_REG_0
, -1),
331 .errstr
= "R15 is invalid",
335 "program doesn't init R0 before exit",
337 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_1
),
340 .errstr
= "R0 !read_ok",
344 "program doesn't init R0 before exit in all branches",
346 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
347 BPF_MOV64_IMM(BPF_REG_0
, 1),
348 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
351 .errstr
= "R0 !read_ok",
352 .errstr_unpriv
= "R1 pointer comparison",
356 "stack out of bounds",
358 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, 8, 0),
361 .errstr
= "invalid stack",
365 "invalid call insn1",
367 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
| BPF_X
, 0, 0, 0, 0),
370 .errstr
= "BPF_CALL uses reserved",
374 "invalid call insn2",
376 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 1, 0),
379 .errstr
= "BPF_CALL uses reserved",
383 "invalid function call",
385 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, 1234567),
388 .errstr
= "invalid func unknown#1234567",
392 "uninitialized stack1",
394 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
395 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
396 BPF_LD_MAP_FD(BPF_REG_1
, 0),
397 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
398 BPF_FUNC_map_lookup_elem
),
402 .errstr
= "invalid indirect read from stack",
406 "uninitialized stack2",
408 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
409 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -8),
412 .errstr
= "invalid read from stack",
416 "invalid fp arithmetic",
417 /* If this gets ever changed, make sure JITs can deal with it. */
419 BPF_MOV64_IMM(BPF_REG_0
, 0),
420 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
421 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 8),
422 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
425 .errstr_unpriv
= "R1 subtraction from stack pointer",
426 .result_unpriv
= REJECT
,
427 .errstr
= "R1 invalid mem access",
431 "non-invalid fp arithmetic",
433 BPF_MOV64_IMM(BPF_REG_0
, 0),
434 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
440 "invalid argument register",
442 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
443 BPF_FUNC_get_cgroup_classid
),
444 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
445 BPF_FUNC_get_cgroup_classid
),
448 .errstr
= "R1 !read_ok",
450 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
453 "non-invalid argument register",
455 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_1
),
456 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
457 BPF_FUNC_get_cgroup_classid
),
458 BPF_ALU64_REG(BPF_MOV
, BPF_REG_1
, BPF_REG_6
),
459 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
460 BPF_FUNC_get_cgroup_classid
),
464 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
467 "check valid spill/fill",
469 /* spill R1(ctx) into stack */
470 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
471 /* fill it back into R2 */
472 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
473 /* should be able to access R0 = *(R2 + 8) */
474 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
475 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
478 .errstr_unpriv
= "R0 leaks addr",
480 .result_unpriv
= REJECT
,
483 "check valid spill/fill, skb mark",
485 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_1
),
486 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_6
, -8),
487 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
488 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
489 offsetof(struct __sk_buff
, mark
)),
493 .result_unpriv
= ACCEPT
,
496 "check corrupted spill/fill",
498 /* spill R1(ctx) into stack */
499 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
500 /* mess up with R1 pointer on stack */
501 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -7, 0x23),
502 /* fill back into R0 should fail */
503 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
506 .errstr_unpriv
= "attempt to corrupt spilled",
507 .errstr
= "corrupted spill",
511 "invalid src register in STX",
513 BPF_STX_MEM(BPF_B
, BPF_REG_10
, -1, -1),
516 .errstr
= "R15 is invalid",
520 "invalid dst register in STX",
522 BPF_STX_MEM(BPF_B
, 14, BPF_REG_10
, -1),
525 .errstr
= "R14 is invalid",
529 "invalid dst register in ST",
531 BPF_ST_MEM(BPF_B
, 14, -1, -1),
534 .errstr
= "R14 is invalid",
538 "invalid src register in LDX",
540 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, 12, 0),
543 .errstr
= "R12 is invalid",
547 "invalid dst register in LDX",
549 BPF_LDX_MEM(BPF_B
, 11, BPF_REG_1
, 0),
552 .errstr
= "R11 is invalid",
558 BPF_RAW_INSN(0, 0, 0, 0, 0),
561 .errstr
= "invalid BPF_LD_IMM",
567 BPF_RAW_INSN(1, 0, 0, 0, 0),
570 .errstr
= "BPF_LDX uses reserved fields",
576 BPF_RAW_INSN(-1, 0, 0, 0, 0),
579 .errstr
= "invalid BPF_ALU opcode f0",
585 BPF_RAW_INSN(-1, -1, -1, -1, -1),
588 .errstr
= "invalid BPF_ALU opcode f0",
594 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
597 .errstr
= "BPF_ALU uses reserved fields",
601 "misaligned read from stack",
603 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
604 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -4),
607 .errstr
= "misaligned stack access",
609 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
612 "invalid map_fd for function call",
614 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
615 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_10
),
616 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
617 BPF_LD_MAP_FD(BPF_REG_1
, 0),
618 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
619 BPF_FUNC_map_delete_elem
),
622 .errstr
= "fd 0 is not pointing to valid bpf_map",
626 "don't check return value before access",
628 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
629 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
630 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
631 BPF_LD_MAP_FD(BPF_REG_1
, 0),
632 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
633 BPF_FUNC_map_lookup_elem
),
634 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
638 .errstr
= "R0 invalid mem access 'map_value_or_null'",
642 "access memory with incorrect alignment",
644 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
645 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
646 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
647 BPF_LD_MAP_FD(BPF_REG_1
, 0),
648 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
649 BPF_FUNC_map_lookup_elem
),
650 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
651 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 4, 0),
655 .errstr
= "misaligned value access",
657 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
660 "sometimes access memory with incorrect alignment",
662 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
663 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
664 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
665 BPF_LD_MAP_FD(BPF_REG_1
, 0),
666 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
667 BPF_FUNC_map_lookup_elem
),
668 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
669 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
671 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 1),
675 .errstr
= "R0 invalid mem access",
676 .errstr_unpriv
= "R0 leaks addr",
678 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
683 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
684 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -8),
685 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
686 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
687 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 1),
688 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 1),
689 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 1),
690 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 2),
691 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 1),
692 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 3),
693 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 1),
694 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 4),
695 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
696 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 5),
697 BPF_MOV64_IMM(BPF_REG_0
, 0),
700 .errstr_unpriv
= "R1 pointer comparison",
701 .result_unpriv
= REJECT
,
707 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
708 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 2),
709 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
710 BPF_JMP_IMM(BPF_JA
, 0, 0, 14),
711 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 2),
712 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
713 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
714 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 2),
715 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
716 BPF_JMP_IMM(BPF_JA
, 0, 0, 8),
717 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 2),
718 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
719 BPF_JMP_IMM(BPF_JA
, 0, 0, 5),
720 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 2),
721 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
722 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
723 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
724 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
725 BPF_MOV64_IMM(BPF_REG_0
, 0),
728 .errstr_unpriv
= "R1 pointer comparison",
729 .result_unpriv
= REJECT
,
735 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
736 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
737 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
738 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
739 BPF_JMP_IMM(BPF_JA
, 0, 0, 19),
740 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 3),
741 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
742 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
743 BPF_JMP_IMM(BPF_JA
, 0, 0, 15),
744 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 3),
745 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
746 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -32),
747 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
748 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 3),
749 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
750 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -40),
751 BPF_JMP_IMM(BPF_JA
, 0, 0, 7),
752 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 3),
753 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
754 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -48),
755 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
756 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 0),
757 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
758 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -56),
759 BPF_LD_MAP_FD(BPF_REG_1
, 0),
760 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
761 BPF_FUNC_map_delete_elem
),
764 .fixup_map1
= { 24 },
765 .errstr_unpriv
= "R1 pointer comparison",
766 .result_unpriv
= REJECT
,
772 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
773 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
774 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
775 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
776 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
777 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
778 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
779 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
780 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
781 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
782 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
783 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
784 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
785 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
786 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
787 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
788 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
789 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
790 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
791 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
792 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
793 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
794 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
795 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
796 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
797 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
798 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
799 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
800 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
801 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
802 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
803 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
804 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
805 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
806 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
807 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
808 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
809 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
810 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
811 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
812 BPF_MOV64_IMM(BPF_REG_0
, 0),
815 .errstr_unpriv
= "R1 pointer comparison",
816 .result_unpriv
= REJECT
,
822 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
823 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
824 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
825 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
826 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
827 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
828 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
829 BPF_MOV64_IMM(BPF_REG_0
, 0),
830 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
831 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
832 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
833 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
834 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
835 BPF_MOV64_IMM(BPF_REG_0
, 0),
836 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
837 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
838 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
839 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
840 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
841 BPF_MOV64_IMM(BPF_REG_0
, 0),
842 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
843 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
844 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
845 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
846 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
847 BPF_MOV64_IMM(BPF_REG_0
, 0),
848 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
849 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
850 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
851 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
852 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
853 BPF_MOV64_IMM(BPF_REG_0
, 0),
856 .errstr_unpriv
= "R1 pointer comparison",
857 .result_unpriv
= REJECT
,
861 "access skb fields ok",
863 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
864 offsetof(struct __sk_buff
, len
)),
865 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
866 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
867 offsetof(struct __sk_buff
, mark
)),
868 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
869 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
870 offsetof(struct __sk_buff
, pkt_type
)),
871 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
872 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
873 offsetof(struct __sk_buff
, queue_mapping
)),
874 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
875 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
876 offsetof(struct __sk_buff
, protocol
)),
877 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
878 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
879 offsetof(struct __sk_buff
, vlan_present
)),
880 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
881 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
882 offsetof(struct __sk_buff
, vlan_tci
)),
883 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
884 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
885 offsetof(struct __sk_buff
, napi_id
)),
886 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
892 "access skb fields bad1",
894 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -4),
897 .errstr
= "invalid bpf_context access",
901 "access skb fields bad2",
903 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 9),
904 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
905 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
906 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
907 BPF_LD_MAP_FD(BPF_REG_1
, 0),
908 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
909 BPF_FUNC_map_lookup_elem
),
910 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
912 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
913 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
914 offsetof(struct __sk_buff
, pkt_type
)),
918 .errstr
= "different pointers",
919 .errstr_unpriv
= "R1 pointer comparison",
923 "access skb fields bad3",
925 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
926 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
927 offsetof(struct __sk_buff
, pkt_type
)),
929 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
930 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
931 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
932 BPF_LD_MAP_FD(BPF_REG_1
, 0),
933 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
934 BPF_FUNC_map_lookup_elem
),
935 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
937 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
938 BPF_JMP_IMM(BPF_JA
, 0, 0, -12),
941 .errstr
= "different pointers",
942 .errstr_unpriv
= "R1 pointer comparison",
946 "access skb fields bad4",
948 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 3),
949 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
950 offsetof(struct __sk_buff
, len
)),
951 BPF_MOV64_IMM(BPF_REG_0
, 0),
953 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
954 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
955 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
956 BPF_LD_MAP_FD(BPF_REG_1
, 0),
957 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
958 BPF_FUNC_map_lookup_elem
),
959 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
961 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
962 BPF_JMP_IMM(BPF_JA
, 0, 0, -13),
965 .errstr
= "different pointers",
966 .errstr_unpriv
= "R1 pointer comparison",
970 "invalid access __sk_buff family",
972 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
973 offsetof(struct __sk_buff
, family
)),
976 .errstr
= "invalid bpf_context access",
980 "invalid access __sk_buff remote_ip4",
982 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
983 offsetof(struct __sk_buff
, remote_ip4
)),
986 .errstr
= "invalid bpf_context access",
990 "invalid access __sk_buff local_ip4",
992 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
993 offsetof(struct __sk_buff
, local_ip4
)),
996 .errstr
= "invalid bpf_context access",
1000 "invalid access __sk_buff remote_ip6",
1002 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1003 offsetof(struct __sk_buff
, remote_ip6
)),
1006 .errstr
= "invalid bpf_context access",
1010 "invalid access __sk_buff local_ip6",
1012 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1013 offsetof(struct __sk_buff
, local_ip6
)),
1016 .errstr
= "invalid bpf_context access",
1020 "invalid access __sk_buff remote_port",
1022 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1023 offsetof(struct __sk_buff
, remote_port
)),
1026 .errstr
= "invalid bpf_context access",
1030 "invalid access __sk_buff remote_port",
1032 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1033 offsetof(struct __sk_buff
, local_port
)),
1036 .errstr
= "invalid bpf_context access",
1040 "valid access __sk_buff family",
1042 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1043 offsetof(struct __sk_buff
, family
)),
1047 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1050 "valid access __sk_buff remote_ip4",
1052 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1053 offsetof(struct __sk_buff
, remote_ip4
)),
1057 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1060 "valid access __sk_buff local_ip4",
1062 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1063 offsetof(struct __sk_buff
, local_ip4
)),
1067 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1070 "valid access __sk_buff remote_ip6",
1072 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1073 offsetof(struct __sk_buff
, remote_ip6
[0])),
1074 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1075 offsetof(struct __sk_buff
, remote_ip6
[1])),
1076 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1077 offsetof(struct __sk_buff
, remote_ip6
[2])),
1078 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1079 offsetof(struct __sk_buff
, remote_ip6
[3])),
1083 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1086 "valid access __sk_buff local_ip6",
1088 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1089 offsetof(struct __sk_buff
, local_ip6
[0])),
1090 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1091 offsetof(struct __sk_buff
, local_ip6
[1])),
1092 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1093 offsetof(struct __sk_buff
, local_ip6
[2])),
1094 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1095 offsetof(struct __sk_buff
, local_ip6
[3])),
1099 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1102 "valid access __sk_buff remote_port",
1104 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1105 offsetof(struct __sk_buff
, remote_port
)),
1109 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1112 "valid access __sk_buff remote_port",
1114 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1115 offsetof(struct __sk_buff
, local_port
)),
1119 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1122 "invalid access of tc_classid for SK_SKB",
1124 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1125 offsetof(struct __sk_buff
, tc_classid
)),
1129 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1130 .errstr
= "invalid bpf_context access",
1133 "invalid access of skb->mark for SK_SKB",
1135 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1136 offsetof(struct __sk_buff
, mark
)),
1140 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1141 .errstr
= "invalid bpf_context access",
1144 "check skb->mark is not writeable by SK_SKB",
1146 BPF_MOV64_IMM(BPF_REG_0
, 0),
1147 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1148 offsetof(struct __sk_buff
, mark
)),
1152 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1153 .errstr
= "invalid bpf_context access",
1156 "check skb->tc_index is writeable by SK_SKB",
1158 BPF_MOV64_IMM(BPF_REG_0
, 0),
1159 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1160 offsetof(struct __sk_buff
, tc_index
)),
1164 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1167 "check skb->priority is writeable by SK_SKB",
1169 BPF_MOV64_IMM(BPF_REG_0
, 0),
1170 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1171 offsetof(struct __sk_buff
, priority
)),
1175 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1178 "direct packet read for SK_SKB",
1180 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1181 offsetof(struct __sk_buff
, data
)),
1182 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1183 offsetof(struct __sk_buff
, data_end
)),
1184 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1185 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1186 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
1187 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
1188 BPF_MOV64_IMM(BPF_REG_0
, 0),
1192 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1195 "direct packet write for SK_SKB",
1197 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1198 offsetof(struct __sk_buff
, data
)),
1199 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1200 offsetof(struct __sk_buff
, data_end
)),
1201 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1202 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1203 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
1204 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
1205 BPF_MOV64_IMM(BPF_REG_0
, 0),
1209 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1212 "overlapping checks for direct packet access SK_SKB",
1214 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1215 offsetof(struct __sk_buff
, data
)),
1216 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1217 offsetof(struct __sk_buff
, data_end
)),
1218 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1219 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1220 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 4),
1221 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
1222 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 6),
1223 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
1224 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_2
, 6),
1225 BPF_MOV64_IMM(BPF_REG_0
, 0),
1229 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1232 "check skb->mark is not writeable by sockets",
1234 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1235 offsetof(struct __sk_buff
, mark
)),
1238 .errstr
= "invalid bpf_context access",
1239 .errstr_unpriv
= "R1 leaks addr",
1243 "check skb->tc_index is not writeable by sockets",
1245 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1246 offsetof(struct __sk_buff
, tc_index
)),
1249 .errstr
= "invalid bpf_context access",
1250 .errstr_unpriv
= "R1 leaks addr",
1254 "check cb access: byte",
1256 BPF_MOV64_IMM(BPF_REG_0
, 0),
1257 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1258 offsetof(struct __sk_buff
, cb
[0])),
1259 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1260 offsetof(struct __sk_buff
, cb
[0]) + 1),
1261 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1262 offsetof(struct __sk_buff
, cb
[0]) + 2),
1263 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1264 offsetof(struct __sk_buff
, cb
[0]) + 3),
1265 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1266 offsetof(struct __sk_buff
, cb
[1])),
1267 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1268 offsetof(struct __sk_buff
, cb
[1]) + 1),
1269 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1270 offsetof(struct __sk_buff
, cb
[1]) + 2),
1271 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1272 offsetof(struct __sk_buff
, cb
[1]) + 3),
1273 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1274 offsetof(struct __sk_buff
, cb
[2])),
1275 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1276 offsetof(struct __sk_buff
, cb
[2]) + 1),
1277 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1278 offsetof(struct __sk_buff
, cb
[2]) + 2),
1279 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1280 offsetof(struct __sk_buff
, cb
[2]) + 3),
1281 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1282 offsetof(struct __sk_buff
, cb
[3])),
1283 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1284 offsetof(struct __sk_buff
, cb
[3]) + 1),
1285 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1286 offsetof(struct __sk_buff
, cb
[3]) + 2),
1287 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1288 offsetof(struct __sk_buff
, cb
[3]) + 3),
1289 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1290 offsetof(struct __sk_buff
, cb
[4])),
1291 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1292 offsetof(struct __sk_buff
, cb
[4]) + 1),
1293 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1294 offsetof(struct __sk_buff
, cb
[4]) + 2),
1295 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1296 offsetof(struct __sk_buff
, cb
[4]) + 3),
1297 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1298 offsetof(struct __sk_buff
, cb
[0])),
1299 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1300 offsetof(struct __sk_buff
, cb
[0]) + 1),
1301 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1302 offsetof(struct __sk_buff
, cb
[0]) + 2),
1303 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1304 offsetof(struct __sk_buff
, cb
[0]) + 3),
1305 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1306 offsetof(struct __sk_buff
, cb
[1])),
1307 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1308 offsetof(struct __sk_buff
, cb
[1]) + 1),
1309 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1310 offsetof(struct __sk_buff
, cb
[1]) + 2),
1311 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1312 offsetof(struct __sk_buff
, cb
[1]) + 3),
1313 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1314 offsetof(struct __sk_buff
, cb
[2])),
1315 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1316 offsetof(struct __sk_buff
, cb
[2]) + 1),
1317 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1318 offsetof(struct __sk_buff
, cb
[2]) + 2),
1319 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1320 offsetof(struct __sk_buff
, cb
[2]) + 3),
1321 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1322 offsetof(struct __sk_buff
, cb
[3])),
1323 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1324 offsetof(struct __sk_buff
, cb
[3]) + 1),
1325 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1326 offsetof(struct __sk_buff
, cb
[3]) + 2),
1327 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1328 offsetof(struct __sk_buff
, cb
[3]) + 3),
1329 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1330 offsetof(struct __sk_buff
, cb
[4])),
1331 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1332 offsetof(struct __sk_buff
, cb
[4]) + 1),
1333 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1334 offsetof(struct __sk_buff
, cb
[4]) + 2),
1335 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1336 offsetof(struct __sk_buff
, cb
[4]) + 3),
1342 "__sk_buff->hash, offset 0, byte store not permitted",
1344 BPF_MOV64_IMM(BPF_REG_0
, 0),
1345 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1346 offsetof(struct __sk_buff
, hash
)),
1349 .errstr
= "invalid bpf_context access",
1353 "__sk_buff->tc_index, offset 3, byte store not permitted",
1355 BPF_MOV64_IMM(BPF_REG_0
, 0),
1356 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1357 offsetof(struct __sk_buff
, tc_index
) + 3),
1360 .errstr
= "invalid bpf_context access",
1364 "check skb->hash byte load permitted",
1366 BPF_MOV64_IMM(BPF_REG_0
, 0),
1367 #if __BYTE_ORDER == __LITTLE_ENDIAN
1368 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1369 offsetof(struct __sk_buff
, hash
)),
1371 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1372 offsetof(struct __sk_buff
, hash
) + 3),
1379 "check skb->hash byte load not permitted 1",
1381 BPF_MOV64_IMM(BPF_REG_0
, 0),
1382 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1383 offsetof(struct __sk_buff
, hash
) + 1),
1386 .errstr
= "invalid bpf_context access",
1390 "check skb->hash byte load not permitted 2",
1392 BPF_MOV64_IMM(BPF_REG_0
, 0),
1393 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1394 offsetof(struct __sk_buff
, hash
) + 2),
1397 .errstr
= "invalid bpf_context access",
1401 "check skb->hash byte load not permitted 3",
1403 BPF_MOV64_IMM(BPF_REG_0
, 0),
1404 #if __BYTE_ORDER == __LITTLE_ENDIAN
1405 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1406 offsetof(struct __sk_buff
, hash
) + 3),
1408 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1409 offsetof(struct __sk_buff
, hash
)),
1413 .errstr
= "invalid bpf_context access",
1417 "check cb access: byte, wrong type",
1419 BPF_MOV64_IMM(BPF_REG_0
, 0),
1420 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1421 offsetof(struct __sk_buff
, cb
[0])),
1424 .errstr
= "invalid bpf_context access",
1426 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1429 "check cb access: half",
1431 BPF_MOV64_IMM(BPF_REG_0
, 0),
1432 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1433 offsetof(struct __sk_buff
, cb
[0])),
1434 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1435 offsetof(struct __sk_buff
, cb
[0]) + 2),
1436 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1437 offsetof(struct __sk_buff
, cb
[1])),
1438 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1439 offsetof(struct __sk_buff
, cb
[1]) + 2),
1440 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1441 offsetof(struct __sk_buff
, cb
[2])),
1442 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1443 offsetof(struct __sk_buff
, cb
[2]) + 2),
1444 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1445 offsetof(struct __sk_buff
, cb
[3])),
1446 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1447 offsetof(struct __sk_buff
, cb
[3]) + 2),
1448 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1449 offsetof(struct __sk_buff
, cb
[4])),
1450 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1451 offsetof(struct __sk_buff
, cb
[4]) + 2),
1452 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1453 offsetof(struct __sk_buff
, cb
[0])),
1454 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1455 offsetof(struct __sk_buff
, cb
[0]) + 2),
1456 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1457 offsetof(struct __sk_buff
, cb
[1])),
1458 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1459 offsetof(struct __sk_buff
, cb
[1]) + 2),
1460 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1461 offsetof(struct __sk_buff
, cb
[2])),
1462 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1463 offsetof(struct __sk_buff
, cb
[2]) + 2),
1464 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1465 offsetof(struct __sk_buff
, cb
[3])),
1466 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1467 offsetof(struct __sk_buff
, cb
[3]) + 2),
1468 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1469 offsetof(struct __sk_buff
, cb
[4])),
1470 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1471 offsetof(struct __sk_buff
, cb
[4]) + 2),
1477 "check cb access: half, unaligned",
1479 BPF_MOV64_IMM(BPF_REG_0
, 0),
1480 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1481 offsetof(struct __sk_buff
, cb
[0]) + 1),
1484 .errstr
= "misaligned context access",
1486 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1489 "check __sk_buff->hash, offset 0, half store not permitted",
1491 BPF_MOV64_IMM(BPF_REG_0
, 0),
1492 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1493 offsetof(struct __sk_buff
, hash
)),
1496 .errstr
= "invalid bpf_context access",
1500 "check __sk_buff->tc_index, offset 2, half store not permitted",
1502 BPF_MOV64_IMM(BPF_REG_0
, 0),
1503 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1504 offsetof(struct __sk_buff
, tc_index
) + 2),
1507 .errstr
= "invalid bpf_context access",
1511 "check skb->hash half load permitted",
1513 BPF_MOV64_IMM(BPF_REG_0
, 0),
1514 #if __BYTE_ORDER == __LITTLE_ENDIAN
1515 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1516 offsetof(struct __sk_buff
, hash
)),
1518 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1519 offsetof(struct __sk_buff
, hash
) + 2),
1526 "check skb->hash half load not permitted",
1528 BPF_MOV64_IMM(BPF_REG_0
, 0),
1529 #if __BYTE_ORDER == __LITTLE_ENDIAN
1530 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1531 offsetof(struct __sk_buff
, hash
) + 2),
1533 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1534 offsetof(struct __sk_buff
, hash
)),
1538 .errstr
= "invalid bpf_context access",
1542 "check cb access: half, wrong type",
1544 BPF_MOV64_IMM(BPF_REG_0
, 0),
1545 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1546 offsetof(struct __sk_buff
, cb
[0])),
1549 .errstr
= "invalid bpf_context access",
1551 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1554 "check cb access: word",
1556 BPF_MOV64_IMM(BPF_REG_0
, 0),
1557 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1558 offsetof(struct __sk_buff
, cb
[0])),
1559 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1560 offsetof(struct __sk_buff
, cb
[1])),
1561 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1562 offsetof(struct __sk_buff
, cb
[2])),
1563 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1564 offsetof(struct __sk_buff
, cb
[3])),
1565 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1566 offsetof(struct __sk_buff
, cb
[4])),
1567 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1568 offsetof(struct __sk_buff
, cb
[0])),
1569 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1570 offsetof(struct __sk_buff
, cb
[1])),
1571 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1572 offsetof(struct __sk_buff
, cb
[2])),
1573 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1574 offsetof(struct __sk_buff
, cb
[3])),
1575 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1576 offsetof(struct __sk_buff
, cb
[4])),
1582 "check cb access: word, unaligned 1",
1584 BPF_MOV64_IMM(BPF_REG_0
, 0),
1585 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1586 offsetof(struct __sk_buff
, cb
[0]) + 2),
1589 .errstr
= "misaligned context access",
1591 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1594 "check cb access: word, unaligned 2",
1596 BPF_MOV64_IMM(BPF_REG_0
, 0),
1597 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1598 offsetof(struct __sk_buff
, cb
[4]) + 1),
1601 .errstr
= "misaligned context access",
1603 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1606 "check cb access: word, unaligned 3",
1608 BPF_MOV64_IMM(BPF_REG_0
, 0),
1609 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1610 offsetof(struct __sk_buff
, cb
[4]) + 2),
1613 .errstr
= "misaligned context access",
1615 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1618 "check cb access: word, unaligned 4",
1620 BPF_MOV64_IMM(BPF_REG_0
, 0),
1621 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1622 offsetof(struct __sk_buff
, cb
[4]) + 3),
1625 .errstr
= "misaligned context access",
1627 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1630 "check cb access: double",
1632 BPF_MOV64_IMM(BPF_REG_0
, 0),
1633 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1634 offsetof(struct __sk_buff
, cb
[0])),
1635 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1636 offsetof(struct __sk_buff
, cb
[2])),
1637 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1638 offsetof(struct __sk_buff
, cb
[0])),
1639 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1640 offsetof(struct __sk_buff
, cb
[2])),
1646 "check cb access: double, unaligned 1",
1648 BPF_MOV64_IMM(BPF_REG_0
, 0),
1649 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1650 offsetof(struct __sk_buff
, cb
[1])),
1653 .errstr
= "misaligned context access",
1655 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1658 "check cb access: double, unaligned 2",
1660 BPF_MOV64_IMM(BPF_REG_0
, 0),
1661 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1662 offsetof(struct __sk_buff
, cb
[3])),
1665 .errstr
= "misaligned context access",
1667 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1670 "check cb access: double, oob 1",
1672 BPF_MOV64_IMM(BPF_REG_0
, 0),
1673 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1674 offsetof(struct __sk_buff
, cb
[4])),
1677 .errstr
= "invalid bpf_context access",
1681 "check cb access: double, oob 2",
1683 BPF_MOV64_IMM(BPF_REG_0
, 0),
1684 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1685 offsetof(struct __sk_buff
, cb
[4])),
1688 .errstr
= "invalid bpf_context access",
1692 "check __sk_buff->ifindex dw store not permitted",
1694 BPF_MOV64_IMM(BPF_REG_0
, 0),
1695 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1696 offsetof(struct __sk_buff
, ifindex
)),
1699 .errstr
= "invalid bpf_context access",
1703 "check __sk_buff->ifindex dw load not permitted",
1705 BPF_MOV64_IMM(BPF_REG_0
, 0),
1706 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1707 offsetof(struct __sk_buff
, ifindex
)),
1710 .errstr
= "invalid bpf_context access",
1714 "check cb access: double, wrong type",
1716 BPF_MOV64_IMM(BPF_REG_0
, 0),
1717 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1718 offsetof(struct __sk_buff
, cb
[0])),
1721 .errstr
= "invalid bpf_context access",
1723 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1726 "check out of range skb->cb access",
1728 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1729 offsetof(struct __sk_buff
, cb
[0]) + 256),
1732 .errstr
= "invalid bpf_context access",
1733 .errstr_unpriv
= "",
1735 .prog_type
= BPF_PROG_TYPE_SCHED_ACT
,
1738 "write skb fields from socket prog",
1740 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1741 offsetof(struct __sk_buff
, cb
[4])),
1742 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
1743 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1744 offsetof(struct __sk_buff
, mark
)),
1745 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1746 offsetof(struct __sk_buff
, tc_index
)),
1747 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
1748 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1749 offsetof(struct __sk_buff
, cb
[0])),
1750 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1751 offsetof(struct __sk_buff
, cb
[2])),
1755 .errstr_unpriv
= "R1 leaks addr",
1756 .result_unpriv
= REJECT
,
1759 "write skb fields from tc_cls_act prog",
1761 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1762 offsetof(struct __sk_buff
, cb
[0])),
1763 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1764 offsetof(struct __sk_buff
, mark
)),
1765 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1766 offsetof(struct __sk_buff
, tc_index
)),
1767 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1768 offsetof(struct __sk_buff
, tc_index
)),
1769 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1770 offsetof(struct __sk_buff
, cb
[3])),
1773 .errstr_unpriv
= "",
1774 .result_unpriv
= REJECT
,
1776 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1779 "PTR_TO_STACK store/load",
1781 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1782 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
1783 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
1784 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
1790 "PTR_TO_STACK store/load - bad alignment on off",
1792 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1793 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1794 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
1795 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
1799 .errstr
= "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1800 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1803 "PTR_TO_STACK store/load - bad alignment on reg",
1805 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1806 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
1807 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1808 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1812 .errstr
= "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1813 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1816 "PTR_TO_STACK store/load - out of bounds low",
1818 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1819 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -80000),
1820 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1821 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1825 .errstr
= "invalid stack off=-79992 size=8",
1828 "PTR_TO_STACK store/load - out of bounds high",
1830 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1831 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1832 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1833 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1837 .errstr
= "invalid stack off=0 size=8",
1840 "unpriv: return pointer",
1842 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
1846 .result_unpriv
= REJECT
,
1847 .errstr_unpriv
= "R0 leaks addr",
1850 "unpriv: add const to pointer",
1852 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
1853 BPF_MOV64_IMM(BPF_REG_0
, 0),
1859 "unpriv: add pointer to pointer",
1861 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
1862 BPF_MOV64_IMM(BPF_REG_0
, 0),
1866 .result_unpriv
= REJECT
,
1867 .errstr_unpriv
= "R1 pointer += pointer",
1870 "unpriv: neg pointer",
1872 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_1
, 0),
1873 BPF_MOV64_IMM(BPF_REG_0
, 0),
1877 .result_unpriv
= REJECT
,
1878 .errstr_unpriv
= "R1 pointer arithmetic",
1881 "unpriv: cmp pointer with const",
1883 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
1884 BPF_MOV64_IMM(BPF_REG_0
, 0),
1888 .result_unpriv
= REJECT
,
1889 .errstr_unpriv
= "R1 pointer comparison",
1892 "unpriv: cmp pointer with pointer",
1894 BPF_JMP_REG(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
1895 BPF_MOV64_IMM(BPF_REG_0
, 0),
1899 .result_unpriv
= REJECT
,
1900 .errstr_unpriv
= "R10 pointer comparison",
1903 "unpriv: check that printk is disallowed",
1905 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1906 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1907 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1908 BPF_MOV64_IMM(BPF_REG_2
, 8),
1909 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
1910 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1911 BPF_FUNC_trace_printk
),
1912 BPF_MOV64_IMM(BPF_REG_0
, 0),
1915 .errstr_unpriv
= "unknown func bpf_trace_printk#6",
1916 .result_unpriv
= REJECT
,
1920 "unpriv: pass pointer to helper function",
1922 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1923 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1924 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1925 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1926 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
1927 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
1928 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1929 BPF_FUNC_map_update_elem
),
1930 BPF_MOV64_IMM(BPF_REG_0
, 0),
1933 .fixup_map1
= { 3 },
1934 .errstr_unpriv
= "R4 leaks addr",
1935 .result_unpriv
= REJECT
,
1939 "unpriv: indirectly pass pointer on stack to helper function",
1941 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1942 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1943 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1944 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1945 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1946 BPF_FUNC_map_lookup_elem
),
1947 BPF_MOV64_IMM(BPF_REG_0
, 0),
1950 .fixup_map1
= { 3 },
1951 .errstr
= "invalid indirect read from stack off -8+0 size 8",
1955 "unpriv: mangle pointer on stack 1",
1957 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1958 BPF_ST_MEM(BPF_W
, BPF_REG_10
, -8, 0),
1959 BPF_MOV64_IMM(BPF_REG_0
, 0),
1962 .errstr_unpriv
= "attempt to corrupt spilled",
1963 .result_unpriv
= REJECT
,
1967 "unpriv: mangle pointer on stack 2",
1969 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1970 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -1, 0),
1971 BPF_MOV64_IMM(BPF_REG_0
, 0),
1974 .errstr_unpriv
= "attempt to corrupt spilled",
1975 .result_unpriv
= REJECT
,
1979 "unpriv: read pointer from stack in small chunks",
1981 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1982 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_10
, -8),
1983 BPF_MOV64_IMM(BPF_REG_0
, 0),
1986 .errstr
= "invalid size",
1990 "unpriv: write pointer into ctx",
1992 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
, 0),
1993 BPF_MOV64_IMM(BPF_REG_0
, 0),
1996 .errstr_unpriv
= "R1 leaks addr",
1997 .result_unpriv
= REJECT
,
1998 .errstr
= "invalid bpf_context access",
2002 "unpriv: spill/fill of ctx",
2004 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2005 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2006 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2007 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2008 BPF_MOV64_IMM(BPF_REG_0
, 0),
2014 "unpriv: spill/fill of ctx 2",
2016 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2017 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2018 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2019 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2020 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2021 BPF_FUNC_get_hash_recalc
),
2025 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2028 "unpriv: spill/fill of ctx 3",
2030 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2031 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2032 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2033 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
2034 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2035 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2036 BPF_FUNC_get_hash_recalc
),
2040 .errstr
= "R1 type=fp expected=ctx",
2041 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2044 "unpriv: spill/fill of ctx 4",
2046 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2047 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2048 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2049 BPF_MOV64_IMM(BPF_REG_0
, 1),
2050 BPF_RAW_INSN(BPF_STX
| BPF_XADD
| BPF_DW
, BPF_REG_10
,
2052 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2053 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2054 BPF_FUNC_get_hash_recalc
),
2058 .errstr
= "R1 type=inv expected=ctx",
2059 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2062 "unpriv: spill/fill of different pointers stx",
2064 BPF_MOV64_IMM(BPF_REG_3
, 42),
2065 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2066 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2067 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
2068 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2069 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
2070 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
2071 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
2072 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2073 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2074 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_3
,
2075 offsetof(struct __sk_buff
, mark
)),
2076 BPF_MOV64_IMM(BPF_REG_0
, 0),
2080 .errstr
= "same insn cannot be used with different pointers",
2081 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2084 "unpriv: spill/fill of different pointers ldx",
2086 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2087 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2088 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
2089 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2090 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
,
2091 -(__s32
)offsetof(struct bpf_perf_event_data
,
2092 sample_period
) - 8),
2093 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
2094 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
2095 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2096 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2097 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
,
2098 offsetof(struct bpf_perf_event_data
,
2100 BPF_MOV64_IMM(BPF_REG_0
, 0),
2104 .errstr
= "same insn cannot be used with different pointers",
2105 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
2108 "unpriv: write pointer into map elem value",
2110 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
2111 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2112 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
2113 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2114 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2115 BPF_FUNC_map_lookup_elem
),
2116 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
2117 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
2120 .fixup_map1
= { 3 },
2121 .errstr_unpriv
= "R0 leaks addr",
2122 .result_unpriv
= REJECT
,
2126 "unpriv: partial copy of pointer",
2128 BPF_MOV32_REG(BPF_REG_1
, BPF_REG_10
),
2129 BPF_MOV64_IMM(BPF_REG_0
, 0),
2132 .errstr_unpriv
= "R10 partial copy",
2133 .result_unpriv
= REJECT
,
2137 "unpriv: pass pointer to tail_call",
2139 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
2140 BPF_LD_MAP_FD(BPF_REG_2
, 0),
2141 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2142 BPF_FUNC_tail_call
),
2143 BPF_MOV64_IMM(BPF_REG_0
, 0),
2146 .fixup_prog
= { 1 },
2147 .errstr_unpriv
= "R3 leaks addr into helper",
2148 .result_unpriv
= REJECT
,
2152 "unpriv: cmp map pointer with zero",
2154 BPF_MOV64_IMM(BPF_REG_1
, 0),
2155 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2156 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
2157 BPF_MOV64_IMM(BPF_REG_0
, 0),
2160 .fixup_map1
= { 1 },
2161 .errstr_unpriv
= "R1 pointer comparison",
2162 .result_unpriv
= REJECT
,
2166 "unpriv: write into frame pointer",
2168 BPF_MOV64_REG(BPF_REG_10
, BPF_REG_1
),
2169 BPF_MOV64_IMM(BPF_REG_0
, 0),
2172 .errstr
= "frame pointer is read only",
2176 "unpriv: spill/fill frame pointer",
2178 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2179 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2180 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
2181 BPF_LDX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_6
, 0),
2182 BPF_MOV64_IMM(BPF_REG_0
, 0),
2185 .errstr
= "frame pointer is read only",
2189 "unpriv: cmp of frame pointer",
2191 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_10
, 0, 0),
2192 BPF_MOV64_IMM(BPF_REG_0
, 0),
2195 .errstr_unpriv
= "R10 pointer comparison",
2196 .result_unpriv
= REJECT
,
2200 "unpriv: adding of fp",
2202 BPF_MOV64_IMM(BPF_REG_0
, 0),
2203 BPF_MOV64_IMM(BPF_REG_1
, 0),
2204 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
2205 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, -8),
2211 "unpriv: cmp of stack pointer",
2213 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2214 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
2215 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_2
, 0, 0),
2216 BPF_MOV64_IMM(BPF_REG_0
, 0),
2219 .errstr_unpriv
= "R2 pointer comparison",
2220 .result_unpriv
= REJECT
,
2224 "stack pointer arithmetic",
2226 BPF_MOV64_IMM(BPF_REG_1
, 4),
2227 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
2228 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_10
),
2229 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, -10),
2230 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, -10),
2231 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
2232 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_1
),
2233 BPF_ST_MEM(0, BPF_REG_2
, 4, 0),
2234 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
2235 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
2236 BPF_ST_MEM(0, BPF_REG_2
, 4, 0),
2237 BPF_MOV64_IMM(BPF_REG_0
, 0),
2243 "raw_stack: no skb_load_bytes",
2245 BPF_MOV64_IMM(BPF_REG_2
, 4),
2246 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2247 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2248 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2249 BPF_MOV64_IMM(BPF_REG_4
, 8),
2250 /* Call to skb_load_bytes() omitted. */
2251 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2255 .errstr
= "invalid read from stack off -8+0 size 8",
2256 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2259 "raw_stack: skb_load_bytes, negative len",
2261 BPF_MOV64_IMM(BPF_REG_2
, 4),
2262 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2263 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2264 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2265 BPF_MOV64_IMM(BPF_REG_4
, -8),
2266 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2267 BPF_FUNC_skb_load_bytes
),
2268 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2272 .errstr
= "R4 min value is negative",
2273 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2276 "raw_stack: skb_load_bytes, negative len 2",
2278 BPF_MOV64_IMM(BPF_REG_2
, 4),
2279 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2280 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2281 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2282 BPF_MOV64_IMM(BPF_REG_4
, ~0),
2283 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2284 BPF_FUNC_skb_load_bytes
),
2285 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2289 .errstr
= "R4 min value is negative",
2290 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2293 "raw_stack: skb_load_bytes, zero len",
2295 BPF_MOV64_IMM(BPF_REG_2
, 4),
2296 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2297 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2298 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2299 BPF_MOV64_IMM(BPF_REG_4
, 0),
2300 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2301 BPF_FUNC_skb_load_bytes
),
2302 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2306 .errstr
= "invalid stack type R3",
2307 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2310 "raw_stack: skb_load_bytes, no init",
2312 BPF_MOV64_IMM(BPF_REG_2
, 4),
2313 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2314 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2315 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2316 BPF_MOV64_IMM(BPF_REG_4
, 8),
2317 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2318 BPF_FUNC_skb_load_bytes
),
2319 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2323 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2326 "raw_stack: skb_load_bytes, init",
2328 BPF_MOV64_IMM(BPF_REG_2
, 4),
2329 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2330 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2331 BPF_ST_MEM(BPF_DW
, BPF_REG_6
, 0, 0xcafe),
2332 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2333 BPF_MOV64_IMM(BPF_REG_4
, 8),
2334 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2335 BPF_FUNC_skb_load_bytes
),
2336 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2340 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2343 "raw_stack: skb_load_bytes, spilled regs around bounds",
2345 BPF_MOV64_IMM(BPF_REG_2
, 4),
2346 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2347 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2348 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2349 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2350 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2351 BPF_MOV64_IMM(BPF_REG_4
, 8),
2352 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2353 BPF_FUNC_skb_load_bytes
),
2354 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2355 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2356 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2357 offsetof(struct __sk_buff
, mark
)),
2358 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2359 offsetof(struct __sk_buff
, priority
)),
2360 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2364 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2367 "raw_stack: skb_load_bytes, spilled regs corruption",
2369 BPF_MOV64_IMM(BPF_REG_2
, 4),
2370 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2371 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2372 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2373 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2374 BPF_MOV64_IMM(BPF_REG_4
, 8),
2375 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2376 BPF_FUNC_skb_load_bytes
),
2377 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2378 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2379 offsetof(struct __sk_buff
, mark
)),
2383 .errstr
= "R0 invalid mem access 'inv'",
2384 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2387 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2389 BPF_MOV64_IMM(BPF_REG_2
, 4),
2390 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2391 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2392 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2393 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2394 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2395 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2396 BPF_MOV64_IMM(BPF_REG_4
, 8),
2397 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2398 BPF_FUNC_skb_load_bytes
),
2399 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2400 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2401 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0),
2402 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2403 offsetof(struct __sk_buff
, mark
)),
2404 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2405 offsetof(struct __sk_buff
, priority
)),
2406 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2407 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_3
,
2408 offsetof(struct __sk_buff
, pkt_type
)),
2409 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
2413 .errstr
= "R3 invalid mem access 'inv'",
2414 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2417 "raw_stack: skb_load_bytes, spilled regs + data",
2419 BPF_MOV64_IMM(BPF_REG_2
, 4),
2420 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2421 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2422 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2423 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2424 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2425 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2426 BPF_MOV64_IMM(BPF_REG_4
, 8),
2427 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2428 BPF_FUNC_skb_load_bytes
),
2429 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2430 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2431 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0),
2432 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2433 offsetof(struct __sk_buff
, mark
)),
2434 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2435 offsetof(struct __sk_buff
, priority
)),
2436 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2437 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
2441 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2444 "raw_stack: skb_load_bytes, invalid access 1",
2446 BPF_MOV64_IMM(BPF_REG_2
, 4),
2447 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2448 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -513),
2449 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2450 BPF_MOV64_IMM(BPF_REG_4
, 8),
2451 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2452 BPF_FUNC_skb_load_bytes
),
2453 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2457 .errstr
= "invalid stack type R3 off=-513 access_size=8",
2458 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2461 "raw_stack: skb_load_bytes, invalid access 2",
2463 BPF_MOV64_IMM(BPF_REG_2
, 4),
2464 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2465 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
2466 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2467 BPF_MOV64_IMM(BPF_REG_4
, 8),
2468 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2469 BPF_FUNC_skb_load_bytes
),
2470 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2474 .errstr
= "invalid stack type R3 off=-1 access_size=8",
2475 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2478 "raw_stack: skb_load_bytes, invalid access 3",
2480 BPF_MOV64_IMM(BPF_REG_2
, 4),
2481 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2482 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 0xffffffff),
2483 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2484 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
2485 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2486 BPF_FUNC_skb_load_bytes
),
2487 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2491 .errstr
= "R4 min value is negative",
2492 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2495 "raw_stack: skb_load_bytes, invalid access 4",
2497 BPF_MOV64_IMM(BPF_REG_2
, 4),
2498 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2499 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
2500 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2501 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
2502 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2503 BPF_FUNC_skb_load_bytes
),
2504 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2508 .errstr
= "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2509 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2512 "raw_stack: skb_load_bytes, invalid access 5",
2514 BPF_MOV64_IMM(BPF_REG_2
, 4),
2515 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2516 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2517 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2518 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
2519 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2520 BPF_FUNC_skb_load_bytes
),
2521 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2525 .errstr
= "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2526 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2529 "raw_stack: skb_load_bytes, invalid access 6",
2531 BPF_MOV64_IMM(BPF_REG_2
, 4),
2532 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2533 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2534 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2535 BPF_MOV64_IMM(BPF_REG_4
, 0),
2536 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2537 BPF_FUNC_skb_load_bytes
),
2538 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2542 .errstr
= "invalid stack type R3 off=-512 access_size=0",
2543 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2546 "raw_stack: skb_load_bytes, large access",
2548 BPF_MOV64_IMM(BPF_REG_2
, 4),
2549 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2550 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2551 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2552 BPF_MOV64_IMM(BPF_REG_4
, 512),
2553 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2554 BPF_FUNC_skb_load_bytes
),
2555 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2559 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2562 "direct packet access: test1",
2564 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2565 offsetof(struct __sk_buff
, data
)),
2566 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2567 offsetof(struct __sk_buff
, data_end
)),
2568 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2569 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2570 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2571 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2572 BPF_MOV64_IMM(BPF_REG_0
, 0),
2576 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2579 "direct packet access: test2",
2581 BPF_MOV64_IMM(BPF_REG_0
, 1),
2582 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
2583 offsetof(struct __sk_buff
, data_end
)),
2584 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2585 offsetof(struct __sk_buff
, data
)),
2586 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2587 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 14),
2588 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_4
, 15),
2589 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_3
, 7),
2590 BPF_LDX_MEM(BPF_B
, BPF_REG_4
, BPF_REG_3
, 12),
2591 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_4
, 14),
2592 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2593 offsetof(struct __sk_buff
, data
)),
2594 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_4
),
2595 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_1
),
2596 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_2
, 49),
2597 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 49),
2598 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_2
),
2599 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_3
),
2600 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
2601 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
2602 offsetof(struct __sk_buff
, data_end
)),
2603 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
2604 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_3
, 4),
2605 BPF_MOV64_IMM(BPF_REG_0
, 0),
2609 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2612 "direct packet access: test3",
2614 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2615 offsetof(struct __sk_buff
, data
)),
2616 BPF_MOV64_IMM(BPF_REG_0
, 0),
2619 .errstr
= "invalid bpf_context access off=76",
2621 .prog_type
= BPF_PROG_TYPE_SOCKET_FILTER
,
2624 "direct packet access: test4 (write)",
2626 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2627 offsetof(struct __sk_buff
, data
)),
2628 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2629 offsetof(struct __sk_buff
, data_end
)),
2630 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2631 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2632 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2633 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2634 BPF_MOV64_IMM(BPF_REG_0
, 0),
2638 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2641 "direct packet access: test5 (pkt_end >= reg, good access)",
2643 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2644 offsetof(struct __sk_buff
, data
)),
2645 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2646 offsetof(struct __sk_buff
, data_end
)),
2647 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2648 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2649 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 2),
2650 BPF_MOV64_IMM(BPF_REG_0
, 1),
2652 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2653 BPF_MOV64_IMM(BPF_REG_0
, 0),
2657 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2660 "direct packet access: test6 (pkt_end >= reg, bad access)",
2662 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2663 offsetof(struct __sk_buff
, data
)),
2664 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2665 offsetof(struct __sk_buff
, data_end
)),
2666 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2667 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2668 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 3),
2669 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2670 BPF_MOV64_IMM(BPF_REG_0
, 1),
2672 BPF_MOV64_IMM(BPF_REG_0
, 0),
2675 .errstr
= "invalid access to packet",
2677 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2680 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2682 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2683 offsetof(struct __sk_buff
, data
)),
2684 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2685 offsetof(struct __sk_buff
, data_end
)),
2686 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2687 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2688 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 3),
2689 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2690 BPF_MOV64_IMM(BPF_REG_0
, 1),
2692 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2693 BPF_MOV64_IMM(BPF_REG_0
, 0),
2696 .errstr
= "invalid access to packet",
2698 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2701 "direct packet access: test8 (double test, variant 1)",
2703 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2704 offsetof(struct __sk_buff
, data
)),
2705 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2706 offsetof(struct __sk_buff
, data_end
)),
2707 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2708 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2709 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 4),
2710 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2711 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2712 BPF_MOV64_IMM(BPF_REG_0
, 1),
2714 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2715 BPF_MOV64_IMM(BPF_REG_0
, 0),
2719 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2722 "direct packet access: test9 (double test, variant 2)",
2724 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2725 offsetof(struct __sk_buff
, data
)),
2726 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2727 offsetof(struct __sk_buff
, data_end
)),
2728 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2729 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2730 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 2),
2731 BPF_MOV64_IMM(BPF_REG_0
, 1),
2733 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2734 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2735 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2736 BPF_MOV64_IMM(BPF_REG_0
, 0),
2740 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2743 "direct packet access: test10 (write invalid)",
2745 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2746 offsetof(struct __sk_buff
, data
)),
2747 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2748 offsetof(struct __sk_buff
, data_end
)),
2749 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2750 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2751 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
2752 BPF_MOV64_IMM(BPF_REG_0
, 0),
2754 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2755 BPF_MOV64_IMM(BPF_REG_0
, 0),
2758 .errstr
= "invalid access to packet",
2760 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2763 "direct packet access: test11 (shift, good access)",
2765 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2766 offsetof(struct __sk_buff
, data
)),
2767 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2768 offsetof(struct __sk_buff
, data_end
)),
2769 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2770 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2771 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
2772 BPF_MOV64_IMM(BPF_REG_3
, 144),
2773 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2774 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
2775 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_5
, 3),
2776 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2777 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2778 BPF_MOV64_IMM(BPF_REG_0
, 1),
2780 BPF_MOV64_IMM(BPF_REG_0
, 0),
2784 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2787 "direct packet access: test12 (and, good access)",
2789 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2790 offsetof(struct __sk_buff
, data
)),
2791 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2792 offsetof(struct __sk_buff
, data_end
)),
2793 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2794 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2795 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
2796 BPF_MOV64_IMM(BPF_REG_3
, 144),
2797 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2798 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
2799 BPF_ALU64_IMM(BPF_AND
, BPF_REG_5
, 15),
2800 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2801 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2802 BPF_MOV64_IMM(BPF_REG_0
, 1),
2804 BPF_MOV64_IMM(BPF_REG_0
, 0),
2808 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2811 "direct packet access: test13 (branches, good access)",
2813 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2814 offsetof(struct __sk_buff
, data
)),
2815 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2816 offsetof(struct __sk_buff
, data_end
)),
2817 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2818 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2819 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 13),
2820 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2821 offsetof(struct __sk_buff
, mark
)),
2822 BPF_MOV64_IMM(BPF_REG_4
, 1),
2823 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_4
, 2),
2824 BPF_MOV64_IMM(BPF_REG_3
, 14),
2825 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
2826 BPF_MOV64_IMM(BPF_REG_3
, 24),
2827 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2828 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
2829 BPF_ALU64_IMM(BPF_AND
, BPF_REG_5
, 15),
2830 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2831 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2832 BPF_MOV64_IMM(BPF_REG_0
, 1),
2834 BPF_MOV64_IMM(BPF_REG_0
, 0),
2838 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2841 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2843 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2844 offsetof(struct __sk_buff
, data
)),
2845 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2846 offsetof(struct __sk_buff
, data_end
)),
2847 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2848 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2849 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 7),
2850 BPF_MOV64_IMM(BPF_REG_5
, 12),
2851 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_5
, 4),
2852 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2853 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2854 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_6
, 0),
2855 BPF_MOV64_IMM(BPF_REG_0
, 1),
2857 BPF_MOV64_IMM(BPF_REG_0
, 0),
2861 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2864 "direct packet access: test15 (spill with xadd)",
2866 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2867 offsetof(struct __sk_buff
, data
)),
2868 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2869 offsetof(struct __sk_buff
, data_end
)),
2870 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2871 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2872 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
2873 BPF_MOV64_IMM(BPF_REG_5
, 4096),
2874 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
2875 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
2876 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
2877 BPF_STX_XADD(BPF_DW
, BPF_REG_4
, BPF_REG_5
, 0),
2878 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_4
, 0),
2879 BPF_STX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_5
, 0),
2880 BPF_MOV64_IMM(BPF_REG_0
, 0),
2883 .errstr
= "R2 invalid mem access 'inv'",
2885 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2888 "direct packet access: test16 (arith on data_end)",
2890 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2891 offsetof(struct __sk_buff
, data
)),
2892 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2893 offsetof(struct __sk_buff
, data_end
)),
2894 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2895 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2896 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 16),
2897 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2898 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2899 BPF_MOV64_IMM(BPF_REG_0
, 0),
2902 .errstr
= "invalid access to packet",
2904 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2907 "direct packet access: test17 (pruning, alignment)",
2909 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2910 offsetof(struct __sk_buff
, data
)),
2911 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2912 offsetof(struct __sk_buff
, data_end
)),
2913 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
2914 offsetof(struct __sk_buff
, mark
)),
2915 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2916 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 14),
2917 BPF_JMP_IMM(BPF_JGT
, BPF_REG_7
, 1, 4),
2918 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2919 BPF_STX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
, -4),
2920 BPF_MOV64_IMM(BPF_REG_0
, 0),
2922 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 1),
2925 .errstr
= "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
2927 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2928 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
2931 "direct packet access: test18 (imm += pkt_ptr, 1)",
2933 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2934 offsetof(struct __sk_buff
, data
)),
2935 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2936 offsetof(struct __sk_buff
, data_end
)),
2937 BPF_MOV64_IMM(BPF_REG_0
, 8),
2938 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2939 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2940 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2941 BPF_MOV64_IMM(BPF_REG_0
, 0),
2945 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2948 "direct packet access: test19 (imm += pkt_ptr, 2)",
2950 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2951 offsetof(struct __sk_buff
, data
)),
2952 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2953 offsetof(struct __sk_buff
, data_end
)),
2954 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2955 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2956 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 3),
2957 BPF_MOV64_IMM(BPF_REG_4
, 4),
2958 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
2959 BPF_STX_MEM(BPF_B
, BPF_REG_4
, BPF_REG_4
, 0),
2960 BPF_MOV64_IMM(BPF_REG_0
, 0),
2964 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2967 "direct packet access: test20 (x += pkt_ptr, 1)",
2969 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2970 offsetof(struct __sk_buff
, data
)),
2971 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2972 offsetof(struct __sk_buff
, data_end
)),
2973 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
2974 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
2975 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
2976 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0x7fff),
2977 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
2978 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
2979 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
2980 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0x7fff - 1),
2981 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
2982 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_4
, 0),
2983 BPF_MOV64_IMM(BPF_REG_0
, 0),
2986 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2990 "direct packet access: test21 (x += pkt_ptr, 2)",
2992 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2993 offsetof(struct __sk_buff
, data
)),
2994 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2995 offsetof(struct __sk_buff
, data_end
)),
2996 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2997 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2998 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 9),
2999 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
3000 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_4
, -8),
3001 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
3002 BPF_ALU64_IMM(BPF_AND
, BPF_REG_4
, 0x7fff),
3003 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3004 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3005 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0x7fff - 1),
3006 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
3007 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_4
, 0),
3008 BPF_MOV64_IMM(BPF_REG_0
, 0),
3011 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3015 "direct packet access: test22 (x += pkt_ptr, 3)",
3017 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3018 offsetof(struct __sk_buff
, data
)),
3019 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3020 offsetof(struct __sk_buff
, data_end
)),
3021 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3022 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3023 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -8),
3024 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_3
, -16),
3025 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_10
, -16),
3026 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 11),
3027 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
3028 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
3029 BPF_STX_XADD(BPF_DW
, BPF_REG_10
, BPF_REG_4
, -8),
3030 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
3031 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_4
, 49),
3032 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3033 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
3034 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
3035 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
3036 BPF_MOV64_IMM(BPF_REG_2
, 1),
3037 BPF_STX_MEM(BPF_H
, BPF_REG_4
, BPF_REG_2
, 0),
3038 BPF_MOV64_IMM(BPF_REG_0
, 0),
3041 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3045 "direct packet access: test23 (x += pkt_ptr, 4)",
3047 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3048 offsetof(struct __sk_buff
, data
)),
3049 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3050 offsetof(struct __sk_buff
, data_end
)),
3051 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
3052 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
3053 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
3054 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0xffff),
3055 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3056 BPF_MOV64_IMM(BPF_REG_0
, 31),
3057 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_4
),
3058 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
3059 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_0
),
3060 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0xffff - 1),
3061 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3062 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_0
, 0),
3063 BPF_MOV64_IMM(BPF_REG_0
, 0),
3066 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3068 .errstr
= "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3071 "direct packet access: test24 (x += pkt_ptr, 5)",
3073 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3074 offsetof(struct __sk_buff
, data
)),
3075 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3076 offsetof(struct __sk_buff
, data_end
)),
3077 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
3078 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
3079 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
3080 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0xff),
3081 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3082 BPF_MOV64_IMM(BPF_REG_0
, 64),
3083 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_4
),
3084 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
3085 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_0
),
3086 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x7fff - 1),
3087 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3088 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_0
, 0),
3089 BPF_MOV64_IMM(BPF_REG_0
, 0),
3092 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3096 "direct packet access: test25 (marking on <, good access)",
3098 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3099 offsetof(struct __sk_buff
, data
)),
3100 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3101 offsetof(struct __sk_buff
, data_end
)),
3102 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3103 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3104 BPF_JMP_REG(BPF_JLT
, BPF_REG_0
, BPF_REG_3
, 2),
3105 BPF_MOV64_IMM(BPF_REG_0
, 0),
3107 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3108 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
3111 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3114 "direct packet access: test26 (marking on <, bad access)",
3116 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3117 offsetof(struct __sk_buff
, data
)),
3118 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3119 offsetof(struct __sk_buff
, data_end
)),
3120 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3121 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3122 BPF_JMP_REG(BPF_JLT
, BPF_REG_0
, BPF_REG_3
, 3),
3123 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3124 BPF_MOV64_IMM(BPF_REG_0
, 0),
3126 BPF_JMP_IMM(BPF_JA
, 0, 0, -3),
3129 .errstr
= "invalid access to packet",
3130 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3133 "direct packet access: test27 (marking on <=, good access)",
3135 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3136 offsetof(struct __sk_buff
, data
)),
3137 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3138 offsetof(struct __sk_buff
, data_end
)),
3139 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3140 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3141 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_0
, 1),
3142 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3143 BPF_MOV64_IMM(BPF_REG_0
, 1),
3147 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3150 "direct packet access: test28 (marking on <=, bad access)",
3152 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3153 offsetof(struct __sk_buff
, data
)),
3154 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3155 offsetof(struct __sk_buff
, data_end
)),
3156 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3157 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3158 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_0
, 2),
3159 BPF_MOV64_IMM(BPF_REG_0
, 1),
3161 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3162 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
3165 .errstr
= "invalid access to packet",
3166 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3169 "helper access to packet: test1, valid packet_ptr range",
3171 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3172 offsetof(struct xdp_md
, data
)),
3173 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3174 offsetof(struct xdp_md
, data_end
)),
3175 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
3176 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
3177 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 5),
3178 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3179 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
3180 BPF_MOV64_IMM(BPF_REG_4
, 0),
3181 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3182 BPF_FUNC_map_update_elem
),
3183 BPF_MOV64_IMM(BPF_REG_0
, 0),
3186 .fixup_map1
= { 5 },
3187 .result_unpriv
= ACCEPT
,
3189 .prog_type
= BPF_PROG_TYPE_XDP
,
3192 "helper access to packet: test2, unchecked packet_ptr",
3194 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3195 offsetof(struct xdp_md
, data
)),
3196 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3197 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3198 BPF_FUNC_map_lookup_elem
),
3199 BPF_MOV64_IMM(BPF_REG_0
, 0),
3202 .fixup_map1
= { 1 },
3204 .errstr
= "invalid access to packet",
3205 .prog_type
= BPF_PROG_TYPE_XDP
,
3208 "helper access to packet: test3, variable add",
3210 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3211 offsetof(struct xdp_md
, data
)),
3212 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3213 offsetof(struct xdp_md
, data_end
)),
3214 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3215 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
3216 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 10),
3217 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_2
, 0),
3218 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3219 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_5
),
3220 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3221 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 8),
3222 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 4),
3223 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3224 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_4
),
3225 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3226 BPF_FUNC_map_lookup_elem
),
3227 BPF_MOV64_IMM(BPF_REG_0
, 0),
3230 .fixup_map1
= { 11 },
3232 .prog_type
= BPF_PROG_TYPE_XDP
,
3235 "helper access to packet: test4, packet_ptr with bad range",
3237 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3238 offsetof(struct xdp_md
, data
)),
3239 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3240 offsetof(struct xdp_md
, data_end
)),
3241 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3242 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 4),
3243 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 2),
3244 BPF_MOV64_IMM(BPF_REG_0
, 0),
3246 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3247 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3248 BPF_FUNC_map_lookup_elem
),
3249 BPF_MOV64_IMM(BPF_REG_0
, 0),
3252 .fixup_map1
= { 7 },
3254 .errstr
= "invalid access to packet",
3255 .prog_type
= BPF_PROG_TYPE_XDP
,
3258 "helper access to packet: test5, packet_ptr with too short range",
3260 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3261 offsetof(struct xdp_md
, data
)),
3262 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3263 offsetof(struct xdp_md
, data_end
)),
3264 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
3265 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3266 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 7),
3267 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 3),
3268 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3269 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3270 BPF_FUNC_map_lookup_elem
),
3271 BPF_MOV64_IMM(BPF_REG_0
, 0),
3274 .fixup_map1
= { 6 },
3276 .errstr
= "invalid access to packet",
3277 .prog_type
= BPF_PROG_TYPE_XDP
,
3280 "helper access to packet: test6, cls valid packet_ptr range",
3282 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3283 offsetof(struct __sk_buff
, data
)),
3284 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3285 offsetof(struct __sk_buff
, data_end
)),
3286 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
3287 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
3288 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 5),
3289 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3290 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
3291 BPF_MOV64_IMM(BPF_REG_4
, 0),
3292 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3293 BPF_FUNC_map_update_elem
),
3294 BPF_MOV64_IMM(BPF_REG_0
, 0),
3297 .fixup_map1
= { 5 },
3299 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3302 "helper access to packet: test7, cls unchecked packet_ptr",
3304 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3305 offsetof(struct __sk_buff
, data
)),
3306 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3307 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3308 BPF_FUNC_map_lookup_elem
),
3309 BPF_MOV64_IMM(BPF_REG_0
, 0),
3312 .fixup_map1
= { 1 },
3314 .errstr
= "invalid access to packet",
3315 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3318 "helper access to packet: test8, cls variable add",
3320 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3321 offsetof(struct __sk_buff
, data
)),
3322 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3323 offsetof(struct __sk_buff
, data_end
)),
3324 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3325 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
3326 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 10),
3327 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_2
, 0),
3328 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3329 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_5
),
3330 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3331 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 8),
3332 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 4),
3333 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3334 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_4
),
3335 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3336 BPF_FUNC_map_lookup_elem
),
3337 BPF_MOV64_IMM(BPF_REG_0
, 0),
3340 .fixup_map1
= { 11 },
3342 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3345 "helper access to packet: test9, cls packet_ptr with bad range",
3347 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3348 offsetof(struct __sk_buff
, data
)),
3349 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3350 offsetof(struct __sk_buff
, data_end
)),
3351 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3352 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 4),
3353 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 2),
3354 BPF_MOV64_IMM(BPF_REG_0
, 0),
3356 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3357 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3358 BPF_FUNC_map_lookup_elem
),
3359 BPF_MOV64_IMM(BPF_REG_0
, 0),
3362 .fixup_map1
= { 7 },
3364 .errstr
= "invalid access to packet",
3365 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3368 "helper access to packet: test10, cls packet_ptr with too short range",
3370 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3371 offsetof(struct __sk_buff
, data
)),
3372 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3373 offsetof(struct __sk_buff
, data_end
)),
3374 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
3375 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3376 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 7),
3377 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 3),
3378 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3379 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3380 BPF_FUNC_map_lookup_elem
),
3381 BPF_MOV64_IMM(BPF_REG_0
, 0),
3384 .fixup_map1
= { 6 },
3386 .errstr
= "invalid access to packet",
3387 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3390 "helper access to packet: test11, cls unsuitable helper 1",
3392 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3393 offsetof(struct __sk_buff
, data
)),
3394 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3395 offsetof(struct __sk_buff
, data_end
)),
3396 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3397 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
3398 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 7),
3399 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_7
, 4),
3400 BPF_MOV64_IMM(BPF_REG_2
, 0),
3401 BPF_MOV64_IMM(BPF_REG_4
, 42),
3402 BPF_MOV64_IMM(BPF_REG_5
, 0),
3403 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3404 BPF_FUNC_skb_store_bytes
),
3405 BPF_MOV64_IMM(BPF_REG_0
, 0),
3409 .errstr
= "helper access to the packet",
3410 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3413 "helper access to packet: test12, cls unsuitable helper 2",
3415 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3416 offsetof(struct __sk_buff
, data
)),
3417 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3418 offsetof(struct __sk_buff
, data_end
)),
3419 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
3420 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 8),
3421 BPF_JMP_REG(BPF_JGT
, BPF_REG_6
, BPF_REG_7
, 3),
3422 BPF_MOV64_IMM(BPF_REG_2
, 0),
3423 BPF_MOV64_IMM(BPF_REG_4
, 4),
3424 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3425 BPF_FUNC_skb_load_bytes
),
3426 BPF_MOV64_IMM(BPF_REG_0
, 0),
3430 .errstr
= "helper access to the packet",
3431 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3434 "helper access to packet: test13, cls helper ok",
3436 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3437 offsetof(struct __sk_buff
, data
)),
3438 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3439 offsetof(struct __sk_buff
, data_end
)),
3440 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3441 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3442 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3443 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3444 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3445 BPF_MOV64_IMM(BPF_REG_2
, 4),
3446 BPF_MOV64_IMM(BPF_REG_3
, 0),
3447 BPF_MOV64_IMM(BPF_REG_4
, 0),
3448 BPF_MOV64_IMM(BPF_REG_5
, 0),
3449 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3450 BPF_FUNC_csum_diff
),
3451 BPF_MOV64_IMM(BPF_REG_0
, 0),
3455 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3458 "helper access to packet: test14, cls helper ok sub",
3460 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3461 offsetof(struct __sk_buff
, data
)),
3462 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3463 offsetof(struct __sk_buff
, data_end
)),
3464 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3465 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3466 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3467 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3468 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 4),
3469 BPF_MOV64_IMM(BPF_REG_2
, 4),
3470 BPF_MOV64_IMM(BPF_REG_3
, 0),
3471 BPF_MOV64_IMM(BPF_REG_4
, 0),
3472 BPF_MOV64_IMM(BPF_REG_5
, 0),
3473 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3474 BPF_FUNC_csum_diff
),
3475 BPF_MOV64_IMM(BPF_REG_0
, 0),
3479 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3482 "helper access to packet: test15, cls helper fail sub",
3484 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3485 offsetof(struct __sk_buff
, data
)),
3486 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3487 offsetof(struct __sk_buff
, data_end
)),
3488 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3489 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3490 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3491 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3492 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 12),
3493 BPF_MOV64_IMM(BPF_REG_2
, 4),
3494 BPF_MOV64_IMM(BPF_REG_3
, 0),
3495 BPF_MOV64_IMM(BPF_REG_4
, 0),
3496 BPF_MOV64_IMM(BPF_REG_5
, 0),
3497 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3498 BPF_FUNC_csum_diff
),
3499 BPF_MOV64_IMM(BPF_REG_0
, 0),
3503 .errstr
= "invalid access to packet",
3504 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3507 "helper access to packet: test16, cls helper fail range 1",
3509 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3510 offsetof(struct __sk_buff
, data
)),
3511 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3512 offsetof(struct __sk_buff
, data_end
)),
3513 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3514 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3515 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3516 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3517 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3518 BPF_MOV64_IMM(BPF_REG_2
, 8),
3519 BPF_MOV64_IMM(BPF_REG_3
, 0),
3520 BPF_MOV64_IMM(BPF_REG_4
, 0),
3521 BPF_MOV64_IMM(BPF_REG_5
, 0),
3522 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3523 BPF_FUNC_csum_diff
),
3524 BPF_MOV64_IMM(BPF_REG_0
, 0),
3528 .errstr
= "invalid access to packet",
3529 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3532 "helper access to packet: test17, cls helper fail range 2",
3534 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3535 offsetof(struct __sk_buff
, data
)),
3536 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3537 offsetof(struct __sk_buff
, data_end
)),
3538 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3539 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3540 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3541 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3542 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3543 BPF_MOV64_IMM(BPF_REG_2
, -9),
3544 BPF_MOV64_IMM(BPF_REG_3
, 0),
3545 BPF_MOV64_IMM(BPF_REG_4
, 0),
3546 BPF_MOV64_IMM(BPF_REG_5
, 0),
3547 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3548 BPF_FUNC_csum_diff
),
3549 BPF_MOV64_IMM(BPF_REG_0
, 0),
3553 .errstr
= "R2 min value is negative",
3554 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3557 "helper access to packet: test18, cls helper fail range 3",
3559 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3560 offsetof(struct __sk_buff
, data
)),
3561 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3562 offsetof(struct __sk_buff
, data_end
)),
3563 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3564 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3565 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3566 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3567 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3568 BPF_MOV64_IMM(BPF_REG_2
, ~0),
3569 BPF_MOV64_IMM(BPF_REG_3
, 0),
3570 BPF_MOV64_IMM(BPF_REG_4
, 0),
3571 BPF_MOV64_IMM(BPF_REG_5
, 0),
3572 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3573 BPF_FUNC_csum_diff
),
3574 BPF_MOV64_IMM(BPF_REG_0
, 0),
3578 .errstr
= "R2 min value is negative",
3579 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3582 "helper access to packet: test19, cls helper fail range zero",
3584 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3585 offsetof(struct __sk_buff
, data
)),
3586 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3587 offsetof(struct __sk_buff
, data_end
)),
3588 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3589 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3590 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3591 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3592 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3593 BPF_MOV64_IMM(BPF_REG_2
, 0),
3594 BPF_MOV64_IMM(BPF_REG_3
, 0),
3595 BPF_MOV64_IMM(BPF_REG_4
, 0),
3596 BPF_MOV64_IMM(BPF_REG_5
, 0),
3597 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3598 BPF_FUNC_csum_diff
),
3599 BPF_MOV64_IMM(BPF_REG_0
, 0),
3603 .errstr
= "invalid access to packet",
3604 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3607 "helper access to packet: test20, pkt end as input",
3609 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3610 offsetof(struct __sk_buff
, data
)),
3611 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3612 offsetof(struct __sk_buff
, data_end
)),
3613 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3614 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3615 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3616 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3617 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_7
),
3618 BPF_MOV64_IMM(BPF_REG_2
, 4),
3619 BPF_MOV64_IMM(BPF_REG_3
, 0),
3620 BPF_MOV64_IMM(BPF_REG_4
, 0),
3621 BPF_MOV64_IMM(BPF_REG_5
, 0),
3622 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3623 BPF_FUNC_csum_diff
),
3624 BPF_MOV64_IMM(BPF_REG_0
, 0),
3628 .errstr
= "R1 type=pkt_end expected=fp",
3629 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3632 "helper access to packet: test21, wrong reg",
3634 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3635 offsetof(struct __sk_buff
, data
)),
3636 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3637 offsetof(struct __sk_buff
, data_end
)),
3638 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3639 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3640 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3641 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3642 BPF_MOV64_IMM(BPF_REG_2
, 4),
3643 BPF_MOV64_IMM(BPF_REG_3
, 0),
3644 BPF_MOV64_IMM(BPF_REG_4
, 0),
3645 BPF_MOV64_IMM(BPF_REG_5
, 0),
3646 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3647 BPF_FUNC_csum_diff
),
3648 BPF_MOV64_IMM(BPF_REG_0
, 0),
3652 .errstr
= "invalid access to packet",
3653 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3656 "valid map access into an array with a constant",
3658 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3659 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3660 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3661 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3662 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3663 BPF_FUNC_map_lookup_elem
),
3664 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3665 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3666 offsetof(struct test_val
, foo
)),
3669 .fixup_map2
= { 3 },
3670 .errstr_unpriv
= "R0 leaks addr",
3671 .result_unpriv
= REJECT
,
3675 "valid map access into an array with a register",
3677 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3678 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3679 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3680 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3681 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3682 BPF_FUNC_map_lookup_elem
),
3683 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
3684 BPF_MOV64_IMM(BPF_REG_1
, 4),
3685 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3686 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3687 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3688 offsetof(struct test_val
, foo
)),
3691 .fixup_map2
= { 3 },
3692 .errstr_unpriv
= "R0 leaks addr",
3693 .result_unpriv
= REJECT
,
3695 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3698 "valid map access into an array with a variable",
3700 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3701 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3702 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3703 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3704 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3705 BPF_FUNC_map_lookup_elem
),
3706 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
3707 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3708 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
, 3),
3709 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3710 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3711 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3712 offsetof(struct test_val
, foo
)),
3715 .fixup_map2
= { 3 },
3716 .errstr_unpriv
= "R0 leaks addr",
3717 .result_unpriv
= REJECT
,
3719 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3722 "valid map access into an array with a signed variable",
3724 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3725 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3726 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3727 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3728 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3729 BPF_FUNC_map_lookup_elem
),
3730 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
3731 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3732 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 0xffffffff, 1),
3733 BPF_MOV32_IMM(BPF_REG_1
, 0),
3734 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
3735 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
3736 BPF_MOV32_IMM(BPF_REG_1
, 0),
3737 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
3738 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3739 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3740 offsetof(struct test_val
, foo
)),
3743 .fixup_map2
= { 3 },
3744 .errstr_unpriv
= "R0 leaks addr",
3745 .result_unpriv
= REJECT
,
3747 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3750 "invalid map access into an array with a constant",
3752 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3753 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3754 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3755 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3756 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3757 BPF_FUNC_map_lookup_elem
),
3758 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3759 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, (MAX_ENTRIES
+ 1) << 2,
3760 offsetof(struct test_val
, foo
)),
3763 .fixup_map2
= { 3 },
3764 .errstr
= "invalid access to map value, value_size=48 off=48 size=8",
3768 "invalid map access into an array with a register",
3770 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3771 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3772 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3773 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3774 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3775 BPF_FUNC_map_lookup_elem
),
3776 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
3777 BPF_MOV64_IMM(BPF_REG_1
, MAX_ENTRIES
+ 1),
3778 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3779 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3780 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3781 offsetof(struct test_val
, foo
)),
3784 .fixup_map2
= { 3 },
3785 .errstr
= "R0 min value is outside of the array range",
3787 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3790 "invalid map access into an array with a variable",
3792 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3793 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3794 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3795 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3796 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3797 BPF_FUNC_map_lookup_elem
),
3798 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
3799 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3800 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3801 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3802 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3803 offsetof(struct test_val
, foo
)),
3806 .fixup_map2
= { 3 },
3807 .errstr
= "R0 unbounded memory access, make sure to bounds check any array access into a map",
3809 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3812 "invalid map access into an array with no floor check",
3814 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3815 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3816 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3817 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3818 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3819 BPF_FUNC_map_lookup_elem
),
3820 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
3821 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
3822 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
3823 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
3824 BPF_MOV32_IMM(BPF_REG_1
, 0),
3825 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
3826 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3827 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3828 offsetof(struct test_val
, foo
)),
3831 .fixup_map2
= { 3 },
3832 .errstr_unpriv
= "R0 leaks addr",
3833 .errstr
= "R0 unbounded memory access",
3834 .result_unpriv
= REJECT
,
3836 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3839 "invalid map access into an array with a invalid max check",
3841 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3842 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3843 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3844 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3845 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3846 BPF_FUNC_map_lookup_elem
),
3847 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
3848 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3849 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
+ 1),
3850 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
3851 BPF_MOV32_IMM(BPF_REG_1
, 0),
3852 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
3853 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3854 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3855 offsetof(struct test_val
, foo
)),
3858 .fixup_map2
= { 3 },
3859 .errstr_unpriv
= "R0 leaks addr",
3860 .errstr
= "invalid access to map value, value_size=48 off=44 size=8",
3861 .result_unpriv
= REJECT
,
3863 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3866 "invalid map access into an array with a invalid max check",
3868 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3869 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3870 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3871 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3872 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3873 BPF_FUNC_map_lookup_elem
),
3874 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
3875 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
3876 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3877 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3878 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3879 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3880 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3881 BPF_FUNC_map_lookup_elem
),
3882 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
3883 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
3884 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
3885 offsetof(struct test_val
, foo
)),
3888 .fixup_map2
= { 3, 11 },
3889 .errstr_unpriv
= "R0 pointer += pointer",
3890 .errstr
= "R0 invalid mem access 'inv'",
3891 .result_unpriv
= REJECT
,
3893 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3896 "multiple registers share map_lookup_elem result",
3898 BPF_MOV64_IMM(BPF_REG_1
, 10),
3899 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3900 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3901 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3902 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3903 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3904 BPF_FUNC_map_lookup_elem
),
3905 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3906 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3907 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3910 .fixup_map1
= { 4 },
3912 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3915 "alu ops on ptr_to_map_value_or_null, 1",
3917 BPF_MOV64_IMM(BPF_REG_1
, 10),
3918 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3919 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3920 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3921 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3922 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3923 BPF_FUNC_map_lookup_elem
),
3924 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3925 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -2),
3926 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 2),
3927 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3928 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3931 .fixup_map1
= { 4 },
3932 .errstr
= "R4 invalid mem access",
3934 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3937 "alu ops on ptr_to_map_value_or_null, 2",
3939 BPF_MOV64_IMM(BPF_REG_1
, 10),
3940 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3941 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3942 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3943 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3944 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3945 BPF_FUNC_map_lookup_elem
),
3946 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3947 BPF_ALU64_IMM(BPF_AND
, BPF_REG_4
, -1),
3948 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3949 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3952 .fixup_map1
= { 4 },
3953 .errstr
= "R4 invalid mem access",
3955 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3958 "alu ops on ptr_to_map_value_or_null, 3",
3960 BPF_MOV64_IMM(BPF_REG_1
, 10),
3961 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3962 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3963 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3964 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3965 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3966 BPF_FUNC_map_lookup_elem
),
3967 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3968 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_4
, 1),
3969 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3970 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3973 .fixup_map1
= { 4 },
3974 .errstr
= "R4 invalid mem access",
3976 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3979 "invalid memory access with multiple map_lookup_elem calls",
3981 BPF_MOV64_IMM(BPF_REG_1
, 10),
3982 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3983 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3984 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3985 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3986 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
3987 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
3988 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3989 BPF_FUNC_map_lookup_elem
),
3990 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3991 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
3992 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
3993 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3994 BPF_FUNC_map_lookup_elem
),
3995 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3996 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3999 .fixup_map1
= { 4 },
4001 .errstr
= "R4 !read_ok",
4002 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4005 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4007 BPF_MOV64_IMM(BPF_REG_1
, 10),
4008 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4009 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4010 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4011 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4012 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
4013 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
4014 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4015 BPF_FUNC_map_lookup_elem
),
4016 BPF_MOV64_IMM(BPF_REG_2
, 10),
4017 BPF_JMP_IMM(BPF_JNE
, BPF_REG_2
, 0, 3),
4018 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
4019 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
4020 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4021 BPF_FUNC_map_lookup_elem
),
4022 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4023 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4024 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4027 .fixup_map1
= { 4 },
4029 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4032 "invalid map access from else condition",
4034 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4035 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4036 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4037 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4038 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
4039 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4040 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4041 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
-1, 1),
4042 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 1),
4043 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4044 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4045 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, offsetof(struct test_val
, foo
)),
4048 .fixup_map2
= { 3 },
4049 .errstr
= "R0 unbounded memory access",
4051 .errstr_unpriv
= "R0 leaks addr",
4052 .result_unpriv
= REJECT
,
4053 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4056 "constant register |= constant should keep constant type",
4058 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4059 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4060 BPF_MOV64_IMM(BPF_REG_2
, 34),
4061 BPF_ALU64_IMM(BPF_OR
, BPF_REG_2
, 13),
4062 BPF_MOV64_IMM(BPF_REG_3
, 0),
4063 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4067 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4070 "constant register |= constant should not bypass stack boundary checks",
4072 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4073 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4074 BPF_MOV64_IMM(BPF_REG_2
, 34),
4075 BPF_ALU64_IMM(BPF_OR
, BPF_REG_2
, 24),
4076 BPF_MOV64_IMM(BPF_REG_3
, 0),
4077 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4080 .errstr
= "invalid stack type R1 off=-48 access_size=58",
4082 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4085 "constant register |= constant register should keep constant type",
4087 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4088 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4089 BPF_MOV64_IMM(BPF_REG_2
, 34),
4090 BPF_MOV64_IMM(BPF_REG_4
, 13),
4091 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_4
),
4092 BPF_MOV64_IMM(BPF_REG_3
, 0),
4093 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4097 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4100 "constant register |= constant register should not bypass stack boundary checks",
4102 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4103 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4104 BPF_MOV64_IMM(BPF_REG_2
, 34),
4105 BPF_MOV64_IMM(BPF_REG_4
, 24),
4106 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_4
),
4107 BPF_MOV64_IMM(BPF_REG_3
, 0),
4108 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4111 .errstr
= "invalid stack type R1 off=-48 access_size=58",
4113 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4116 "invalid direct packet write for LWT_IN",
4118 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4119 offsetof(struct __sk_buff
, data
)),
4120 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4121 offsetof(struct __sk_buff
, data_end
)),
4122 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4123 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4124 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4125 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4126 BPF_MOV64_IMM(BPF_REG_0
, 0),
4129 .errstr
= "cannot write into packet",
4131 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
4134 "invalid direct packet write for LWT_OUT",
4136 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4137 offsetof(struct __sk_buff
, data
)),
4138 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4139 offsetof(struct __sk_buff
, data_end
)),
4140 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4141 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4142 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4143 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4144 BPF_MOV64_IMM(BPF_REG_0
, 0),
4147 .errstr
= "cannot write into packet",
4149 .prog_type
= BPF_PROG_TYPE_LWT_OUT
,
4152 "direct packet write for LWT_XMIT",
4154 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4155 offsetof(struct __sk_buff
, data
)),
4156 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4157 offsetof(struct __sk_buff
, data_end
)),
4158 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4159 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4160 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4161 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4162 BPF_MOV64_IMM(BPF_REG_0
, 0),
4166 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
4169 "direct packet read for LWT_IN",
4171 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4172 offsetof(struct __sk_buff
, data
)),
4173 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4174 offsetof(struct __sk_buff
, data_end
)),
4175 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4176 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4177 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4178 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4179 BPF_MOV64_IMM(BPF_REG_0
, 0),
4183 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
4186 "direct packet read for LWT_OUT",
4188 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4189 offsetof(struct __sk_buff
, data
)),
4190 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4191 offsetof(struct __sk_buff
, data_end
)),
4192 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4193 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4194 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4195 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4196 BPF_MOV64_IMM(BPF_REG_0
, 0),
4200 .prog_type
= BPF_PROG_TYPE_LWT_OUT
,
4203 "direct packet read for LWT_XMIT",
4205 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4206 offsetof(struct __sk_buff
, data
)),
4207 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4208 offsetof(struct __sk_buff
, data_end
)),
4209 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4210 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4211 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4212 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4213 BPF_MOV64_IMM(BPF_REG_0
, 0),
4217 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
4220 "overlapping checks for direct packet access",
4222 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4223 offsetof(struct __sk_buff
, data
)),
4224 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4225 offsetof(struct __sk_buff
, data_end
)),
4226 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4227 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4228 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 4),
4229 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
4230 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 6),
4231 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
4232 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_2
, 6),
4233 BPF_MOV64_IMM(BPF_REG_0
, 0),
4237 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
4240 "invalid access of tc_classid for LWT_IN",
4242 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4243 offsetof(struct __sk_buff
, tc_classid
)),
4247 .errstr
= "invalid bpf_context access",
4250 "invalid access of tc_classid for LWT_OUT",
4252 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4253 offsetof(struct __sk_buff
, tc_classid
)),
4257 .errstr
= "invalid bpf_context access",
4260 "invalid access of tc_classid for LWT_XMIT",
4262 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4263 offsetof(struct __sk_buff
, tc_classid
)),
4267 .errstr
= "invalid bpf_context access",
4270 "leak pointer into ctx 1",
4272 BPF_MOV64_IMM(BPF_REG_0
, 0),
4273 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
4274 offsetof(struct __sk_buff
, cb
[0])),
4275 BPF_LD_MAP_FD(BPF_REG_2
, 0),
4276 BPF_STX_XADD(BPF_DW
, BPF_REG_1
, BPF_REG_2
,
4277 offsetof(struct __sk_buff
, cb
[0])),
4280 .fixup_map1
= { 2 },
4281 .errstr_unpriv
= "R2 leaks addr into mem",
4282 .result_unpriv
= REJECT
,
4286 "leak pointer into ctx 2",
4288 BPF_MOV64_IMM(BPF_REG_0
, 0),
4289 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
4290 offsetof(struct __sk_buff
, cb
[0])),
4291 BPF_STX_XADD(BPF_DW
, BPF_REG_1
, BPF_REG_10
,
4292 offsetof(struct __sk_buff
, cb
[0])),
4295 .errstr_unpriv
= "R10 leaks addr into mem",
4296 .result_unpriv
= REJECT
,
4300 "leak pointer into ctx 3",
4302 BPF_MOV64_IMM(BPF_REG_0
, 0),
4303 BPF_LD_MAP_FD(BPF_REG_2
, 0),
4304 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
,
4305 offsetof(struct __sk_buff
, cb
[0])),
4308 .fixup_map1
= { 1 },
4309 .errstr_unpriv
= "R2 leaks addr into ctx",
4310 .result_unpriv
= REJECT
,
4314 "leak pointer into map val",
4316 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
4317 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4318 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4319 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4320 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4321 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4322 BPF_FUNC_map_lookup_elem
),
4323 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
4324 BPF_MOV64_IMM(BPF_REG_3
, 0),
4325 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_3
, 0),
4326 BPF_STX_XADD(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
4327 BPF_MOV64_IMM(BPF_REG_0
, 0),
4330 .fixup_map1
= { 4 },
4331 .errstr_unpriv
= "R6 leaks addr into mem",
4332 .result_unpriv
= REJECT
,
4336 "helper access to map: full range",
4338 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4339 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4340 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4341 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4342 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4343 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4344 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4345 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
4346 BPF_MOV64_IMM(BPF_REG_3
, 0),
4347 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4350 .fixup_map2
= { 3 },
4352 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4355 "helper access to map: partial range",
4357 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4358 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4359 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4360 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4361 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4362 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4363 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4364 BPF_MOV64_IMM(BPF_REG_2
, 8),
4365 BPF_MOV64_IMM(BPF_REG_3
, 0),
4366 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4369 .fixup_map2
= { 3 },
4371 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4374 "helper access to map: empty range",
4376 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4377 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4378 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4379 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4380 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4381 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4382 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4383 BPF_MOV64_IMM(BPF_REG_2
, 0),
4384 BPF_MOV64_IMM(BPF_REG_3
, 0),
4385 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4388 .fixup_map2
= { 3 },
4389 .errstr
= "invalid access to map value, value_size=48 off=0 size=0",
4391 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4394 "helper access to map: out-of-bound range",
4396 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4397 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4398 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4399 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4400 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4401 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4402 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4403 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
) + 8),
4404 BPF_MOV64_IMM(BPF_REG_3
, 0),
4405 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4408 .fixup_map2
= { 3 },
4409 .errstr
= "invalid access to map value, value_size=48 off=0 size=56",
4411 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4414 "helper access to map: negative range",
4416 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4417 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4418 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4419 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4420 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4421 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4422 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4423 BPF_MOV64_IMM(BPF_REG_2
, -8),
4424 BPF_MOV64_IMM(BPF_REG_3
, 0),
4425 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4428 .fixup_map2
= { 3 },
4429 .errstr
= "R2 min value is negative",
4431 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4434 "helper access to adjusted map (via const imm): full range",
4436 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4437 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4438 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4439 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4440 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4441 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4442 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4443 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4444 offsetof(struct test_val
, foo
)),
4445 BPF_MOV64_IMM(BPF_REG_2
,
4446 sizeof(struct test_val
) -
4447 offsetof(struct test_val
, foo
)),
4448 BPF_MOV64_IMM(BPF_REG_3
, 0),
4449 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4452 .fixup_map2
= { 3 },
4454 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4457 "helper access to adjusted map (via const imm): partial range",
4459 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4460 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4461 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4462 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4463 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4464 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4465 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4466 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4467 offsetof(struct test_val
, foo
)),
4468 BPF_MOV64_IMM(BPF_REG_2
, 8),
4469 BPF_MOV64_IMM(BPF_REG_3
, 0),
4470 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4473 .fixup_map2
= { 3 },
4475 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4478 "helper access to adjusted map (via const imm): empty range",
4480 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4481 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4482 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4483 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4484 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4485 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4486 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4487 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4488 offsetof(struct test_val
, foo
)),
4489 BPF_MOV64_IMM(BPF_REG_2
, 0),
4490 BPF_MOV64_IMM(BPF_REG_3
, 0),
4491 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4494 .fixup_map2
= { 3 },
4495 .errstr
= "invalid access to map value, value_size=48 off=4 size=0",
4497 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4500 "helper access to adjusted map (via const imm): out-of-bound range",
4502 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4503 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4504 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4505 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4506 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4507 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4508 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4509 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4510 offsetof(struct test_val
, foo
)),
4511 BPF_MOV64_IMM(BPF_REG_2
,
4512 sizeof(struct test_val
) -
4513 offsetof(struct test_val
, foo
) + 8),
4514 BPF_MOV64_IMM(BPF_REG_3
, 0),
4515 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4518 .fixup_map2
= { 3 },
4519 .errstr
= "invalid access to map value, value_size=48 off=4 size=52",
4521 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4524 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4526 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4527 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4528 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4529 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4530 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4531 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4532 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4533 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4534 offsetof(struct test_val
, foo
)),
4535 BPF_MOV64_IMM(BPF_REG_2
, -8),
4536 BPF_MOV64_IMM(BPF_REG_3
, 0),
4537 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4540 .fixup_map2
= { 3 },
4541 .errstr
= "R2 min value is negative",
4543 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4546 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4548 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4549 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4550 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4551 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4552 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4553 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4554 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4555 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4556 offsetof(struct test_val
, foo
)),
4557 BPF_MOV64_IMM(BPF_REG_2
, -1),
4558 BPF_MOV64_IMM(BPF_REG_3
, 0),
4559 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4562 .fixup_map2
= { 3 },
4563 .errstr
= "R2 min value is negative",
4565 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4568 "helper access to adjusted map (via const reg): full range",
4570 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4571 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4572 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4573 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4574 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4575 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4576 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4577 BPF_MOV64_IMM(BPF_REG_3
,
4578 offsetof(struct test_val
, foo
)),
4579 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4580 BPF_MOV64_IMM(BPF_REG_2
,
4581 sizeof(struct test_val
) -
4582 offsetof(struct test_val
, foo
)),
4583 BPF_MOV64_IMM(BPF_REG_3
, 0),
4584 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4587 .fixup_map2
= { 3 },
4589 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4592 "helper access to adjusted map (via const reg): partial range",
4594 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4595 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4596 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4597 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4598 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4599 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4600 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4601 BPF_MOV64_IMM(BPF_REG_3
,
4602 offsetof(struct test_val
, foo
)),
4603 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4604 BPF_MOV64_IMM(BPF_REG_2
, 8),
4605 BPF_MOV64_IMM(BPF_REG_3
, 0),
4606 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4609 .fixup_map2
= { 3 },
4611 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4614 "helper access to adjusted map (via const reg): empty range",
4616 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4617 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4618 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4619 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4620 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4621 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4622 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4623 BPF_MOV64_IMM(BPF_REG_3
, 0),
4624 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4625 BPF_MOV64_IMM(BPF_REG_2
, 0),
4626 BPF_MOV64_IMM(BPF_REG_3
, 0),
4627 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4630 .fixup_map2
= { 3 },
4631 .errstr
= "R1 min value is outside of the array range",
4633 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4636 "helper access to adjusted map (via const reg): out-of-bound range",
4638 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4639 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4640 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4641 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4642 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4643 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4644 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4645 BPF_MOV64_IMM(BPF_REG_3
,
4646 offsetof(struct test_val
, foo
)),
4647 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4648 BPF_MOV64_IMM(BPF_REG_2
,
4649 sizeof(struct test_val
) -
4650 offsetof(struct test_val
, foo
) + 8),
4651 BPF_MOV64_IMM(BPF_REG_3
, 0),
4652 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4655 .fixup_map2
= { 3 },
4656 .errstr
= "invalid access to map value, value_size=48 off=4 size=52",
4658 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4661 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4663 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4664 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4665 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4666 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4667 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4668 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4669 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4670 BPF_MOV64_IMM(BPF_REG_3
,
4671 offsetof(struct test_val
, foo
)),
4672 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4673 BPF_MOV64_IMM(BPF_REG_2
, -8),
4674 BPF_MOV64_IMM(BPF_REG_3
, 0),
4675 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4678 .fixup_map2
= { 3 },
4679 .errstr
= "R2 min value is negative",
4681 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4684 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4686 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4687 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4688 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4689 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4690 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4691 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4692 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4693 BPF_MOV64_IMM(BPF_REG_3
,
4694 offsetof(struct test_val
, foo
)),
4695 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4696 BPF_MOV64_IMM(BPF_REG_2
, -1),
4697 BPF_MOV64_IMM(BPF_REG_3
, 0),
4698 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4701 .fixup_map2
= { 3 },
4702 .errstr
= "R2 min value is negative",
4704 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4707 "helper access to adjusted map (via variable): full range",
4709 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4710 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4711 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4712 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4713 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4714 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4715 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4716 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4717 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4718 offsetof(struct test_val
, foo
), 4),
4719 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4720 BPF_MOV64_IMM(BPF_REG_2
,
4721 sizeof(struct test_val
) -
4722 offsetof(struct test_val
, foo
)),
4723 BPF_MOV64_IMM(BPF_REG_3
, 0),
4724 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4727 .fixup_map2
= { 3 },
4729 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4732 "helper access to adjusted map (via variable): partial range",
4734 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4735 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4736 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4737 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4738 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4739 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4740 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4741 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4742 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4743 offsetof(struct test_val
, foo
), 4),
4744 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4745 BPF_MOV64_IMM(BPF_REG_2
, 8),
4746 BPF_MOV64_IMM(BPF_REG_3
, 0),
4747 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4750 .fixup_map2
= { 3 },
4752 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4755 "helper access to adjusted map (via variable): empty range",
4757 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4758 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4759 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4760 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4761 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4762 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4763 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4764 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4765 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4766 offsetof(struct test_val
, foo
), 4),
4767 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4768 BPF_MOV64_IMM(BPF_REG_2
, 0),
4769 BPF_MOV64_IMM(BPF_REG_3
, 0),
4770 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4773 .fixup_map2
= { 3 },
4774 .errstr
= "R1 min value is outside of the array range",
4776 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4779 "helper access to adjusted map (via variable): no max check",
4781 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4782 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4783 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4784 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4785 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4786 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4787 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4788 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4789 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4790 BPF_MOV64_IMM(BPF_REG_2
, 1),
4791 BPF_MOV64_IMM(BPF_REG_3
, 0),
4792 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4795 .fixup_map2
= { 3 },
4796 .errstr
= "R1 unbounded memory access",
4798 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4801 "helper access to adjusted map (via variable): wrong max check",
4803 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4804 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4805 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4806 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4807 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4808 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4809 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4810 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4811 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4812 offsetof(struct test_val
, foo
), 4),
4813 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4814 BPF_MOV64_IMM(BPF_REG_2
,
4815 sizeof(struct test_val
) -
4816 offsetof(struct test_val
, foo
) + 1),
4817 BPF_MOV64_IMM(BPF_REG_3
, 0),
4818 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4821 .fixup_map2
= { 3 },
4822 .errstr
= "invalid access to map value, value_size=48 off=4 size=45",
4824 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4827 "helper access to map: bounds check using <, good access",
4829 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4830 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4831 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4832 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4833 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4834 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4835 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4836 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4837 BPF_JMP_IMM(BPF_JLT
, BPF_REG_3
, 32, 2),
4838 BPF_MOV64_IMM(BPF_REG_0
, 0),
4840 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4841 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4842 BPF_MOV64_IMM(BPF_REG_0
, 0),
4845 .fixup_map2
= { 3 },
4847 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4850 "helper access to map: bounds check using <, bad access",
4852 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4853 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4854 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4855 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4856 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4857 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4858 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4859 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4860 BPF_JMP_IMM(BPF_JLT
, BPF_REG_3
, 32, 4),
4861 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4862 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4863 BPF_MOV64_IMM(BPF_REG_0
, 0),
4865 BPF_MOV64_IMM(BPF_REG_0
, 0),
4868 .fixup_map2
= { 3 },
4870 .errstr
= "R1 unbounded memory access",
4871 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4874 "helper access to map: bounds check using <=, good access",
4876 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4877 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4878 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4879 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4880 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4881 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4882 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4883 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4884 BPF_JMP_IMM(BPF_JLE
, BPF_REG_3
, 32, 2),
4885 BPF_MOV64_IMM(BPF_REG_0
, 0),
4887 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4888 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4889 BPF_MOV64_IMM(BPF_REG_0
, 0),
4892 .fixup_map2
= { 3 },
4894 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4897 "helper access to map: bounds check using <=, bad access",
4899 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4900 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4901 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4902 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4903 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4904 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4905 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4906 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4907 BPF_JMP_IMM(BPF_JLE
, BPF_REG_3
, 32, 4),
4908 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4909 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4910 BPF_MOV64_IMM(BPF_REG_0
, 0),
4912 BPF_MOV64_IMM(BPF_REG_0
, 0),
4915 .fixup_map2
= { 3 },
4917 .errstr
= "R1 unbounded memory access",
4918 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4921 "helper access to map: bounds check using s<, good access",
4923 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4924 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4925 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4926 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4927 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4928 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4929 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4930 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4931 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
4932 BPF_MOV64_IMM(BPF_REG_0
, 0),
4934 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 0, -3),
4935 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4936 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4937 BPF_MOV64_IMM(BPF_REG_0
, 0),
4940 .fixup_map2
= { 3 },
4942 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4945 "helper access to map: bounds check using s<, good access 2",
4947 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4948 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4949 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4950 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4951 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4952 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4953 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4954 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4955 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
4956 BPF_MOV64_IMM(BPF_REG_0
, 0),
4958 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, -3, -3),
4959 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4960 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4961 BPF_MOV64_IMM(BPF_REG_0
, 0),
4964 .fixup_map2
= { 3 },
4966 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4969 "helper access to map: bounds check using s<, bad access",
4971 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4972 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4973 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4974 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4975 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4976 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4977 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4978 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_0
, 0),
4979 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
4980 BPF_MOV64_IMM(BPF_REG_0
, 0),
4982 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, -3, -3),
4983 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4984 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4985 BPF_MOV64_IMM(BPF_REG_0
, 0),
4988 .fixup_map2
= { 3 },
4990 .errstr
= "R1 min value is negative",
4991 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4994 "helper access to map: bounds check using s<=, good access",
4996 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4997 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4998 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4999 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5000 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5001 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5002 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5003 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5004 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5005 BPF_MOV64_IMM(BPF_REG_0
, 0),
5007 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 0, -3),
5008 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5009 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5010 BPF_MOV64_IMM(BPF_REG_0
, 0),
5013 .fixup_map2
= { 3 },
5015 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5018 "helper access to map: bounds check using s<=, good access 2",
5020 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5021 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5022 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5023 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5024 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5025 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5026 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5027 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5028 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5029 BPF_MOV64_IMM(BPF_REG_0
, 0),
5031 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, -3, -3),
5032 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5033 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5034 BPF_MOV64_IMM(BPF_REG_0
, 0),
5037 .fixup_map2
= { 3 },
5039 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5042 "helper access to map: bounds check using s<=, bad access",
5044 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5045 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5046 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5047 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5048 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5049 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5050 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5051 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_0
, 0),
5052 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5053 BPF_MOV64_IMM(BPF_REG_0
, 0),
5055 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, -3, -3),
5056 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5057 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5058 BPF_MOV64_IMM(BPF_REG_0
, 0),
5061 .fixup_map2
= { 3 },
5063 .errstr
= "R1 min value is negative",
5064 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5067 "map element value is preserved across register spilling",
5069 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5070 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5071 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5072 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5073 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5074 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5075 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5076 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5077 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -184),
5078 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5079 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5080 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5083 .fixup_map2
= { 3 },
5084 .errstr_unpriv
= "R0 leaks addr",
5086 .result_unpriv
= REJECT
,
5089 "map element value or null is marked on register spilling",
5091 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5092 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5093 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5094 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5095 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5096 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5097 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -152),
5098 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5099 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5100 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5101 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5104 .fixup_map2
= { 3 },
5105 .errstr_unpriv
= "R0 leaks addr",
5107 .result_unpriv
= REJECT
,
5110 "map element value store of cleared call register",
5112 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5113 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5114 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5115 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5116 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5117 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
5118 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 0),
5121 .fixup_map2
= { 3 },
5122 .errstr_unpriv
= "R1 !read_ok",
5123 .errstr
= "R1 !read_ok",
5125 .result_unpriv
= REJECT
,
5128 "map element value with unaligned store",
5130 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5131 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5132 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5133 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5134 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5135 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 17),
5136 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 3),
5137 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5138 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 2, 43),
5139 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, -2, 44),
5140 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
5141 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 0, 32),
5142 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 2, 33),
5143 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, -2, 34),
5144 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_8
, 5),
5145 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 0, 22),
5146 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 4, 23),
5147 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, -7, 24),
5148 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_8
),
5149 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, 3),
5150 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, 0, 22),
5151 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, 4, 23),
5152 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, -4, 24),
5155 .fixup_map2
= { 3 },
5156 .errstr_unpriv
= "R0 leaks addr",
5158 .result_unpriv
= REJECT
,
5159 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5162 "map element value with unaligned load",
5164 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5165 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5166 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5167 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5168 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5169 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5170 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
5171 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
, 9),
5172 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 3),
5173 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
5174 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 2),
5175 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
5176 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_8
, 0),
5177 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_8
, 2),
5178 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 5),
5179 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
5180 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 4),
5183 .fixup_map2
= { 3 },
5184 .errstr_unpriv
= "R0 leaks addr",
5186 .result_unpriv
= REJECT
,
5187 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5190 "map element value illegal alu op, 1",
5192 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5193 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5194 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5195 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5196 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5197 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5198 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 8),
5199 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5202 .fixup_map2
= { 3 },
5203 .errstr_unpriv
= "R0 bitwise operator &= on pointer",
5204 .errstr
= "invalid mem access 'inv'",
5206 .result_unpriv
= REJECT
,
5209 "map element value illegal alu op, 2",
5211 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5212 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5213 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5214 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5215 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5216 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5217 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_0
, 0),
5218 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5221 .fixup_map2
= { 3 },
5222 .errstr_unpriv
= "R0 32-bit pointer arithmetic prohibited",
5223 .errstr
= "invalid mem access 'inv'",
5225 .result_unpriv
= REJECT
,
5228 "map element value illegal alu op, 3",
5230 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5231 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5232 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5233 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5234 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5235 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5236 BPF_ALU64_IMM(BPF_DIV
, BPF_REG_0
, 42),
5237 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5240 .fixup_map2
= { 3 },
5241 .errstr_unpriv
= "R0 pointer arithmetic with /= operator",
5242 .errstr
= "invalid mem access 'inv'",
5244 .result_unpriv
= REJECT
,
5247 "map element value illegal alu op, 4",
5249 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5250 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5251 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5252 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5253 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5254 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5255 BPF_ENDIAN(BPF_FROM_BE
, BPF_REG_0
, 64),
5256 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5259 .fixup_map2
= { 3 },
5260 .errstr_unpriv
= "R0 pointer arithmetic prohibited",
5261 .errstr
= "invalid mem access 'inv'",
5263 .result_unpriv
= REJECT
,
5266 "map element value illegal alu op, 5",
5268 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5269 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5270 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5271 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5272 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5273 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5274 BPF_MOV64_IMM(BPF_REG_3
, 4096),
5275 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5276 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5277 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
5278 BPF_STX_XADD(BPF_DW
, BPF_REG_2
, BPF_REG_3
, 0),
5279 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, 0),
5280 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5283 .fixup_map2
= { 3 },
5284 .errstr
= "R0 invalid mem access 'inv'",
5288 "map element value is preserved across register spilling",
5290 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5291 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5292 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5293 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5294 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5295 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5296 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
,
5297 offsetof(struct test_val
, foo
)),
5298 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5299 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5300 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -184),
5301 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5302 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5303 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5306 .fixup_map2
= { 3 },
5307 .errstr_unpriv
= "R0 leaks addr",
5309 .result_unpriv
= REJECT
,
5310 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5313 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5315 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5316 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5317 BPF_MOV64_IMM(BPF_REG_0
, 0),
5318 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5319 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5320 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5321 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5322 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5323 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5324 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5325 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5326 BPF_MOV64_IMM(BPF_REG_2
, 16),
5327 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5328 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5329 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5330 BPF_MOV64_IMM(BPF_REG_4
, 0),
5331 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5332 BPF_MOV64_IMM(BPF_REG_3
, 0),
5333 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5334 BPF_MOV64_IMM(BPF_REG_0
, 0),
5338 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5341 "helper access to variable memory: stack, bitwise AND, zero included",
5343 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5344 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5345 BPF_MOV64_IMM(BPF_REG_2
, 16),
5346 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5347 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5348 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5349 BPF_MOV64_IMM(BPF_REG_3
, 0),
5350 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5353 .errstr
= "invalid stack type R1 off=-64 access_size=0",
5355 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5358 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5360 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5361 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5362 BPF_MOV64_IMM(BPF_REG_2
, 16),
5363 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5364 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5365 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 65),
5366 BPF_MOV64_IMM(BPF_REG_4
, 0),
5367 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5368 BPF_MOV64_IMM(BPF_REG_3
, 0),
5369 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5370 BPF_MOV64_IMM(BPF_REG_0
, 0),
5373 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5375 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5378 "helper access to variable memory: stack, JMP, correct bounds",
5380 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5381 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5382 BPF_MOV64_IMM(BPF_REG_0
, 0),
5383 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5384 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5385 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5386 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5387 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5388 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5389 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5390 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5391 BPF_MOV64_IMM(BPF_REG_2
, 16),
5392 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5393 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5394 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 4),
5395 BPF_MOV64_IMM(BPF_REG_4
, 0),
5396 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5397 BPF_MOV64_IMM(BPF_REG_3
, 0),
5398 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5399 BPF_MOV64_IMM(BPF_REG_0
, 0),
5403 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5406 "helper access to variable memory: stack, JMP (signed), correct bounds",
5408 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5409 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5410 BPF_MOV64_IMM(BPF_REG_0
, 0),
5411 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5412 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5413 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5414 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5415 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5416 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5417 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5418 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5419 BPF_MOV64_IMM(BPF_REG_2
, 16),
5420 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5421 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5422 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
, 64, 4),
5423 BPF_MOV64_IMM(BPF_REG_4
, 0),
5424 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5425 BPF_MOV64_IMM(BPF_REG_3
, 0),
5426 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5427 BPF_MOV64_IMM(BPF_REG_0
, 0),
5431 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5434 "helper access to variable memory: stack, JMP, bounds + offset",
5436 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5437 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5438 BPF_MOV64_IMM(BPF_REG_2
, 16),
5439 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5440 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5441 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 5),
5442 BPF_MOV64_IMM(BPF_REG_4
, 0),
5443 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 3),
5444 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
5445 BPF_MOV64_IMM(BPF_REG_3
, 0),
5446 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5447 BPF_MOV64_IMM(BPF_REG_0
, 0),
5450 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5452 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5455 "helper access to variable memory: stack, JMP, wrong max",
5457 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5458 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5459 BPF_MOV64_IMM(BPF_REG_2
, 16),
5460 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5461 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5462 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 65, 4),
5463 BPF_MOV64_IMM(BPF_REG_4
, 0),
5464 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5465 BPF_MOV64_IMM(BPF_REG_3
, 0),
5466 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5467 BPF_MOV64_IMM(BPF_REG_0
, 0),
5470 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5472 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5475 "helper access to variable memory: stack, JMP, no max check",
5477 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5478 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5479 BPF_MOV64_IMM(BPF_REG_2
, 16),
5480 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5481 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5482 BPF_MOV64_IMM(BPF_REG_4
, 0),
5483 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5484 BPF_MOV64_IMM(BPF_REG_3
, 0),
5485 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5486 BPF_MOV64_IMM(BPF_REG_0
, 0),
5489 /* because max wasn't checked, signed min is negative */
5490 .errstr
= "R2 min value is negative, either use unsigned or 'var &= const'",
5492 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5495 "helper access to variable memory: stack, JMP, no min check",
5497 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5498 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5499 BPF_MOV64_IMM(BPF_REG_2
, 16),
5500 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5501 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5502 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 3),
5503 BPF_MOV64_IMM(BPF_REG_3
, 0),
5504 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5505 BPF_MOV64_IMM(BPF_REG_0
, 0),
5508 .errstr
= "invalid stack type R1 off=-64 access_size=0",
5510 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5513 "helper access to variable memory: stack, JMP (signed), no min check",
5515 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5516 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5517 BPF_MOV64_IMM(BPF_REG_2
, 16),
5518 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5519 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5520 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
, 64, 3),
5521 BPF_MOV64_IMM(BPF_REG_3
, 0),
5522 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5523 BPF_MOV64_IMM(BPF_REG_0
, 0),
5526 .errstr
= "R2 min value is negative",
5528 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5531 "helper access to variable memory: map, JMP, correct bounds",
5533 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5534 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5535 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5536 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5537 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5538 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
5539 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5540 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5541 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5542 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5543 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5544 sizeof(struct test_val
), 4),
5545 BPF_MOV64_IMM(BPF_REG_4
, 0),
5546 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5547 BPF_MOV64_IMM(BPF_REG_3
, 0),
5548 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5549 BPF_MOV64_IMM(BPF_REG_0
, 0),
5552 .fixup_map2
= { 3 },
5554 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5557 "helper access to variable memory: map, JMP, wrong max",
5559 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5560 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5561 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5562 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5563 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5564 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
5565 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5566 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5567 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5568 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5569 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5570 sizeof(struct test_val
) + 1, 4),
5571 BPF_MOV64_IMM(BPF_REG_4
, 0),
5572 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5573 BPF_MOV64_IMM(BPF_REG_3
, 0),
5574 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5575 BPF_MOV64_IMM(BPF_REG_0
, 0),
5578 .fixup_map2
= { 3 },
5579 .errstr
= "invalid access to map value, value_size=48 off=0 size=49",
5581 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5584 "helper access to variable memory: map adjusted, JMP, correct bounds",
5586 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5587 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5588 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5589 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5590 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5591 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5592 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5593 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 20),
5594 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5595 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5596 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5597 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5598 sizeof(struct test_val
) - 20, 4),
5599 BPF_MOV64_IMM(BPF_REG_4
, 0),
5600 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5601 BPF_MOV64_IMM(BPF_REG_3
, 0),
5602 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5603 BPF_MOV64_IMM(BPF_REG_0
, 0),
5606 .fixup_map2
= { 3 },
5608 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5611 "helper access to variable memory: map adjusted, JMP, wrong max",
5613 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5614 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5615 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5616 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5617 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5618 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5619 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5620 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 20),
5621 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5622 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5623 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5624 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5625 sizeof(struct test_val
) - 19, 4),
5626 BPF_MOV64_IMM(BPF_REG_4
, 0),
5627 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5628 BPF_MOV64_IMM(BPF_REG_3
, 0),
5629 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5630 BPF_MOV64_IMM(BPF_REG_0
, 0),
5633 .fixup_map2
= { 3 },
5634 .errstr
= "R1 min value is outside of the array range",
5636 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5639 "helper access to variable memory: size = 0 allowed on NULL",
5641 BPF_MOV64_IMM(BPF_REG_1
, 0),
5642 BPF_MOV64_IMM(BPF_REG_2
, 0),
5643 BPF_MOV64_IMM(BPF_REG_3
, 0),
5644 BPF_MOV64_IMM(BPF_REG_4
, 0),
5645 BPF_MOV64_IMM(BPF_REG_5
, 0),
5646 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
5650 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
5653 "helper access to variable memory: size > 0 not allowed on NULL",
5655 BPF_MOV64_IMM(BPF_REG_1
, 0),
5656 BPF_MOV64_IMM(BPF_REG_2
, 0),
5657 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5658 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5659 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5660 BPF_MOV64_IMM(BPF_REG_3
, 0),
5661 BPF_MOV64_IMM(BPF_REG_4
, 0),
5662 BPF_MOV64_IMM(BPF_REG_5
, 0),
5663 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
5666 .errstr
= "R1 type=inv expected=fp",
5668 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
5671 "helper access to variable memory: size = 0 not allowed on != NULL",
5673 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5674 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
5675 BPF_MOV64_IMM(BPF_REG_2
, 0),
5676 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, 0),
5677 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 8),
5678 BPF_MOV64_IMM(BPF_REG_3
, 0),
5679 BPF_MOV64_IMM(BPF_REG_4
, 0),
5680 BPF_MOV64_IMM(BPF_REG_5
, 0),
5681 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
5684 .errstr
= "invalid stack type R1 off=-8 access_size=0",
5686 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
5689 "helper access to variable memory: 8 bytes leak",
5691 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5692 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5693 BPF_MOV64_IMM(BPF_REG_0
, 0),
5694 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5695 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5696 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5697 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5698 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5699 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5700 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5701 BPF_MOV64_IMM(BPF_REG_2
, 0),
5702 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5703 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5704 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 63),
5705 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
5706 BPF_MOV64_IMM(BPF_REG_3
, 0),
5707 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5708 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5711 .errstr
= "invalid indirect read from stack off -64+32 size 64",
5713 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5716 "helper access to variable memory: 8 bytes no leak (init memory)",
5718 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5719 BPF_MOV64_IMM(BPF_REG_0
, 0),
5720 BPF_MOV64_IMM(BPF_REG_0
, 0),
5721 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5722 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5723 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5724 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5725 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5726 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5727 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5728 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5729 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5730 BPF_MOV64_IMM(BPF_REG_2
, 0),
5731 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 32),
5732 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 32),
5733 BPF_MOV64_IMM(BPF_REG_3
, 0),
5734 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5735 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5739 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5742 "invalid and of negative number",
5744 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5745 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5746 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5747 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5748 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5749 BPF_FUNC_map_lookup_elem
),
5750 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5751 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
5752 BPF_ALU64_IMM(BPF_AND
, BPF_REG_1
, -4),
5753 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
5754 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
5755 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
5756 offsetof(struct test_val
, foo
)),
5759 .fixup_map2
= { 3 },
5760 .errstr
= "R0 max value is outside of the array range",
5762 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5765 "invalid range check",
5767 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5768 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5769 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5770 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5771 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5772 BPF_FUNC_map_lookup_elem
),
5773 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 12),
5774 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
5775 BPF_MOV64_IMM(BPF_REG_9
, 1),
5776 BPF_ALU32_IMM(BPF_MOD
, BPF_REG_1
, 2),
5777 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_1
, 1),
5778 BPF_ALU32_REG(BPF_AND
, BPF_REG_9
, BPF_REG_1
),
5779 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_9
, 1),
5780 BPF_ALU32_IMM(BPF_RSH
, BPF_REG_9
, 1),
5781 BPF_MOV32_IMM(BPF_REG_3
, 1),
5782 BPF_ALU32_REG(BPF_SUB
, BPF_REG_3
, BPF_REG_9
),
5783 BPF_ALU32_IMM(BPF_MUL
, BPF_REG_3
, 0x10000000),
5784 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
5785 BPF_STX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_3
, 0),
5786 BPF_MOV64_REG(BPF_REG_0
, 0),
5789 .fixup_map2
= { 3 },
5790 .errstr
= "R0 max value is outside of the array range",
5792 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5795 "map in map access",
5797 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5798 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5799 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5800 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5801 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5802 BPF_FUNC_map_lookup_elem
),
5803 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
5804 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5805 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5806 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5807 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5808 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5809 BPF_FUNC_map_lookup_elem
),
5810 BPF_MOV64_REG(BPF_REG_0
, 0),
5813 .fixup_map_in_map
= { 3 },
5817 "invalid inner map pointer",
5819 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5820 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5821 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5822 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5823 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5824 BPF_FUNC_map_lookup_elem
),
5825 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5826 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5827 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5828 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5829 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5830 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
5831 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5832 BPF_FUNC_map_lookup_elem
),
5833 BPF_MOV64_REG(BPF_REG_0
, 0),
5836 .fixup_map_in_map
= { 3 },
5837 .errstr
= "R1 type=inv expected=map_ptr",
5838 .errstr_unpriv
= "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5842 "forgot null checking on the inner map pointer",
5844 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5845 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5846 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5847 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5848 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5849 BPF_FUNC_map_lookup_elem
),
5850 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5851 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5852 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5853 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5854 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5855 BPF_FUNC_map_lookup_elem
),
5856 BPF_MOV64_REG(BPF_REG_0
, 0),
5859 .fixup_map_in_map
= { 3 },
5860 .errstr
= "R1 type=map_value_or_null expected=map_ptr",
5864 "ld_abs: check calling conv, r1",
5866 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5867 BPF_MOV64_IMM(BPF_REG_1
, 0),
5868 BPF_LD_ABS(BPF_W
, -0x200000),
5869 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
5872 .errstr
= "R1 !read_ok",
5876 "ld_abs: check calling conv, r2",
5878 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5879 BPF_MOV64_IMM(BPF_REG_2
, 0),
5880 BPF_LD_ABS(BPF_W
, -0x200000),
5881 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
5884 .errstr
= "R2 !read_ok",
5888 "ld_abs: check calling conv, r3",
5890 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5891 BPF_MOV64_IMM(BPF_REG_3
, 0),
5892 BPF_LD_ABS(BPF_W
, -0x200000),
5893 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
5896 .errstr
= "R3 !read_ok",
5900 "ld_abs: check calling conv, r4",
5902 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5903 BPF_MOV64_IMM(BPF_REG_4
, 0),
5904 BPF_LD_ABS(BPF_W
, -0x200000),
5905 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
5908 .errstr
= "R4 !read_ok",
5912 "ld_abs: check calling conv, r5",
5914 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5915 BPF_MOV64_IMM(BPF_REG_5
, 0),
5916 BPF_LD_ABS(BPF_W
, -0x200000),
5917 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
5920 .errstr
= "R5 !read_ok",
5924 "ld_abs: check calling conv, r7",
5926 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5927 BPF_MOV64_IMM(BPF_REG_7
, 0),
5928 BPF_LD_ABS(BPF_W
, -0x200000),
5929 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
5935 "ld_ind: check calling conv, r1",
5937 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5938 BPF_MOV64_IMM(BPF_REG_1
, 1),
5939 BPF_LD_IND(BPF_W
, BPF_REG_1
, -0x200000),
5940 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
5943 .errstr
= "R1 !read_ok",
5947 "ld_ind: check calling conv, r2",
5949 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5950 BPF_MOV64_IMM(BPF_REG_2
, 1),
5951 BPF_LD_IND(BPF_W
, BPF_REG_2
, -0x200000),
5952 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
5955 .errstr
= "R2 !read_ok",
5959 "ld_ind: check calling conv, r3",
5961 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5962 BPF_MOV64_IMM(BPF_REG_3
, 1),
5963 BPF_LD_IND(BPF_W
, BPF_REG_3
, -0x200000),
5964 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
5967 .errstr
= "R3 !read_ok",
5971 "ld_ind: check calling conv, r4",
5973 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5974 BPF_MOV64_IMM(BPF_REG_4
, 1),
5975 BPF_LD_IND(BPF_W
, BPF_REG_4
, -0x200000),
5976 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
5979 .errstr
= "R4 !read_ok",
5983 "ld_ind: check calling conv, r5",
5985 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5986 BPF_MOV64_IMM(BPF_REG_5
, 1),
5987 BPF_LD_IND(BPF_W
, BPF_REG_5
, -0x200000),
5988 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
5991 .errstr
= "R5 !read_ok",
5995 "ld_ind: check calling conv, r7",
5997 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5998 BPF_MOV64_IMM(BPF_REG_7
, 1),
5999 BPF_LD_IND(BPF_W
, BPF_REG_7
, -0x200000),
6000 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
6006 "check bpf_perf_event_data->sample_period byte load permitted",
6008 BPF_MOV64_IMM(BPF_REG_0
, 0),
6009 #if __BYTE_ORDER == __LITTLE_ENDIAN
6010 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
6011 offsetof(struct bpf_perf_event_data
, sample_period
)),
6013 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
6014 offsetof(struct bpf_perf_event_data
, sample_period
) + 7),
6019 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6022 "check bpf_perf_event_data->sample_period half load permitted",
6024 BPF_MOV64_IMM(BPF_REG_0
, 0),
6025 #if __BYTE_ORDER == __LITTLE_ENDIAN
6026 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6027 offsetof(struct bpf_perf_event_data
, sample_period
)),
6029 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6030 offsetof(struct bpf_perf_event_data
, sample_period
) + 6),
6035 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6038 "check bpf_perf_event_data->sample_period word load permitted",
6040 BPF_MOV64_IMM(BPF_REG_0
, 0),
6041 #if __BYTE_ORDER == __LITTLE_ENDIAN
6042 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
6043 offsetof(struct bpf_perf_event_data
, sample_period
)),
6045 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
6046 offsetof(struct bpf_perf_event_data
, sample_period
) + 4),
6051 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6054 "check bpf_perf_event_data->sample_period dword load permitted",
6056 BPF_MOV64_IMM(BPF_REG_0
, 0),
6057 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
6058 offsetof(struct bpf_perf_event_data
, sample_period
)),
6062 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6065 "check skb->data half load not permitted",
6067 BPF_MOV64_IMM(BPF_REG_0
, 0),
6068 #if __BYTE_ORDER == __LITTLE_ENDIAN
6069 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6070 offsetof(struct __sk_buff
, data
)),
6072 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6073 offsetof(struct __sk_buff
, data
) + 2),
6078 .errstr
= "invalid bpf_context access",
6081 "check skb->tc_classid half load not permitted for lwt prog",
6083 BPF_MOV64_IMM(BPF_REG_0
, 0),
6084 #if __BYTE_ORDER == __LITTLE_ENDIAN
6085 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6086 offsetof(struct __sk_buff
, tc_classid
)),
6088 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6089 offsetof(struct __sk_buff
, tc_classid
) + 2),
6094 .errstr
= "invalid bpf_context access",
6095 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
6098 "bounds checks mixing signed and unsigned, positive bounds",
6100 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6101 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6102 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6103 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6104 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6105 BPF_FUNC_map_lookup_elem
),
6106 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6107 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6108 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6109 BPF_MOV64_IMM(BPF_REG_2
, 2),
6110 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 3),
6111 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 4, 2),
6112 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6113 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6114 BPF_MOV64_IMM(BPF_REG_0
, 0),
6117 .fixup_map1
= { 3 },
6118 .errstr
= "R0 min value is negative",
6122 "bounds checks mixing signed and unsigned",
6124 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6125 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6126 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6127 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6128 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6129 BPF_FUNC_map_lookup_elem
),
6130 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6131 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6132 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6133 BPF_MOV64_IMM(BPF_REG_2
, -1),
6134 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 3),
6135 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6136 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6137 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6138 BPF_MOV64_IMM(BPF_REG_0
, 0),
6141 .fixup_map1
= { 3 },
6142 .errstr
= "R0 min value is negative",
6146 "bounds checks mixing signed and unsigned, variant 2",
6148 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6149 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6150 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6151 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6152 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6153 BPF_FUNC_map_lookup_elem
),
6154 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6155 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6156 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6157 BPF_MOV64_IMM(BPF_REG_2
, -1),
6158 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 5),
6159 BPF_MOV64_IMM(BPF_REG_8
, 0),
6160 BPF_ALU64_REG(BPF_ADD
, BPF_REG_8
, BPF_REG_1
),
6161 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_8
, 1, 2),
6162 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
6163 BPF_ST_MEM(BPF_B
, BPF_REG_8
, 0, 0),
6164 BPF_MOV64_IMM(BPF_REG_0
, 0),
6167 .fixup_map1
= { 3 },
6168 .errstr
= "R8 invalid mem access 'inv'",
6172 "bounds checks mixing signed and unsigned, variant 3",
6174 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6175 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6176 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6177 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6178 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6179 BPF_FUNC_map_lookup_elem
),
6180 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
6181 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6182 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6183 BPF_MOV64_IMM(BPF_REG_2
, -1),
6184 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 4),
6185 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
6186 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_8
, 1, 2),
6187 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
6188 BPF_ST_MEM(BPF_B
, BPF_REG_8
, 0, 0),
6189 BPF_MOV64_IMM(BPF_REG_0
, 0),
6192 .fixup_map1
= { 3 },
6193 .errstr
= "R8 invalid mem access 'inv'",
6197 "bounds checks mixing signed and unsigned, variant 4",
6199 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6200 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6201 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6202 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6203 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6204 BPF_FUNC_map_lookup_elem
),
6205 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6206 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6207 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6208 BPF_MOV64_IMM(BPF_REG_2
, 1),
6209 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
6210 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6211 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6212 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6213 BPF_MOV64_IMM(BPF_REG_0
, 0),
6216 .fixup_map1
= { 3 },
6220 "bounds checks mixing signed and unsigned, variant 5",
6222 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6223 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6224 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6225 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6226 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6227 BPF_FUNC_map_lookup_elem
),
6228 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6229 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6230 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6231 BPF_MOV64_IMM(BPF_REG_2
, -1),
6232 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 5),
6233 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 4),
6234 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 4),
6235 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
6236 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6237 BPF_MOV64_IMM(BPF_REG_0
, 0),
6240 .fixup_map1
= { 3 },
6241 .errstr
= "R0 min value is negative",
6245 "bounds checks mixing signed and unsigned, variant 6",
6247 BPF_MOV64_IMM(BPF_REG_2
, 0),
6248 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_10
),
6249 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, -512),
6250 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6251 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -16),
6252 BPF_MOV64_IMM(BPF_REG_6
, -1),
6253 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_6
, 5),
6254 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_4
, 1, 4),
6255 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 1),
6256 BPF_MOV64_IMM(BPF_REG_5
, 0),
6257 BPF_ST_MEM(BPF_H
, BPF_REG_10
, -512, 0),
6258 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6259 BPF_FUNC_skb_load_bytes
),
6260 BPF_MOV64_IMM(BPF_REG_0
, 0),
6263 .errstr
= "R4 min value is negative, either use unsigned",
6267 "bounds checks mixing signed and unsigned, variant 7",
6269 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6270 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6271 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6272 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6273 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6274 BPF_FUNC_map_lookup_elem
),
6275 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6276 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6277 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6278 BPF_MOV64_IMM(BPF_REG_2
, 1024 * 1024 * 1024),
6279 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 3),
6280 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6281 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6282 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6283 BPF_MOV64_IMM(BPF_REG_0
, 0),
6286 .fixup_map1
= { 3 },
6290 "bounds checks mixing signed and unsigned, variant 8",
6292 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6293 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6294 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6295 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6296 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6297 BPF_FUNC_map_lookup_elem
),
6298 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6299 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6300 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6301 BPF_MOV64_IMM(BPF_REG_2
, -1),
6302 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6303 BPF_MOV64_IMM(BPF_REG_0
, 0),
6305 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6306 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6307 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6308 BPF_MOV64_IMM(BPF_REG_0
, 0),
6311 .fixup_map1
= { 3 },
6312 .errstr
= "R0 min value is negative",
6316 "bounds checks mixing signed and unsigned, variant 9",
6318 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6319 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6320 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6321 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6322 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6323 BPF_FUNC_map_lookup_elem
),
6324 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
6325 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6326 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6327 BPF_LD_IMM64(BPF_REG_2
, -9223372036854775808ULL),
6328 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6329 BPF_MOV64_IMM(BPF_REG_0
, 0),
6331 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6332 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6333 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6334 BPF_MOV64_IMM(BPF_REG_0
, 0),
6337 .fixup_map1
= { 3 },
6341 "bounds checks mixing signed and unsigned, variant 10",
6343 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6344 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6345 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6346 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6347 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6348 BPF_FUNC_map_lookup_elem
),
6349 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6350 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6351 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6352 BPF_MOV64_IMM(BPF_REG_2
, 0),
6353 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6354 BPF_MOV64_IMM(BPF_REG_0
, 0),
6356 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6357 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6358 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6359 BPF_MOV64_IMM(BPF_REG_0
, 0),
6362 .fixup_map1
= { 3 },
6363 .errstr
= "R0 min value is negative",
6367 "bounds checks mixing signed and unsigned, variant 11",
6369 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6370 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6371 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6372 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6373 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6374 BPF_FUNC_map_lookup_elem
),
6375 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6376 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6377 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6378 BPF_MOV64_IMM(BPF_REG_2
, -1),
6379 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6381 BPF_MOV64_IMM(BPF_REG_0
, 0),
6383 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6384 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6385 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6386 BPF_MOV64_IMM(BPF_REG_0
, 0),
6389 .fixup_map1
= { 3 },
6390 .errstr
= "R0 min value is negative",
6394 "bounds checks mixing signed and unsigned, variant 12",
6396 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6397 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6398 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6399 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6400 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6401 BPF_FUNC_map_lookup_elem
),
6402 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6403 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6404 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6405 BPF_MOV64_IMM(BPF_REG_2
, -6),
6406 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6407 BPF_MOV64_IMM(BPF_REG_0
, 0),
6409 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6410 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6411 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6412 BPF_MOV64_IMM(BPF_REG_0
, 0),
6415 .fixup_map1
= { 3 },
6416 .errstr
= "R0 min value is negative",
6420 "bounds checks mixing signed and unsigned, variant 13",
6422 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6423 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6424 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6425 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6426 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6427 BPF_FUNC_map_lookup_elem
),
6428 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6429 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6430 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6431 BPF_MOV64_IMM(BPF_REG_2
, 2),
6432 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6433 BPF_MOV64_IMM(BPF_REG_7
, 1),
6434 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_7
, 0, 2),
6435 BPF_MOV64_IMM(BPF_REG_0
, 0),
6437 BPF_ALU64_REG(BPF_ADD
, BPF_REG_7
, BPF_REG_1
),
6438 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_7
, 4, 2),
6439 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_7
),
6440 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6441 BPF_MOV64_IMM(BPF_REG_0
, 0),
6444 .fixup_map1
= { 3 },
6445 .errstr
= "R0 min value is negative",
6449 "bounds checks mixing signed and unsigned, variant 14",
6451 BPF_LDX_MEM(BPF_W
, BPF_REG_9
, BPF_REG_1
,
6452 offsetof(struct __sk_buff
, mark
)),
6453 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6454 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6455 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6456 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6457 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6458 BPF_FUNC_map_lookup_elem
),
6459 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
6460 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6461 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6462 BPF_MOV64_IMM(BPF_REG_2
, -1),
6463 BPF_MOV64_IMM(BPF_REG_8
, 2),
6464 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_9
, 42, 6),
6465 BPF_JMP_REG(BPF_JSGT
, BPF_REG_8
, BPF_REG_1
, 3),
6466 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6467 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6468 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6469 BPF_MOV64_IMM(BPF_REG_0
, 0),
6471 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, -3),
6472 BPF_JMP_IMM(BPF_JA
, 0, 0, -7),
6474 .fixup_map1
= { 4 },
6475 .errstr
= "R0 min value is negative",
6479 "bounds checks mixing signed and unsigned, variant 15",
6481 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6482 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6483 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6484 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6485 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6486 BPF_FUNC_map_lookup_elem
),
6487 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6488 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6489 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6490 BPF_MOV64_IMM(BPF_REG_2
, -6),
6491 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6492 BPF_MOV64_IMM(BPF_REG_0
, 0),
6494 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6495 BPF_JMP_IMM(BPF_JGT
, BPF_REG_0
, 1, 2),
6496 BPF_MOV64_IMM(BPF_REG_0
, 0),
6498 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6499 BPF_MOV64_IMM(BPF_REG_0
, 0),
6502 .fixup_map1
= { 3 },
6503 .errstr_unpriv
= "R0 pointer comparison prohibited",
6504 .errstr
= "R0 min value is negative",
6506 .result_unpriv
= REJECT
,
6509 "subtraction bounds (map value) variant 1",
6511 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6512 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6513 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6514 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6515 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6516 BPF_FUNC_map_lookup_elem
),
6517 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6518 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6519 BPF_JMP_IMM(BPF_JGT
, BPF_REG_1
, 0xff, 7),
6520 BPF_LDX_MEM(BPF_B
, BPF_REG_3
, BPF_REG_0
, 1),
6521 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
, 0xff, 5),
6522 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_3
),
6523 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 56),
6524 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6525 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6527 BPF_MOV64_IMM(BPF_REG_0
, 0),
6530 .fixup_map1
= { 3 },
6531 .errstr
= "R0 max value is outside of the array range",
6535 "subtraction bounds (map value) variant 2",
6537 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6538 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6539 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6540 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6541 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6542 BPF_FUNC_map_lookup_elem
),
6543 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
6544 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6545 BPF_JMP_IMM(BPF_JGT
, BPF_REG_1
, 0xff, 6),
6546 BPF_LDX_MEM(BPF_B
, BPF_REG_3
, BPF_REG_0
, 1),
6547 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
, 0xff, 4),
6548 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_3
),
6549 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6550 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6552 BPF_MOV64_IMM(BPF_REG_0
, 0),
6555 .fixup_map1
= { 3 },
6556 .errstr
= "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6560 "variable-offset ctx access",
6562 /* Get an unknown value */
6563 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
6564 /* Make it small and 4-byte aligned */
6565 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
6566 /* add it to skb. We now have either &skb->len or
6567 * &skb->pkt_type, but we don't know which
6569 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
6570 /* dereference it */
6571 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
6574 .errstr
= "variable ctx access var_off=(0x0; 0x4)",
6576 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
6579 "variable-offset stack access",
6581 /* Fill the top 8 bytes of the stack */
6582 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6583 /* Get an unknown value */
6584 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
6585 /* Make it small and 4-byte aligned */
6586 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
6587 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_2
, 8),
6588 /* add it to fp. We now have either fp-4 or fp-8, but
6589 * we don't know which
6591 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_10
),
6592 /* dereference it */
6593 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_2
, 0),
6596 .errstr
= "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
6598 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
6601 "liveness pruning and write screening",
6603 /* Get an unknown value */
6604 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
6605 /* branch conditions teach us nothing about R2 */
6606 BPF_JMP_IMM(BPF_JGE
, BPF_REG_2
, 0, 1),
6607 BPF_MOV64_IMM(BPF_REG_0
, 0),
6608 BPF_JMP_IMM(BPF_JGE
, BPF_REG_2
, 0, 1),
6609 BPF_MOV64_IMM(BPF_REG_0
, 0),
6612 .errstr
= "R0 !read_ok",
6614 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
6617 "varlen_map_value_access pruning",
6619 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6620 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6621 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6622 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6623 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6624 BPF_FUNC_map_lookup_elem
),
6625 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
6626 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
6627 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
6628 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
6629 BPF_MOV32_IMM(BPF_REG_1
, 0),
6630 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
6631 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6632 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
6633 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
6634 offsetof(struct test_val
, foo
)),
6637 .fixup_map2
= { 3 },
6638 .errstr_unpriv
= "R0 leaks addr",
6639 .errstr
= "R0 unbounded memory access",
6640 .result_unpriv
= REJECT
,
6642 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
6645 "invalid 64-bit BPF_END",
6647 BPF_MOV32_IMM(BPF_REG_0
, 0),
6649 .code
= BPF_ALU64
| BPF_END
| BPF_TO_LE
,
6650 .dst_reg
= BPF_REG_0
,
6657 .errstr
= "BPF_END uses reserved fields",
6661 "arithmetic ops make PTR_TO_CTX unusable",
6663 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
6664 offsetof(struct __sk_buff
, data
) -
6665 offsetof(struct __sk_buff
, mark
)),
6666 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
6667 offsetof(struct __sk_buff
, mark
)),
6670 .errstr
= "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
6672 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6676 static int probe_filter_length(const struct bpf_insn
*fp
)
6680 for (len
= MAX_INSNS
- 1; len
> 0; --len
)
6681 if (fp
[len
].code
!= 0 || fp
[len
].imm
!= 0)
6686 static int create_map(uint32_t size_value
, uint32_t max_elem
)
6690 fd
= bpf_create_map(BPF_MAP_TYPE_HASH
, sizeof(long long),
6691 size_value
, max_elem
, BPF_F_NO_PREALLOC
);
6693 printf("Failed to create hash map '%s'!\n", strerror(errno
));
6698 static int create_prog_array(void)
6702 fd
= bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY
, sizeof(int),
6705 printf("Failed to create prog array '%s'!\n", strerror(errno
));
6710 static int create_map_in_map(void)
6712 int inner_map_fd
, outer_map_fd
;
6714 inner_map_fd
= bpf_create_map(BPF_MAP_TYPE_ARRAY
, sizeof(int),
6716 if (inner_map_fd
< 0) {
6717 printf("Failed to create array '%s'!\n", strerror(errno
));
6718 return inner_map_fd
;
6721 outer_map_fd
= bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS
,
6722 sizeof(int), inner_map_fd
, 1, 0);
6723 if (outer_map_fd
< 0)
6724 printf("Failed to create array of maps '%s'!\n",
6727 close(inner_map_fd
);
6729 return outer_map_fd
;
/* Buffer receiving the kernel verifier's log for the last loaded
 * program; consulted for expected-error substrings and alignment
 * rejections, and dumped verbatim on test failure.
 */
static char bpf_vlog[32768];
6734 static void do_test_fixup(struct bpf_test
*test
, struct bpf_insn
*prog
,
6737 int *fixup_map1
= test
->fixup_map1
;
6738 int *fixup_map2
= test
->fixup_map2
;
6739 int *fixup_prog
= test
->fixup_prog
;
6740 int *fixup_map_in_map
= test
->fixup_map_in_map
;
6742 /* Allocating HTs with 1 elem is fine here, since we only test
6743 * for verifier and not do a runtime lookup, so the only thing
6744 * that really matters is value size in this case.
6747 map_fds
[0] = create_map(sizeof(long long), 1);
6749 prog
[*fixup_map1
].imm
= map_fds
[0];
6751 } while (*fixup_map1
);
6755 map_fds
[1] = create_map(sizeof(struct test_val
), 1);
6757 prog
[*fixup_map2
].imm
= map_fds
[1];
6759 } while (*fixup_map2
);
6763 map_fds
[2] = create_prog_array();
6765 prog
[*fixup_prog
].imm
= map_fds
[2];
6767 } while (*fixup_prog
);
6770 if (*fixup_map_in_map
) {
6771 map_fds
[3] = create_map_in_map();
6773 prog
[*fixup_map_in_map
].imm
= map_fds
[3];
6775 } while (*fixup_map_in_map
);
6779 static void do_test_single(struct bpf_test
*test
, bool unpriv
,
6780 int *passes
, int *errors
)
6782 int fd_prog
, expected_ret
, reject_from_alignment
;
6783 struct bpf_insn
*prog
= test
->insns
;
6784 int prog_len
= probe_filter_length(prog
);
6785 int prog_type
= test
->prog_type
;
6786 int map_fds
[MAX_NR_MAPS
];
6787 const char *expected_err
;
6790 for (i
= 0; i
< MAX_NR_MAPS
; i
++)
6793 do_test_fixup(test
, prog
, map_fds
);
6795 fd_prog
= bpf_verify_program(prog_type
? : BPF_PROG_TYPE_SOCKET_FILTER
,
6796 prog
, prog_len
, test
->flags
& F_LOAD_WITH_STRICT_ALIGNMENT
,
6797 "GPL", 0, bpf_vlog
, sizeof(bpf_vlog
), 1);
6799 expected_ret
= unpriv
&& test
->result_unpriv
!= UNDEF
?
6800 test
->result_unpriv
: test
->result
;
6801 expected_err
= unpriv
&& test
->errstr_unpriv
?
6802 test
->errstr_unpriv
: test
->errstr
;
6804 reject_from_alignment
= fd_prog
< 0 &&
6805 (test
->flags
& F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
) &&
6806 strstr(bpf_vlog
, "Unknown alignment.");
6807 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
6808 if (reject_from_alignment
) {
6809 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
6814 if (expected_ret
== ACCEPT
) {
6815 if (fd_prog
< 0 && !reject_from_alignment
) {
6816 printf("FAIL\nFailed to load prog '%s'!\n",
6822 printf("FAIL\nUnexpected success to load!\n");
6825 if (!strstr(bpf_vlog
, expected_err
) && !reject_from_alignment
) {
6826 printf("FAIL\nUnexpected error message!\n");
6832 printf("OK%s\n", reject_from_alignment
?
6833 " (NOTE: reject due to unknown alignment)" : "");
6836 for (i
= 0; i
< MAX_NR_MAPS
; i
++)
6842 printf("%s", bpf_vlog
);
6846 static bool is_admin(void)
6849 cap_flag_value_t sysadmin
= CAP_CLEAR
;
6850 const cap_value_t cap_val
= CAP_SYS_ADMIN
;
6852 #ifdef CAP_IS_SUPPORTED
6853 if (!CAP_IS_SUPPORTED(CAP_SETFCAP
)) {
6854 perror("cap_get_flag");
6858 caps
= cap_get_proc();
6860 perror("cap_get_proc");
6863 if (cap_get_flag(caps
, cap_val
, CAP_EFFECTIVE
, &sysadmin
))
6864 perror("cap_get_flag");
6867 return (sysadmin
== CAP_SET
);
6870 static int set_admin(bool admin
)
6873 const cap_value_t cap_val
= CAP_SYS_ADMIN
;
6876 caps
= cap_get_proc();
6878 perror("cap_get_proc");
6881 if (cap_set_flag(caps
, CAP_EFFECTIVE
, 1, &cap_val
,
6882 admin
? CAP_SET
: CAP_CLEAR
)) {
6883 perror("cap_set_flag");
6886 if (cap_set_proc(caps
)) {
6887 perror("cap_set_proc");
6897 static int do_test(bool unpriv
, unsigned int from
, unsigned int to
)
6899 int i
, passes
= 0, errors
= 0;
6901 for (i
= from
; i
< to
; i
++) {
6902 struct bpf_test
*test
= &tests
[i
];
6904 /* Program types that are not supported by non-root we
6907 if (!test
->prog_type
) {
6910 printf("#%d/u %s ", i
, test
->descr
);
6911 do_test_single(test
, true, &passes
, &errors
);
6917 printf("#%d/p %s ", i
, test
->descr
);
6918 do_test_single(test
, false, &passes
, &errors
);
6922 printf("Summary: %d PASSED, %d FAILED\n", passes
, errors
);
6923 return errors
? EXIT_FAILURE
: EXIT_SUCCESS
;
6926 int main(int argc
, char **argv
)
6928 struct rlimit rinf
= { RLIM_INFINITY
, RLIM_INFINITY
};
6929 struct rlimit rlim
= { 1 << 20, 1 << 20 };
6930 unsigned int from
= 0, to
= ARRAY_SIZE(tests
);
6931 bool unpriv
= !is_admin();
6934 unsigned int l
= atoi(argv
[argc
- 2]);
6935 unsigned int u
= atoi(argv
[argc
- 1]);
6937 if (l
< to
&& u
< to
) {
6941 } else if (argc
== 2) {
6942 unsigned int t
= atoi(argv
[argc
- 1]);
6950 setrlimit(RLIMIT_MEMLOCK
, unpriv
? &rlim
: &rinf
);
6951 return do_test(unpriv
, from
, to
);