/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 32k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have UNKNOWN_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes UNKNOWN_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 */
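
/* As an illustration (a hypothetical sketch, assuming a map with 4-byte keys
 * and at least 4-byte values), a minimal sequence following the pattern above,
 * including the NULL check that check_cond_jmp_op() tracks before the map
 * value may be dereferenced:
 *
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),         // initialize the key slot
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),        // R2 = fp - 4 (PTR_TO_STACK)
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),             // map_fd is a placeholder here
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),        // if R0 == NULL, skip the load
 *    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),  // safe: R0 is PTR_TO_MAP_VALUE here
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 *
 * Without the BPF_JMP_IMM the load would be rejected, since R0 would still
 * have type PTR_TO_MAP_VALUE_OR_NULL at the BPF_LDX_MEM insn.
 */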

/* types of values stored in eBPF registers */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	UNKNOWN_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	FRAME_PTR,		 /* reg == frame_pointer */
	PTR_TO_STACK,		 /* reg == frame_pointer + imm */
	CONST_IMM,		 /* constant integer value */
};

struct reg_state {
	enum bpf_reg_type type;
	union {
		/* valid when type == CONST_IMM | PTR_TO_STACK */
		int imm;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;
	};
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC	  /* BPF program wrote some data into this slot */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

/* state of the program:
 * type of all registers and stack info
 */
struct verifier_state {
	struct reg_state regs[MAX_BPF_REG];
	u8 stack_slot_type[MAX_BPF_STACK];
	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};

/* linked list of verifier states used to prune search */
struct verifier_state_list {
	struct verifier_state state;
	struct verifier_state_list *next;
};

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct verifier_stack_elem *next;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	struct verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	struct verifier_state cur_state; /* current verifier state */
	struct verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	bool allow_ptr_leaks;
};

/* verbose verifier prints what it's seeing
 * bpf_check() is called under lock, so no race to access these global vars
 */
static u32 log_level, log_size, log_len;
static char *log_buf;

static DEFINE_MUTEX(bpf_verifier_lock);

/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */
static __printf(1, 2) void verbose(const char *fmt, ...)
{
	va_list args;

	if (log_level == 0 || log_len >= log_size - 1)
		return;

	va_start(args, fmt);
	log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
	va_end(args);
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[UNKNOWN_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[FRAME_PTR]		= "fp",
	[PTR_TO_STACK]		= "fp",
	[CONST_IMM]		= "imm",
};

static void print_verifier_state(struct verifier_env *env)
{
	enum bpf_reg_type t;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		t = env->cur_state.regs[i].type;
		if (t == NOT_INIT)
			continue;
		verbose(" R%d=%s", i, reg_type_str[t]);
		if (t == CONST_IMM || t == PTR_TO_STACK)
			verbose("%d", env->cur_state.regs[i].imm);
		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
			 t == PTR_TO_MAP_VALUE_OR_NULL)
			verbose("(ks=%d,vs=%d)",
				env->cur_state.regs[i].map_ptr->key_size,
				env->cur_state.regs[i].map_ptr->value_size);
	}
	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
				reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
	}
	verbose("\n");
}

static const char *const bpf_class_string[] = {
	[BPF_LD]    = "ld",
	[BPF_LDX]   = "ldx",
	[BPF_ST]    = "st",
	[BPF_STX]   = "stx",
	[BPF_ALU]   = "alu",
	[BPF_JMP]   = "jmp",
	[BPF_RET]   = "BUG",
	[BPF_ALU64] = "alu64",
};

static const char *const bpf_alu_string[16] = {
	[BPF_ADD >> 4]  = "+=",
	[BPF_SUB >> 4]  = "-=",
	[BPF_MUL >> 4]  = "*=",
	[BPF_DIV >> 4]  = "/=",
	[BPF_OR  >> 4]  = "|=",
	[BPF_AND >> 4]  = "&=",
	[BPF_LSH >> 4]  = "<<=",
	[BPF_RSH >> 4]  = ">>=",
	[BPF_NEG >> 4]  = "neg",
	[BPF_MOD >> 4]  = "%=",
	[BPF_XOR >> 4]  = "^=",
	[BPF_MOV >> 4]  = "=",
	[BPF_ARSH >> 4] = "s>>=",
	[BPF_END >> 4]  = "endian",
};

static const char *const bpf_ldst_string[] = {
	[BPF_W >> 3]  = "u32",
	[BPF_H >> 3]  = "u16",
	[BPF_B >> 3]  = "u8",
	[BPF_DW >> 3] = "u64",
};

static const char *const bpf_jmp_string[16] = {
	[BPF_JA >> 4]   = "jmp",
	[BPF_JEQ >> 4]  = "==",
	[BPF_JGT >> 4]  = ">",
	[BPF_JGE >> 4]  = ">=",
	[BPF_JSET >> 4] = "&",
	[BPF_JNE >> 4]  = "!=",
	[BPF_JSGT >> 4] = "s>",
	[BPF_JSGE >> 4] = "s>=",
	[BPF_CALL >> 4] = "call",
	[BPF_EXIT >> 4] = "exit",
};

static void print_bpf_insn(struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (BPF_SRC(insn->code) == BPF_X)
			verbose("(%02x) %sr%d %s %sr%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? "(u32) " : "",
				insn->src_reg);
		else
			verbose("(%02x) %sr%d %s %s%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? "(u32) " : "",
				insn->imm);
	} else if (class == BPF_STX) {
		if (BPF_MODE(insn->code) == BPF_MEM)
			verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg,
				insn->off, insn->src_reg);
		else if (BPF_MODE(insn->code) == BPF_XADD)
			verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
				insn->src_reg);
		else
			verbose("BUG_%02x\n", insn->code);
	} else if (class == BPF_ST) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose("BUG_st_%02x\n", insn->code);
			return;
		}
		verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
			insn->code,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->dst_reg,
			insn->off, insn->imm);
	} else if (class == BPF_LDX) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose("BUG_ldx_%02x\n", insn->code);
			return;
		}
		verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
			insn->code, insn->dst_reg,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->src_reg, insn->off);
	} else if (class == BPF_LD) {
		if (BPF_MODE(insn->code) == BPF_ABS) {
			verbose("(%02x) r0 = *(%s *)skb[%d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IND) {
			verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->src_reg, insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IMM) {
			verbose("(%02x) r%d = 0x%x\n",
				insn->code, insn->dst_reg, insn->imm);
		} else {
			verbose("BUG_ld_%02x\n", insn->code);
			return;
		}
	} else if (class == BPF_JMP) {
		u8 opcode = BPF_OP(insn->code);

		if (opcode == BPF_CALL) {
			verbose("(%02x) call %d\n", insn->code, insn->imm);
		} else if (insn->code == (BPF_JMP | BPF_JA)) {
			verbose("(%02x) goto pc%+d\n",
				insn->code, insn->off);
		} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
			verbose("(%02x) exit\n", insn->code);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose("(%02x) if r%d %s r%d goto pc%+d\n",
				insn->code, insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->src_reg, insn->off);
		} else {
			verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
				insn->code, insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->imm, insn->off);
		}
	} else {
		verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
	}
}

static int pop_stack(struct verifier_env *env, int *prev_insn_idx)
{
	struct verifier_stack_elem *elem;
	int insn_idx;

	if (env->head == NULL)
		return -1;

	memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
	insn_idx = env->head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = env->head->prev_insn_idx;
	elem = env->head->next;
	kfree(env->head);
	env->head = elem;
	env->stack_size--;
	return insn_idx;
}

static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx,
					 int prev_insn_idx)
{
	struct verifier_stack_elem *elem;

	elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	if (env->stack_size > 1024) {
		verbose("BPF program is too complex\n");
		goto err;
	}
	return &elem->st;
err:
	/* pop all elements and return */
	while (pop_stack(env, NULL) >= 0);
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void init_reg_state(struct reg_state *regs)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		regs[i].type = NOT_INIT;
		regs[i].imm = 0;
		regs[i].map_ptr = NULL;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = FRAME_PTR;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
}

static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
{
	BUG_ON(regno >= MAX_BPF_REG);
	regs[regno].type = UNKNOWN_VALUE;
	regs[regno].imm = 0;
	regs[regno].map_ptr = NULL;
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int check_reg_arg(struct reg_state *regs, u32 regno,
			 enum reg_arg_type t)
{
	if (regno >= MAX_BPF_REG) {
		verbose("R%d is invalid\n", regno);
		return -EINVAL;
	}

	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (regs[regno].type == NOT_INIT) {
			verbose("R%d !read_ok\n", regno);
			return -EACCES;
		}
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose("frame pointer is read only\n");
			return -EACCES;
		}
		if (t == DST_OP)
			mark_reg_unknown_value(regs, regno);
	}
	return 0;
}

static int bpf_size_to_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 8;
	else
		return -EINVAL;
}

static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case FRAME_PTR:
	case CONST_PTR_TO_MAP:
		return true;
	default:
		return false;
	}
}

/* check_stack_read/write functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct verifier_state *state, int off, int size,
			     int value_regno)
{
	int i;
	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
	 * so it's aligned access and [off, off + size) are within stack limits
	 */

	if (value_regno >= 0 &&
	    is_spillable_regtype(state->regs[value_regno].type)) {

		/* register containing pointer is being spilled into stack */
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}

		/* save register state */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			state->regs[value_regno];

		for (i = 0; i < BPF_REG_SIZE; i++)
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
	} else {
		/* regular write of data into stack */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			(struct reg_state) {};

		for (i = 0; i < size; i++)
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
	}
	return 0;
}
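
/* A short spill/fill sketch (hypothetical, assuming R1 still holds the ctx
 * pointer): pointer registers survive the stack round trip only via full
 * BPF_REG_SIZE stores,
 *
 *    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // spill: slots become STACK_SPILL
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), // fill: R2 gets back PTR_TO_CTX
 *
 * while storing a pointer register with any other size is rejected with
 * "invalid size of register spill", and non-pointer stores simply mark
 * the touched bytes STACK_MISC.
 */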

static int check_stack_read(struct verifier_state *state, int off, int size,
			    int value_regno)
{
	u8 *slot_type;
	int i;

	slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];

	if (slot_type[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (slot_type[i] != STACK_SPILL) {
				verbose("corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0)
			/* restore register state from stack */
			state->regs[value_regno] =
				state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
		return 0;
	} else {
		for (i = 0; i < size; i++) {
			if (slot_type[i] != STACK_MISC) {
				verbose("invalid read from stack off %d+%d size %d\n",
					off, i, size);
				return -EACCES;
			}
		}
		if (value_regno >= 0)
			/* have read misc data from the stack */
			mark_reg_unknown_value(state->regs, value_regno);
		return 0;
	}
}

/* check read/write into map element returned by bpf_map_lookup_elem() */
static int check_map_access(struct verifier_env *env, u32 regno, int off,
			    int size)
{
	struct bpf_map *map = env->cur_state.regs[regno].map_ptr;

	if (off < 0 || off + size > map->value_size) {
		verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}

/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
			    enum bpf_access_type t)
{
	if (env->prog->aux->ops->is_valid_access &&
	    env->prog->aux->ops->is_valid_access(off, size, t))
		return 0;

	verbose("invalid bpf_context access off=%d size=%d\n", off, size);
	return -EACCES;
}

static bool is_pointer_value(struct verifier_env *env, int regno)
{
	if (env->allow_ptr_leaks)
		return false;

	switch (env->cur_state.regs[regno].type) {
	case UNKNOWN_VALUE:
	case CONST_IMM:
		return false;
	default:
		return true;
	}
}

/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register which value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct verifier_env *env, u32 regno, int off,
			    int bpf_size, enum bpf_access_type t,
			    int value_regno)
{
	struct verifier_state *state = &env->cur_state;
	int size, err = 0;

	if (state->regs[regno].type == PTR_TO_STACK)
		off += state->regs[regno].imm;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	if (off % size != 0) {
		verbose("misaligned access off %d size %d\n", off, size);
		return -EACCES;
	}

	if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}
		err = check_map_access(env, regno, off, size);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (state->regs[regno].type == PTR_TO_CTX) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		err = check_ctx_access(env, off, size, t);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (state->regs[regno].type == FRAME_PTR ||
		   state->regs[regno].type == PTR_TO_STACK) {
		if (off >= 0 || off < -MAX_BPF_STACK) {
			verbose("invalid stack off=%d size=%d\n", off, size);
			return -EACCES;
		}
		if (t == BPF_WRITE) {
			if (!env->allow_ptr_leaks &&
			    state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
			    size != BPF_REG_SIZE) {
				verbose("attempt to corrupt spilled pointer on stack\n");
				return -EACCES;
			}
			err = check_stack_write(state, off, size, value_regno);
		} else {
			err = check_stack_read(state, off, size, value_regno);
		}
	} else {
		verbose("R%d invalid mem access '%s'\n",
			regno, reg_type_str[state->regs[regno].type]);
		return -EACCES;
	}
	return err;
}
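
/* Example of the alignment rule above (hypothetical offsets): with R1 of
 * type PTR_TO_STACK and imm = -7, the insn
 *    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0)
 * yields the effective offset -7, and -7 % 4 != 0, so it is rejected with
 * "misaligned access off -7 size 4" before any stack checks run.
 */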

static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
	    insn->imm != 0) {
		verbose("BPF_XADD uses reserved fields\n");
		return -EINVAL;
	}

	/* check src1 operand */
	err = check_reg_arg(regs, insn->src_reg, SRC_OP);
	if (err)
		return err;

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	if (is_pointer_value(env, insn->src_reg)) {
		verbose("R%d leaks addr into mem\n", insn->src_reg);
		return -EACCES;
	}

	/* check whether atomic_add can read the memory */
	err = check_mem_access(env, insn->dst_reg, insn->off,
			       BPF_SIZE(insn->code), BPF_READ, -1);
	if (err)
		return err;

	/* check whether atomic_add can write into the same memory */
	return check_mem_access(env, insn->dst_reg, insn->off,
				BPF_SIZE(insn->code), BPF_WRITE, -1);
}
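
/* A minimal XADD use that satisfies both checks above (a hypothetical
 * sketch, assuming nothing else about the program):
 *
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),           // make fp-8 readable (STACK_MISC)
 *    BPF_MOV64_IMM(BPF_REG_1, 1),
 *    BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_1, -8), // *(u32 *)(fp - 8) += r1
 *
 * The slot must be initialized first, since XADD is verified as a read
 * followed by a write of the same memory.
 */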

/* when register 'regno' is passed into function that will read 'access_size'
 * bytes from that pointer, make sure that it's within stack boundary
 * and all elements of stack are initialized
 */
static int check_stack_boundary(struct verifier_env *env,
				int regno, int access_size)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *regs = state->regs;
	int off, i;

	if (regs[regno].type != PTR_TO_STACK)
		return -EACCES;

	off = regs[regno].imm;
	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
	    access_size <= 0) {
		verbose("invalid stack type R%d off=%d access_size=%d\n",
			regno, off, access_size);
		return -EACCES;
	}

	for (i = 0; i < access_size; i++) {
		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
			verbose("invalid indirect read from stack off %d+%d size %d\n",
				off, i, access_size);
			return -EACCES;
		}
	}
	return 0;
}

static int check_func_arg(struct verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type, struct bpf_map **mapp)
{
	struct reg_state *reg = env->cur_state.regs + regno;
	enum bpf_reg_type expected_type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	if (reg->type == NOT_INIT) {
		verbose("R%d !read_ok\n", regno);
		return -EACCES;
	}

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose("R%d leaks addr into helper function\n", regno);
			return -EACCES;
		}
		return 0;
	}

	if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
	    arg_type == ARG_PTR_TO_MAP_VALUE) {
		expected_type = PTR_TO_STACK;
	} else if (arg_type == ARG_CONST_STACK_SIZE) {
		expected_type = CONST_IMM;
	} else if (arg_type == ARG_CONST_MAP_PTR) {
		expected_type = CONST_PTR_TO_MAP;
	} else if (arg_type == ARG_PTR_TO_CTX) {
		expected_type = PTR_TO_CTX;
	} else {
		verbose("unsupported arg_type %d\n", arg_type);
		return -EFAULT;
	}

	if (reg->type != expected_type) {
		verbose("R%d type=%s expected=%s\n", regno,
			reg_type_str[reg->type], reg_type_str[expected_type]);
		return -EACCES;
	}

	if (arg_type == ARG_CONST_MAP_PTR) {
		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
		*mapp = reg->map_ptr;
	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
		/* bpf_map_xxx(..., map_ptr, ..., key) call:
		 * check that [key, key + map->key_size) are within
		 * stack limits and initialized
		 */
		if (!*mapp) {
			/* in function declaration map_ptr must come before
			 * map_key, so that it's verified and known before
			 * we have to check map_key here. Otherwise it means
			 * that kernel subsystem misconfigured verifier
			 */
			verbose("invalid map_ptr to access map->key\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno, (*mapp)->key_size);
	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
		/* bpf_map_xxx(..., map_ptr, ..., value) call:
		 * check [value, value + map->value_size) validity
		 */
		if (!*mapp) {
			/* kernel subsystem misconfigured verifier */
			verbose("invalid map_ptr to access map->value\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno, (*mapp)->value_size);
	} else if (arg_type == ARG_CONST_STACK_SIZE) {
		/* bpf_xxx(..., buf, len) call will access 'len' bytes
		 * from stack pointer 'buf'. Check it
		 * note: regno == len, regno - 1 == buf
		 */
		if (regno == 0) {
			/* kernel subsystem misconfigured verifier */
			verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno - 1, reg->imm);
	}

	return err;
}
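
/* For instance, a helper proto in the style of bpf_trace_printk() pairs the
 * two stack arguments handled above (a sketch; see the helper's actual
 * definition for the authoritative proto):
 *    .arg1_type = ARG_PTR_TO_STACK,      // buf, must be PTR_TO_STACK
 *    .arg2_type = ARG_CONST_STACK_SIZE,  // len, must be CONST_IMM
 * so when R2 is checked, its reg->imm bounds the indirect read through R1
 * via check_stack_boundary(env, regno - 1, reg->imm).
 */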

static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
	if (!map)
		return 0;

	/* We need a two way check, first is from map perspective ... */
	switch (map->map_type) {
	case BPF_MAP_TYPE_PROG_ARRAY:
		if (func_id != BPF_FUNC_tail_call)
			goto error;
		break;
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		if (func_id != BPF_FUNC_perf_event_read &&
		    func_id != BPF_FUNC_perf_event_output)
			goto error;
		break;
	default:
		break;
	}

	/* ... and second from the function itself. */
	switch (func_id) {
	case BPF_FUNC_tail_call:
		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			goto error;
		break;
	case BPF_FUNC_perf_event_read:
	case BPF_FUNC_perf_event_output:
		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
			goto error;
		break;
	default:
		break;
	}

	return 0;
error:
	verbose("cannot pass map_type %d into func %d\n",
		map->map_type, func_id);
	return -EINVAL;
}

static int check_call(struct verifier_env *env, int func_id)
{
	struct verifier_state *state = &env->cur_state;
	const struct bpf_func_proto *fn = NULL;
	struct reg_state *regs = state->regs;
	struct bpf_map *map = NULL;
	struct reg_state *reg;
	int i, err;

	/* find function prototype */
	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
		verbose("invalid func %d\n", func_id);
		return -EINVAL;
	}

	if (env->prog->aux->ops->get_func_proto)
		fn = env->prog->aux->ops->get_func_proto(func_id);

	if (!fn) {
		verbose("unknown func %d\n", func_id);
		return -EINVAL;
	}

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	if (!env->prog->gpl_compatible && fn->gpl_only) {
		verbose("cannot call GPL only function from proprietary program\n");
		return -EINVAL;
	}

	/* check args */
	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map);
	if (err)
		return err;

	/* reset caller saved regs */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* update return register */
	if (fn->ret_type == RET_INTEGER) {
		regs[BPF_REG_0].type = UNKNOWN_VALUE;
	} else if (fn->ret_type == RET_VOID) {
		regs[BPF_REG_0].type = NOT_INIT;
	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
		/* remember map_ptr, so that check_map_access()
		 * can check 'value_size' boundary of memory access
		 * to map element returned from bpf_map_lookup_elem()
		 */
		if (map == NULL) {
			verbose("kernel subsystem misconfigured verifier\n");
			return -EINVAL;
		}
		regs[BPF_REG_0].map_ptr = map;
	} else {
		verbose("unknown return type %d of func %d\n",
			fn->ret_type, func_id);
		return -EINVAL;
	}

	err = check_map_func_compatibility(map, func_id);
	if (err)
		return err;

	return 0;
}

/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode == BPF_END || opcode == BPF_NEG) {
		if (opcode == BPF_NEG) {
			if (BPF_SRC(insn->code) != 0 ||
			    insn->src_reg != BPF_REG_0 ||
			    insn->off != 0 || insn->imm != 0) {
				verbose("BPF_NEG uses reserved fields\n");
				return -EINVAL;
			}
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
				verbose("BPF_END uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

	} else if (opcode == BPF_MOV) {

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}

			/* check src operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose("BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

		if (BPF_SRC(insn->code) == BPF_X) {
			if (BPF_CLASS(insn->code) == BPF_ALU64) {
				/* case: R1 = R2
				 * copy register state to dest reg
				 */
				regs[insn->dst_reg] = regs[insn->src_reg];
			} else {
				if (is_pointer_value(env, insn->src_reg)) {
					verbose("R%d partial copy of pointer\n",
						insn->src_reg);
					return -EACCES;
				}
				regs[insn->dst_reg].type = UNKNOWN_VALUE;
				regs[insn->dst_reg].map_ptr = NULL;
			}
		} else {
			/* case: R = imm
			 * remember the value we stored into this reg
			 */
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = insn->imm;
		}

	} else if (opcode > BPF_END) {
		verbose("invalid BPF_ALU opcode %x\n", opcode);
		return -EINVAL;

	} else {	/* all other ALU ops: and, sub, xor, add, ... */

		bool stack_relative = false;

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
			/* check src1 operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src2 operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
			verbose("div by zero\n");
			return -EINVAL;
		}

		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;

			if (insn->imm < 0 || insn->imm >= size) {
				verbose("invalid shift %d\n", insn->imm);
				return -EINVAL;
			}
		}

		/* pattern match 'bpf_add Rx, imm' instruction */
		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
		    regs[insn->dst_reg].type == FRAME_PTR &&
		    BPF_SRC(insn->code) == BPF_K) {
			stack_relative = true;
		} else if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		} else if (BPF_SRC(insn->code) == BPF_X &&
			   is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->src_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

		if (stack_relative) {
			regs[insn->dst_reg].type = PTR_TO_STACK;
			regs[insn->dst_reg].imm = insn->imm;
		}
	}

	return 0;
}

static int check_cond_jmp_op(struct verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct reg_state *regs = env->cur_state.regs;
	struct verifier_state *other_branch;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode > BPF_EXIT) {
		verbose("invalid BPF_JMP opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
	} else {
		if (insn->src_reg != BPF_REG_0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}
	}

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	/* detect if R == 0 where R was initialized to zero earlier */
	if (BPF_SRC(insn->code) == BPF_K &&
	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    regs[insn->dst_reg].type == CONST_IMM &&
	    regs[insn->dst_reg].imm == insn->imm) {
		if (opcode == BPF_JEQ) {
			/* if (imm == imm) goto pc+off;
			 * only follow the goto, ignore fall-through
			 */
			*insn_idx += insn->off;
			return 0;
		} else {
			/* if (imm != imm) goto pc+off;
			 * only follow fall-through branch, since
			 * that's where the program will go
			 */
			return 0;
		}
	}

	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
	if (!other_branch)
		return -EFAULT;

	/* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
	if (BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ ||
			       opcode == BPF_JNE) &&
	    regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) {
		if (opcode == BPF_JEQ) {
			/* next fallthrough insn can access memory via
			 * this register
			 */
			regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			/* branch target cannot access it, since reg == 0 */
			other_branch->regs[insn->dst_reg].type = CONST_IMM;
			other_branch->regs[insn->dst_reg].imm = 0;
		} else {
			other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = 0;
		}
	} else if (is_pointer_value(env, insn->dst_reg)) {
		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
		return -EACCES;
	} else if (BPF_SRC(insn->code) == BPF_K &&
		   (opcode == BPF_JEQ || opcode == BPF_JNE)) {
		if (opcode == BPF_JEQ) {
			/* detect if (R == imm) goto
			 * and in the target state recognize that R = imm
			 */
			other_branch->regs[insn->dst_reg].type = CONST_IMM;
			other_branch->regs[insn->dst_reg].imm = insn->imm;
		} else {
			/* detect if (R != imm) goto
			 * and in the fall-through state recognize that R = imm
			 */
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = insn->imm;
		}
	}
	if (log_level)
		print_verifier_state(env);
	return 0;
}

/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;

	return (struct bpf_map *) (unsigned long) imm64;
}
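
/* For example (hypothetical pointer value): a map pointer
 * 0xffff880012345678 is carried as insn[0].imm = 0x12345678 (low 32 bits)
 * and insn[1].imm = 0xffff8800 (high 32 bits); the helper above just
 * reassembles the two halves.
 */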

/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose("invalid BPF_LD_IMM insn\n");
		return -EINVAL;
	}
	if (insn->off != 0) {
		verbose("BPF_LD_IMM64 uses reserved fields\n");
		return -EINVAL;
	}

	err = check_reg_arg(regs, insn->dst_reg, DST_OP);
	if (err)
		return err;

	if (insn->src_reg == 0)
		/* generic move 64-bit immediate into a register */
		return 0;

	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);

	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
	return 0;
}

static bool may_access_skb(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		return true;
	default:
		return false;
	}
}

/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	u8 mode = BPF_MODE(insn->code);
	struct reg_state *reg;
	int i, err;

	if (!may_access_skb(env->prog->type)) {
		verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
		return -EINVAL;
	}

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    BPF_SIZE(insn->code) == BPF_DW ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose("BPF_LD_ABS uses reserved fields\n");
		return -EINVAL;
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
	if (err)
		return err;

	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
		verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
		return -EINVAL;
	}

	if (mode == BPF_IND) {
		/* check explicit source operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;
	}

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* mark destination R0 register as readable, since it contains
	 * the value fetched from the packet
	 */
	regs[BPF_REG_0].type = UNKNOWN_VALUE;
	return 0;
}

/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6            t <- S.pop()
 * 7            if t is what we're looking for:
 * 8                return t
 * 9            for all edges e in G.adjacentEdges(t) do
 * 10               if edge e is already labelled
 * 11                   continue with the next edge
 * 12               w <- G.adjacentVertex(t,e)
 * 13               if vertex w is not discovered and not explored
 * 14                   label e as tree-edge
 * 15                   label w as discovered
 * 16                   S.push(w)
 * 17                   continue
 * 18           else if vertex w is discovered
 * 19               label e as back-edge
 * 20           else
 * 21               // vertex w is explored
 * 22               label e as forward- or cross-edge
 * 23           label t as explored
 * 24           S.pop()
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */

enum {
	DISCOVERED = 0x10,
	EXPLORED = 0x20,
	FALLTHROUGH = 1,
	BRANCH = 2,
};

#define STATE_LIST_MARK ((struct verifier_state_list *) -1L)

static int *insn_stack;	/* stack of insns to process */
static int cur_stack;	/* current stack index */
static int *insn_state;

/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 * e - edge
 */
static int push_insn(int t, int w, int e, struct verifier_env *env)
{
	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
		return 0;

	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
		return 0;

	if (w < 0 || w >= env->prog->len) {
		verbose("jump out of range from insn %d to %d\n", t, w);
		return -EINVAL;
	}

	if (e == BRANCH)
		/* mark branch target for state pruning */
		env->explored_states[w] = STATE_LIST_MARK;

	if (insn_state[w] == 0) {
		/* tree-edge */
		insn_state[t] = DISCOVERED | e;
		insn_state[w] = DISCOVERED;
		if (cur_stack >= env->prog->len)
			return -E2BIG;
		insn_stack[cur_stack++] = w;
		return 1;
	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
		verbose("back-edge from insn %d to %d\n", t, w);
		return -EINVAL;
	} else if (insn_state[w] == EXPLORED) {
		/* forward- or cross-edge */
		insn_state[t] = DISCOVERED | e;
	} else {
		verbose("insn state internal bug\n");
		return -EFAULT;
	}
	return 0;
}

/* non-recursive depth-first-search to detect loops in BPF program
 * loop == back-edge in directed graph
 */
static int check_cfg(struct verifier_env *env)
{
	struct bpf_insn *insns = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int ret = 0;
	int i, t;

	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_state)
		return -ENOMEM;

	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_stack) {
		kfree(insn_state);
		return -ENOMEM;
	}

	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
	insn_stack[0] = 0; /* 0 is the first instruction */
	cur_stack = 1;

peek_stack:
	if (cur_stack == 0)
		goto check_state;
	t = insn_stack[cur_stack - 1];

	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
		u8 opcode = BPF_OP(insns[t].code);

		if (opcode == BPF_EXIT) {
			goto mark_explored;
		} else if (opcode == BPF_CALL) {
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
		} else if (opcode == BPF_JA) {
			if (BPF_SRC(insns[t].code) != BPF_K) {
				ret = -EINVAL;
				goto err_free;
			}
			/* unconditional jump with single edge */
			ret = push_insn(t, t + insns[t].off + 1,
					FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
			/* tell verifier to check for equivalent states
			 * after every call and jump
			 */
			if (t + 1 < insn_cnt)
				env->explored_states[t + 1] = STATE_LIST_MARK;
		} else {
			/* conditional jump with two edges */
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;

			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
		}
	} else {
		/* all other non-branch instructions with single
		 * fall-through edge
		 */
		ret = push_insn(t, t + 1, FALLTHROUGH, env);
		if (ret == 1)
			goto peek_stack;
		else if (ret < 0)
			goto err_free;
	}

mark_explored:
	insn_state[t] = EXPLORED;
	if (cur_stack-- <= 0) {
		verbose("pop stack internal bug\n");
		ret = -EFAULT;
		goto err_free;
	}
	goto peek_stack;

check_state:
	for (i = 0; i < insn_cnt; i++) {
		if (insn_state[i] != EXPLORED) {
			verbose("unreachable insn %d\n", i);
			ret = -EINVAL;
			goto err_free;
		}
	}
	ret = 0; /* cfg looks good */

err_free:
	kfree(insn_state);
	kfree(insn_stack);
	return ret;
}
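
/* A two-insn back-edge this pass rejects (hypothetical program):
 *
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_JMP_IMM(BPF_JA, 0, 0, -1),  // insn 1 jumps to itself
 *    BPF_EXIT_INSN(),
 *
 * push_insn(1, 1 + (-1) + 1, ...) finds insn 1 still DISCOVERED and
 * reports "back-edge from insn 1 to 1".
 */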

/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when verifier exploring different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 *        explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		if (memcmp(&old->regs[i], &cur->regs[i],
			   sizeof(old->regs[0])) != 0) {
			if (old->regs[i].type == NOT_INIT ||
			    (old->regs[i].type == UNKNOWN_VALUE &&
			     cur->regs[i].type != NOT_INIT))
				continue;
			return false;
		}
	}

	for (i = 0; i < MAX_BPF_STACK; i++) {
		if (old->stack_slot_type[i] == STACK_INVALID)
			continue;
		if (old->stack_slot_type[i] != cur->stack_slot_type[i])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
			   &cur->spilled_regs[i / BPF_REG_SIZE],
			   sizeof(old->spilled_regs[0])))
			/* when explored and current stack slot types are
			 * the same, check that stored pointers types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
			 * but current path has stored:
			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			return false;
	}
	return true;
}
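
/* Concrete instance (hypothetical): if an already-explored safe state had
 * R1=UNKNOWN_VALUE and fp-8=STACK_MISC, a current state with R1=PTR_TO_CTX
 * and the same stack is equivalent; anywhere the old state reached bpf_exit
 * safely while assuming nothing about R1, the more specific current state
 * can too.
 */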

static int is_state_visited(struct verifier_env *env, int insn_idx)
{
	struct verifier_state_list *new_sl;
	struct verifier_state_list *sl;

	sl = env->explored_states[insn_idx];
	if (!sl)
		/* this 'insn_idx' instruction wasn't marked, so we will not
		 * be doing state search here
		 */
		return 0;

	while (sl != STATE_LIST_MARK) {
		if (states_equal(&sl->state, &env->cur_state))
			/* reached equivalent register/stack state,
			 * prune the search
			 */
			return 1;
		sl = sl->next;
	}

	/* there were no equivalent states, remember current one.
	 * technically the current state is not proven to be safe yet,
	 * but it will either reach bpf_exit (which means it's safe) or
	 * it will be rejected. Since there are no loops, we won't be
	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
	 */
	new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER);
	if (!new_sl)
		return -ENOMEM;

	/* add new state to the head of linked list */
	memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
	new_sl->next = env->explored_states[insn_idx];
	env->explored_states[insn_idx] = new_sl;
	return 0;
}

static int do_check(struct verifier_env *env)
{
	struct verifier_state *state = &env->cur_state;
	struct bpf_insn *insns = env->prog->insnsi;
	struct reg_state *regs = state->regs;
	int insn_cnt = env->prog->len;
	int insn_idx, prev_insn_idx = 0;
	int insn_processed = 0;
	bool do_print_state = false;

	init_reg_state(regs);
	insn_idx = 0;
	for (;;) {
		struct bpf_insn *insn;
		u8 class;
		int err;

		if (insn_idx >= insn_cnt) {
			verbose("invalid insn idx %d insn_cnt %d\n",
				insn_idx, insn_cnt);
			return -EFAULT;
		}

		insn = &insns[insn_idx];
		class = BPF_CLASS(insn->code);

		if (++insn_processed > 32768) {
			verbose("BPF program is too large. Processed %d insn\n",
				insn_processed);
			return -E2BIG;
		}

		err = is_state_visited(env, insn_idx);
		if (err < 0)
			return err;
		if (err == 1) {
			/* found equivalent state, can prune the search */
			if (log_level) {
				if (do_print_state)
					verbose("\nfrom %d to %d: safe\n",
						prev_insn_idx, insn_idx);
				else
					verbose("%d: safe\n", insn_idx);
			}
			goto process_bpf_exit;
		}

		if (log_level && do_print_state) {
			verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
			print_verifier_state(env);
			do_print_state = false;
		}

		if (log_level) {
			verbose("%d: ", insn_idx);
			print_bpf_insn(insn);
		}

		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(env, insn);
			if (err)
				return err;

		} else if (class == BPF_LDX) {
			enum bpf_reg_type src_reg_type;

			/* check for reserved fields is already done */

			/* check src operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;

			err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
			if (err)
				return err;

			src_reg_type = regs[insn->src_reg].type;

			/* check that memory (src_reg + off) is readable,
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, insn->src_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_READ,
					       insn->dst_reg);
			if (err)
				return err;

			if (BPF_SIZE(insn->code) != BPF_W) {
				insn_idx++;
				continue;
			}

			if (insn->imm == 0) {
				/* saw a valid insn
				 * dst_reg = *(u32 *)(src_reg + off)
				 * use reserved 'imm' field to mark this insn
				 */
				insn->imm = src_reg_type;

			} else if (src_reg_type != insn->imm &&
				   (src_reg_type == PTR_TO_CTX ||
				    insn->imm == PTR_TO_CTX)) {
				/* Abuser program is trying to use the same insn
				 * dst_reg = *(u32*) (src_reg + off)
				 * with different pointer types:
				 * src_reg == ctx in one branch and
				 * src_reg == stack|map in some other branch.
				 * Reject it.
				 */
				verbose("same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_STX) {
			enum bpf_reg_type dst_reg_type;

			if (BPF_MODE(insn->code) == BPF_XADD) {
				err = check_xadd(env, insn);
				if (err)
					return err;
				insn_idx++;
				continue;
			}

			/* check src1 operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
			/* check src2 operand */
			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       insn->src_reg);
			if (err)
				return err;

			if (insn->imm == 0) {
				insn->imm = dst_reg_type;
			} else if (dst_reg_type != insn->imm &&
				   (dst_reg_type == PTR_TO_CTX ||
				    insn->imm == PTR_TO_CTX)) {
				verbose("same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_ST) {
			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->src_reg != BPF_REG_0) {
				verbose("BPF_ST uses reserved fields\n");
				return -EINVAL;
			}
			/* check src operand */
			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       -1);
			if (err)
				return err;

		} else if (class == BPF_JMP) {
			u8 opcode = BPF_OP(insn->code);

			if (opcode == BPF_CALL) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->off != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_CALL uses reserved fields\n");
					return -EINVAL;
				}

				err = check_call(env, insn->imm);
				if (err)
					return err;

			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_JA uses reserved fields\n");
					return -EINVAL;
				}

				insn_idx += insn->off + 1;
				continue;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_EXIT uses reserved fields\n");
					return -EINVAL;
				}

				/* eBPF calling convention is such that R0 is used
				 * to return the value from eBPF program.
				 * Make sure that it's readable at this time
				 * of bpf_exit, which means that program wrote
				 * something into it earlier
				 */
				err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
				if (err)
					return err;

				if (is_pointer_value(env, BPF_REG_0)) {
					verbose("R0 leaks addr as return value\n");
					return -EACCES;
				}

process_bpf_exit:
				insn_idx = pop_stack(env, &prev_insn_idx);
				if (insn_idx < 0) {
					break;
				} else {
					do_print_state = true;
					continue;
				}
			} else {
				err = check_cond_jmp_op(env, insn, &insn_idx);
				if (err)
					return err;
			}
		} else if (class == BPF_LD) {
			u8 mode = BPF_MODE(insn->code);

			if (mode == BPF_ABS || mode == BPF_IND) {
				err = check_ld_abs(env, insn);
				if (err)
					return err;

			} else if (mode == BPF_IMM) {
				err = check_ld_imm(env, insn);
				if (err)
					return err;

				insn_idx++;
			} else {
				verbose("invalid BPF_LD mode\n");
				return -EINVAL;
			}
		} else {
			verbose("unknown insn class %d\n", class);
			return -EINVAL;
		}

		insn_idx++;
	}

	return 0;
}

/* look for pseudo eBPF instructions that access map FDs and
 * replace them with actual map pointers
 */
static int replace_map_fd_with_map_ptr(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i, j;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) == BPF_LDX &&
		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
			verbose("BPF_LDX uses reserved fields\n");
			return -EINVAL;
		}

		if (BPF_CLASS(insn->code) == BPF_STX &&
		    ((BPF_MODE(insn->code) != BPF_MEM &&
		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
			verbose("BPF_STX uses reserved fields\n");
			return -EINVAL;
		}

		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			struct bpf_map *map;
			struct fd f;

			if (i == insn_cnt - 1 || insn[1].code != 0 ||
			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
			    insn[1].off != 0) {
				verbose("invalid bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			if (insn->src_reg == 0)
				/* valid generic load 64-bit imm */
				goto next_insn;

			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
				verbose("unrecognized bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			f = fdget(insn->imm);
			map = __bpf_map_get(f);
			if (IS_ERR(map)) {
				verbose("fd %d is not pointing to valid bpf_map\n",
					insn->imm);
				return PTR_ERR(map);
			}

			/* store map pointer inside BPF_LD_IMM64 instruction */
			insn[0].imm = (u32) (unsigned long) map;
			insn[1].imm = ((u64) (unsigned long) map) >> 32;

			/* check whether we recorded this map already */
			for (j = 0; j < env->used_map_cnt; j++)
				if (env->used_maps[j] == map) {
					fdput(f);
					goto next_insn;
				}

			if (env->used_map_cnt >= MAX_USED_MAPS) {
				fdput(f);
				return -E2BIG;
			}

			/* hold the map. If the program is rejected by verifier,
			 * the map will be released by release_maps() or it
			 * will be used by the valid program until it's unloaded
			 * and all maps are released in free_bpf_prog_info()
			 */
			map = bpf_map_inc(map, false);
			if (IS_ERR(map)) {
				fdput(f);
				return PTR_ERR(map);
			}
			env->used_maps[env->used_map_cnt++] = map;

			fdput(f);
next_insn:
			insn++;
			i++;
		}
	}

	/* now all pseudo BPF_LD_IMM64 instructions load valid
	 * 'struct bpf_map *' into a register instead of user map_fd.
	 * These pointers will be used later by verifier to validate map access.
	 */
	return 0;
}

/* drop refcnt of maps used by the rejected program */
static void release_maps(struct verifier_env *env)
{
	int i;

	for (i = 0; i < env->used_map_cnt; i++)
		bpf_map_put(env->used_maps[i]);
}

/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++)
		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
			insn->src_reg = 0;
}

static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
{
	struct bpf_insn *insn = prog->insnsi;
	int insn_cnt = prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) != BPF_JMP ||
		    BPF_OP(insn->code) == BPF_CALL ||
		    BPF_OP(insn->code) == BPF_EXIT)
			continue;

		/* adjust offset of jmps if necessary */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
			insn->off -= delta;
	}
}
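
/* Worked example (hypothetical numbers): with pos = 5 and delta = 2, a
 * jump at i = 3 with off = 4 originally landed on insn 3 + 4 + 1 = 8;
 * since 3 < 5 and 8 > 5, its offset becomes 6, so it still reaches the
 * target that moved to insn 10.
 */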

/* convert load instructions that access fields of 'struct __sk_buff'
 * into sequence of instructions that access fields of 'struct sk_buff'
 */
static int convert_ctx_accesses(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	u32 cnt;
	int i;
	enum bpf_access_type type;

	if (!env->prog->aux->ops->convert_ctx_access)
		return 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
			type = BPF_READ;
		else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
			type = BPF_WRITE;
		else
			continue;

		if (insn->imm != PTR_TO_CTX) {
			/* clear internal mark */
			insn->imm = 0;
			continue;
		}

		cnt = env->prog->aux->ops->
			convert_ctx_access(type, insn->dst_reg, insn->src_reg,
					   insn->off, insn_buf, env->prog);
		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
			verbose("bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		if (cnt == 1) {
			memcpy(insn, insn_buf, sizeof(*insn));
			continue;
		}

		/* several new insns need to be inserted. Make room for them */
		insn_cnt += cnt - 1;
		new_prog = bpf_prog_realloc(env->prog,
					    bpf_prog_size(insn_cnt),
					    GFP_USER);
		if (!new_prog)
			return -ENOMEM;

		new_prog->len = insn_cnt;

		memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1,
			sizeof(*insn) * (insn_cnt - i - cnt));

		/* copy substitute insns in place of load instruction */
		memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);

		/* adjust branches in the whole program */
		adjust_branches(new_prog, i, cnt - 1);

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn = new_prog->insnsi + i + cnt - 1;
		i += cnt - 1;
	}

	return 0;
}

static void free_states(struct verifier_env *env)
{
	struct verifier_state_list *sl, *sln;
	int i;

	if (!env->explored_states)
		return;

	for (i = 0; i < env->prog->len; i++) {
		sl = env->explored_states[i];

		if (sl)
			while (sl != STATE_LIST_MARK) {
				sln = sl->next;
				kfree(sl);
				sl = sln;
			}
	}

	kfree(env->explored_states);
}

int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
	char __user *log_ubuf = NULL;
	struct verifier_env *env;
	int ret = -EINVAL;

	if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
		return -E2BIG;

	/* 'struct verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	env->prog = *prog;

	/* grab the mutex to protect few globals used by verifier */
	mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log_level = attr->log_level;
		log_ubuf = (char __user *) (unsigned long) attr->log_buf;
		log_size = attr->log_size;
		log_len = 0;

		ret = -EINVAL;
		/* log_* values have to be sane */
		if (log_size < 128 || log_size > UINT_MAX >> 8 ||
		    log_level == 0 || log_ubuf == NULL)
			goto free_env;

		ret = -ENOMEM;
		log_buf = vmalloc(log_size);
		if (!log_buf)
			goto free_env;
	} else {
		log_level = 0;
	}

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	env->explored_states = kcalloc(env->prog->len,
				       sizeof(struct verifier_state_list *),
				       GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = do_check(env);

skip_full_check:
	while (pop_stack(env, NULL) >= 0);
	free_states(env);

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (log_level && log_len >= log_size - 1) {
		BUG_ON(log_len >= log_size);
		/* verifier log exceeded user supplied buffer */
		ret = -ENOSPC;
		/* fall through to return what was recorded */
	}

	/* copy verifier log back to user space including trailing zero */
	if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
		ret = -EFAULT;
		goto free_log_buf;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto free_log_buf;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

free_log_buf:
	if (log_level)
		vfree(log_buf);
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_bpf_prog_info() will release them.
		 */
		release_maps(env);
	*prog = env->prog;
free_env:
	kfree(env);
	mutex_unlock(&bpf_verifier_lock);
	return ret;
}