/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - a loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 32k insn, which may be hit even if the total number
 * of insn is less than 4K, but there are too many branches that change
 * stack/regs. The number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR type) into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have UNKNOWN_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes UNKNOWN_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one such argument constraint.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * the function expects the 1st argument to be a const pointer to 'struct bpf_map'
 * and the 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 */
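
/* Illustrative example (a sketch, not part of the original file): a minimal
 * program that satisfies all of the checks described above, assuming 'map_fd'
 * refers to a map with 4-byte keys:
 *
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),  // init key on stack (STACK_MISC)
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // R2 type is PTR_TO_STACK, imm = -4
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_MOV64_IMM(BPF_REG_0, 0),           // R0 type is CONST_IMM 0
 *    BPF_EXIT_INSN(),
 */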

/* types of values stored in eBPF registers */
enum bpf_reg_type {
        NOT_INIT = 0,            /* nothing was written into register */
        UNKNOWN_VALUE,           /* reg doesn't contain a valid pointer */
        PTR_TO_CTX,              /* reg points to bpf_context */
        CONST_PTR_TO_MAP,        /* reg points to struct bpf_map */
        PTR_TO_MAP_VALUE,        /* reg points to map element value */
        PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
        FRAME_PTR,               /* reg == frame_pointer */
        PTR_TO_STACK,            /* reg == frame_pointer + imm */
        CONST_IMM,               /* constant integer value */
};

struct reg_state {
        enum bpf_reg_type type;
        union {
                /* valid when type == CONST_IMM | PTR_TO_STACK */
                int imm;

                /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
                 *   PTR_TO_MAP_VALUE_OR_NULL
                 */
                struct bpf_map *map_ptr;
        };
};

enum bpf_stack_slot_type {
        STACK_INVALID,    /* nothing was stored in this stack slot */
        STACK_SPILL,      /* 1st byte of register spilled into stack */
        STACK_SPILL_PART, /* other 7 bytes of register spill */
        STACK_MISC        /* BPF program wrote some data into this slot */
};

struct bpf_stack_slot {
        enum bpf_stack_slot_type stype;
        struct reg_state reg_st;
};

/* state of the program:
 * type of all registers and stack info
 */
struct verifier_state {
        struct reg_state regs[MAX_BPF_REG];
        struct bpf_stack_slot stack[MAX_BPF_STACK];
};

/* linked list of verifier states used to prune search */
struct verifier_state_list {
        struct verifier_state state;
        struct verifier_state_list *next;
};

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct verifier_stack_elem {
        /* verifier state is 'st'
         * before processing instruction 'insn_idx'
         * and after processing instruction 'prev_insn_idx'
         */
        struct verifier_state st;
        int insn_idx;
        int prev_insn_idx;
        struct verifier_stack_elem *next;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct verifier_env {
        struct bpf_prog *prog;          /* eBPF program being verified */
        struct verifier_stack_elem *head; /* stack of verifier states to be processed */
        int stack_size;                 /* number of states to be processed */
        struct verifier_state cur_state; /* current verifier state */
        struct verifier_state_list **explored_states; /* search pruning optimization */
        struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
        u32 used_map_cnt;               /* number of used maps */
};

/* verbose verifier prints what it's seeing
 * bpf_check() is called under lock, so no race to access these global vars
 */
static u32 log_level, log_size, log_len;
static char *log_buf;

static DEFINE_MUTEX(bpf_verifier_lock);

/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */
static void verbose(const char *fmt, ...)
{
        va_list args;

        if (log_level == 0 || log_len >= log_size - 1)
                return;

        va_start(args, fmt);
        log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
        va_end(args);
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
        [NOT_INIT]              = "?",
        [UNKNOWN_VALUE]         = "inv",
        [PTR_TO_CTX]            = "ctx",
        [CONST_PTR_TO_MAP]      = "map_ptr",
        [PTR_TO_MAP_VALUE]      = "map_value",
        [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
        [FRAME_PTR]             = "fp",
        [PTR_TO_STACK]          = "fp",
        [CONST_IMM]             = "imm",
};

static void print_verifier_state(struct verifier_env *env)
{
        enum bpf_reg_type t;
        int i;

        for (i = 0; i < MAX_BPF_REG; i++) {
                t = env->cur_state.regs[i].type;
                if (t == NOT_INIT)
                        continue;
                verbose(" R%d=%s", i, reg_type_str[t]);
                if (t == CONST_IMM || t == PTR_TO_STACK)
                        verbose("%d", env->cur_state.regs[i].imm);
                else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
                         t == PTR_TO_MAP_VALUE_OR_NULL)
                        verbose("(ks=%d,vs=%d)",
                                env->cur_state.regs[i].map_ptr->key_size,
                                env->cur_state.regs[i].map_ptr->value_size);
        }
        for (i = 0; i < MAX_BPF_STACK; i++) {
                if (env->cur_state.stack[i].stype == STACK_SPILL)
                        verbose(" fp%d=%s", -MAX_BPF_STACK + i,
                                reg_type_str[env->cur_state.stack[i].reg_st.type]);
        }
        verbose("\n");
}

static const char *const bpf_class_string[] = {
        [BPF_LD]    = "ld",
        [BPF_LDX]   = "ldx",
        [BPF_ST]    = "st",
        [BPF_STX]   = "stx",
        [BPF_ALU]   = "alu",
        [BPF_JMP]   = "jmp",
        [BPF_RET]   = "BUG",
        [BPF_ALU64] = "alu64",
};

static const char *const bpf_alu_string[] = {
        [BPF_ADD >> 4]  = "+=",
        [BPF_SUB >> 4]  = "-=",
        [BPF_MUL >> 4]  = "*=",
        [BPF_DIV >> 4]  = "/=",
        [BPF_OR  >> 4]  = "|=",
        [BPF_AND >> 4]  = "&=",
        [BPF_LSH >> 4]  = "<<=",
        [BPF_RSH >> 4]  = ">>=",
        [BPF_NEG >> 4]  = "neg",
        [BPF_MOD >> 4]  = "%=",
        [BPF_XOR >> 4]  = "^=",
        [BPF_MOV >> 4]  = "=",
        [BPF_ARSH >> 4] = "s>>=",
        [BPF_END >> 4]  = "endian",
};

static const char *const bpf_ldst_string[] = {
        [BPF_W >> 3]  = "u32",
        [BPF_H >> 3]  = "u16",
        [BPF_B >> 3]  = "u8",
        [BPF_DW >> 3] = "u64",
};

static const char *const bpf_jmp_string[] = {
        [BPF_JA >> 4]   = "jmp",
        [BPF_JEQ >> 4]  = "==",
        [BPF_JGT >> 4]  = ">",
        [BPF_JGE >> 4]  = ">=",
        [BPF_JSET >> 4] = "&",
        [BPF_JNE >> 4]  = "!=",
        [BPF_JSGT >> 4] = "s>",
        [BPF_JSGE >> 4] = "s>=",
        [BPF_CALL >> 4] = "call",
        [BPF_EXIT >> 4] = "exit",
};

static void print_bpf_insn(struct bpf_insn *insn)
{
        u8 class = BPF_CLASS(insn->code);

        if (class == BPF_ALU || class == BPF_ALU64) {
                if (BPF_SRC(insn->code) == BPF_X)
                        verbose("(%02x) %sr%d %s %sr%d\n",
                                insn->code, class == BPF_ALU ? "(u32) " : "",
                                insn->dst_reg,
                                bpf_alu_string[BPF_OP(insn->code) >> 4],
                                class == BPF_ALU ? "(u32) " : "",
                                insn->src_reg);
                else
                        verbose("(%02x) %sr%d %s %s%d\n",
                                insn->code, class == BPF_ALU ? "(u32) " : "",
                                insn->dst_reg,
                                bpf_alu_string[BPF_OP(insn->code) >> 4],
                                class == BPF_ALU ? "(u32) " : "",
                                insn->imm);
        } else if (class == BPF_STX) {
                if (BPF_MODE(insn->code) == BPF_MEM)
                        verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
                                insn->code,
                                bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                                insn->dst_reg,
                                insn->off, insn->src_reg);
                else if (BPF_MODE(insn->code) == BPF_XADD)
                        verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
                                insn->code,
                                bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                                insn->dst_reg, insn->off,
                                insn->src_reg);
                else
                        verbose("BUG_%02x\n", insn->code);
        } else if (class == BPF_ST) {
                if (BPF_MODE(insn->code) != BPF_MEM) {
                        verbose("BUG_st_%02x\n", insn->code);
                        return;
                }
                verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
                        insn->code,
                        bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                        insn->dst_reg,
                        insn->off, insn->imm);
        } else if (class == BPF_LDX) {
                if (BPF_MODE(insn->code) != BPF_MEM) {
                        verbose("BUG_ldx_%02x\n", insn->code);
                        return;
                }
                verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
                        insn->code, insn->dst_reg,
                        bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                        insn->src_reg, insn->off);
        } else if (class == BPF_LD) {
                if (BPF_MODE(insn->code) == BPF_ABS) {
                        verbose("(%02x) r0 = *(%s *)skb[%d]\n",
                                insn->code,
                                bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                                insn->imm);
                } else if (BPF_MODE(insn->code) == BPF_IND) {
                        verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
                                insn->code,
                                bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                                insn->src_reg, insn->imm);
                } else if (BPF_MODE(insn->code) == BPF_IMM) {
                        verbose("(%02x) r%d = 0x%x\n",
                                insn->code, insn->dst_reg, insn->imm);
                } else {
                        verbose("BUG_ld_%02x\n", insn->code);
                        return;
                }
        } else if (class == BPF_JMP) {
                u8 opcode = BPF_OP(insn->code);

                if (opcode == BPF_CALL) {
                        verbose("(%02x) call %d\n", insn->code, insn->imm);
                } else if (insn->code == (BPF_JMP | BPF_JA)) {
                        verbose("(%02x) goto pc%+d\n",
                                insn->code, insn->off);
                } else if (insn->code == (BPF_JMP | BPF_EXIT)) {
                        verbose("(%02x) exit\n", insn->code);
                } else if (BPF_SRC(insn->code) == BPF_X) {
                        verbose("(%02x) if r%d %s r%d goto pc%+d\n",
                                insn->code, insn->dst_reg,
                                bpf_jmp_string[BPF_OP(insn->code) >> 4],
                                insn->src_reg, insn->off);
                } else {
                        verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
                                insn->code, insn->dst_reg,
                                bpf_jmp_string[BPF_OP(insn->code) >> 4],
                                insn->imm, insn->off);
                }
        } else {
                verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
        }
}
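
/* Sample output (illustrative, not from the original file):
 * BPF_MOV64_IMM(BPF_REG_0, 0) encodes to opcode 0xb7 and is printed as
 * "(b7) r0 = 0", while BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5) encodes to
 * 0x15 and is printed as "(15) if r0 == 0x0 goto pc+5".
 */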

static int pop_stack(struct verifier_env *env, int *prev_insn_idx)
{
        struct verifier_stack_elem *elem;
        int insn_idx;

        if (env->head == NULL)
                return -1;

        memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
        insn_idx = env->head->insn_idx;
        if (prev_insn_idx)
                *prev_insn_idx = env->head->prev_insn_idx;
        elem = env->head->next;
        kfree(env->head);
        env->head = elem;
        env->stack_size--;
        return insn_idx;
}

static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx,
                                         int prev_insn_idx)
{
        struct verifier_stack_elem *elem;

        elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL);
        if (!elem)
                goto err;

        memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
        elem->insn_idx = insn_idx;
        elem->prev_insn_idx = prev_insn_idx;
        elem->next = env->head;
        env->head = elem;
        env->stack_size++;
        if (env->stack_size > 1024) {
                verbose("BPF program is too complex\n");
                goto err;
        }
        return &elem->st;
err:
        /* pop all elements and return */
        while (pop_stack(env, NULL) >= 0);
        return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
        BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void init_reg_state(struct reg_state *regs)
{
        int i;

        for (i = 0; i < MAX_BPF_REG; i++) {
                regs[i].type = NOT_INIT;
                regs[i].imm = 0;
                regs[i].map_ptr = NULL;
        }

        /* frame pointer */
        regs[BPF_REG_FP].type = FRAME_PTR;

        /* 1st arg to a function */
        regs[BPF_REG_1].type = PTR_TO_CTX;
}

static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
{
        BUG_ON(regno >= MAX_BPF_REG);
        regs[regno].type = UNKNOWN_VALUE;
        regs[regno].imm = 0;
        regs[regno].map_ptr = NULL;
}

enum reg_arg_type {
        SRC_OP,         /* register is used as source operand */
        DST_OP,         /* register is used as destination operand */
        DST_OP_NO_MARK  /* same as above, check only, don't mark */
};

static int check_reg_arg(struct reg_state *regs, u32 regno,
                         enum reg_arg_type t)
{
        if (regno >= MAX_BPF_REG) {
                verbose("R%d is invalid\n", regno);
                return -EINVAL;
        }

        if (t == SRC_OP) {
                /* check whether register used as source operand can be read */
                if (regs[regno].type == NOT_INIT) {
                        verbose("R%d !read_ok\n", regno);
                        return -EACCES;
                }
        } else {
                /* check whether register used as dest operand can be written to */
                if (regno == BPF_REG_FP) {
                        verbose("frame pointer is read only\n");
                        return -EACCES;
                }
                if (t == DST_OP)
                        mark_reg_unknown_value(regs, regno);
        }
        return 0;
}

static int bpf_size_to_bytes(int bpf_size)
{
        if (bpf_size == BPF_W)
                return 4;
        else if (bpf_size == BPF_H)
                return 2;
        else if (bpf_size == BPF_B)
                return 1;
        else if (bpf_size == BPF_DW)
                return 8;
        else
                return -EINVAL;
}

/* check_stack_read/write functions track spill/fill of registers;
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct verifier_state *state, int off, int size,
                             int value_regno)
{
        struct bpf_stack_slot *slot;
        int i;

        if (value_regno >= 0 &&
            (state->regs[value_regno].type == PTR_TO_MAP_VALUE ||
             state->regs[value_regno].type == PTR_TO_STACK ||
             state->regs[value_regno].type == PTR_TO_CTX)) {

                /* register containing pointer is being spilled into stack */
                if (size != 8) {
                        verbose("invalid size of register spill\n");
                        return -EACCES;
                }

                slot = &state->stack[MAX_BPF_STACK + off];
                slot->stype = STACK_SPILL;
                /* save register state */
                slot->reg_st = state->regs[value_regno];
                for (i = 1; i < 8; i++) {
                        slot = &state->stack[MAX_BPF_STACK + off + i];
                        slot->stype = STACK_SPILL_PART;
                        slot->reg_st.type = UNKNOWN_VALUE;
                        slot->reg_st.map_ptr = NULL;
                }
        } else {

                /* regular write of data into stack */
                for (i = 0; i < size; i++) {
                        slot = &state->stack[MAX_BPF_STACK + off + i];
                        slot->stype = STACK_MISC;
                        slot->reg_st.type = UNKNOWN_VALUE;
                        slot->reg_st.map_ptr = NULL;
                }
        }
        return 0;
}
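
/* Illustrative (a sketch, not from the original file): spilling a pointer
 * register with
 *    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),  // *(u64 *)(fp - 8) = r1
 * marks stack slot fp-8 as STACK_SPILL holding R1's reg_state and slots
 * fp-7..fp-1 as STACK_SPILL_PART; a later 8-byte read from fp-8 restores
 * the saved register state (see check_stack_read() below).
 */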

static int check_stack_read(struct verifier_state *state, int off, int size,
                            int value_regno)
{
        int i;
        struct bpf_stack_slot *slot;

        slot = &state->stack[MAX_BPF_STACK + off];

        if (slot->stype == STACK_SPILL) {
                if (size != 8) {
                        verbose("invalid size of register spill\n");
                        return -EACCES;
                }
                for (i = 1; i < 8; i++) {
                        if (state->stack[MAX_BPF_STACK + off + i].stype !=
                            STACK_SPILL_PART) {
                                verbose("corrupted spill memory\n");
                                return -EACCES;
                        }
                }

                if (value_regno >= 0)
                        /* restore register state from stack */
                        state->regs[value_regno] = slot->reg_st;
                return 0;
        } else {
                for (i = 0; i < size; i++) {
                        if (state->stack[MAX_BPF_STACK + off + i].stype !=
                            STACK_MISC) {
                                verbose("invalid read from stack off %d+%d size %d\n",
                                        off, i, size);
                                return -EACCES;
                        }
                }
                if (value_regno >= 0)
                        /* have read misc data from the stack */
                        mark_reg_unknown_value(state->regs, value_regno);
                return 0;
        }
}

/* check read/write into map element returned by bpf_map_lookup_elem() */
static int check_map_access(struct verifier_env *env, u32 regno, int off,
                            int size)
{
        struct bpf_map *map = env->cur_state.regs[regno].map_ptr;

        if (off < 0 || off + size > map->value_size) {
                verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
                        map->value_size, off, size);
                return -EACCES;
        }
        return 0;
}
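
/* Illustrative (a sketch, not from the original file): for a map with
 * value_size == 8, an aligned store such as
 *    BPF_ST_MEM(BPF_W, BPF_REG_0, 4, 1)   // off=4, size=4, 4+4 <= 8: ok
 * is accepted, while the same store at off=8 fails here with
 * "invalid access to map value, value_size=8 off=8 size=4".
 */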

/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
                            enum bpf_access_type t)
{
        if (env->prog->aux->ops->is_valid_access &&
            env->prog->aux->ops->is_valid_access(off, size, t))
                return 0;

        verbose("invalid bpf_context access off=%d size=%d\n", off, size);
        return -EACCES;
}

/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register whose value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct verifier_env *env, u32 regno, int off,
                            int bpf_size, enum bpf_access_type t,
                            int value_regno)
{
        struct verifier_state *state = &env->cur_state;
        int size, err = 0;

        size = bpf_size_to_bytes(bpf_size);
        if (size < 0)
                return size;

        if (off % size != 0) {
                verbose("misaligned access off %d size %d\n", off, size);
                return -EACCES;
        }

        if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
                err = check_map_access(env, regno, off, size);
                if (!err && t == BPF_READ && value_regno >= 0)
                        mark_reg_unknown_value(state->regs, value_regno);

        } else if (state->regs[regno].type == PTR_TO_CTX) {
                err = check_ctx_access(env, off, size, t);
                if (!err && t == BPF_READ && value_regno >= 0)
                        mark_reg_unknown_value(state->regs, value_regno);

        } else if (state->regs[regno].type == FRAME_PTR) {
                if (off >= 0 || off < -MAX_BPF_STACK) {
                        verbose("invalid stack off=%d size=%d\n", off, size);
                        return -EACCES;
                }
                if (t == BPF_WRITE)
                        err = check_stack_write(state, off, size, value_regno);
                else
                        err = check_stack_read(state, off, size, value_regno);
        } else {
                verbose("R%d invalid mem access '%s'\n",
                        regno, reg_type_str[state->regs[regno].type]);
                return -EACCES;
        }
        return err;
}

static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
{
        struct reg_state *regs = env->cur_state.regs;
        int err;

        if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
            insn->imm != 0) {
                verbose("BPF_XADD uses reserved fields\n");
                return -EINVAL;
        }

        /* check src1 operand */
        err = check_reg_arg(regs, insn->src_reg, SRC_OP);
        if (err)
                return err;

        /* check src2 operand */
        err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
        if (err)
                return err;

        /* check whether atomic_add can read the memory */
        err = check_mem_access(env, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_READ, -1);
        if (err)
                return err;

        /* check whether atomic_add can write into the same memory */
        return check_mem_access(env, insn->dst_reg, insn->off,
                                BPF_SIZE(insn->code), BPF_WRITE, -1);
}

/* when register 'regno' is passed into a function that will read 'access_size'
 * bytes from that pointer, make sure that it's within stack boundary
 * and all elements of the stack are initialized
 */
static int check_stack_boundary(struct verifier_env *env,
                                int regno, int access_size)
{
        struct verifier_state *state = &env->cur_state;
        struct reg_state *regs = state->regs;
        int off, i;

        if (regs[regno].type != PTR_TO_STACK)
                return -EACCES;

        off = regs[regno].imm;
        if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
            access_size <= 0) {
                verbose("invalid stack type R%d off=%d access_size=%d\n",
                        regno, off, access_size);
                return -EACCES;
        }

        for (i = 0; i < access_size; i++) {
                if (state->stack[MAX_BPF_STACK + off + i].stype != STACK_MISC) {
                        verbose("invalid indirect read from stack off %d+%d size %d\n",
                                off, i, access_size);
                        return -EACCES;
                }
        }
        return 0;
}
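
/* Illustrative (a sketch, not from the original file): if R2 has type
 * PTR_TO_STACK with imm == -4 and is passed as ARG_PTR_TO_MAP_KEY for a map
 * with key_size == 4, bytes fp-4..fp-1 must all be STACK_MISC, i.e. the
 * program must have stored something there first, e.g.
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
 * otherwise the verifier reports "invalid indirect read from stack".
 */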

static int check_func_arg(struct verifier_env *env, u32 regno,
                          enum bpf_arg_type arg_type, struct bpf_map **mapp)
{
        struct reg_state *reg = env->cur_state.regs + regno;
        enum bpf_reg_type expected_type;
        int err = 0;

        if (arg_type == ARG_ANYTHING)
                return 0;

        if (reg->type == NOT_INIT) {
                verbose("R%d !read_ok\n", regno);
                return -EACCES;
        }

        if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
            arg_type == ARG_PTR_TO_MAP_VALUE) {
                expected_type = PTR_TO_STACK;
        } else if (arg_type == ARG_CONST_STACK_SIZE) {
                expected_type = CONST_IMM;
        } else if (arg_type == ARG_CONST_MAP_PTR) {
                expected_type = CONST_PTR_TO_MAP;
        } else {
                verbose("unsupported arg_type %d\n", arg_type);
                return -EFAULT;
        }

        if (reg->type != expected_type) {
                verbose("R%d type=%s expected=%s\n", regno,
                        reg_type_str[reg->type], reg_type_str[expected_type]);
                return -EACCES;
        }

        if (arg_type == ARG_CONST_MAP_PTR) {
                /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
                *mapp = reg->map_ptr;

        } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
                /* bpf_map_xxx(..., map_ptr, ..., key) call:
                 * check that [key, key + map->key_size) are within
                 * stack limits and initialized
                 */
                if (!*mapp) {
                        /* in function declaration map_ptr must come before
                         * map_key, so that it's verified and known before
                         * we have to check map_key here. Otherwise it means
                         * that kernel subsystem misconfigured verifier
                         */
                        verbose("invalid map_ptr to access map->key\n");
                        return -EACCES;
                }
                err = check_stack_boundary(env, regno, (*mapp)->key_size);

        } else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
                /* bpf_map_xxx(..., map_ptr, ..., value) call:
                 * check [value, value + map->value_size) validity
                 */
                if (!*mapp) {
                        /* kernel subsystem misconfigured verifier */
                        verbose("invalid map_ptr to access map->value\n");
                        return -EACCES;
                }
                err = check_stack_boundary(env, regno, (*mapp)->value_size);

        } else if (arg_type == ARG_CONST_STACK_SIZE) {
                /* bpf_xxx(..., buf, len) call will access 'len' bytes
                 * from stack pointer 'buf'. Check it
                 * note: regno == len, regno - 1 == buf
                 */
                if (regno == 0) {
                        /* kernel subsystem misconfigured verifier */
                        verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
                        return -EACCES;
                }
                err = check_stack_boundary(env, regno - 1, reg->imm);
        }

        return err;
}

static int check_call(struct verifier_env *env, int func_id)
{
        struct verifier_state *state = &env->cur_state;
        const struct bpf_func_proto *fn = NULL;
        struct reg_state *regs = state->regs;
        struct bpf_map *map = NULL;
        struct reg_state *reg;
        int i, err;

        /* find function prototype */
        if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
                verbose("invalid func %d\n", func_id);
                return -EINVAL;
        }

        if (env->prog->aux->ops->get_func_proto)
                fn = env->prog->aux->ops->get_func_proto(func_id);

        if (!fn) {
                verbose("unknown func %d\n", func_id);
                return -EINVAL;
        }

        /* eBPF programs must be GPL compatible to use GPL-ed functions */
        if (!env->prog->aux->is_gpl_compatible && fn->gpl_only) {
                verbose("cannot call GPL only function from proprietary program\n");
                return -EINVAL;
        }

        /* check args */
        err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map);
        if (err)
                return err;
        err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map);
        if (err)
                return err;
        err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map);
        if (err)
                return err;
        err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map);
        if (err)
                return err;
        err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map);
        if (err)
                return err;

        /* reset caller saved regs */
        for (i = 0; i < CALLER_SAVED_REGS; i++) {
                reg = regs + caller_saved[i];
                reg->type = NOT_INIT;
                reg->imm = 0;
        }

        /* update return register */
        if (fn->ret_type == RET_INTEGER) {
                regs[BPF_REG_0].type = UNKNOWN_VALUE;
        } else if (fn->ret_type == RET_VOID) {
                regs[BPF_REG_0].type = NOT_INIT;
        } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
                regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
                /* remember map_ptr, so that check_map_access()
                 * can check 'value_size' boundary of memory access
                 * to map element returned from bpf_map_lookup_elem()
                 */
                if (map == NULL) {
                        verbose("kernel subsystem misconfigured verifier\n");
                        return -EINVAL;
                }
                regs[BPF_REG_0].map_ptr = map;
        } else {
                verbose("unknown return type %d of func %d\n",
                        fn->ret_type, func_id);
                return -EINVAL;
        }
        return 0;
}
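
/* Illustrative (a sketch, not from the original file): because R1-R5 are
 * reset to NOT_INIT above, reading any of them right after a helper call,
 * e.g.
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
 * is rejected by check_reg_arg() with "R1 !read_ok".
 */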

/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct reg_state *regs, struct bpf_insn *insn)
{
        u8 opcode = BPF_OP(insn->code);
        int err;

        if (opcode == BPF_END || opcode == BPF_NEG) {
                if (opcode == BPF_NEG) {
                        if (BPF_SRC(insn->code) != 0 ||
                            insn->src_reg != BPF_REG_0 ||
                            insn->off != 0 || insn->imm != 0) {
                                verbose("BPF_NEG uses reserved fields\n");
                                return -EINVAL;
                        }
                } else {
                        if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
                            (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
                                verbose("BPF_END uses reserved fields\n");
                                return -EINVAL;
                        }
                }

                /* check src operand */
                err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
                if (err)
                        return err;

                /* check dest operand */
                err = check_reg_arg(regs, insn->dst_reg, DST_OP);
                if (err)
                        return err;

        } else if (opcode == BPF_MOV) {

                if (BPF_SRC(insn->code) == BPF_X) {
                        if (insn->imm != 0 || insn->off != 0) {
                                verbose("BPF_MOV uses reserved fields\n");
                                return -EINVAL;
                        }

                        /* check src operand */
                        err = check_reg_arg(regs, insn->src_reg, SRC_OP);
                        if (err)
                                return err;
                } else {
                        if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
                                verbose("BPF_MOV uses reserved fields\n");
                                return -EINVAL;
                        }
                }

                /* check dest operand */
                err = check_reg_arg(regs, insn->dst_reg, DST_OP);
                if (err)
                        return err;

                if (BPF_SRC(insn->code) == BPF_X) {
                        if (BPF_CLASS(insn->code) == BPF_ALU64) {
                                /* case: R1 = R2
                                 * copy register state to dest reg
                                 */
                                regs[insn->dst_reg] = regs[insn->src_reg];
                        } else {
                                regs[insn->dst_reg].type = UNKNOWN_VALUE;
                                regs[insn->dst_reg].map_ptr = NULL;
                        }
                } else {
                        /* case: R = imm
                         * remember the value we stored into this reg
                         */
                        regs[insn->dst_reg].type = CONST_IMM;
                        regs[insn->dst_reg].imm = insn->imm;
                }

        } else if (opcode > BPF_END) {
                verbose("invalid BPF_ALU opcode %x\n", opcode);
                return -EINVAL;

        } else {        /* all other ALU ops: and, sub, xor, add, ... */

                bool stack_relative = false;

                if (BPF_SRC(insn->code) == BPF_X) {
                        if (insn->imm != 0 || insn->off != 0) {
                                verbose("BPF_ALU uses reserved fields\n");
                                return -EINVAL;
                        }
                        /* check src1 operand */
                        err = check_reg_arg(regs, insn->src_reg, SRC_OP);
                        if (err)
                                return err;
                } else {
                        if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
                                verbose("BPF_ALU uses reserved fields\n");
                                return -EINVAL;
                        }
                }

                /* check src2 operand */
                err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
                if (err)
                        return err;

                if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
                    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
                        verbose("div by zero\n");
                        return -EINVAL;
                }

                /* pattern match 'bpf_add Rx, imm' instruction */
                if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
                    regs[insn->dst_reg].type == FRAME_PTR &&
                    BPF_SRC(insn->code) == BPF_K)
                        stack_relative = true;

                /* check dest operand */
                err = check_reg_arg(regs, insn->dst_reg, DST_OP);
                if (err)
                        return err;

                if (stack_relative) {
                        regs[insn->dst_reg].type = PTR_TO_STACK;
                        regs[insn->dst_reg].imm = insn->imm;
                }
        }

        return 0;
}

static int check_cond_jmp_op(struct verifier_env *env,
                             struct bpf_insn *insn, int *insn_idx)
{
        struct reg_state *regs = env->cur_state.regs;
        struct verifier_state *other_branch;
        u8 opcode = BPF_OP(insn->code);
        int err;

        if (opcode > BPF_EXIT) {
                verbose("invalid BPF_JMP opcode %x\n", opcode);
                return -EINVAL;
        }

        if (BPF_SRC(insn->code) == BPF_X) {
                if (insn->imm != 0) {
                        verbose("BPF_JMP uses reserved fields\n");
                        return -EINVAL;
                }

                /* check src1 operand */
                err = check_reg_arg(regs, insn->src_reg, SRC_OP);
                if (err)
                        return err;
        } else {
                if (insn->src_reg != BPF_REG_0) {
                        verbose("BPF_JMP uses reserved fields\n");
                        return -EINVAL;
                }
        }

        /* check src2 operand */
        err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
        if (err)
                return err;

        /* detect if R == 0 where R was initialized to zero earlier */
        if (BPF_SRC(insn->code) == BPF_K &&
            (opcode == BPF_JEQ || opcode == BPF_JNE) &&
            regs[insn->dst_reg].type == CONST_IMM &&
            regs[insn->dst_reg].imm == insn->imm) {
                if (opcode == BPF_JEQ) {
                        /* if (imm == imm) goto pc+off;
                         * only follow the goto, ignore fall-through
                         */
                        *insn_idx += insn->off;
                        return 0;
                } else {
                        /* if (imm != imm) goto pc+off;
                         * only follow fall-through branch, since
                         * that's where the program will go
                         */
                        return 0;
                }
        }

        other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
        if (!other_branch)
                return -EFAULT;

        /* detect if R == 0 where R is the value returned from bpf_map_lookup_elem() */
        if (BPF_SRC(insn->code) == BPF_K &&
            insn->imm == 0 && (opcode == BPF_JEQ ||
                               opcode == BPF_JNE) &&
            regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) {
                if (opcode == BPF_JEQ) {
                        /* next fallthrough insn can access memory via
                         * this register
                         */
                        regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
                        /* branch target cannot access it, since reg == 0 */
                        other_branch->regs[insn->dst_reg].type = CONST_IMM;
                        other_branch->regs[insn->dst_reg].imm = 0;
                } else {
                        other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
                        regs[insn->dst_reg].type = CONST_IMM;
                        regs[insn->dst_reg].imm = 0;
                }
        } else if (BPF_SRC(insn->code) == BPF_K &&
                   (opcode == BPF_JEQ || opcode == BPF_JNE)) {

                if (opcode == BPF_JEQ) {
                        /* detect if (R == imm) goto
                         * and in the target state recognize that R = imm
                         */
                        other_branch->regs[insn->dst_reg].type = CONST_IMM;
                        other_branch->regs[insn->dst_reg].imm = insn->imm;
                } else {
                        /* detect if (R != imm) goto
                         * and in the fall-through state recognize that R = imm
                         */
                        regs[insn->dst_reg].type = CONST_IMM;
                        regs[insn->dst_reg].imm = insn->imm;
                }
        }
        if (log_level)
                print_verifier_state(env);
        return 0;
}
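
/* Illustrative (a sketch, not from the original file): the canonical NULL
 * check after a map lookup, assuming the map's value_size is at least 4:
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),   // if R0 == NULL skip the store
 *    BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 1),      // safe: R0 is PTR_TO_MAP_VALUE
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 * leaves R0 as PTR_TO_MAP_VALUE in the fall-through state and as CONST_IMM 0
 * in the branch-target state pushed via push_stack().
 */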

/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
        u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;

        return (struct bpf_map *) (unsigned long) imm64;
}
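
/* Illustrative (a sketch, not from the original file): BPF_LD_MAP_FD(reg, fd)
 * expands to a two-insn BPF_LD_IMM64 with src_reg == BPF_PSEUDO_MAP_FD; after
 * replace_map_fd_with_map_ptr() the 64-bit immediate holds the
 * 'struct bpf_map *' itself, split as insn[0].imm = lower 32 bits and
 * insn[1].imm = upper 32 bits, which the helper above reassembles.
 */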

/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
{
        struct reg_state *regs = env->cur_state.regs;
        int err;

        if (BPF_SIZE(insn->code) != BPF_DW) {
                verbose("invalid BPF_LD_IMM insn\n");
                return -EINVAL;
        }
        if (insn->off != 0) {
                verbose("BPF_LD_IMM64 uses reserved fields\n");
                return -EINVAL;
        }

        err = check_reg_arg(regs, insn->dst_reg, DST_OP);
        if (err)
                return err;

        if (insn->src_reg == 0)
                /* generic move 64-bit immediate into a register */
                return 0;

        /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
        BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);

        regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
        regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
        return 0;
}

/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6            t <- S.pop()
 * 7            if t is what we're looking for:
 * 8                return t
 * 9            for all edges e in G.adjacentEdges(t) do
 * 10               if edge e is already labelled
 * 11                   continue with the next edge
 * 12               w <- G.adjacentVertex(t,e)
 * 13               if vertex w is not discovered and not explored
 * 14                   label e as tree-edge
 * 15                   label w as discovered
 * 16                   S.push(w)
 * 17                   continue at 5
 * 18               else if vertex w is discovered
 * 19                   label e as back-edge
 * 20               else
 * 21                   // vertex w is explored
 * 22                   label e as forward- or cross-edge
 * 23           label t as explored
 * 24           S.pop()
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */

enum {
        DISCOVERED = 0x10,
        EXPLORED = 0x20,
        FALLTHROUGH = 1,
        BRANCH = 2,
};

#define STATE_LIST_MARK ((struct verifier_state_list *) -1L)

static int *insn_stack; /* stack of insns to process */
static int cur_stack;   /* current stack index */
static int *insn_state;

/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 * e - edge
 */
static int push_insn(int t, int w, int e, struct verifier_env *env)
{
        if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
                return 0;

        if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
                return 0;

        if (w < 0 || w >= env->prog->len) {
                verbose("jump out of range from insn %d to %d\n", t, w);
                return -EINVAL;
        }

        if (e == BRANCH)
                /* mark branch target for state pruning */
                env->explored_states[w] = STATE_LIST_MARK;

        if (insn_state[w] == 0) {
                /* tree-edge */
                insn_state[t] = DISCOVERED | e;
                insn_state[w] = DISCOVERED;
                if (cur_stack >= env->prog->len)
                        return -E2BIG;
                insn_stack[cur_stack++] = w;
                return 1;
        } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
                verbose("back-edge from insn %d to %d\n", t, w);
                return -EINVAL;
        } else if (insn_state[w] == EXPLORED) {
                /* forward- or cross-edge */
                insn_state[t] = DISCOVERED | e;
        } else {
                verbose("insn state internal bug\n");
                return -EFAULT;
        }
        return 0;
}

/* non-recursive depth-first-search to detect loops in BPF program
 * loop == back-edge in directed graph
 */
static int check_cfg(struct verifier_env *env)
{
	struct bpf_insn *insns = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int ret = 0;
	int i, t;

	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_state)
		return -ENOMEM;

	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_stack) {
		kfree(insn_state);
		return -ENOMEM;
	}

	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
	insn_stack[0] = 0; /* 0 is the first instruction */
	cur_stack = 1;

peek_stack:
	if (cur_stack == 0)
		goto check_state;
	t = insn_stack[cur_stack - 1];

	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
		u8 opcode = BPF_OP(insns[t].code);

		if (opcode == BPF_EXIT) {
			goto mark_explored;
		} else if (opcode == BPF_CALL) {
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
		} else if (opcode == BPF_JA) {
			if (BPF_SRC(insns[t].code) != BPF_K) {
				ret = -EINVAL;
				goto err_free;
			}
			/* unconditional jump with single edge */
			ret = push_insn(t, t + insns[t].off + 1,
					FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
			/* tell verifier to check for equivalent states
			 * after every call and jump
			 */
			env->explored_states[t + 1] = STATE_LIST_MARK;
		} else {
			/* conditional jump with two edges */
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;

			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
		}
	} else {
		/* all other non-branch instructions with single
		 * fall-through edge
		 */
		ret = push_insn(t, t + 1, FALLTHROUGH, env);
		if (ret == 1)
			goto peek_stack;
		else if (ret < 0)
			goto err_free;
	}

mark_explored:
	insn_state[t] = EXPLORED;
	if (cur_stack-- <= 0) {
		verbose("pop stack internal bug\n");
		ret = -EFAULT;
		goto err_free;
	}
	goto peek_stack;

check_state:
	for (i = 0; i < insn_cnt; i++) {
		if (insn_state[i] != EXPLORED) {
			verbose("unreachable insn %d\n", i);
			ret = -EINVAL;
			goto err_free;
		}
	}
	ret = 0; /* cfg looks good */

err_free:
	kfree(insn_state);
	kfree(insn_stack);
	return ret;
}
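
/* Illustrative example: a trivial self-loop such as
 *	BPF_JMP_IMM(BPF_JA, 0, 0, -1),
 *	BPF_EXIT_INSN(),
 * (macros from linux/filter.h) is rejected here. When the DFS follows
 * insn 0's edge back to insn 0, the target is still DISCOVERED, so
 * push_insn() prints "back-edge from insn 0 to 0" and check_cfg()
 * returns -EINVAL.
 */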

/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called while the verifier explores different branches of
 * execution popped from the state stack. If it sees an old state that has
 * a more strict register state and a more strict stack state, then this
 * execution branch doesn't need to be explored further, since the verifier
 * already concluded that the more strict state leads to a valid finish.
 *
 * Therefore two states are equivalent if the explored register state and
 * the explored stack state are each at least as conservative as the
 * current one.
 * Example:
 *       explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words, if the current stack state (the one being explored) has
 * more valid slots than the old one that already passed validation, the
 * verifier can stop exploring and conclude that the current state is
 * valid too.
 *
 * Similarly with registers. If the explored state recorded a register as
 * invalid (NOT_INIT), while the same register in the current state holds a
 * meaningful value, the current state will reach the 'bpf_exit'
 * instruction safely.
 */
static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		if (memcmp(&old->regs[i], &cur->regs[i],
			   sizeof(old->regs[0])) != 0) {
			if (old->regs[i].type == NOT_INIT ||
			    (old->regs[i].type == UNKNOWN_VALUE &&
			     cur->regs[i].type != NOT_INIT))
				continue;
			return false;
		}
	}

	for (i = 0; i < MAX_BPF_STACK; i++) {
		if (memcmp(&old->stack[i], &cur->stack[i],
			   sizeof(old->stack[0])) != 0) {
			if (old->stack[i].stype == STACK_INVALID)
				continue;
			return false;
		}
	}
	return true;
}
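
/* Concrete example of the rules above: if an explored state at some insn
 * had R2=UNKNOWN_VALUE and the current path arrives with R2=PTR_TO_CTX,
 * the states are equivalent, because the old, more conservative state
 * already reached 'bpf_exit' safely without relying on what R2 points to.
 * The reverse (old R2=PTR_TO_CTX, current R2=UNKNOWN_VALUE) is not
 * equivalent and the current path must be explored further.
 */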

static int is_state_visited(struct verifier_env *env, int insn_idx)
{
	struct verifier_state_list *new_sl;
	struct verifier_state_list *sl;

	sl = env->explored_states[insn_idx];
	if (!sl)
		/* this 'insn_idx' instruction wasn't marked, so we will not
		 * be doing state search here
		 */
		return 0;

	while (sl != STATE_LIST_MARK) {
		if (states_equal(&sl->state, &env->cur_state))
			/* reached equivalent register/stack state,
			 * prune the search
			 */
			return 1;
		sl = sl->next;
	}

	/* there were no equivalent states, remember current one.
	 * technically the current state is not proven to be safe yet,
	 * but it will either reach bpf_exit (which means it's safe) or
	 * it will be rejected. Since there are no loops, we won't be
	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
	 */
	new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER);
	if (!new_sl)
		return -ENOMEM;

	/* add new state to the head of linked list */
	memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
	new_sl->next = env->explored_states[insn_idx];
	env->explored_states[insn_idx] = new_sl;
	return 0;
}
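
/* Note on the list shape: env->explored_states[i] is NULL for most insns;
 * check_cfg() and push_insn() plant the STATE_LIST_MARK sentinel only
 * where equivalent states are worth looking for (branch targets and the
 * insn following a jump), and is_state_visited() then prepends real
 * verifier_state_list nodes in front of that sentinel, which stays at the
 * tail as the list terminator.
 */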

static int do_check(struct verifier_env *env)
{
	struct verifier_state *state = &env->cur_state;
	struct bpf_insn *insns = env->prog->insnsi;
	struct reg_state *regs = state->regs;
	int insn_cnt = env->prog->len;
	int insn_idx, prev_insn_idx = 0;
	int insn_processed = 0;
	bool do_print_state = false;

	init_reg_state(regs);
	insn_idx = 0;
	for (;;) {
		struct bpf_insn *insn;
		u8 class;
		int err;

		if (insn_idx >= insn_cnt) {
			verbose("invalid insn idx %d insn_cnt %d\n",
				insn_idx, insn_cnt);
			return -EFAULT;
		}

		insn = &insns[insn_idx];
		class = BPF_CLASS(insn->code);

		if (++insn_processed > 32768) {
			verbose("BPF program is too large. Processed %d insn\n",
				insn_processed);
			return -E2BIG;
		}

		err = is_state_visited(env, insn_idx);
		if (err < 0)
			return err;
		if (err == 1) {
			/* found equivalent state, can prune the search */
			if (log_level) {
				if (do_print_state)
					verbose("\nfrom %d to %d: safe\n",
						prev_insn_idx, insn_idx);
				else
					verbose("%d: safe\n", insn_idx);
			}
			goto process_bpf_exit;
		}

		if (log_level && do_print_state) {
			verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
			print_verifier_state(env);
			do_print_state = false;
		}

		if (log_level) {
			verbose("%d: ", insn_idx);
			print_bpf_insn(insn);
		}

		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(regs, insn);
			if (err)
				return err;

		} else if (class == BPF_LDX) {
			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->imm != 0) {
				verbose("BPF_LDX uses reserved fields\n");
				return -EINVAL;
			}
			/* check src operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;

			err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
			if (err)
				return err;

			/* check that memory (src_reg + off) is readable;
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, insn->src_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_READ,
					       insn->dst_reg);
			if (err)
				return err;

		} else if (class == BPF_STX) {
			if (BPF_MODE(insn->code) == BPF_XADD) {
				err = check_xadd(env, insn);
				if (err)
					return err;
				insn_idx++;
				continue;
			}

			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->imm != 0) {
				verbose("BPF_STX uses reserved fields\n");
				return -EINVAL;
			}
			/* check src1 operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
			/* check src2 operand */
			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       insn->src_reg);
			if (err)
				return err;

		} else if (class == BPF_ST) {
			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->src_reg != BPF_REG_0) {
				verbose("BPF_ST uses reserved fields\n");
				return -EINVAL;
			}
			/* check src operand */
			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       -1);
			if (err)
				return err;

		} else if (class == BPF_JMP) {
			u8 opcode = BPF_OP(insn->code);

			if (opcode == BPF_CALL) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->off != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_CALL uses reserved fields\n");
					return -EINVAL;
				}

				err = check_call(env, insn->imm);
				if (err)
					return err;

			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_JA uses reserved fields\n");
					return -EINVAL;
				}

				insn_idx += insn->off + 1;
				continue;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_EXIT uses reserved fields\n");
					return -EINVAL;
				}

				/* eBPF calling convention is such that R0 is used
				 * to return the value from eBPF program.
				 * Make sure that it's readable at this time
				 * of bpf_exit, which means that program wrote
				 * something into it earlier
				 */
				err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
				if (err)
					return err;

process_bpf_exit:
				insn_idx = pop_stack(env, &prev_insn_idx);
				if (insn_idx < 0) {
					break;
				} else {
					do_print_state = true;
					continue;
				}
			} else {
				err = check_cond_jmp_op(env, insn, &insn_idx);
				if (err)
					return err;
			}
		} else if (class == BPF_LD) {
			u8 mode = BPF_MODE(insn->code);

			if (mode == BPF_ABS || mode == BPF_IND) {
				verbose("LD_ABS is not supported yet\n");
				return -EINVAL;
			} else if (mode == BPF_IMM) {
				err = check_ld_imm(env, insn);
				if (err)
					return err;

				insn_idx++;
			} else {
				verbose("invalid BPF_LD mode\n");
				return -EINVAL;
			}
		} else {
			verbose("unknown insn class %d\n", class);
			return -EINVAL;
		}

		insn_idx++;
	}

	return 0;
}
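
/* Illustrative minimal program that do_check() accepts:
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_EXIT_INSN(),
 * insn 0 marks R0 as written, so the check_reg_arg(regs, BPF_REG_0, SRC_OP)
 * call in the BPF_EXIT case above sees a readable return register;
 * pop_stack() then finds no pending branches and the loop exits with 0.
 */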

/* look for pseudo eBPF instructions that access map FDs and
 * replace them with actual map pointers
 */
static int replace_map_fd_with_map_ptr(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i, j;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			struct bpf_map *map;
			struct fd f;

			if (i == insn_cnt - 1 || insn[1].code != 0 ||
			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
			    insn[1].off != 0) {
				verbose("invalid bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			if (insn->src_reg == 0)
				/* valid generic load 64-bit imm */
				goto next_insn;

			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
				verbose("unrecognized bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			f = fdget(insn->imm);

			map = bpf_map_get(f);
			if (IS_ERR(map)) {
				verbose("fd %d is not pointing to valid bpf_map\n",
					insn->imm);
				fdput(f);
				return PTR_ERR(map);
			}

			/* store map pointer inside BPF_LD_IMM64 instruction */
			insn[0].imm = (u32) (unsigned long) map;
			insn[1].imm = ((u64) (unsigned long) map) >> 32;

			/* check whether we recorded this map already */
			for (j = 0; j < env->used_map_cnt; j++)
				if (env->used_maps[j] == map) {
					fdput(f);
					goto next_insn;
				}

			if (env->used_map_cnt >= MAX_USED_MAPS) {
				fdput(f);
				return -E2BIG;
			}

			/* remember this map */
			env->used_maps[env->used_map_cnt++] = map;

			/* hold the map. If the program is rejected by verifier,
			 * the map will be released by release_maps() or it
			 * will be used by the valid program until it's unloaded
			 * and all maps are released in free_bpf_prog_info()
			 */
			atomic_inc(&map->refcnt);

			fdput(f);
next_insn:
			insn++;
			i++;
		}
	}

	/* now all pseudo BPF_LD_IMM64 instructions load valid
	 * 'struct bpf_map *' into a register instead of user map_fd.
	 * These pointers will be used later by verifier to validate map access.
	 */
	return 0;
}
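
/* For reference, user space emits the two-slot pseudo instruction that the
 * function above rewrites roughly like this (the BPF_LD_MAP_FD() helper in
 * linux/filter.h expands to the same pattern):
 *	insn[0]: code = BPF_LD | BPF_DW | BPF_IMM,
 *		 src_reg = BPF_PSEUDO_MAP_FD, imm = map_fd
 *	insn[1]: all fields zero
 * After the rewrite, insn[0].imm holds the low 32 bits of the map pointer
 * and insn[1].imm the high 32 bits.
 */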

/* drop refcnt of maps used by the rejected program */
static void release_maps(struct verifier_env *env)
{
	int i;

	for (i = 0; i < env->used_map_cnt; i++)
		bpf_map_put(env->used_maps[i]);
}

/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i;

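	/* the map pointer already lives in the imm pair of the two insn
	 * slots (stored by replace_map_fd_with_map_ptr() above); clearing
	 * src_reg turns the pseudo instruction into an ordinary 64-bit
	 * immediate load for the interpreter and JITs
	 */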
	for (i = 0; i < insn_cnt; i++, insn++)
		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
			insn->src_reg = 0;
}

static void free_states(struct verifier_env *env)
{
	struct verifier_state_list *sl, *sln;
	int i;

	if (!env->explored_states)
		return;

	for (i = 0; i < env->prog->len; i++) {
		sl = env->explored_states[i];

		if (sl)
			while (sl != STATE_LIST_MARK) {
				sln = sl->next;
				kfree(sl);
				sl = sln;
			}
	}

	kfree(env->explored_states);
}

int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
{
	char __user *log_ubuf = NULL;
	struct verifier_env *env;
	int ret = -EINVAL;

	if (prog->len <= 0 || prog->len > BPF_MAXINSNS)
		return -E2BIG;

	/* 'struct verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	env->prog = prog;

	/* grab the mutex to protect a few globals used by verifier */
	mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log_level = attr->log_level;
		log_ubuf = (char __user *) (unsigned long) attr->log_buf;
		log_size = attr->log_size;
		log_len = 0;

		ret = -EINVAL;
		/* log_* values have to be sane */
		if (log_size < 128 || log_size > UINT_MAX >> 8 ||
		    log_level == 0 || log_ubuf == NULL)
			goto free_env;

		ret = -ENOMEM;
		log_buf = vmalloc(log_size);
		if (!log_buf)
			goto free_env;
	} else {
		log_level = 0;
	}

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	env->explored_states = kcalloc(prog->len,
				       sizeof(struct verifier_state_list *),
				       GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	ret = do_check(env);

skip_full_check:
	while (pop_stack(env, NULL) >= 0);
	free_states(env);

	if (log_level && log_len >= log_size - 1) {
		BUG_ON(log_len >= log_size);
		/* verifier log exceeded user supplied buffer */
		ret = -ENOSPC;
		/* fall through to return what was recorded */
	}

	/* copy verifier log back to user space including trailing zero */
	if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
		ret = -EFAULT;
		goto free_log_buf;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
						     sizeof(env->used_maps[0]),
						     GFP_KERNEL);

		if (!prog->aux->used_maps) {
			ret = -ENOMEM;
			goto free_log_buf;
		}

		memcpy(prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

free_log_buf:
	if (log_level)
		vfree(log_buf);
free_env:
	if (!prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_bpf_prog_info() will release them.
		 */
		release_maps(env);
	kfree(env);
	mutex_unlock(&bpf_verifier_lock);
	return ret;
}
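
/* bpf_check() is invoked from the BPF_PROG_LOAD path of the bpf(2) syscall
 * after the instructions have been copied from user space and before the
 * program is JITed; a negative return value here aborts the program load.
 */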