kernel/bpf/core.c
1 /*
2  * Linux Socket Filter - Kernel level socket filtering
3  *
4  * Based on the design of the Berkeley Packet Filter. The new
5  * internal format has been designed by PLUMgrid:
6  *
7  *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8  *
9  * Authors:
10  *
11  *      Jay Schulist <jschlst@samba.org>
12  *      Alexei Starovoitov <ast@plumgrid.com>
13  *      Daniel Borkmann <dborkman@redhat.com>
14  *
15  * This program is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU General Public License
17  * as published by the Free Software Foundation; either version
18  * 2 of the License, or (at your option) any later version.
19  *
20  * Andi Kleen - Fix a few bad bugs and races.
21  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
22  */
23
24 #include <linux/filter.h>
25 #include <linux/skbuff.h>
26 #include <linux/vmalloc.h>
27 #include <linux/random.h>
28 #include <linux/moduleloader.h>
29 #include <linux/bpf.h>
30 #include <linux/frame.h>
31 #include <linux/rbtree_latch.h>
32 #include <linux/kallsyms.h>
33 #include <linux/rcupdate.h>
34
35 #include <asm/unaligned.h>
36
37 /* Registers */
38 #define BPF_R0  regs[BPF_REG_0]
39 #define BPF_R1  regs[BPF_REG_1]
40 #define BPF_R2  regs[BPF_REG_2]
41 #define BPF_R3  regs[BPF_REG_3]
42 #define BPF_R4  regs[BPF_REG_4]
43 #define BPF_R5  regs[BPF_REG_5]
44 #define BPF_R6  regs[BPF_REG_6]
45 #define BPF_R7  regs[BPF_REG_7]
46 #define BPF_R8  regs[BPF_REG_8]
47 #define BPF_R9  regs[BPF_REG_9]
48 #define BPF_R10 regs[BPF_REG_10]
49
50 /* Named registers */
51 #define DST     regs[insn->dst_reg]
52 #define SRC     regs[insn->src_reg]
53 #define FP      regs[BPF_REG_FP]
54 #define ARG1    regs[BPF_REG_ARG1]
55 #define CTX     regs[BPF_REG_CTX]
56 #define IMM     insn->imm
57
58 /* No hurry in this branch
59  *
60  * Exported for the bpf jit load helper.
61  */
62 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
63 {
64         u8 *ptr = NULL;
65
66         if (k >= SKF_NET_OFF)
67                 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
68         else if (k >= SKF_LL_OFF)
69                 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
70
71         if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
72                 return ptr;
73
74         return NULL;
75 }
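
/* Illustration only (not in the original source): a classic BPF load such
 * as "ld [SKF_NET_OFF + 12]" reaches this helper with k = SKF_NET_OFF + 12,
 * so ptr resolves to skb_network_header(skb) + 12; a k based on SKF_LL_OFF
 * resolves relative to the MAC header instead. The final check against
 * skb->head and skb_tail_pointer() rejects any load that would run past
 * the linear skb data.
 */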
76
77 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
78 {
79         gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
80         struct bpf_prog_aux *aux;
81         struct bpf_prog *fp;
82
83         size = round_up(size, PAGE_SIZE);
84         fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
85         if (fp == NULL)
86                 return NULL;
87
88         aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
89         if (aux == NULL) {
90                 vfree(fp);
91                 return NULL;
92         }
93
94         fp->pages = size / PAGE_SIZE;
95         fp->aux = aux;
96         fp->aux->prog = fp;
97
98         INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
99
100         return fp;
101 }
102 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
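
/* Hedged usage sketch only (simplified, error handling elided; the real
 * callers are the bpf(2) syscall path and the classic filter converter):
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(insn_cnt), GFP_USER);
 *	if (fp) {
 *		fp->len = insn_cnt;
 *		memcpy(fp->insnsi, insns, bpf_prog_insn_size(fp));
 *		fp = bpf_prog_select_runtime(fp, &err);
 *	}
 */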
103
104 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
105                                   gfp_t gfp_extra_flags)
106 {
107         gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
108         struct bpf_prog *fp;
109         u32 pages, delta;
110         int ret;
111
112         BUG_ON(fp_old == NULL);
113
114         size = round_up(size, PAGE_SIZE);
115         pages = size / PAGE_SIZE;
116         if (pages <= fp_old->pages)
117                 return fp_old;
118
119         delta = pages - fp_old->pages;
120         ret = __bpf_prog_charge(fp_old->aux->user, delta);
121         if (ret)
122                 return NULL;
123
124         fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
125         if (fp == NULL) {
126                 __bpf_prog_uncharge(fp_old->aux->user, delta);
127         } else {
128                 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
129                 fp->pages = pages;
130                 fp->aux->prog = fp;
131
132                 /* We keep fp->aux from fp_old around in the new
133                  * reallocated structure.
134                  */
135                 fp_old->aux = NULL;
136                 __bpf_prog_free(fp_old);
137         }
138
139         return fp;
140 }
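
/* Illustration only: growing a one-page program so that it needs two pages
 * charges the extra page to the owning user via __bpf_prog_charge(), copies
 * the old image, and hands fp_old's aux over to the new allocation before
 * freeing the old one. A request that still fits in the existing pages
 * simply returns fp_old unchanged.
 */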
141
142 void __bpf_prog_free(struct bpf_prog *fp)
143 {
144         kfree(fp->aux);
145         vfree(fp);
146 }
147
148 int bpf_prog_calc_tag(struct bpf_prog *fp)
149 {
150         const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
151         u32 raw_size = bpf_prog_tag_scratch_size(fp);
152         u32 digest[SHA_DIGEST_WORDS];
153         u32 ws[SHA_WORKSPACE_WORDS];
154         u32 i, bsize, psize, blocks;
155         struct bpf_insn *dst;
156         bool was_ld_map;
157         u8 *raw, *todo;
158         __be32 *result;
159         __be64 *bits;
160
161         raw = vmalloc(raw_size);
162         if (!raw)
163                 return -ENOMEM;
164
165         sha_init(digest);
166         memset(ws, 0, sizeof(ws));
167
168         /* We need to take out the map fds for the digest calculation
169          * since they are unstable from the user space side.
170          */
171         dst = (void *)raw;
172         for (i = 0, was_ld_map = false; i < fp->len; i++) {
173                 dst[i] = fp->insnsi[i];
174                 if (!was_ld_map &&
175                     dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
176                     dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
177                         was_ld_map = true;
178                         dst[i].imm = 0;
179                 } else if (was_ld_map &&
180                            dst[i].code == 0 &&
181                            dst[i].dst_reg == 0 &&
182                            dst[i].src_reg == 0 &&
183                            dst[i].off == 0) {
184                         was_ld_map = false;
185                         dst[i].imm = 0;
186                 } else {
187                         was_ld_map = false;
188                 }
189         }
190
191         psize = bpf_prog_insn_size(fp);
192         memset(&raw[psize], 0, raw_size - psize);
193         raw[psize++] = 0x80;
194
195         bsize  = round_up(psize, SHA_MESSAGE_BYTES);
196         blocks = bsize / SHA_MESSAGE_BYTES;
197         todo   = raw;
198         if (bsize - psize >= sizeof(__be64)) {
199                 bits = (__be64 *)(todo + bsize - sizeof(__be64));
200         } else {
201                 bits = (__be64 *)(todo + bsize + bits_offset);
202                 blocks++;
203         }
204         *bits = cpu_to_be64((psize - 1) << 3);
205
206         while (blocks--) {
207                 sha_transform(digest, todo, ws);
208                 todo += SHA_MESSAGE_BYTES;
209         }
210
211         result = (__force __be32 *)digest;
212         for (i = 0; i < SHA_DIGEST_WORDS; i++)
213                 result[i] = cpu_to_be32(digest[i]);
214         memcpy(fp->tag, result, sizeof(fp->tag));
215
216         vfree(raw);
217         return 0;
218 }
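
/* Worked example (illustrative): a 7-insn program has
 * psize = 7 * sizeof(struct bpf_insn) = 56 bytes. Appending the 0x80
 * terminator makes psize = 57 and bsize = round_up(57, 64) = 64; since
 * bsize - psize = 7 is smaller than sizeof(__be64), the big-endian bit
 * length (56 << 3 = 448) is placed in the last 8 bytes of a second
 * 64-byte block and two sha_transform() rounds run. A 6-insn program
 * (48 bytes) fits the length into its single block instead.
 */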
219
220 static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
221 {
222         return BPF_CLASS(insn->code) == BPF_JMP  &&
223                /* Call and Exit are both special jumps with no
224                 * target inside the BPF instruction image.
225                 */
226                BPF_OP(insn->code) != BPF_CALL &&
227                BPF_OP(insn->code) != BPF_EXIT;
228 }
229
230 static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
231 {
232         struct bpf_insn *insn = prog->insnsi;
233         u32 i, insn_cnt = prog->len;
234
235         for (i = 0; i < insn_cnt; i++, insn++) {
236                 if (!bpf_is_jmp_and_has_target(insn))
237                         continue;
238
239                 /* Adjust offset of jmps if we cross boundaries. */
240                 if (i < pos && i + insn->off + 1 > pos)
241                         insn->off += delta;
242                 else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
243                         insn->off -= delta;
244         }
245 }
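
/* Illustration only: a jump at index 2 with off = 3 targets index
 * 2 + 3 + 1 = 6. Patching index 4 with three insns (delta = 2) shifts
 * that target to index 8, and the first branch above bumps off to 5 so
 * the jump still resolves to 2 + 5 + 1 = 8. The second branch does the
 * mirrored correction for a jump sitting after the patched area whose
 * target did not move.
 */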
246
247 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
248                                        const struct bpf_insn *patch, u32 len)
249 {
250         u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
251         struct bpf_prog *prog_adj;
252
253         /* Since our patchlet doesn't expand the image, we're done. */
254         if (insn_delta == 0) {
255                 memcpy(prog->insnsi + off, patch, sizeof(*patch));
256                 return prog;
257         }
258
259         insn_adj_cnt = prog->len + insn_delta;
260
261         /* Several new instructions need to be inserted. Make room
262          * for them. Likely, there's no need for a new allocation as
263          * the last page could have large enough tailroom.
264          */
265         prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
266                                     GFP_USER);
267         if (!prog_adj)
268                 return NULL;
269
270         prog_adj->len = insn_adj_cnt;
271
272         /* Patching happens in 3 steps:
273          *
274          * 1) Move over tail of insnsi from next instruction onwards,
275          *    so we can patch the single target insn with one or more
276          *    new ones (patching is always from 1 to n insns, n > 0).
277          * 2) Inject new instructions at the target location.
278          * 3) Adjust branch offsets if necessary.
279          */
280         insn_rest = insn_adj_cnt - off - len;
281
282         memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
283                 sizeof(*patch) * insn_rest);
284         memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
285
286         bpf_adj_branches(prog_adj, off, insn_delta);
287
288         return prog_adj;
289 }
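
/* Illustration only: replacing the single insn at index 'off' with a
 * 3-insn patch grows the image by insn_delta = 2; the tail starting at
 * off + 1 is moved to off + 3, the patch is copied into place, and
 * bpf_adj_branches() repairs any jump spanning the insertion point.
 * The verifier is the typical caller when it rewrites ctx accesses or
 * helper calls (a simplified description, not an exhaustive list).
 */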
290
291 #ifdef CONFIG_BPF_JIT
292 static __always_inline void
293 bpf_get_prog_addr_region(const struct bpf_prog *prog,
294                          unsigned long *symbol_start,
295                          unsigned long *symbol_end)
296 {
297         const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
298         unsigned long addr = (unsigned long)hdr;
299
300         WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
301
302         *symbol_start = addr;
303         *symbol_end   = addr + hdr->pages * PAGE_SIZE;
304 }
305
306 static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
307 {
308         const char *end = sym + KSYM_NAME_LEN;
309
310         BUILD_BUG_ON(sizeof("bpf_prog_") +
311                      sizeof(prog->tag) * 2 +
312                      /* name has been null terminated.
313                       * We would need +1 for the '_' preceding
314                       * the name.  However, the null character
315                       * is double counted between the name and the
316                       * sizeof("bpf_prog_") above, so we omit
317                       * the +1 here.
318                       */
319                      sizeof(prog->aux->name) > KSYM_NAME_LEN);
320
321         sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
322         sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
323         if (prog->aux->name[0])
324                 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
325         else
326                 *sym = 0;
327 }
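
/* Example with made-up values: a JITed program whose 8-byte tag
 * hex-encodes to "deadbeef0badcafe" and whose aux->name is "parser" shows
 * up in kallsyms as "bpf_prog_deadbeef0badcafe_parser"; without a name the
 * symbol is just "bpf_prog_deadbeef0badcafe".
 */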
328
329 static __always_inline unsigned long
330 bpf_get_prog_addr_start(struct latch_tree_node *n)
331 {
332         unsigned long symbol_start, symbol_end;
333         const struct bpf_prog_aux *aux;
334
335         aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
336         bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
337
338         return symbol_start;
339 }
340
341 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
342                                           struct latch_tree_node *b)
343 {
344         return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
345 }
346
347 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
348 {
349         unsigned long val = (unsigned long)key;
350         unsigned long symbol_start, symbol_end;
351         const struct bpf_prog_aux *aux;
352
353         aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
354         bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
355
356         if (val < symbol_start)
357                 return -1;
358         if (val >= symbol_end)
359                 return  1;
360
361         return 0;
362 }
363
364 static const struct latch_tree_ops bpf_tree_ops = {
365         .less   = bpf_tree_less,
366         .comp   = bpf_tree_comp,
367 };
368
369 static DEFINE_SPINLOCK(bpf_lock);
370 static LIST_HEAD(bpf_kallsyms);
371 static struct latch_tree_root bpf_tree __cacheline_aligned;
372
373 int bpf_jit_kallsyms __read_mostly;
374
375 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
376 {
377         WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
378         list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
379         latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
380 }
381
382 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
383 {
384         if (list_empty(&aux->ksym_lnode))
385                 return;
386
387         latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
388         list_del_rcu(&aux->ksym_lnode);
389 }
390
391 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
392 {
393         return fp->jited && !bpf_prog_was_classic(fp);
394 }
395
396 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
397 {
398         return list_empty(&fp->aux->ksym_lnode) ||
399                fp->aux->ksym_lnode.prev == LIST_POISON2;
400 }
401
402 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
403 {
404         if (!bpf_prog_kallsyms_candidate(fp) ||
405             !capable(CAP_SYS_ADMIN))
406                 return;
407
408         spin_lock_bh(&bpf_lock);
409         bpf_prog_ksym_node_add(fp->aux);
410         spin_unlock_bh(&bpf_lock);
411 }
412
413 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
414 {
415         if (!bpf_prog_kallsyms_candidate(fp))
416                 return;
417
418         spin_lock_bh(&bpf_lock);
419         bpf_prog_ksym_node_del(fp->aux);
420         spin_unlock_bh(&bpf_lock);
421 }
422
423 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
424 {
425         struct latch_tree_node *n;
426
427         if (!bpf_jit_kallsyms_enabled())
428                 return NULL;
429
430         n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
431         return n ?
432                container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
433                NULL;
434 }
435
436 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
437                                  unsigned long *off, char *sym)
438 {
439         unsigned long symbol_start, symbol_end;
440         struct bpf_prog *prog;
441         char *ret = NULL;
442
443         rcu_read_lock();
444         prog = bpf_prog_kallsyms_find(addr);
445         if (prog) {
446                 bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
447                 bpf_get_prog_name(prog, sym);
448
449                 ret = sym;
450                 if (size)
451                         *size = symbol_end - symbol_start;
452                 if (off)
453                         *off  = addr - symbol_start;
454         }
455         rcu_read_unlock();
456
457         return ret;
458 }
459
460 bool is_bpf_text_address(unsigned long addr)
461 {
462         bool ret;
463
464         rcu_read_lock();
465         ret = bpf_prog_kallsyms_find(addr) != NULL;
466         rcu_read_unlock();
467
468         return ret;
469 }
470
471 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
472                     char *sym)
473 {
474         unsigned long symbol_start, symbol_end;
475         struct bpf_prog_aux *aux;
476         unsigned int it = 0;
477         int ret = -ERANGE;
478
479         if (!bpf_jit_kallsyms_enabled())
480                 return ret;
481
482         rcu_read_lock();
483         list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
484                 if (it++ != symnum)
485                         continue;
486
487                 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
488                 bpf_get_prog_name(aux->prog, sym);
489
490                 *value = symbol_start;
491                 *type  = BPF_SYM_ELF_TYPE;
492
493                 ret = 0;
494                 break;
495         }
496         rcu_read_unlock();
497
498         return ret;
499 }
500
501 struct bpf_binary_header *
502 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
503                      unsigned int alignment,
504                      bpf_jit_fill_hole_t bpf_fill_ill_insns)
505 {
506         struct bpf_binary_header *hdr;
507         unsigned int size, hole, start;
508
509         /* Most BPF filters are really small, but if some of them
510          * fill a page, allow at least 128 extra bytes to insert a
511          * random section of illegal instructions.
512          */
513         size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
514         hdr = module_alloc(size);
515         if (hdr == NULL)
516                 return NULL;
517
518         /* Fill space with illegal/arch-dep instructions. */
519         bpf_fill_ill_insns(hdr, size);
520
521         hdr->pages = size / PAGE_SIZE;
522         hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
523                      PAGE_SIZE - sizeof(*hdr));
524         start = (get_random_int() % hole) & ~(alignment - 1);
525
526         /* Leave a random number of instructions before BPF code. */
527         *image_ptr = &hdr->image[start];
528
529         return hdr;
530 }
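
/* Illustration only (assuming a 4 KiB PAGE_SIZE): a 100-byte program
 * rounds up to a single page, the hole is that page minus the header and
 * the program bytes, and the image begins at a random, alignment-masked
 * offset inside the hole. Everything not overwritten by the JIT image
 * keeps the illegal instructions written by bpf_fill_ill_insns(), which
 * is what makes the randomized start useful against JIT spraying.
 */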
531
532 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
533 {
534         module_memfree(hdr);
535 }
536
537 /* This symbol is only overridden by archs that have different
538  * requirements than the usual eBPF JITs, e.g. when they only
539  * implement a cBPF JIT, do not set images read-only, etc.
540  */
541 void __weak bpf_jit_free(struct bpf_prog *fp)
542 {
543         if (fp->jited) {
544                 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
545
546                 bpf_jit_binary_unlock_ro(hdr);
547                 bpf_jit_binary_free(hdr);
548
549                 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
550         }
551
552         bpf_prog_unlock_free(fp);
553 }
554
555 int bpf_jit_harden __read_mostly;
556
557 static int bpf_jit_blind_insn(const struct bpf_insn *from,
558                               const struct bpf_insn *aux,
559                               struct bpf_insn *to_buff)
560 {
561         struct bpf_insn *to = to_buff;
562         u32 imm_rnd = get_random_int();
563         s16 off;
564
565         BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
566         BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
567
568         if (from->imm == 0 &&
569             (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
570              from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
571                 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
572                 goto out;
573         }
574
575         switch (from->code) {
576         case BPF_ALU | BPF_ADD | BPF_K:
577         case BPF_ALU | BPF_SUB | BPF_K:
578         case BPF_ALU | BPF_AND | BPF_K:
579         case BPF_ALU | BPF_OR  | BPF_K:
580         case BPF_ALU | BPF_XOR | BPF_K:
581         case BPF_ALU | BPF_MUL | BPF_K:
582         case BPF_ALU | BPF_MOV | BPF_K:
583         case BPF_ALU | BPF_DIV | BPF_K:
584         case BPF_ALU | BPF_MOD | BPF_K:
585                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
586                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
587                 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
588                 break;
589
590         case BPF_ALU64 | BPF_ADD | BPF_K:
591         case BPF_ALU64 | BPF_SUB | BPF_K:
592         case BPF_ALU64 | BPF_AND | BPF_K:
593         case BPF_ALU64 | BPF_OR  | BPF_K:
594         case BPF_ALU64 | BPF_XOR | BPF_K:
595         case BPF_ALU64 | BPF_MUL | BPF_K:
596         case BPF_ALU64 | BPF_MOV | BPF_K:
597         case BPF_ALU64 | BPF_DIV | BPF_K:
598         case BPF_ALU64 | BPF_MOD | BPF_K:
599                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
600                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
601                 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
602                 break;
603
604         case BPF_JMP | BPF_JEQ  | BPF_K:
605         case BPF_JMP | BPF_JNE  | BPF_K:
606         case BPF_JMP | BPF_JGT  | BPF_K:
607         case BPF_JMP | BPF_JLT  | BPF_K:
608         case BPF_JMP | BPF_JGE  | BPF_K:
609         case BPF_JMP | BPF_JLE  | BPF_K:
610         case BPF_JMP | BPF_JSGT | BPF_K:
611         case BPF_JMP | BPF_JSLT | BPF_K:
612         case BPF_JMP | BPF_JSGE | BPF_K:
613         case BPF_JMP | BPF_JSLE | BPF_K:
614         case BPF_JMP | BPF_JSET | BPF_K:
615                 /* Account for the extra offset in case of a backjump. */
616                 off = from->off;
617                 if (off < 0)
618                         off -= 2;
619                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
620                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
621                 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
622                 break;
623
624         case BPF_LD | BPF_ABS | BPF_W:
625         case BPF_LD | BPF_ABS | BPF_H:
626         case BPF_LD | BPF_ABS | BPF_B:
627                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
628                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
629                 *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
630                 break;
631
632         case BPF_LD | BPF_IND | BPF_W:
633         case BPF_LD | BPF_IND | BPF_H:
634         case BPF_LD | BPF_IND | BPF_B:
635                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
636                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
637                 *to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
638                 *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
639                 break;
640
641         case BPF_LD | BPF_IMM | BPF_DW:
642                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
643                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
644                 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
645                 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
646                 break;
647         case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
648                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
649                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
650                 *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
651                 break;
652
653         case BPF_ST | BPF_MEM | BPF_DW:
654         case BPF_ST | BPF_MEM | BPF_W:
655         case BPF_ST | BPF_MEM | BPF_H:
656         case BPF_ST | BPF_MEM | BPF_B:
657                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
658                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
659                 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
660                 break;
661         }
662 out:
663         return to - to_buff;
664 }
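
/* Worked example (illustration only): with blinding active, an insn such
 * as "r2 += 0x1234" (BPF_ALU64 | BPF_ADD | BPF_K) is rewritten into
 *
 *	ax = imm_rnd ^ 0x1234
 *	ax ^= imm_rnd		// ax is 0x1234 again, but the constant
 *				// never appears verbatim in the image
 *	r2 += ax
 *
 * so user-controlled immediates cannot be sprayed into the JIT image
 * as-is. BPF_REG_AX exists solely for this rewrite and is not visible
 * to programs.
 */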
665
666 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
667                                               gfp_t gfp_extra_flags)
668 {
669         gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
670         struct bpf_prog *fp;
671
672         fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
673         if (fp != NULL) {
674                 /* aux->prog still points to the fp_other one, so
675                  * when promoting the clone to the real program,
676                  * this still needs to be adapted.
677                  */
678                 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
679         }
680
681         return fp;
682 }
683
684 static void bpf_prog_clone_free(struct bpf_prog *fp)
685 {
686         /* aux was stolen by the other clone, so we cannot free
687          * it from this path! It will be freed eventually by the
688          * other program on release.
689          *
690          * At this point, we don't need a deferred release since
691          * clone is guaranteed to not be locked.
692          */
693         fp->aux = NULL;
694         __bpf_prog_free(fp);
695 }
696
697 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
698 {
699         /* We have to repoint aux->prog to self, as we don't
700          * know whether fp here is the clone or the original.
701          */
702         fp->aux->prog = fp;
703         bpf_prog_clone_free(fp_other);
704 }
705
706 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
707 {
708         struct bpf_insn insn_buff[16], aux[2];
709         struct bpf_prog *clone, *tmp;
710         int insn_delta, insn_cnt;
711         struct bpf_insn *insn;
712         int i, rewritten;
713
714         if (!bpf_jit_blinding_enabled())
715                 return prog;
716
717         clone = bpf_prog_clone_create(prog, GFP_USER);
718         if (!clone)
719                 return ERR_PTR(-ENOMEM);
720
721         insn_cnt = clone->len;
722         insn = clone->insnsi;
723
724         for (i = 0; i < insn_cnt; i++, insn++) {
725                 /* We temporarily need to hold the original ld64 insn
726                  * so that we can still access the first part in the
727                  * second blinding run.
728                  */
729                 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
730                     insn[1].code == 0)
731                         memcpy(aux, insn, sizeof(aux));
732
733                 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
734                 if (!rewritten)
735                         continue;
736
737                 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
738                 if (!tmp) {
739                         /* Patching may have repointed aux->prog during
740                          * realloc from the original one, so we need to
741                          * fix it up here on error.
742                          */
743                         bpf_jit_prog_release_other(prog, clone);
744                         return ERR_PTR(-ENOMEM);
745                 }
746
747                 clone = tmp;
748                 insn_delta = rewritten - 1;
749
750                 /* Walk new program and skip insns we just inserted. */
751                 insn = clone->insnsi + i + insn_delta;
752                 insn_cnt += insn_delta;
753                 i        += insn_delta;
754         }
755
756         return clone;
757 }
758 #endif /* CONFIG_BPF_JIT */
759
760 /* Base function for offset calculation. Needs to go into .text section,
761  * therefore keeping it non-static as well; will also be used by JITs
762  * anyway later on, so do not let the compiler omit it.
763  */
764 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
765 {
766         return 0;
767 }
768 EXPORT_SYMBOL_GPL(__bpf_call_base);
769
770 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
771 /**
772  *      ___bpf_prog_run - run eBPF program on a given context
773  *      @regs: is the array of eBPF registers (ARG1 carries the ctx pointer)
774  *      @insn: is the array of eBPF instructions
775  *
776  * Decode and execute eBPF instructions.
777  */
778 static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
779                                     u64 *stack)
780 {
781         u64 tmp;
782         static const void *jumptable[256] = {
783                 [0 ... 255] = &&default_label,
784                 /* Now overwrite non-defaults ... */
785                 /* 32 bit ALU operations */
786                 [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
787                 [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
788                 [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
789                 [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
790                 [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
791                 [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
792                 [BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
793                 [BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
794                 [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
795                 [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
796                 [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
797                 [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
798                 [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
799                 [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
800                 [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
801                 [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
802                 [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
803                 [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
804                 [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
805                 [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
806                 [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
807                 [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
808                 [BPF_ALU | BPF_NEG] = &&ALU_NEG,
809                 [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
810                 [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
811                 /* 64 bit ALU operations */
812                 [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
813                 [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
814                 [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
815                 [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
816                 [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
817                 [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
818                 [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
819                 [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
820                 [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
821                 [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
822                 [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
823                 [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
824                 [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
825                 [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
826                 [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
827                 [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
828                 [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
829                 [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
830                 [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
831                 [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
832                 [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
833                 [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
834                 [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
835                 [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
836                 [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
837                 /* Call instruction */
838                 [BPF_JMP | BPF_CALL] = &&JMP_CALL,
839                 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
840                 /* Jumps */
841                 [BPF_JMP | BPF_JA] = &&JMP_JA,
842                 [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
843                 [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
844                 [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
845                 [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
846                 [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
847                 [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
848                 [BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
849                 [BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
850                 [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
851                 [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
852                 [BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
853                 [BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
854                 [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
855                 [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
856                 [BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
857                 [BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
858                 [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
859                 [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
860                 [BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
861                 [BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
862                 [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
863                 [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
864                 /* Program return */
865                 [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
866                 /* Store instructions */
867                 [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
868                 [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
869                 [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
870                 [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
871                 [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
872                 [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
873                 [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
874                 [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
875                 [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
876                 [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
877                 /* Load instructions */
878                 [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
879                 [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
880                 [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
881                 [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
882                 [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
883                 [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
884                 [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
885                 [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
886                 [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
887                 [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
888                 [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
889         };
890         u32 tail_call_cnt = 0;
891         void *ptr;
892         int off;
893
894 #define CONT     ({ insn++; goto select_insn; })
895 #define CONT_JMP ({ insn++; goto select_insn; })
896
897 select_insn:
898         goto *jumptable[insn->code];
899
900         /* ALU */
901 #define ALU(OPCODE, OP)                 \
902         ALU64_##OPCODE##_X:             \
903                 DST = DST OP SRC;       \
904                 CONT;                   \
905         ALU_##OPCODE##_X:               \
906                 DST = (u32) DST OP (u32) SRC;   \
907                 CONT;                   \
908         ALU64_##OPCODE##_K:             \
909                 DST = DST OP IMM;               \
910                 CONT;                   \
911         ALU_##OPCODE##_K:               \
912                 DST = (u32) DST OP (u32) IMM;   \
913                 CONT;
914
915         ALU(ADD,  +)
916         ALU(SUB,  -)
917         ALU(AND,  &)
918         ALU(OR,   |)
919         ALU(LSH, <<)
920         ALU(RSH, >>)
921         ALU(XOR,  ^)
922         ALU(MUL,  *)
923 #undef ALU
924         ALU_NEG:
925                 DST = (u32) -DST;
926                 CONT;
927         ALU64_NEG:
928                 DST = -DST;
929                 CONT;
930         ALU_MOV_X:
931                 DST = (u32) SRC;
932                 CONT;
933         ALU_MOV_K:
934                 DST = (u32) IMM;
935                 CONT;
936         ALU64_MOV_X:
937                 DST = SRC;
938                 CONT;
939         ALU64_MOV_K:
940                 DST = IMM;
941                 CONT;
942         LD_IMM_DW:
943                 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
944                 insn++;
945                 CONT;
946         ALU64_ARSH_X:
947                 (*(s64 *) &DST) >>= SRC;
948                 CONT;
949         ALU64_ARSH_K:
950                 (*(s64 *) &DST) >>= IMM;
951                 CONT;
952         ALU64_MOD_X:
953                 if (unlikely(SRC == 0))
954                         return 0;
955                 div64_u64_rem(DST, SRC, &tmp);
956                 DST = tmp;
957                 CONT;
958         ALU_MOD_X:
959                 if (unlikely((u32)SRC == 0))
960                         return 0;
961                 tmp = (u32) DST;
962                 DST = do_div(tmp, (u32) SRC);
963                 CONT;
964         ALU64_MOD_K:
965                 div64_u64_rem(DST, IMM, &tmp);
966                 DST = tmp;
967                 CONT;
968         ALU_MOD_K:
969                 tmp = (u32) DST;
970                 DST = do_div(tmp, (u32) IMM);
971                 CONT;
972         ALU64_DIV_X:
973                 if (unlikely(SRC == 0))
974                         return 0;
975                 DST = div64_u64(DST, SRC);
976                 CONT;
977         ALU_DIV_X:
978                 if (unlikely((u32)SRC == 0))
979                         return 0;
980                 tmp = (u32) DST;
981                 do_div(tmp, (u32) SRC);
982                 DST = (u32) tmp;
983                 CONT;
984         ALU64_DIV_K:
985                 DST = div64_u64(DST, IMM);
986                 CONT;
987         ALU_DIV_K:
988                 tmp = (u32) DST;
989                 do_div(tmp, (u32) IMM);
990                 DST = (u32) tmp;
991                 CONT;
992         ALU_END_TO_BE:
993                 switch (IMM) {
994                 case 16:
995                         DST = (__force u16) cpu_to_be16(DST);
996                         break;
997                 case 32:
998                         DST = (__force u32) cpu_to_be32(DST);
999                         break;
1000                 case 64:
1001                         DST = (__force u64) cpu_to_be64(DST);
1002                         break;
1003                 }
1004                 CONT;
1005         ALU_END_TO_LE:
1006                 switch (IMM) {
1007                 case 16:
1008                         DST = (__force u16) cpu_to_le16(DST);
1009                         break;
1010                 case 32:
1011                         DST = (__force u32) cpu_to_le32(DST);
1012                         break;
1013                 case 64:
1014                         DST = (__force u64) cpu_to_le64(DST);
1015                         break;
1016                 }
1017                 CONT;
1018
1019         /* CALL */
1020         JMP_CALL:
1021                 /* Function call scratches BPF_R1-BPF_R5 registers,
1022                  * preserves BPF_R6-BPF_R9, and stores return value
1023                  * into BPF_R0.
1024                  */
1025                 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1026                                                        BPF_R4, BPF_R5);
1027                 CONT;
1028
1029         JMP_TAIL_CALL: {
1030                 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1031                 struct bpf_array *array = container_of(map, struct bpf_array, map);
1032                 struct bpf_prog *prog;
1033                 u32 index = BPF_R3;
1034
1035                 if (unlikely(index >= array->map.max_entries))
1036                         goto out;
1037                 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1038                         goto out;
1039
1040                 tail_call_cnt++;
1041
1042                 prog = READ_ONCE(array->ptrs[index]);
1043                 if (!prog)
1044                         goto out;
1045
1046                 /* ARG1 at this point is guaranteed to point to CTX from
1047                  * the verifier side due to the fact that the tail call is
1048                  * handled like a helper, that is, bpf_tail_call_proto,
1049                  * where arg1_type is ARG_PTR_TO_CTX.
1050                  */
1051                 insn = prog->insnsi;
1052                 goto select_insn;
1053 out:
1054                 CONT;
1055         }
1056         /* JMP */
1057         JMP_JA:
1058                 insn += insn->off;
1059                 CONT;
1060         JMP_JEQ_X:
1061                 if (DST == SRC) {
1062                         insn += insn->off;
1063                         CONT_JMP;
1064                 }
1065                 CONT;
1066         JMP_JEQ_K:
1067                 if (DST == IMM) {
1068                         insn += insn->off;
1069                         CONT_JMP;
1070                 }
1071                 CONT;
1072         JMP_JNE_X:
1073                 if (DST != SRC) {
1074                         insn += insn->off;
1075                         CONT_JMP;
1076                 }
1077                 CONT;
1078         JMP_JNE_K:
1079                 if (DST != IMM) {
1080                         insn += insn->off;
1081                         CONT_JMP;
1082                 }
1083                 CONT;
1084         JMP_JGT_X:
1085                 if (DST > SRC) {
1086                         insn += insn->off;
1087                         CONT_JMP;
1088                 }
1089                 CONT;
1090         JMP_JGT_K:
1091                 if (DST > IMM) {
1092                         insn += insn->off;
1093                         CONT_JMP;
1094                 }
1095                 CONT;
1096         JMP_JLT_X:
1097                 if (DST < SRC) {
1098                         insn += insn->off;
1099                         CONT_JMP;
1100                 }
1101                 CONT;
1102         JMP_JLT_K:
1103                 if (DST < IMM) {
1104                         insn += insn->off;
1105                         CONT_JMP;
1106                 }
1107                 CONT;
1108         JMP_JGE_X:
1109                 if (DST >= SRC) {
1110                         insn += insn->off;
1111                         CONT_JMP;
1112                 }
1113                 CONT;
1114         JMP_JGE_K:
1115                 if (DST >= IMM) {
1116                         insn += insn->off;
1117                         CONT_JMP;
1118                 }
1119                 CONT;
1120         JMP_JLE_X:
1121                 if (DST <= SRC) {
1122                         insn += insn->off;
1123                         CONT_JMP;
1124                 }
1125                 CONT;
1126         JMP_JLE_K:
1127                 if (DST <= IMM) {
1128                         insn += insn->off;
1129                         CONT_JMP;
1130                 }
1131                 CONT;
1132         JMP_JSGT_X:
1133                 if (((s64) DST) > ((s64) SRC)) {
1134                         insn += insn->off;
1135                         CONT_JMP;
1136                 }
1137                 CONT;
1138         JMP_JSGT_K:
1139                 if (((s64) DST) > ((s64) IMM)) {
1140                         insn += insn->off;
1141                         CONT_JMP;
1142                 }
1143                 CONT;
1144         JMP_JSLT_X:
1145                 if (((s64) DST) < ((s64) SRC)) {
1146                         insn += insn->off;
1147                         CONT_JMP;
1148                 }
1149                 CONT;
1150         JMP_JSLT_K:
1151                 if (((s64) DST) < ((s64) IMM)) {
1152                         insn += insn->off;
1153                         CONT_JMP;
1154                 }
1155                 CONT;
1156         JMP_JSGE_X:
1157                 if (((s64) DST) >= ((s64) SRC)) {
1158                         insn += insn->off;
1159                         CONT_JMP;
1160                 }
1161                 CONT;
1162         JMP_JSGE_K:
1163                 if (((s64) DST) >= ((s64) IMM)) {
1164                         insn += insn->off;
1165                         CONT_JMP;
1166                 }
1167                 CONT;
1168         JMP_JSLE_X:
1169                 if (((s64) DST) <= ((s64) SRC)) {
1170                         insn += insn->off;
1171                         CONT_JMP;
1172                 }
1173                 CONT;
1174         JMP_JSLE_K:
1175                 if (((s64) DST) <= ((s64) IMM)) {
1176                         insn += insn->off;
1177                         CONT_JMP;
1178                 }
1179                 CONT;
1180         JMP_JSET_X:
1181                 if (DST & SRC) {
1182                         insn += insn->off;
1183                         CONT_JMP;
1184                 }
1185                 CONT;
1186         JMP_JSET_K:
1187                 if (DST & IMM) {
1188                         insn += insn->off;
1189                         CONT_JMP;
1190                 }
1191                 CONT;
1192         JMP_EXIT:
1193                 return BPF_R0;
1194
1195         /* STX and ST and LDX */
1196 #define LDST(SIZEOP, SIZE)                                              \
1197         STX_MEM_##SIZEOP:                                               \
1198                 *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
1199                 CONT;                                                   \
1200         ST_MEM_##SIZEOP:                                                \
1201                 *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
1202                 CONT;                                                   \
1203         LDX_MEM_##SIZEOP:                                               \
1204                 DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
1205                 CONT;
1206
1207         LDST(B,   u8)
1208         LDST(H,  u16)
1209         LDST(W,  u32)
1210         LDST(DW, u64)
1211 #undef LDST
1212         STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1213                 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1214                            (DST + insn->off));
1215                 CONT;
1216         STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1217                 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1218                              (DST + insn->off));
1219                 CONT;
1220         LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
1221                 off = IMM;
1222 load_word:
1223                 /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only
1224                  * appearing in the programs where ctx == skb
1225                  * (see may_access_skb() in the verifier). All programs
1226                  * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
1227                  * bpf_convert_filter() saves it in BPF_R6, internal BPF
1228                  * verifier will check that BPF_R6 == ctx.
1229                  *
1230                  * BPF_ABS and BPF_IND are wrappers of function calls,
1231                  * so they scratch BPF_R1-BPF_R5 registers, preserve
1232                  * BPF_R6-BPF_R9, and store return value into BPF_R0.
1233                  *
1234                  * Implicit input:
1235                  *   ctx == skb == BPF_R6 == CTX
1236                  *
1237                  * Explicit input:
1238                  *   SRC == any register
1239                  *   IMM == 32-bit immediate
1240                  *
1241                  * Output:
1242                  *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
1243                  */
1244
1245                 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
1246                 if (likely(ptr != NULL)) {
1247                         BPF_R0 = get_unaligned_be32(ptr);
1248                         CONT;
1249                 }
1250
1251                 return 0;
1252         LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
1253                 off = IMM;
1254 load_half:
1255                 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
1256                 if (likely(ptr != NULL)) {
1257                         BPF_R0 = get_unaligned_be16(ptr);
1258                         CONT;
1259                 }
1260
1261                 return 0;
1262         LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
1263                 off = IMM;
1264 load_byte:
1265                 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
1266                 if (likely(ptr != NULL)) {
1267                         BPF_R0 = *(u8 *)ptr;
1268                         CONT;
1269                 }
1270
1271                 return 0;
1272         LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
1273                 off = IMM + SRC;
1274                 goto load_word;
1275         LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
1276                 off = IMM + SRC;
1277                 goto load_half;
1278         LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
1279                 off = IMM + SRC;
1280                 goto load_byte;
1281
1282         default_label:
1283                 /* If we ever reach this, we have a bug somewhere. */
1284                 WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
1285                 return 0;
1286 }
1287 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1288
1289 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1290 #define DEFINE_BPF_PROG_RUN(stack_size) \
1291 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1292 { \
1293         u64 stack[stack_size / sizeof(u64)]; \
1294         u64 regs[MAX_BPF_REG]; \
1295 \
1296         FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1297         ARG1 = (u64) (unsigned long) ctx; \
1298         return ___bpf_prog_run(regs, insn, stack); \
1299 }
1300
1301 #define EVAL1(FN, X) FN(X)
1302 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1303 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1304 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1305 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1306 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1307
1308 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1309 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1310 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1311
1312 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1313
1314 static unsigned int (*interpreters[])(const void *ctx,
1315                                       const struct bpf_insn *insn) = {
1316 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1317 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1318 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1319 };
1320
1321 #else
1322 static unsigned int __bpf_prog_ret0(const void *ctx,
1323                                     const struct bpf_insn *insn)
1324 {
1325         return 0;
1326 }
1327 #endif
1328
1329 bool bpf_prog_array_compatible(struct bpf_array *array,
1330                                const struct bpf_prog *fp)
1331 {
1332         if (!array->owner_prog_type) {
1333                 /* There's no owner yet where we could check for
1334                  * compatibility.
1335                  */
1336                 array->owner_prog_type = fp->type;
1337                 array->owner_jited = fp->jited;
1338
1339                 return true;
1340         }
1341
1342         return array->owner_prog_type == fp->type &&
1343                array->owner_jited == fp->jited;
1344 }
1345
1346 static int bpf_check_tail_call(const struct bpf_prog *fp)
1347 {
1348         struct bpf_prog_aux *aux = fp->aux;
1349         int i;
1350
1351         for (i = 0; i < aux->used_map_cnt; i++) {
1352                 struct bpf_map *map = aux->used_maps[i];
1353                 struct bpf_array *array;
1354
1355                 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1356                         continue;
1357
1358                 array = container_of(map, struct bpf_array, map);
1359                 if (!bpf_prog_array_compatible(array, fp))
1360                         return -EINVAL;
1361         }
1362
1363         return 0;
1364 }
1365
1366 /**
1367  *      bpf_prog_select_runtime - select exec runtime for BPF program
1368  *      @fp: bpf_prog populated with internal BPF program
1369  *      @err: pointer to error variable
1370  *
1371  * Try to JIT eBPF program, if JIT is not available, use interpreter.
1372  * The BPF program will be executed via BPF_PROG_RUN() macro.
1373  */
1374 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1375 {
1376 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1377         u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1378
1379         fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1380 #else
1381         fp->bpf_func = __bpf_prog_ret0;
1382 #endif
1383
1384         /* eBPF JITs can rewrite the program in case constant
1385          * blinding is active. However, in case of error during
1386          * blinding, bpf_int_jit_compile() must always return a
1387          * valid program, which in this case would simply not
1388          * be JITed, but fall back to the interpreter.
1389          */
1390         if (!bpf_prog_is_dev_bound(fp->aux)) {
1391                 fp = bpf_int_jit_compile(fp);
1392 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1393                 if (!fp->jited) {
1394                         *err = -ENOTSUPP;
1395                         return fp;
1396                 }
1397 #endif
1398         } else {
1399                 *err = bpf_prog_offload_compile(fp);
1400                 if (*err)
1401                         return fp;
1402         }
1403         bpf_prog_lock_ro(fp);
1404
1405         /* The tail call compatibility check can only be done at
1406          * this late stage as we need to determine whether we deal
1407          * with JITed or non-JITed program concatenations, and not
1408          * all eBPF JITs might immediately support all features.
1409          */
1410         *err = bpf_check_tail_call(fp);
1411
1412         return fp;
1413 }
1414 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
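
/* Illustration only: when the interpreter is built in, a verifier-computed
 * stack_depth of 1..32 selects interpreters[0] (the 32-byte frame of
 * __bpf_prog_run32), 33..64 selects the 64-byte variant, and the maximum
 * of 512 selects the last of the 16 specialized entry points, so the
 * interpreter's own stack usage tracks what the program actually needs.
 */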
1415
1416 static unsigned int __bpf_prog_ret1(const void *ctx,
1417                                     const struct bpf_insn *insn)
1418 {
1419         return 1;
1420 }
1421
1422 static struct bpf_prog_dummy {
1423         struct bpf_prog prog;
1424 } dummy_bpf_prog = {
1425         .prog = {
1426                 .bpf_func = __bpf_prog_ret1,
1427         },
1428 };
1429
1430 /* To avoid allocating an empty bpf_prog_array for cgroups that
1431  * don't have a bpf program attached, use one global 'empty_prog_array'.
1432  * It will not be modified by the caller of bpf_prog_array_alloc()
1433  * (since the caller requested prog_cnt == 0); that pointer should be
1434  * 'freed' by bpf_prog_array_free().
1435  */
1436 static struct {
1437         struct bpf_prog_array hdr;
1438         struct bpf_prog *null_prog;
1439 } empty_prog_array = {
1440         .null_prog = NULL,
1441 };
1442
1443 struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1444 {
1445         if (prog_cnt)
1446                 return kzalloc(sizeof(struct bpf_prog_array) +
1447                                sizeof(struct bpf_prog *) * (prog_cnt + 1),
1448                                flags);
1449
1450         return &empty_prog_array.hdr;
1451 }
1452
1453 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1454 {
1455         if (!progs ||
1456             progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1457                 return;
1458         kfree_rcu(progs, rcu);
1459 }
1460
1461 int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
1462 {
1463         struct bpf_prog **prog;
1464         u32 cnt = 0;
1465
1466         rcu_read_lock();
1467         prog = rcu_dereference(progs)->progs;
1468         for (; *prog; prog++)
1469                 if (*prog != &dummy_bpf_prog.prog)
1470                         cnt++;
1471         rcu_read_unlock();
1472         return cnt;
1473 }
1474
1475 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1476                                 __u32 __user *prog_ids, u32 cnt)
1477 {
1478         struct bpf_prog **prog;
1479         u32 i = 0, id;
1480
1481         rcu_read_lock();
1482         prog = rcu_dereference(progs)->progs;
1483         for (; *prog; prog++) {
1484                 id = (*prog)->aux->id;
1485                 if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
1486                         rcu_read_unlock();
1487                         return -EFAULT;
1488                 }
1489                 if (++i == cnt) {
1490                         prog++;
1491                         break;
1492                 }
1493         }
1494         rcu_read_unlock();
1495         if (*prog)
1496                 return -ENOSPC;
1497         return 0;
1498 }
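/* Illustrative sketch: a query path might bound the copy by the size of a
 * user-supplied buffer, with 'prog_ids' and 'user_cnt' hypothetical:
 *
 *	u32 cnt = bpf_prog_array_length(progs);
 *	int err;
 *
 *	if (cnt > user_cnt)
 *		cnt = user_cnt;
 *	err = bpf_prog_array_copy_to_user(progs, prog_ids, cnt);
 *
 * A return value of -ENOSPC indicates that more entries were present than
 * the 'cnt' slots provided.
 */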
1499
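/* Note: rather than compacting the array, the matching slot below is
 * overwritten with &dummy_bpf_prog so that concurrent RCU readers keep
 * seeing a NULL-terminated array of unchanged length; dummy entries are
 * filtered out by bpf_prog_array_length() and by bpf_prog_array_copy().
 */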
1500 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
1501                                 struct bpf_prog *old_prog)
1502 {
1503         struct bpf_prog **prog = progs->progs;
1504
1505         for (; *prog; prog++)
1506                 if (*prog == old_prog) {
1507                         WRITE_ONCE(*prog, &dummy_bpf_prog.prog);
1508                         break;
1509                 }
1510 }
1511
1512 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1513                         struct bpf_prog *exclude_prog,
1514                         struct bpf_prog *include_prog,
1515                         struct bpf_prog_array **new_array)
1516 {
1517         int new_prog_cnt, carry_prog_cnt = 0;
1518         struct bpf_prog **existing_prog;
1519         struct bpf_prog_array *array;
1520         int new_prog_idx = 0;
1521
1522         /* Figure out how many existing progs we need to carry over to
1523          * the new array.
1524          */
1525         if (old_array) {
1526                 existing_prog = old_array->progs;
1527                 for (; *existing_prog; existing_prog++) {
1528                         if (*existing_prog != exclude_prog &&
1529                             *existing_prog != &dummy_bpf_prog.prog)
1530                                 carry_prog_cnt++;
1531                         if (*existing_prog == include_prog)
1532                                 return -EEXIST;
1533                 }
1534         }
1535
1536         /* How many progs (not NULL) will be in the new array? */
1537         new_prog_cnt = carry_prog_cnt;
1538         if (include_prog)
1539                 new_prog_cnt += 1;
1540
1541         /* Do we have any prog (not NULL) in the new array? */
1542         if (!new_prog_cnt) {
1543                 *new_array = NULL;
1544                 return 0;
1545         }
1546
1547         /* +1 as the end of prog_array is marked with NULL */
1548         array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1549         if (!array)
1550                 return -ENOMEM;
1551
1552         /* Fill in the new prog array */
1553         if (carry_prog_cnt) {
1554                 existing_prog = old_array->progs;
1555                 for (; *existing_prog; existing_prog++)
1556                         if (*existing_prog != exclude_prog &&
1557                             *existing_prog != &dummy_bpf_prog.prog)
1558                                 array->progs[new_prog_idx++] = *existing_prog;
1559         }
1560         if (include_prog)
1561                 array->progs[new_prog_idx++] = include_prog;
1562         array->progs[new_prog_idx] = NULL;
1563         *new_array = array;
1564         return 0;
1565 }
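/* Illustrative sketch: an attach path might build a replacement array and
 * publish it via RCU, with 'owner' and 'old_array' hypothetical:
 *
 *	struct bpf_prog_array *new_array;
 *	int err;
 *
 *	err = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
 *	if (err)
 *		return err;
 *	rcu_assign_pointer(owner->progs, new_array);
 *	bpf_prog_array_free(old_array);
 */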
1566
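/* Note: freeing is deferred to a workqueue because the final reference may
 * be dropped from atomic context (e.g. an RCU callback), while releasing a
 * JITed image may need to sleep.
 */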
1567 static void bpf_prog_free_deferred(struct work_struct *work)
1568 {
1569         struct bpf_prog_aux *aux;
1570
1571         aux = container_of(work, struct bpf_prog_aux, work);
1572         if (bpf_prog_is_dev_bound(aux))
1573                 bpf_prog_offload_destroy(aux->prog);
1574         bpf_jit_free(aux->prog);
1575 }
1576
1577 /* Free internal BPF program */
1578 void bpf_prog_free(struct bpf_prog *fp)
1579 {
1580         struct bpf_prog_aux *aux = fp->aux;
1581
1582         INIT_WORK(&aux->work, bpf_prog_free_deferred);
1583         schedule_work(&aux->work);
1584 }
1585 EXPORT_SYMBOL_GPL(bpf_prog_free);
1586
1587 /* RNG for unprivileged user space with state separated from prandom_u32(). */
1588 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1589
1590 void bpf_user_rnd_init_once(void)
1591 {
1592         prandom_init_once(&bpf_user_rnd_state);
1593 }
1594
1595 BPF_CALL_0(bpf_user_rnd_u32)
1596 {
1597         /* Should someone ever have the rather unwise idea of using
1598          * some of the registers passed into this function, note that
1599          * it is called from both native eBPF and classic-to-eBPF
1600          * transformations. Register assignments from the two sides
1601          * differ, e.g. classic always sets fn(ctx, A, X) here.
1602          */
1603         struct rnd_state *state;
1604         u32 res;
1605
1606         state = &get_cpu_var(bpf_user_rnd_state);
1607         res = prandom_u32_state(state);
1608         put_cpu_var(bpf_user_rnd_state);
1609
1610         return res;
1611 }
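/* Illustrative sketch: this is the backing function for the
 * bpf_get_prandom_u32() helper; its proto lives in kernel/bpf/helpers.c
 * and is wired up roughly as:
 *
 *	const struct bpf_func_proto bpf_get_prandom_u32_proto = {
 *		.func		= bpf_user_rnd_u32,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *	};
 *
 * bpf_user_rnd_init_once() must have been called (e.g. at program load
 * time) before the helper is first invoked.
 */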
1612
1613 /* Weak definitions of helper functions in case we don't have bpf syscall. */
1614 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1615 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1616 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
1617
1618 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
1619 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
1620 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
1621 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
1622
1623 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1624 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1625 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
1626 const struct bpf_func_proto bpf_sock_map_update_proto __weak;
1627
1628 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1629 {
1630         return NULL;
1631 }
1632
1633 u64 __weak
1634 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1635                  void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
1636 {
1637         return -ENOTSUPP;
1638 }
1639
1640 /* Always built-in helper functions. */
1641 const struct bpf_func_proto bpf_tail_call_proto = {
1642         .func           = NULL,
1643         .gpl_only       = false,
1644         .ret_type       = RET_VOID,
1645         .arg1_type      = ARG_PTR_TO_CTX,
1646         .arg2_type      = ARG_CONST_MAP_PTR,
1647         .arg3_type      = ARG_ANYTHING,
1648 };
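/* Note: .func is intentionally NULL here; tail calls are not dispatched
 * through a helper call but are handled inline by the interpreter's tail
 * call case and emitted directly by the JITs, so only the argument and
 * return typing above is consumed by the verifier.
 */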
1649
1650 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1651  * Implementing bpf_int_jit_compile() instead is encouraged, so that
1652  * eBPF and, implicitly, also cBPF can get JITed!
1653  */
1654 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
1655 {
1656         return prog;
1657 }
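/* Illustrative sketch: an architecture JIT overriding this weak symbol is
 * expected either to install the generated image or to return the program
 * unchanged so the interpreter keeps running it; 'jit_image' is
 * hypothetical:
 *
 *	struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 *	{
 *		... emit native code for fp->insnsi ...
 *		fp->bpf_func = (void *)jit_image;
 *		fp->jited = 1;
 *		return fp;
 *	}
 */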
1658
1659 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
1660  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1661  */
1662 void __weak bpf_jit_compile(struct bpf_prog *prog)
1663 {
1664 }
1665
1666 bool __weak bpf_helper_changes_pkt_data(void *func)
1667 {
1668         return false;
1669 }
1670
1671 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
1672  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1673  */
1674 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1675                          int len)
1676 {
1677         return -EFAULT;
1678 }
1679
1680 /* All definitions of tracepoints related to BPF. */
1681 #define CREATE_TRACE_POINTS
1682 #include <linux/bpf_trace.h>
1683
1684 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
1685
1686 /* These are only used within the BPF_SYSCALL code */
1687 #ifdef CONFIG_BPF_SYSCALL
1688 EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
1689 EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
1690 #endif