2 * Testsuite for eBPF verifier
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 * Copyright (c) 2017 Facebook
6 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
14 #include <asm/types.h>
15 #include <linux/types.h>
28 #include <sys/capability.h>
30 #include <linux/unistd.h>
31 #include <linux/filter.h>
32 #include <linux/bpf_perf_event.h>
33 #include <linux/bpf.h>
34 #include <linux/if_ether.h>
39 # include "autoconf.h"
41 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
42 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
45 #include "bpf_rlimit.h"
48 #include "../../../include/linux/filter.h"
/* Upper bound on instructions per test program (kernel BPF_MAXINSNS). */
#define MAX_INSNS BPF_MAXINSNS
/* Number of distinct fixup_* map arrays in struct bpf_test below. */
#define MAX_NR_MAPS 13
/* Max per-test retval entries (see retvals[] in struct bpf_test). */
#define MAX_TEST_RUNS 8
/* Recognizable sentinel pointer value: 0xcafe4a with an "ll" suffix
 * (intentional pun, not a typo) — used to detect leaked pointers.
 */
#define POINTER_VALUE 0xcafe4all
/* Size of the data[] payload handed to BPF_PROG_TEST_RUN. */
#define TEST_DATA_LEN 64
/* Per-test flags stored in struct bpf_test::flags. */
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
/* procfs sysctl path that reports whether unprivileged bpf() is disabled;
 * probed at startup to decide which unpriv test cases can run.
 */
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
65 struct bpf_insn insns[MAX_INSNS];
66 int fixup_map_hash_8b[MAX_FIXUPS];
67 int fixup_map_hash_48b[MAX_FIXUPS];
68 int fixup_map_hash_16b[MAX_FIXUPS];
69 int fixup_map_array_48b[MAX_FIXUPS];
70 int fixup_map_sockmap[MAX_FIXUPS];
71 int fixup_map_sockhash[MAX_FIXUPS];
72 int fixup_map_xskmap[MAX_FIXUPS];
73 int fixup_map_stacktrace[MAX_FIXUPS];
74 int fixup_prog1[MAX_FIXUPS];
75 int fixup_prog2[MAX_FIXUPS];
76 int fixup_map_in_map[MAX_FIXUPS];
77 int fixup_cgroup_storage[MAX_FIXUPS];
78 int fixup_percpu_cgroup_storage[MAX_FIXUPS];
80 const char *errstr_unpriv;
81 uint32_t retval, retval_unpriv, insn_processed;
86 } result, result_unpriv;
87 enum bpf_prog_type prog_type;
89 __u8 data[TEST_DATA_LEN];
90 void (*fill_helper)(struct bpf_test *self);
93 uint32_t retval, retval_unpriv;
95 __u8 data[TEST_DATA_LEN];
96 __u64 data64[TEST_DATA_LEN / 8];
98 } retvals[MAX_TEST_RUNS];
101 /* Note we want this to be 64 bit aligned so that the end of our array is
102 * actually the end of the structure.
104 #define MAX_ENTRIES 11
108 int foo[MAX_ENTRIES];
/* Generate a BPF_MAXINSNS-long program: PUSH_CNT iterations of
 * {read skb->data[0], skb_vlan_push} followed by PUSH_CNT iterations of
 * {read skb->data[0], skb_vlan_pop}; any unexpected byte or helper failure
 * jumps straight to the final exit instruction.
 * NOTE(review): this extracted chunk is missing interior lines (the
 * declarations of i/j and several braces) — confirm against the full file.
 */
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
	/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
	unsigned int len = BPF_MAXINSNS;
	struct bpf_insn *insn = self->insns;
	/* r6 = ctx (r1); preserved across helper calls, which clobber r1-r5 */
	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		/* if skb->data[0] != 0x34, jump to insn[len - 1] (the exit) */
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		/* skb_vlan_push(skb, proto=1, vlan_tci=2) */
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			 BPF_FUNC_skb_vlan_push),
		/* helper returned non-zero -> bail out to the exit insn */
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		/* skb_vlan_pop(skb) */
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			 BPF_FUNC_skb_vlan_pop),
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
	/* pad the remainder with mov r0, 0xbef; single exit as the last insn */
	for (; i < len - 1; i++)
		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 1] = BPF_EXIT_INSN();
/* Generate a program where a conditional jump has to hop over a later
 * BPF_LD_ABS instruction, exercising the verifier/JIT handling of jumps
 * around ld_abs.
 * NOTE(review): this extracted chunk is missing interior lines (declaration
 * of i, filler instructions, braces) — confirm against the full file.
 */
static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
	struct bpf_insn *insn = self->insns;
	unsigned int len = BPF_MAXINSNS;
	/* r6 = ctx (r1); LD_ABS implicitly uses the skb in r6 */
	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	/* if skb->data[0] == 10, jump past everything up to the exit */
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
/* Generate self->retval pseudo-random BPF_LD_IMM64 loads and XOR-fold them
 * into r0; afterwards fold the high 32 bits of r0 into the low 32 bits so
 * the program's return value is a 32-bit checksum.  self->retval is
 * overwritten with the expected checksum for the test runner to compare.
 * NOTE(review): this extracted chunk is missing interior lines (the host-side
 * accumulator `res` and loop braces) — confirm against the full file.
 */
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
	struct bpf_insn *insn = self->insns;
	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		/* LD_IMM64 occupies two insn slots */
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	/* fold upper 32 bits into lower 32: r0 ^= (r0 >> 32) */
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	/* expected program return value, truncated to 32 bits */
	self->retval = (uint32_t)res;
/* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
/* Inline insn sequence: zero-initialize a 48-byte struct bpf_sock_tuple on
 * the stack (fp-48..fp-8; first store is 4 bytes, the rest 8), then call
 * bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), netns=0, flags=0).
 * On return r0 holds the socket pointer or NULL; r1 (ctx) is clobbered.
 */
#define BPF_SK_LOOKUP \
	/* struct bpf_sock_tuple tuple = {} */ \
	BPF_MOV64_IMM(BPF_REG_2, 0), \
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */ \
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
	BPF_MOV64_IMM(BPF_REG_4, 0), \
	BPF_MOV64_IMM(BPF_REG_5, 0), \
	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
214 static struct bpf_test tests[] = {
218 BPF_MOV64_IMM(BPF_REG_1, 1),
219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
220 BPF_MOV64_IMM(BPF_REG_2, 3),
221 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
223 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
224 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
231 "DIV32 by 0, zero check 1",
233 BPF_MOV32_IMM(BPF_REG_0, 42),
234 BPF_MOV32_IMM(BPF_REG_1, 0),
235 BPF_MOV32_IMM(BPF_REG_2, 1),
236 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
243 "DIV32 by 0, zero check 2",
245 BPF_MOV32_IMM(BPF_REG_0, 42),
246 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
247 BPF_MOV32_IMM(BPF_REG_2, 1),
248 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
255 "DIV64 by 0, zero check",
257 BPF_MOV32_IMM(BPF_REG_0, 42),
258 BPF_MOV32_IMM(BPF_REG_1, 0),
259 BPF_MOV32_IMM(BPF_REG_2, 1),
260 BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
267 "MOD32 by 0, zero check 1",
269 BPF_MOV32_IMM(BPF_REG_0, 42),
270 BPF_MOV32_IMM(BPF_REG_1, 0),
271 BPF_MOV32_IMM(BPF_REG_2, 1),
272 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
279 "MOD32 by 0, zero check 2",
281 BPF_MOV32_IMM(BPF_REG_0, 42),
282 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
283 BPF_MOV32_IMM(BPF_REG_2, 1),
284 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
291 "MOD64 by 0, zero check",
293 BPF_MOV32_IMM(BPF_REG_0, 42),
294 BPF_MOV32_IMM(BPF_REG_1, 0),
295 BPF_MOV32_IMM(BPF_REG_2, 1),
296 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
303 "DIV32 by 0, zero check ok, cls",
305 BPF_MOV32_IMM(BPF_REG_0, 42),
306 BPF_MOV32_IMM(BPF_REG_1, 2),
307 BPF_MOV32_IMM(BPF_REG_2, 16),
308 BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
309 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
312 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
317 "DIV32 by 0, zero check 1, cls",
319 BPF_MOV32_IMM(BPF_REG_1, 0),
320 BPF_MOV32_IMM(BPF_REG_0, 1),
321 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
324 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
329 "DIV32 by 0, zero check 2, cls",
331 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
332 BPF_MOV32_IMM(BPF_REG_0, 1),
333 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
336 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
341 "DIV64 by 0, zero check, cls",
343 BPF_MOV32_IMM(BPF_REG_1, 0),
344 BPF_MOV32_IMM(BPF_REG_0, 1),
345 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
348 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
353 "MOD32 by 0, zero check ok, cls",
355 BPF_MOV32_IMM(BPF_REG_0, 42),
356 BPF_MOV32_IMM(BPF_REG_1, 3),
357 BPF_MOV32_IMM(BPF_REG_2, 5),
358 BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
359 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
362 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
367 "MOD32 by 0, zero check 1, cls",
369 BPF_MOV32_IMM(BPF_REG_1, 0),
370 BPF_MOV32_IMM(BPF_REG_0, 1),
371 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
374 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
379 "MOD32 by 0, zero check 2, cls",
381 BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
382 BPF_MOV32_IMM(BPF_REG_0, 1),
383 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
386 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
391 "MOD64 by 0, zero check 1, cls",
393 BPF_MOV32_IMM(BPF_REG_1, 0),
394 BPF_MOV32_IMM(BPF_REG_0, 2),
395 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
398 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
403 "MOD64 by 0, zero check 2, cls",
405 BPF_MOV32_IMM(BPF_REG_1, 0),
406 BPF_MOV32_IMM(BPF_REG_0, -1),
407 BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
410 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
414 /* Just make sure that JITs used udiv/umod as otherwise we get
415 * an exception from INT_MIN/-1 overflow similarly as with div
419 "DIV32 overflow, check 1",
421 BPF_MOV32_IMM(BPF_REG_1, -1),
422 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
423 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
426 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
431 "DIV32 overflow, check 2",
433 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
434 BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
437 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
442 "DIV64 overflow, check 1",
444 BPF_MOV64_IMM(BPF_REG_1, -1),
445 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
446 BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
449 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
454 "DIV64 overflow, check 2",
456 BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
457 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
460 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
465 "MOD32 overflow, check 1",
467 BPF_MOV32_IMM(BPF_REG_1, -1),
468 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
469 BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
472 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
477 "MOD32 overflow, check 2",
479 BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
480 BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
483 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
488 "MOD64 overflow, check 1",
490 BPF_MOV64_IMM(BPF_REG_1, -1),
491 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
492 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
493 BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
494 BPF_MOV32_IMM(BPF_REG_0, 0),
495 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
496 BPF_MOV32_IMM(BPF_REG_0, 1),
499 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
504 "MOD64 overflow, check 2",
506 BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
507 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
508 BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
509 BPF_MOV32_IMM(BPF_REG_0, 0),
510 BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
511 BPF_MOV32_IMM(BPF_REG_0, 1),
514 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
519 "xor32 zero extend check",
521 BPF_MOV32_IMM(BPF_REG_2, -1),
522 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
523 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
524 BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
525 BPF_MOV32_IMM(BPF_REG_0, 2),
526 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
527 BPF_MOV32_IMM(BPF_REG_0, 1),
530 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
538 .errstr = "unknown opcode 00",
546 .errstr = "R0 !read_ok",
555 .errstr = "unreachable",
561 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
562 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
565 .errstr = "unreachable",
571 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
574 .errstr = "jump out of range",
578 "out of range jump2",
580 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
583 .errstr = "jump out of range",
589 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
590 BPF_LD_IMM64(BPF_REG_0, 0),
591 BPF_LD_IMM64(BPF_REG_0, 0),
592 BPF_LD_IMM64(BPF_REG_0, 1),
593 BPF_LD_IMM64(BPF_REG_0, 1),
594 BPF_MOV64_IMM(BPF_REG_0, 2),
597 .errstr = "invalid BPF_LD_IMM insn",
598 .errstr_unpriv = "R1 pointer comparison",
604 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
605 BPF_LD_IMM64(BPF_REG_0, 0),
606 BPF_LD_IMM64(BPF_REG_0, 0),
607 BPF_LD_IMM64(BPF_REG_0, 1),
608 BPF_LD_IMM64(BPF_REG_0, 1),
611 .errstr = "invalid BPF_LD_IMM insn",
612 .errstr_unpriv = "R1 pointer comparison",
618 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
619 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
620 BPF_LD_IMM64(BPF_REG_0, 0),
621 BPF_LD_IMM64(BPF_REG_0, 0),
622 BPF_LD_IMM64(BPF_REG_0, 1),
623 BPF_LD_IMM64(BPF_REG_0, 1),
626 .errstr = "invalid bpf_ld_imm64 insn",
632 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
635 .errstr = "invalid bpf_ld_imm64 insn",
641 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
643 .errstr = "invalid bpf_ld_imm64 insn",
649 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
650 BPF_RAW_INSN(0, 0, 0, 0, 0),
658 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
659 BPF_RAW_INSN(0, 0, 0, 0, 1),
668 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
669 BPF_RAW_INSN(0, 0, 0, 0, 1),
672 .errstr = "uses reserved fields",
678 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
679 BPF_RAW_INSN(0, 0, 0, 1, 1),
682 .errstr = "invalid bpf_ld_imm64 insn",
688 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
689 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
692 .errstr = "invalid bpf_ld_imm64 insn",
698 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
699 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
702 .errstr = "invalid bpf_ld_imm64 insn",
708 BPF_MOV64_IMM(BPF_REG_1, 0),
709 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
710 BPF_RAW_INSN(0, 0, 0, 0, 1),
713 .errstr = "not pointing to valid bpf_map",
719 BPF_MOV64_IMM(BPF_REG_1, 0),
720 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
721 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
724 .errstr = "invalid bpf_ld_imm64 insn",
730 BPF_MOV64_IMM(BPF_REG_0, 1),
731 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
740 BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
741 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
750 BPF_MOV64_IMM(BPF_REG_0, 1),
751 BPF_MOV64_IMM(BPF_REG_1, 5),
752 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
761 BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
762 BPF_MOV64_IMM(BPF_REG_1, 15),
763 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
772 BPF_MOV64_IMM(BPF_REG_0, 1),
773 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
781 BPF_MOV64_IMM(BPF_REG_0, 1),
782 BPF_MOV64_IMM(BPF_REG_1, 5),
783 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
791 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
793 .errstr = "not an exit",
799 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
802 .errstr = "back-edge",
808 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
809 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
810 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
811 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
814 .errstr = "back-edge",
820 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
821 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
822 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
823 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
826 .errstr = "back-edge",
830 "read uninitialized register",
832 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
835 .errstr = "R2 !read_ok",
839 "read invalid register",
841 BPF_MOV64_REG(BPF_REG_0, -1),
844 .errstr = "R15 is invalid",
848 "program doesn't init R0 before exit",
850 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
853 .errstr = "R0 !read_ok",
857 "program doesn't init R0 before exit in all branches",
859 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
860 BPF_MOV64_IMM(BPF_REG_0, 1),
861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
864 .errstr = "R0 !read_ok",
865 .errstr_unpriv = "R1 pointer comparison",
869 "stack out of bounds",
871 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
874 .errstr = "invalid stack",
878 "invalid call insn1",
880 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
883 .errstr = "unknown opcode 8d",
887 "invalid call insn2",
889 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
892 .errstr = "BPF_CALL uses reserved",
896 "invalid function call",
898 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
901 .errstr = "invalid func unknown#1234567",
905 "uninitialized stack1",
907 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
909 BPF_LD_MAP_FD(BPF_REG_1, 0),
910 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
911 BPF_FUNC_map_lookup_elem),
914 .fixup_map_hash_8b = { 2 },
915 .errstr = "invalid indirect read from stack",
919 "uninitialized stack2",
921 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
922 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
925 .errstr = "invalid read from stack",
929 "invalid fp arithmetic",
930 /* If this gets ever changed, make sure JITs can deal with it. */
932 BPF_MOV64_IMM(BPF_REG_0, 0),
933 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
934 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
935 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
938 .errstr = "R1 subtraction from stack pointer",
942 "non-invalid fp arithmetic",
944 BPF_MOV64_IMM(BPF_REG_0, 0),
945 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
951 "invalid argument register",
953 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
954 BPF_FUNC_get_cgroup_classid),
955 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
956 BPF_FUNC_get_cgroup_classid),
959 .errstr = "R1 !read_ok",
961 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
964 "non-invalid argument register",
966 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
967 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
968 BPF_FUNC_get_cgroup_classid),
969 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
970 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
971 BPF_FUNC_get_cgroup_classid),
975 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
978 "check valid spill/fill",
980 /* spill R1(ctx) into stack */
981 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
982 /* fill it back into R2 */
983 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
984 /* should be able to access R0 = *(R2 + 8) */
985 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
986 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
989 .errstr_unpriv = "R0 leaks addr",
991 .result_unpriv = REJECT,
992 .retval = POINTER_VALUE,
995 "check valid spill/fill, skb mark",
997 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
998 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
999 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1000 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1001 offsetof(struct __sk_buff, mark)),
1005 .result_unpriv = ACCEPT,
1008 "check corrupted spill/fill",
1010 /* spill R1(ctx) into stack */
1011 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1012 /* mess up with R1 pointer on stack */
1013 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
1014 /* fill back into R0 is fine for priv.
1015 * R0 now becomes SCALAR_VALUE.
1017 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1018 /* Load from R0 should fail. */
1019 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
1022 .errstr_unpriv = "attempt to corrupt spilled",
1023 .errstr = "R0 invalid mem access 'inv",
1027 "check corrupted spill/fill, LSB",
1029 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1030 BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
1031 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1034 .errstr_unpriv = "attempt to corrupt spilled",
1035 .result_unpriv = REJECT,
1037 .retval = POINTER_VALUE,
1040 "check corrupted spill/fill, MSB",
1042 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1043 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
1044 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1047 .errstr_unpriv = "attempt to corrupt spilled",
1048 .result_unpriv = REJECT,
1050 .retval = POINTER_VALUE,
1053 "invalid src register in STX",
1055 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
1058 .errstr = "R15 is invalid",
1062 "invalid dst register in STX",
1064 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1067 .errstr = "R14 is invalid",
1071 "invalid dst register in ST",
1073 BPF_ST_MEM(BPF_B, 14, -1, -1),
1076 .errstr = "R14 is invalid",
1080 "invalid src register in LDX",
1082 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1085 .errstr = "R12 is invalid",
1089 "invalid dst register in LDX",
1091 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1094 .errstr = "R11 is invalid",
1100 BPF_RAW_INSN(0, 0, 0, 0, 0),
1103 .errstr = "unknown opcode 00",
1109 BPF_RAW_INSN(1, 0, 0, 0, 0),
1112 .errstr = "BPF_LDX uses reserved fields",
1118 BPF_RAW_INSN(-1, 0, 0, 0, 0),
1121 .errstr = "unknown opcode ff",
1127 BPF_RAW_INSN(-1, -1, -1, -1, -1),
1130 .errstr = "unknown opcode ff",
1136 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1139 .errstr = "BPF_ALU uses reserved fields",
1143 "misaligned read from stack",
1145 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1146 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1149 .errstr = "misaligned stack access",
1153 "invalid map_fd for function call",
1155 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1156 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1157 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1158 BPF_LD_MAP_FD(BPF_REG_1, 0),
1159 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1160 BPF_FUNC_map_delete_elem),
1163 .errstr = "fd 0 is not pointing to valid bpf_map",
1167 "don't check return value before access",
1169 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1170 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1171 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1172 BPF_LD_MAP_FD(BPF_REG_1, 0),
1173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1174 BPF_FUNC_map_lookup_elem),
1175 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1178 .fixup_map_hash_8b = { 3 },
1179 .errstr = "R0 invalid mem access 'map_value_or_null'",
1183 "access memory with incorrect alignment",
1185 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1186 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1187 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1188 BPF_LD_MAP_FD(BPF_REG_1, 0),
1189 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1190 BPF_FUNC_map_lookup_elem),
1191 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1192 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1195 .fixup_map_hash_8b = { 3 },
1196 .errstr = "misaligned value access",
1198 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1201 "sometimes access memory with incorrect alignment",
1203 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1204 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1205 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1206 BPF_LD_MAP_FD(BPF_REG_1, 0),
1207 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1208 BPF_FUNC_map_lookup_elem),
1209 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1210 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1212 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1215 .fixup_map_hash_8b = { 3 },
1216 .errstr = "R0 invalid mem access",
1217 .errstr_unpriv = "R0 leaks addr",
1219 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1224 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1225 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1226 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1227 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1228 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1229 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1230 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1231 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1232 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1233 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1234 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1235 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1236 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1237 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1238 BPF_MOV64_IMM(BPF_REG_0, 0),
1241 .errstr_unpriv = "R1 pointer comparison",
1242 .result_unpriv = REJECT,
1248 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1249 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1250 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1251 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1252 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1253 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1254 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1255 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1256 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1257 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1258 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1259 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1260 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1262 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1263 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1264 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1265 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1266 BPF_MOV64_IMM(BPF_REG_0, 0),
1269 .errstr_unpriv = "R1 pointer comparison",
1270 .result_unpriv = REJECT,
1276 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1277 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1278 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1279 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1280 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1281 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1282 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1283 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1284 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1285 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1286 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1288 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1289 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1290 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1292 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1293 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1294 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1296 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1297 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1298 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1300 BPF_LD_MAP_FD(BPF_REG_1, 0),
1301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1302 BPF_FUNC_map_delete_elem),
1305 .fixup_map_hash_8b = { 24 },
1306 .errstr_unpriv = "R1 pointer comparison",
1307 .result_unpriv = REJECT,
1314 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1315 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1316 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1317 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1318 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1319 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1320 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1321 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1322 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1323 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1324 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1325 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1326 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1327 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1328 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1329 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1330 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1331 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1332 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1333 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1334 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1335 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1336 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1337 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1338 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1339 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1340 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1341 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1342 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1343 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1344 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1345 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1346 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1347 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1348 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1349 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1350 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1351 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1352 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1353 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1354 BPF_MOV64_IMM(BPF_REG_0, 0),
1357 .errstr_unpriv = "R1 pointer comparison",
1358 .result_unpriv = REJECT,
1364 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1365 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1366 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1367 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1368 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1369 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1370 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1371 BPF_MOV64_IMM(BPF_REG_0, 0),
1372 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1373 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1374 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1375 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1376 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1377 BPF_MOV64_IMM(BPF_REG_0, 0),
1378 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1379 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1380 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1381 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1382 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1383 BPF_MOV64_IMM(BPF_REG_0, 0),
1384 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1385 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1386 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1387 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1388 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1389 BPF_MOV64_IMM(BPF_REG_0, 0),
1390 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1391 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1392 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1393 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1394 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1395 BPF_MOV64_IMM(BPF_REG_0, 0),
1398 .errstr_unpriv = "R1 pointer comparison",
1399 .result_unpriv = REJECT,
1403 "access skb fields ok",
1405 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1406 offsetof(struct __sk_buff, len)),
1407 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1408 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1409 offsetof(struct __sk_buff, mark)),
1410 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1411 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1412 offsetof(struct __sk_buff, pkt_type)),
1413 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1414 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1415 offsetof(struct __sk_buff, queue_mapping)),
1416 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1417 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1418 offsetof(struct __sk_buff, protocol)),
1419 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1420 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1421 offsetof(struct __sk_buff, vlan_present)),
1422 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1423 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1424 offsetof(struct __sk_buff, vlan_tci)),
1425 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1426 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1427 offsetof(struct __sk_buff, napi_id)),
1428 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1434 "access skb fields bad1",
1436 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1439 .errstr = "invalid bpf_context access",
1443 "access skb fields bad2",
1445 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1446 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1447 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1449 BPF_LD_MAP_FD(BPF_REG_1, 0),
1450 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1451 BPF_FUNC_map_lookup_elem),
1452 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1454 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1455 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1456 offsetof(struct __sk_buff, pkt_type)),
1459 .fixup_map_hash_8b = { 4 },
1460 .errstr = "different pointers",
1461 .errstr_unpriv = "R1 pointer comparison",
1465 "access skb fields bad3",
1467 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1468 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1469 offsetof(struct __sk_buff, pkt_type)),
1471 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1472 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1473 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1474 BPF_LD_MAP_FD(BPF_REG_1, 0),
1475 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1476 BPF_FUNC_map_lookup_elem),
1477 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1480 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1482 .fixup_map_hash_8b = { 6 },
1483 .errstr = "different pointers",
1484 .errstr_unpriv = "R1 pointer comparison",
1488 "access skb fields bad4",
1490 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1491 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1492 offsetof(struct __sk_buff, len)),
1493 BPF_MOV64_IMM(BPF_REG_0, 0),
1495 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1496 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1497 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1498 BPF_LD_MAP_FD(BPF_REG_1, 0),
1499 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1500 BPF_FUNC_map_lookup_elem),
1501 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1503 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1504 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1506 .fixup_map_hash_8b = { 7 },
1507 .errstr = "different pointers",
1508 .errstr_unpriv = "R1 pointer comparison",
1512 "invalid access __sk_buff family",
1514 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1515 offsetof(struct __sk_buff, family)),
1518 .errstr = "invalid bpf_context access",
1522 "invalid access __sk_buff remote_ip4",
1524 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1525 offsetof(struct __sk_buff, remote_ip4)),
1528 .errstr = "invalid bpf_context access",
1532 "invalid access __sk_buff local_ip4",
1534 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1535 offsetof(struct __sk_buff, local_ip4)),
1538 .errstr = "invalid bpf_context access",
1542 "invalid access __sk_buff remote_ip6",
1544 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1545 offsetof(struct __sk_buff, remote_ip6)),
1548 .errstr = "invalid bpf_context access",
1552 "invalid access __sk_buff local_ip6",
1554 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1555 offsetof(struct __sk_buff, local_ip6)),
1558 .errstr = "invalid bpf_context access",
1562 "invalid access __sk_buff remote_port",
1564 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1565 offsetof(struct __sk_buff, remote_port)),
1568 .errstr = "invalid bpf_context access",
1572 "invalid access __sk_buff remote_port",
1574 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1575 offsetof(struct __sk_buff, local_port)),
1578 .errstr = "invalid bpf_context access",
1582 "valid access __sk_buff family",
1584 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1585 offsetof(struct __sk_buff, family)),
1589 .prog_type = BPF_PROG_TYPE_SK_SKB,
1592 "valid access __sk_buff remote_ip4",
1594 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1595 offsetof(struct __sk_buff, remote_ip4)),
1599 .prog_type = BPF_PROG_TYPE_SK_SKB,
1602 "valid access __sk_buff local_ip4",
1604 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1605 offsetof(struct __sk_buff, local_ip4)),
1609 .prog_type = BPF_PROG_TYPE_SK_SKB,
1612 "valid access __sk_buff remote_ip6",
1614 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1615 offsetof(struct __sk_buff, remote_ip6[0])),
1616 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1617 offsetof(struct __sk_buff, remote_ip6[1])),
1618 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1619 offsetof(struct __sk_buff, remote_ip6[2])),
1620 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1621 offsetof(struct __sk_buff, remote_ip6[3])),
1625 .prog_type = BPF_PROG_TYPE_SK_SKB,
1628 "valid access __sk_buff local_ip6",
1630 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1631 offsetof(struct __sk_buff, local_ip6[0])),
1632 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1633 offsetof(struct __sk_buff, local_ip6[1])),
1634 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1635 offsetof(struct __sk_buff, local_ip6[2])),
1636 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1637 offsetof(struct __sk_buff, local_ip6[3])),
1641 .prog_type = BPF_PROG_TYPE_SK_SKB,
1644 "valid access __sk_buff remote_port",
1646 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1647 offsetof(struct __sk_buff, remote_port)),
1651 .prog_type = BPF_PROG_TYPE_SK_SKB,
1654 "valid access __sk_buff remote_port",
1656 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1657 offsetof(struct __sk_buff, local_port)),
1661 .prog_type = BPF_PROG_TYPE_SK_SKB,
1664 "invalid access of tc_classid for SK_SKB",
1666 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1667 offsetof(struct __sk_buff, tc_classid)),
1671 .prog_type = BPF_PROG_TYPE_SK_SKB,
1672 .errstr = "invalid bpf_context access",
1675 "invalid access of skb->mark for SK_SKB",
1677 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1678 offsetof(struct __sk_buff, mark)),
1682 .prog_type = BPF_PROG_TYPE_SK_SKB,
1683 .errstr = "invalid bpf_context access",
1686 "check skb->mark is not writeable by SK_SKB",
1688 BPF_MOV64_IMM(BPF_REG_0, 0),
1689 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1690 offsetof(struct __sk_buff, mark)),
1694 .prog_type = BPF_PROG_TYPE_SK_SKB,
1695 .errstr = "invalid bpf_context access",
1698 "check skb->tc_index is writeable by SK_SKB",
1700 BPF_MOV64_IMM(BPF_REG_0, 0),
1701 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1702 offsetof(struct __sk_buff, tc_index)),
1706 .prog_type = BPF_PROG_TYPE_SK_SKB,
1709 "check skb->priority is writeable by SK_SKB",
1711 BPF_MOV64_IMM(BPF_REG_0, 0),
1712 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1713 offsetof(struct __sk_buff, priority)),
1717 .prog_type = BPF_PROG_TYPE_SK_SKB,
1720 "direct packet read for SK_SKB",
1722 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1723 offsetof(struct __sk_buff, data)),
1724 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1725 offsetof(struct __sk_buff, data_end)),
1726 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1728 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1729 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1730 BPF_MOV64_IMM(BPF_REG_0, 0),
1734 .prog_type = BPF_PROG_TYPE_SK_SKB,
1737 "direct packet write for SK_SKB",
1739 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1740 offsetof(struct __sk_buff, data)),
1741 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1742 offsetof(struct __sk_buff, data_end)),
1743 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1744 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1745 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1746 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1747 BPF_MOV64_IMM(BPF_REG_0, 0),
1751 .prog_type = BPF_PROG_TYPE_SK_SKB,
1754 "overlapping checks for direct packet access SK_SKB",
1756 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1757 offsetof(struct __sk_buff, data)),
1758 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1759 offsetof(struct __sk_buff, data_end)),
1760 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1762 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1763 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1764 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1765 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1766 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1767 BPF_MOV64_IMM(BPF_REG_0, 0),
1771 .prog_type = BPF_PROG_TYPE_SK_SKB,
1774 "valid access family in SK_MSG",
1776 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1777 offsetof(struct sk_msg_md, family)),
1781 .prog_type = BPF_PROG_TYPE_SK_MSG,
1784 "valid access remote_ip4 in SK_MSG",
1786 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1787 offsetof(struct sk_msg_md, remote_ip4)),
1791 .prog_type = BPF_PROG_TYPE_SK_MSG,
1794 "valid access local_ip4 in SK_MSG",
1796 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1797 offsetof(struct sk_msg_md, local_ip4)),
1801 .prog_type = BPF_PROG_TYPE_SK_MSG,
1804 "valid access remote_port in SK_MSG",
1806 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1807 offsetof(struct sk_msg_md, remote_port)),
1811 .prog_type = BPF_PROG_TYPE_SK_MSG,
1814 "valid access local_port in SK_MSG",
1816 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1817 offsetof(struct sk_msg_md, local_port)),
1821 .prog_type = BPF_PROG_TYPE_SK_MSG,
1824 "valid access remote_ip6 in SK_MSG",
1826 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1827 offsetof(struct sk_msg_md, remote_ip6[0])),
1828 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1829 offsetof(struct sk_msg_md, remote_ip6[1])),
1830 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1831 offsetof(struct sk_msg_md, remote_ip6[2])),
1832 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1833 offsetof(struct sk_msg_md, remote_ip6[3])),
1837 .prog_type = BPF_PROG_TYPE_SK_SKB,
1840 "valid access local_ip6 in SK_MSG",
1842 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1843 offsetof(struct sk_msg_md, local_ip6[0])),
1844 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1845 offsetof(struct sk_msg_md, local_ip6[1])),
1846 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1847 offsetof(struct sk_msg_md, local_ip6[2])),
1848 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1849 offsetof(struct sk_msg_md, local_ip6[3])),
1853 .prog_type = BPF_PROG_TYPE_SK_SKB,
1856 "valid access size in SK_MSG",
1858 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1859 offsetof(struct sk_msg_md, size)),
1863 .prog_type = BPF_PROG_TYPE_SK_MSG,
1866 "invalid 64B read of size in SK_MSG",
1868 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1869 offsetof(struct sk_msg_md, size)),
1872 .errstr = "invalid bpf_context access",
1874 .prog_type = BPF_PROG_TYPE_SK_MSG,
1877 "invalid read past end of SK_MSG",
1879 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1880 offsetof(struct sk_msg_md, size) + 4),
1883 .errstr = "invalid bpf_context access",
1885 .prog_type = BPF_PROG_TYPE_SK_MSG,
1888 "invalid read offset in SK_MSG",
1890 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1891 offsetof(struct sk_msg_md, family) + 1),
1894 .errstr = "invalid bpf_context access",
1896 .prog_type = BPF_PROG_TYPE_SK_MSG,
1897 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1900 "direct packet read for SK_MSG",
1902 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1903 offsetof(struct sk_msg_md, data)),
1904 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1905 offsetof(struct sk_msg_md, data_end)),
1906 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1908 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1909 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1910 BPF_MOV64_IMM(BPF_REG_0, 0),
1914 .prog_type = BPF_PROG_TYPE_SK_MSG,
1917 "direct packet write for SK_MSG",
1919 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1920 offsetof(struct sk_msg_md, data)),
1921 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1922 offsetof(struct sk_msg_md, data_end)),
1923 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1925 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1926 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1927 BPF_MOV64_IMM(BPF_REG_0, 0),
1931 .prog_type = BPF_PROG_TYPE_SK_MSG,
1934 "overlapping checks for direct packet access SK_MSG",
1936 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1937 offsetof(struct sk_msg_md, data)),
1938 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1939 offsetof(struct sk_msg_md, data_end)),
1940 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1941 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1942 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1943 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1945 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1946 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1947 BPF_MOV64_IMM(BPF_REG_0, 0),
1951 .prog_type = BPF_PROG_TYPE_SK_MSG,
1954 "check skb->mark is not writeable by sockets",
1956 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1957 offsetof(struct __sk_buff, mark)),
1960 .errstr = "invalid bpf_context access",
1961 .errstr_unpriv = "R1 leaks addr",
1965 "check skb->tc_index is not writeable by sockets",
1967 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1968 offsetof(struct __sk_buff, tc_index)),
1971 .errstr = "invalid bpf_context access",
1972 .errstr_unpriv = "R1 leaks addr",
1976 "check cb access: byte",
1978 BPF_MOV64_IMM(BPF_REG_0, 0),
1979 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1980 offsetof(struct __sk_buff, cb[0])),
1981 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1982 offsetof(struct __sk_buff, cb[0]) + 1),
1983 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1984 offsetof(struct __sk_buff, cb[0]) + 2),
1985 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1986 offsetof(struct __sk_buff, cb[0]) + 3),
1987 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1988 offsetof(struct __sk_buff, cb[1])),
1989 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1990 offsetof(struct __sk_buff, cb[1]) + 1),
1991 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1992 offsetof(struct __sk_buff, cb[1]) + 2),
1993 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1994 offsetof(struct __sk_buff, cb[1]) + 3),
1995 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1996 offsetof(struct __sk_buff, cb[2])),
1997 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1998 offsetof(struct __sk_buff, cb[2]) + 1),
1999 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2000 offsetof(struct __sk_buff, cb[2]) + 2),
2001 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2002 offsetof(struct __sk_buff, cb[2]) + 3),
2003 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2004 offsetof(struct __sk_buff, cb[3])),
2005 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2006 offsetof(struct __sk_buff, cb[3]) + 1),
2007 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2008 offsetof(struct __sk_buff, cb[3]) + 2),
2009 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2010 offsetof(struct __sk_buff, cb[3]) + 3),
2011 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2012 offsetof(struct __sk_buff, cb[4])),
2013 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2014 offsetof(struct __sk_buff, cb[4]) + 1),
2015 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2016 offsetof(struct __sk_buff, cb[4]) + 2),
2017 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2018 offsetof(struct __sk_buff, cb[4]) + 3),
2019 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2020 offsetof(struct __sk_buff, cb[0])),
2021 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2022 offsetof(struct __sk_buff, cb[0]) + 1),
2023 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2024 offsetof(struct __sk_buff, cb[0]) + 2),
2025 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2026 offsetof(struct __sk_buff, cb[0]) + 3),
2027 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2028 offsetof(struct __sk_buff, cb[1])),
2029 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2030 offsetof(struct __sk_buff, cb[1]) + 1),
2031 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2032 offsetof(struct __sk_buff, cb[1]) + 2),
2033 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2034 offsetof(struct __sk_buff, cb[1]) + 3),
2035 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2036 offsetof(struct __sk_buff, cb[2])),
2037 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2038 offsetof(struct __sk_buff, cb[2]) + 1),
2039 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2040 offsetof(struct __sk_buff, cb[2]) + 2),
2041 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2042 offsetof(struct __sk_buff, cb[2]) + 3),
2043 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2044 offsetof(struct __sk_buff, cb[3])),
2045 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2046 offsetof(struct __sk_buff, cb[3]) + 1),
2047 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2048 offsetof(struct __sk_buff, cb[3]) + 2),
2049 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2050 offsetof(struct __sk_buff, cb[3]) + 3),
2051 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2052 offsetof(struct __sk_buff, cb[4])),
2053 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2054 offsetof(struct __sk_buff, cb[4]) + 1),
2055 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2056 offsetof(struct __sk_buff, cb[4]) + 2),
2057 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2058 offsetof(struct __sk_buff, cb[4]) + 3),
2064 "__sk_buff->hash, offset 0, byte store not permitted",
2066 BPF_MOV64_IMM(BPF_REG_0, 0),
2067 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2068 offsetof(struct __sk_buff, hash)),
2071 .errstr = "invalid bpf_context access",
2075 "__sk_buff->tc_index, offset 3, byte store not permitted",
2077 BPF_MOV64_IMM(BPF_REG_0, 0),
2078 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2079 offsetof(struct __sk_buff, tc_index) + 3),
2082 .errstr = "invalid bpf_context access",
2086 "check skb->hash byte load permitted",
2088 BPF_MOV64_IMM(BPF_REG_0, 0),
2089 #if __BYTE_ORDER == __LITTLE_ENDIAN
2090 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2091 offsetof(struct __sk_buff, hash)),
2093 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2094 offsetof(struct __sk_buff, hash) + 3),
2101 "check skb->hash byte load permitted 1",
2103 BPF_MOV64_IMM(BPF_REG_0, 0),
2104 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2105 offsetof(struct __sk_buff, hash) + 1),
2111 "check skb->hash byte load permitted 2",
2113 BPF_MOV64_IMM(BPF_REG_0, 0),
2114 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2115 offsetof(struct __sk_buff, hash) + 2),
2121 "check skb->hash byte load permitted 3",
2123 BPF_MOV64_IMM(BPF_REG_0, 0),
2124 #if __BYTE_ORDER == __LITTLE_ENDIAN
2125 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2126 offsetof(struct __sk_buff, hash) + 3),
2128 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2129 offsetof(struct __sk_buff, hash)),
2136 "check cb access: byte, wrong type",
2138 BPF_MOV64_IMM(BPF_REG_0, 0),
2139 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2140 offsetof(struct __sk_buff, cb[0])),
2143 .errstr = "invalid bpf_context access",
2145 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2148 "check cb access: half",
2150 BPF_MOV64_IMM(BPF_REG_0, 0),
2151 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2152 offsetof(struct __sk_buff, cb[0])),
2153 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2154 offsetof(struct __sk_buff, cb[0]) + 2),
2155 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2156 offsetof(struct __sk_buff, cb[1])),
2157 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2158 offsetof(struct __sk_buff, cb[1]) + 2),
2159 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2160 offsetof(struct __sk_buff, cb[2])),
2161 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2162 offsetof(struct __sk_buff, cb[2]) + 2),
2163 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2164 offsetof(struct __sk_buff, cb[3])),
2165 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2166 offsetof(struct __sk_buff, cb[3]) + 2),
2167 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2168 offsetof(struct __sk_buff, cb[4])),
2169 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2170 offsetof(struct __sk_buff, cb[4]) + 2),
2171 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2172 offsetof(struct __sk_buff, cb[0])),
2173 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2174 offsetof(struct __sk_buff, cb[0]) + 2),
2175 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2176 offsetof(struct __sk_buff, cb[1])),
2177 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2178 offsetof(struct __sk_buff, cb[1]) + 2),
2179 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2180 offsetof(struct __sk_buff, cb[2])),
2181 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2182 offsetof(struct __sk_buff, cb[2]) + 2),
2183 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2184 offsetof(struct __sk_buff, cb[3])),
2185 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2186 offsetof(struct __sk_buff, cb[3]) + 2),
2187 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2188 offsetof(struct __sk_buff, cb[4])),
2189 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2190 offsetof(struct __sk_buff, cb[4]) + 2),
2196 "check cb access: half, unaligned",
2198 BPF_MOV64_IMM(BPF_REG_0, 0),
2199 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2200 offsetof(struct __sk_buff, cb[0]) + 1),
2203 .errstr = "misaligned context access",
2205 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2208 "check __sk_buff->hash, offset 0, half store not permitted",
2210 BPF_MOV64_IMM(BPF_REG_0, 0),
2211 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2212 offsetof(struct __sk_buff, hash)),
2215 .errstr = "invalid bpf_context access",
2219 "check __sk_buff->tc_index, offset 2, half store not permitted",
2221 BPF_MOV64_IMM(BPF_REG_0, 0),
2222 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2223 offsetof(struct __sk_buff, tc_index) + 2),
2226 .errstr = "invalid bpf_context access",
2230 "check skb->hash half load permitted",
2232 BPF_MOV64_IMM(BPF_REG_0, 0),
2233 #if __BYTE_ORDER == __LITTLE_ENDIAN
2234 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2235 offsetof(struct __sk_buff, hash)),
2237 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2238 offsetof(struct __sk_buff, hash) + 2),
2245 "check skb->hash half load permitted 2",
2247 BPF_MOV64_IMM(BPF_REG_0, 0),
2248 #if __BYTE_ORDER == __LITTLE_ENDIAN
2249 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2250 offsetof(struct __sk_buff, hash) + 2),
2252 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2253 offsetof(struct __sk_buff, hash)),
2260 "check skb->hash half load not permitted, unaligned 1",
2262 BPF_MOV64_IMM(BPF_REG_0, 0),
2263 #if __BYTE_ORDER == __LITTLE_ENDIAN
2264 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2265 offsetof(struct __sk_buff, hash) + 1),
2267 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2268 offsetof(struct __sk_buff, hash) + 3),
2272 .errstr = "invalid bpf_context access",
2276 "check skb->hash half load not permitted, unaligned 3",
2278 BPF_MOV64_IMM(BPF_REG_0, 0),
2279 #if __BYTE_ORDER == __LITTLE_ENDIAN
2280 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2281 offsetof(struct __sk_buff, hash) + 3),
2283 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2284 offsetof(struct __sk_buff, hash) + 1),
2288 .errstr = "invalid bpf_context access",
2290 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2291 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2294 "check cb access: half, wrong type",
2296 BPF_MOV64_IMM(BPF_REG_0, 0),
2297 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2298 offsetof(struct __sk_buff, cb[0])),
2301 .errstr = "invalid bpf_context access",
2303 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2306 "check cb access: word",
2308 BPF_MOV64_IMM(BPF_REG_0, 0),
2309 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2310 offsetof(struct __sk_buff, cb[0])),
2311 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2312 offsetof(struct __sk_buff, cb[1])),
2313 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2314 offsetof(struct __sk_buff, cb[2])),
2315 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2316 offsetof(struct __sk_buff, cb[3])),
2317 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2318 offsetof(struct __sk_buff, cb[4])),
2319 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2320 offsetof(struct __sk_buff, cb[0])),
2321 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2322 offsetof(struct __sk_buff, cb[1])),
2323 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2324 offsetof(struct __sk_buff, cb[2])),
2325 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2326 offsetof(struct __sk_buff, cb[3])),
2327 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2328 offsetof(struct __sk_buff, cb[4])),
2334 "check cb access: word, unaligned 1",
2336 BPF_MOV64_IMM(BPF_REG_0, 0),
2337 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2338 offsetof(struct __sk_buff, cb[0]) + 2),
2341 .errstr = "misaligned context access",
2343 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2346 "check cb access: word, unaligned 2",
2348 BPF_MOV64_IMM(BPF_REG_0, 0),
2349 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2350 offsetof(struct __sk_buff, cb[4]) + 1),
2353 .errstr = "misaligned context access",
2355 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2358 "check cb access: word, unaligned 3",
2360 BPF_MOV64_IMM(BPF_REG_0, 0),
2361 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2362 offsetof(struct __sk_buff, cb[4]) + 2),
2365 .errstr = "misaligned context access",
2367 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2370 "check cb access: word, unaligned 4",
2372 BPF_MOV64_IMM(BPF_REG_0, 0),
2373 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2374 offsetof(struct __sk_buff, cb[4]) + 3),
2377 .errstr = "misaligned context access",
2379 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2382 "check cb access: double",
2384 BPF_MOV64_IMM(BPF_REG_0, 0),
2385 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2386 offsetof(struct __sk_buff, cb[0])),
2387 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2388 offsetof(struct __sk_buff, cb[2])),
2389 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2390 offsetof(struct __sk_buff, cb[0])),
2391 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2392 offsetof(struct __sk_buff, cb[2])),
2398 "check cb access: double, unaligned 1",
2400 BPF_MOV64_IMM(BPF_REG_0, 0),
2401 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2402 offsetof(struct __sk_buff, cb[1])),
2405 .errstr = "misaligned context access",
2407 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2410 "check cb access: double, unaligned 2",
2412 BPF_MOV64_IMM(BPF_REG_0, 0),
2413 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2414 offsetof(struct __sk_buff, cb[3])),
2417 .errstr = "misaligned context access",
2419 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2422 "check cb access: double, oob 1",
2424 BPF_MOV64_IMM(BPF_REG_0, 0),
2425 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2426 offsetof(struct __sk_buff, cb[4])),
2429 .errstr = "invalid bpf_context access",
2433 "check cb access: double, oob 2",
2435 BPF_MOV64_IMM(BPF_REG_0, 0),
2436 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2437 offsetof(struct __sk_buff, cb[4])),
2440 .errstr = "invalid bpf_context access",
2444 "check __sk_buff->ifindex dw store not permitted",
2446 BPF_MOV64_IMM(BPF_REG_0, 0),
2447 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2448 offsetof(struct __sk_buff, ifindex)),
2451 .errstr = "invalid bpf_context access",
2455 "check __sk_buff->ifindex dw load not permitted",
2457 BPF_MOV64_IMM(BPF_REG_0, 0),
2458 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2459 offsetof(struct __sk_buff, ifindex)),
2462 .errstr = "invalid bpf_context access",
2466 "check cb access: double, wrong type",
2468 BPF_MOV64_IMM(BPF_REG_0, 0),
2469 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2470 offsetof(struct __sk_buff, cb[0])),
2473 .errstr = "invalid bpf_context access",
2475 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2478 "check out of range skb->cb access",
2480 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2481 offsetof(struct __sk_buff, cb[0]) + 256),
2484 .errstr = "invalid bpf_context access",
2485 .errstr_unpriv = "",
2487 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2490 "write skb fields from socket prog",
2492 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2493 offsetof(struct __sk_buff, cb[4])),
2494 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2495 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2496 offsetof(struct __sk_buff, mark)),
2497 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2498 offsetof(struct __sk_buff, tc_index)),
2499 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2500 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2501 offsetof(struct __sk_buff, cb[0])),
2502 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2503 offsetof(struct __sk_buff, cb[2])),
2507 .errstr_unpriv = "R1 leaks addr",
2508 .result_unpriv = REJECT,
2511 "write skb fields from tc_cls_act prog",
2513 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2514 offsetof(struct __sk_buff, cb[0])),
2515 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2516 offsetof(struct __sk_buff, mark)),
2517 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2518 offsetof(struct __sk_buff, tc_index)),
2519 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2520 offsetof(struct __sk_buff, tc_index)),
2521 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2522 offsetof(struct __sk_buff, cb[3])),
2523 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2524 offsetof(struct __sk_buff, tstamp)),
2525 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2526 offsetof(struct __sk_buff, tstamp)),
2529 .errstr_unpriv = "",
2530 .result_unpriv = REJECT,
2532 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2535 "PTR_TO_STACK store/load",
2537 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2538 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2539 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2540 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2544 .retval = 0xfaceb00c,
2547 "PTR_TO_STACK store/load - bad alignment on off",
2549 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2551 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2552 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2556 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2559 "PTR_TO_STACK store/load - bad alignment on reg",
2561 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2563 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2564 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2568 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2571 "PTR_TO_STACK store/load - out of bounds low",
2573 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2574 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2575 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2576 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2580 .errstr = "invalid stack off=-79992 size=8",
2581 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2584 "PTR_TO_STACK store/load - out of bounds high",
2586 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2588 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2589 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2593 .errstr = "invalid stack off=0 size=8",
2596 "unpriv: return pointer",
2598 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2602 .result_unpriv = REJECT,
2603 .errstr_unpriv = "R0 leaks addr",
2604 .retval = POINTER_VALUE,
2607 "unpriv: add const to pointer",
2609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2610 BPF_MOV64_IMM(BPF_REG_0, 0),
2616 "unpriv: add pointer to pointer",
2618 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2619 BPF_MOV64_IMM(BPF_REG_0, 0),
2623 .errstr = "R1 pointer += pointer",
2626 "unpriv: neg pointer",
2628 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2629 BPF_MOV64_IMM(BPF_REG_0, 0),
2633 .result_unpriv = REJECT,
2634 .errstr_unpriv = "R1 pointer arithmetic",
2637 "unpriv: cmp pointer with const",
2639 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2640 BPF_MOV64_IMM(BPF_REG_0, 0),
2644 .result_unpriv = REJECT,
2645 .errstr_unpriv = "R1 pointer comparison",
2648 "unpriv: cmp pointer with pointer",
2650 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2651 BPF_MOV64_IMM(BPF_REG_0, 0),
2655 .result_unpriv = REJECT,
2656 .errstr_unpriv = "R10 pointer comparison",
2659 "unpriv: check that printk is disallowed",
2661 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2662 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2664 BPF_MOV64_IMM(BPF_REG_2, 8),
2665 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2667 BPF_FUNC_trace_printk),
2668 BPF_MOV64_IMM(BPF_REG_0, 0),
2671 .errstr_unpriv = "unknown func bpf_trace_printk#6",
2672 .result_unpriv = REJECT,
2676 "unpriv: pass pointer to helper function",
2678 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2679 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2681 BPF_LD_MAP_FD(BPF_REG_1, 0),
2682 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2683 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2684 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2685 BPF_FUNC_map_update_elem),
2686 BPF_MOV64_IMM(BPF_REG_0, 0),
2689 .fixup_map_hash_8b = { 3 },
2690 .errstr_unpriv = "R4 leaks addr",
2691 .result_unpriv = REJECT,
2695 "unpriv: indirectly pass pointer on stack to helper function",
2697 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2698 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2700 BPF_LD_MAP_FD(BPF_REG_1, 0),
2701 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2702 BPF_FUNC_map_lookup_elem),
2703 BPF_MOV64_IMM(BPF_REG_0, 0),
2706 .fixup_map_hash_8b = { 3 },
2707 .errstr = "invalid indirect read from stack off -8+0 size 8",
2711 "unpriv: mangle pointer on stack 1",
2713 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2714 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2715 BPF_MOV64_IMM(BPF_REG_0, 0),
2718 .errstr_unpriv = "attempt to corrupt spilled",
2719 .result_unpriv = REJECT,
2723 "unpriv: mangle pointer on stack 2",
2725 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2726 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2727 BPF_MOV64_IMM(BPF_REG_0, 0),
2730 .errstr_unpriv = "attempt to corrupt spilled",
2731 .result_unpriv = REJECT,
2735 "unpriv: read pointer from stack in small chunks",
2737 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2738 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2739 BPF_MOV64_IMM(BPF_REG_0, 0),
2742 .errstr = "invalid size",
2746 "unpriv: write pointer into ctx",
2748 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2749 BPF_MOV64_IMM(BPF_REG_0, 0),
2752 .errstr_unpriv = "R1 leaks addr",
2753 .result_unpriv = REJECT,
2754 .errstr = "invalid bpf_context access",
2758 "unpriv: spill/fill of ctx",
2760 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2762 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2763 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2764 BPF_MOV64_IMM(BPF_REG_0, 0),
2770 "unpriv: spill/fill of ctx 2",
2772 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2773 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2774 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2775 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2776 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2777 BPF_FUNC_get_hash_recalc),
2778 BPF_MOV64_IMM(BPF_REG_0, 0),
2782 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2785 "unpriv: spill/fill of ctx 3",
2787 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2788 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2789 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2790 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2791 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2792 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2793 BPF_FUNC_get_hash_recalc),
2797 .errstr = "R1 type=fp expected=ctx",
2798 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2801 "unpriv: spill/fill of ctx 4",
2803 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2805 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2806 BPF_MOV64_IMM(BPF_REG_0, 1),
2807 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2809 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2810 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2811 BPF_FUNC_get_hash_recalc),
2815 .errstr = "R1 type=inv expected=ctx",
2816 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2819 "unpriv: spill/fill of different pointers stx",
2821 BPF_MOV64_IMM(BPF_REG_3, 42),
2822 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2823 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2824 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2825 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2827 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2828 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2829 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2830 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2831 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2832 offsetof(struct __sk_buff, mark)),
2833 BPF_MOV64_IMM(BPF_REG_0, 0),
2837 .errstr = "same insn cannot be used with different pointers",
2838 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2841 "unpriv: spill/fill of different pointers stx - ctx and sock",
2843 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2844 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2846 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2848 /* void *target = &foo; */
2849 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2850 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2851 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2852 /* if (skb == NULL) *target = sock; */
2853 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2854 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2855 /* else *target = skb; */
2856 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2857 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2858 /* struct __sk_buff *skb = *target; */
2859 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2860 /* skb->mark = 42; */
2861 BPF_MOV64_IMM(BPF_REG_3, 42),
2862 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2863 offsetof(struct __sk_buff, mark)),
2864 /* if (sk) bpf_sk_release(sk) */
2865 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2866 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2867 BPF_MOV64_IMM(BPF_REG_0, 0),
2871 .errstr = "type=ctx expected=sock",
2872 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2875 "unpriv: spill/fill of different pointers stx - leak sock",
2877 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2878 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2880 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2882 /* void *target = &foo; */
2883 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2885 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2886 /* if (skb == NULL) *target = sock; */
2887 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2888 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2889 /* else *target = skb; */
2890 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2891 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2892 /* struct __sk_buff *skb = *target; */
2893 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2894 /* skb->mark = 42; */
2895 BPF_MOV64_IMM(BPF_REG_3, 42),
2896 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2897 offsetof(struct __sk_buff, mark)),
2901 //.errstr = "same insn cannot be used with different pointers",
2902 .errstr = "Unreleased reference",
2903 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2906 "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2908 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2909 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2911 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2913 /* void *target = &foo; */
2914 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2916 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2917 /* if (skb) *target = skb */
2918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2919 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2920 /* else *target = sock */
2921 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2922 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2923 /* struct bpf_sock *sk = *target; */
2924 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2925 /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2926 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2927 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2928 offsetof(struct bpf_sock, mark)),
2929 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2930 BPF_MOV64_IMM(BPF_REG_0, 0),
2934 .errstr = "same insn cannot be used with different pointers",
2935 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2938 "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2940 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2941 /* struct bpf_sock *sock = bpf_sock_lookup(...); */
2943 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2945 /* void *target = &foo; */
2946 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2947 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2948 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2949 /* if (skb) *target = skb */
2950 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2951 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2952 /* else *target = sock */
2953 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2954 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2955 /* struct bpf_sock *sk = *target; */
2956 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2957 /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2958 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2959 BPF_MOV64_IMM(BPF_REG_3, 42),
2960 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2961 offsetof(struct bpf_sock, mark)),
2962 BPF_EMIT_CALL(BPF_FUNC_sk_release),
2963 BPF_MOV64_IMM(BPF_REG_0, 0),
2967 //.errstr = "same insn cannot be used with different pointers",
2968 .errstr = "cannot write into socket",
2969 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2972 "unpriv: spill/fill of different pointers ldx",
2974 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2976 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2977 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2979 -(__s32)offsetof(struct bpf_perf_event_data,
2980 sample_period) - 8),
2981 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2982 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2983 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2984 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2985 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2986 offsetof(struct bpf_perf_event_data,
2988 BPF_MOV64_IMM(BPF_REG_0, 0),
2992 .errstr = "same insn cannot be used with different pointers",
2993 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2996 "unpriv: write pointer into map elem value",
2998 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2999 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3000 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3001 BPF_LD_MAP_FD(BPF_REG_1, 0),
3002 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3003 BPF_FUNC_map_lookup_elem),
3004 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3005 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
3008 .fixup_map_hash_8b = { 3 },
3009 .errstr_unpriv = "R0 leaks addr",
3010 .result_unpriv = REJECT,
3014 "alu32: mov u32 const",
3016 BPF_MOV32_IMM(BPF_REG_7, 0),
3017 BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
3018 BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
3019 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3020 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
3027 "unpriv: partial copy of pointer",
3029 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
3030 BPF_MOV64_IMM(BPF_REG_0, 0),
3033 .errstr_unpriv = "R10 partial copy",
3034 .result_unpriv = REJECT,
3038 "unpriv: pass pointer to tail_call",
3040 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
3041 BPF_LD_MAP_FD(BPF_REG_2, 0),
3042 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3043 BPF_FUNC_tail_call),
3044 BPF_MOV64_IMM(BPF_REG_0, 0),
3047 .fixup_prog1 = { 1 },
3048 .errstr_unpriv = "R3 leaks addr into helper",
3049 .result_unpriv = REJECT,
3053 "unpriv: cmp map pointer with zero",
3055 BPF_MOV64_IMM(BPF_REG_1, 0),
3056 BPF_LD_MAP_FD(BPF_REG_1, 0),
3057 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
3058 BPF_MOV64_IMM(BPF_REG_0, 0),
3061 .fixup_map_hash_8b = { 1 },
3062 .errstr_unpriv = "R1 pointer comparison",
3063 .result_unpriv = REJECT,
3067 "unpriv: write into frame pointer",
3069 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
3070 BPF_MOV64_IMM(BPF_REG_0, 0),
3073 .errstr = "frame pointer is read only",
3077 "unpriv: spill/fill frame pointer",
3079 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3080 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3081 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
3082 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
3083 BPF_MOV64_IMM(BPF_REG_0, 0),
3086 .errstr = "frame pointer is read only",
3090 "unpriv: cmp of frame pointer",
3092 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
3093 BPF_MOV64_IMM(BPF_REG_0, 0),
3096 .errstr_unpriv = "R10 pointer comparison",
3097 .result_unpriv = REJECT,
3101 "unpriv: adding of fp",
3103 BPF_MOV64_IMM(BPF_REG_0, 0),
3104 BPF_MOV64_IMM(BPF_REG_1, 0),
3105 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
3106 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
3109 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3110 .result_unpriv = REJECT,
3114 "unpriv: cmp of stack pointer",
3116 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3118 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
3119 BPF_MOV64_IMM(BPF_REG_0, 0),
3122 .errstr_unpriv = "R2 pointer comparison",
3123 .result_unpriv = REJECT,
3127 "runtime/jit: tail_call within bounds, prog once",
3129 BPF_MOV64_IMM(BPF_REG_3, 0),
3130 BPF_LD_MAP_FD(BPF_REG_2, 0),
3131 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3132 BPF_FUNC_tail_call),
3133 BPF_MOV64_IMM(BPF_REG_0, 1),
3136 .fixup_prog1 = { 1 },
3141 "runtime/jit: tail_call within bounds, prog loop",
3143 BPF_MOV64_IMM(BPF_REG_3, 1),
3144 BPF_LD_MAP_FD(BPF_REG_2, 0),
3145 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3146 BPF_FUNC_tail_call),
3147 BPF_MOV64_IMM(BPF_REG_0, 1),
3150 .fixup_prog1 = { 1 },
3155 "runtime/jit: tail_call within bounds, no prog",
3157 BPF_MOV64_IMM(BPF_REG_3, 2),
3158 BPF_LD_MAP_FD(BPF_REG_2, 0),
3159 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3160 BPF_FUNC_tail_call),
3161 BPF_MOV64_IMM(BPF_REG_0, 1),
3164 .fixup_prog1 = { 1 },
3169 "runtime/jit: tail_call out of bounds",
3171 BPF_MOV64_IMM(BPF_REG_3, 256),
3172 BPF_LD_MAP_FD(BPF_REG_2, 0),
3173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3174 BPF_FUNC_tail_call),
3175 BPF_MOV64_IMM(BPF_REG_0, 2),
3178 .fixup_prog1 = { 1 },
3183 "runtime/jit: pass negative index to tail_call",
3185 BPF_MOV64_IMM(BPF_REG_3, -1),
3186 BPF_LD_MAP_FD(BPF_REG_2, 0),
3187 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3188 BPF_FUNC_tail_call),
3189 BPF_MOV64_IMM(BPF_REG_0, 2),
3192 .fixup_prog1 = { 1 },
3197 "runtime/jit: pass > 32bit index to tail_call",
3199 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3200 BPF_LD_MAP_FD(BPF_REG_2, 0),
3201 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3202 BPF_FUNC_tail_call),
3203 BPF_MOV64_IMM(BPF_REG_0, 2),
3206 .fixup_prog1 = { 2 },
3209 /* Verifier rewrite for unpriv skips tail call here. */
3213 "PTR_TO_STACK check high 1",
3215 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
3217 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3218 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3225 "PTR_TO_STACK check high 2",
3227 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3228 BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
3229 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
3236 "PTR_TO_STACK check high 3",
3238 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3239 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
3240 BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
3241 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
3244 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3245 .result_unpriv = REJECT,
3250 "PTR_TO_STACK check high 4",
3252 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3253 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
3254 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3255 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3258 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3259 .errstr = "invalid stack off=0 size=1",
3263 "PTR_TO_STACK check high 5",
3265 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
3267 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3268 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3272 .errstr = "invalid stack off",
3275 "PTR_TO_STACK check high 6",
3277 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
3279 BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
3280 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
3284 .errstr = "invalid stack off",
3287 "PTR_TO_STACK check high 7",
3289 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
3291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
3292 BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
3293 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
3297 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3298 .errstr = "fp pointer offset",
3301 "PTR_TO_STACK check low 1",
3303 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3304 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
3305 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3306 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3313 "PTR_TO_STACK check low 2",
3315 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
3317 BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
3318 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
3321 .result_unpriv = REJECT,
3322 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3327 "PTR_TO_STACK check low 3",
3329 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
3331 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3332 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3335 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3336 .errstr = "invalid stack off=-513 size=1",
3340 "PTR_TO_STACK check low 4",
3342 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
3344 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3345 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3349 .errstr = "math between fp pointer",
3352 "PTR_TO_STACK check low 5",
3354 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
3356 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3357 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3361 .errstr = "invalid stack off",
3364 "PTR_TO_STACK check low 6",
3366 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3367 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
3368 BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
3369 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
3373 .errstr = "invalid stack off",
3376 "PTR_TO_STACK check low 7",
3378 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3379 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
3380 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
3381 BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
3382 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
3386 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3387 .errstr = "fp pointer offset",
3390 "PTR_TO_STACK mixed reg/k, 1",
3392 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
3394 BPF_MOV64_IMM(BPF_REG_2, -3),
3395 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
3396 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3397 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3404 "PTR_TO_STACK mixed reg/k, 2",
3406 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3407 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
3408 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3409 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
3410 BPF_MOV64_IMM(BPF_REG_2, -3),
3411 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
3412 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3413 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
3414 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
3421 "PTR_TO_STACK mixed reg/k, 3",
3423 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
3425 BPF_MOV64_IMM(BPF_REG_2, -3),
3426 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
3427 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3428 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3437 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3438 BPF_MOV64_IMM(BPF_REG_2, -3),
3439 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
3440 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3441 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3444 .result_unpriv = REJECT,
3445 .errstr_unpriv = "invalid stack off=0 size=1",
3450 "stack pointer arithmetic",
3452 BPF_MOV64_IMM(BPF_REG_1, 4),
3453 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3454 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3457 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3458 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3459 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3460 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3462 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3463 BPF_MOV64_IMM(BPF_REG_0, 0),
3469 "raw_stack: no skb_load_bytes",
3471 BPF_MOV64_IMM(BPF_REG_2, 4),
3472 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3473 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3474 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3475 BPF_MOV64_IMM(BPF_REG_4, 8),
3476 /* Call to skb_load_bytes() omitted. */
3477 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3481 .errstr = "invalid read from stack off -8+0 size 8",
3482 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3485 "raw_stack: skb_load_bytes, negative len",
3487 BPF_MOV64_IMM(BPF_REG_2, 4),
3488 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3489 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3490 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3491 BPF_MOV64_IMM(BPF_REG_4, -8),
3492 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3493 BPF_FUNC_skb_load_bytes),
3494 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3498 .errstr = "R4 min value is negative",
3499 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3502 "raw_stack: skb_load_bytes, negative len 2",
3504 BPF_MOV64_IMM(BPF_REG_2, 4),
3505 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3507 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3508 BPF_MOV64_IMM(BPF_REG_4, ~0),
3509 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3510 BPF_FUNC_skb_load_bytes),
3511 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3515 .errstr = "R4 min value is negative",
3516 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3519 "raw_stack: skb_load_bytes, zero len",
3521 BPF_MOV64_IMM(BPF_REG_2, 4),
3522 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3523 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3524 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3525 BPF_MOV64_IMM(BPF_REG_4, 0),
3526 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3527 BPF_FUNC_skb_load_bytes),
3528 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3532 .errstr = "invalid stack type R3",
3533 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3536 "raw_stack: skb_load_bytes, no init",
3538 BPF_MOV64_IMM(BPF_REG_2, 4),
3539 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3540 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3541 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3542 BPF_MOV64_IMM(BPF_REG_4, 8),
3543 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3544 BPF_FUNC_skb_load_bytes),
3545 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3549 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3552 "raw_stack: skb_load_bytes, init",
3554 BPF_MOV64_IMM(BPF_REG_2, 4),
3555 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3557 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3558 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3559 BPF_MOV64_IMM(BPF_REG_4, 8),
3560 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3561 BPF_FUNC_skb_load_bytes),
3562 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3566 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3569 "raw_stack: skb_load_bytes, spilled regs around bounds",
3571 BPF_MOV64_IMM(BPF_REG_2, 4),
3572 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3573 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3574 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3575 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3576 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3577 BPF_MOV64_IMM(BPF_REG_4, 8),
3578 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3579 BPF_FUNC_skb_load_bytes),
3580 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3581 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3582 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3583 offsetof(struct __sk_buff, mark)),
3584 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3585 offsetof(struct __sk_buff, priority)),
3586 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3590 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3593 "raw_stack: skb_load_bytes, spilled regs corruption",
3595 BPF_MOV64_IMM(BPF_REG_2, 4),
3596 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3598 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3599 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3600 BPF_MOV64_IMM(BPF_REG_4, 8),
3601 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3602 BPF_FUNC_skb_load_bytes),
3603 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3604 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3605 offsetof(struct __sk_buff, mark)),
3609 .errstr = "R0 invalid mem access 'inv'",
3610 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3611 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3614 "raw_stack: skb_load_bytes, spilled regs corruption 2",
3616 BPF_MOV64_IMM(BPF_REG_2, 4),
3617 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3619 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3620 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3621 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3622 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3623 BPF_MOV64_IMM(BPF_REG_4, 8),
3624 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3625 BPF_FUNC_skb_load_bytes),
3626 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3627 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3628 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3629 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3630 offsetof(struct __sk_buff, mark)),
3631 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3632 offsetof(struct __sk_buff, priority)),
3633 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3634 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3635 offsetof(struct __sk_buff, pkt_type)),
3636 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3640 .errstr = "R3 invalid mem access 'inv'",
3641 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3642 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3645 "raw_stack: skb_load_bytes, spilled regs + data",
3647 BPF_MOV64_IMM(BPF_REG_2, 4),
3648 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3649 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3650 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3651 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3652 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
3653 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3654 BPF_MOV64_IMM(BPF_REG_4, 8),
3655 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3656 BPF_FUNC_skb_load_bytes),
3657 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3658 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
3659 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
3660 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3661 offsetof(struct __sk_buff, mark)),
3662 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3663 offsetof(struct __sk_buff, priority)),
3664 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3665 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3669 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3672 "raw_stack: skb_load_bytes, invalid access 1",
3674 BPF_MOV64_IMM(BPF_REG_2, 4),
3675 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3677 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3678 BPF_MOV64_IMM(BPF_REG_4, 8),
3679 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3680 BPF_FUNC_skb_load_bytes),
3681 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3685 .errstr = "invalid stack type R3 off=-513 access_size=8",
3686 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3689 "raw_stack: skb_load_bytes, invalid access 2",
3691 BPF_MOV64_IMM(BPF_REG_2, 4),
3692 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3693 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3694 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3695 BPF_MOV64_IMM(BPF_REG_4, 8),
3696 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3697 BPF_FUNC_skb_load_bytes),
3698 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3702 .errstr = "invalid stack type R3 off=-1 access_size=8",
3703 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3706 "raw_stack: skb_load_bytes, invalid access 3",
3708 BPF_MOV64_IMM(BPF_REG_2, 4),
3709 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3711 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3712 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3713 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3714 BPF_FUNC_skb_load_bytes),
3715 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3719 .errstr = "R4 min value is negative",
3720 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3723 "raw_stack: skb_load_bytes, invalid access 4",
3725 BPF_MOV64_IMM(BPF_REG_2, 4),
3726 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3728 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3729 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3730 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3731 BPF_FUNC_skb_load_bytes),
3732 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3736 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3737 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3740 "raw_stack: skb_load_bytes, invalid access 5",
3742 BPF_MOV64_IMM(BPF_REG_2, 4),
3743 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3744 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3745 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3746 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3747 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3748 BPF_FUNC_skb_load_bytes),
3749 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3753 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3754 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3757 "raw_stack: skb_load_bytes, invalid access 6",
3759 BPF_MOV64_IMM(BPF_REG_2, 4),
3760 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3762 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3763 BPF_MOV64_IMM(BPF_REG_4, 0),
3764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3765 BPF_FUNC_skb_load_bytes),
3766 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3770 .errstr = "invalid stack type R3 off=-512 access_size=0",
3771 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3774 "raw_stack: skb_load_bytes, large access",
3776 BPF_MOV64_IMM(BPF_REG_2, 4),
3777 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3779 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3780 BPF_MOV64_IMM(BPF_REG_4, 512),
3781 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3782 BPF_FUNC_skb_load_bytes),
3783 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3787 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3790 "context stores via ST",
3792 BPF_MOV64_IMM(BPF_REG_0, 0),
3793 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3796 .errstr = "BPF_ST stores into R1 ctx is not allowed",
3798 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3801 "context stores via XADD",
3803 BPF_MOV64_IMM(BPF_REG_0, 0),
3804 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3805 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3808 .errstr = "BPF_XADD stores into R1 ctx is not allowed",
3810 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3813 "direct packet access: test1",
3815 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3816 offsetof(struct __sk_buff, data)),
3817 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3818 offsetof(struct __sk_buff, data_end)),
3819 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3820 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3821 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3822 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3823 BPF_MOV64_IMM(BPF_REG_0, 0),
3827 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3830 "direct packet access: test2",
3832 BPF_MOV64_IMM(BPF_REG_0, 1),
3833 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3834 offsetof(struct __sk_buff, data_end)),
3835 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3836 offsetof(struct __sk_buff, data)),
3837 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3839 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3840 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3841 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3842 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3843 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3844 offsetof(struct __sk_buff, data)),
3845 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3846 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3847 offsetof(struct __sk_buff, len)),
3848 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3849 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3850 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3851 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3853 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3854 offsetof(struct __sk_buff, data_end)),
3855 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3856 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3857 BPF_MOV64_IMM(BPF_REG_0, 0),
3861 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3864 "direct packet access: test3",
3866 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3867 offsetof(struct __sk_buff, data)),
3868 BPF_MOV64_IMM(BPF_REG_0, 0),
3871 .errstr = "invalid bpf_context access off=76",
3873 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3876 "direct packet access: test4 (write)",
3878 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3879 offsetof(struct __sk_buff, data)),
3880 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3881 offsetof(struct __sk_buff, data_end)),
3882 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3884 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3885 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3886 BPF_MOV64_IMM(BPF_REG_0, 0),
3890 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3893 "direct packet access: test5 (pkt_end >= reg, good access)",
3895 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3896 offsetof(struct __sk_buff, data)),
3897 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3898 offsetof(struct __sk_buff, data_end)),
3899 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3901 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3902 BPF_MOV64_IMM(BPF_REG_0, 1),
3904 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3905 BPF_MOV64_IMM(BPF_REG_0, 0),
3909 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3912 "direct packet access: test6 (pkt_end >= reg, bad access)",
3914 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3915 offsetof(struct __sk_buff, data)),
3916 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3917 offsetof(struct __sk_buff, data_end)),
3918 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3920 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3921 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3922 BPF_MOV64_IMM(BPF_REG_0, 1),
3924 BPF_MOV64_IMM(BPF_REG_0, 0),
3927 .errstr = "invalid access to packet",
3929 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3932 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3934 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3935 offsetof(struct __sk_buff, data)),
3936 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3937 offsetof(struct __sk_buff, data_end)),
3938 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3940 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3941 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3942 BPF_MOV64_IMM(BPF_REG_0, 1),
3944 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3945 BPF_MOV64_IMM(BPF_REG_0, 0),
3948 .errstr = "invalid access to packet",
3950 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3953 "direct packet access: test8 (double test, variant 1)",
3955 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3956 offsetof(struct __sk_buff, data)),
3957 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3958 offsetof(struct __sk_buff, data_end)),
3959 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3961 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3962 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3963 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3964 BPF_MOV64_IMM(BPF_REG_0, 1),
3966 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3967 BPF_MOV64_IMM(BPF_REG_0, 0),
3971 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3974 "direct packet access: test9 (double test, variant 2)",
3976 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3977 offsetof(struct __sk_buff, data)),
3978 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3979 offsetof(struct __sk_buff, data_end)),
3980 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3982 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3983 BPF_MOV64_IMM(BPF_REG_0, 1),
3985 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3986 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3987 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3988 BPF_MOV64_IMM(BPF_REG_0, 0),
3992 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3995 "direct packet access: test10 (write invalid)",
3997 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3998 offsetof(struct __sk_buff, data)),
3999 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4000 offsetof(struct __sk_buff, data_end)),
4001 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4002 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4003 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
4004 BPF_MOV64_IMM(BPF_REG_0, 0),
4006 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4007 BPF_MOV64_IMM(BPF_REG_0, 0),
4010 .errstr = "invalid access to packet",
4012 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4015 "direct packet access: test11 (shift, good access)",
4017 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4018 offsetof(struct __sk_buff, data)),
4019 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4020 offsetof(struct __sk_buff, data_end)),
4021 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
4023 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
4024 BPF_MOV64_IMM(BPF_REG_3, 144),
4025 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
4026 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
4027 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
4028 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
4029 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
4030 BPF_MOV64_IMM(BPF_REG_0, 1),
4032 BPF_MOV64_IMM(BPF_REG_0, 0),
4036 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4040 "direct packet access: test12 (and, good access)",
4042 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4043 offsetof(struct __sk_buff, data)),
4044 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4045 offsetof(struct __sk_buff, data_end)),
4046 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
4048 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
4049 BPF_MOV64_IMM(BPF_REG_3, 144),
4050 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
4051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
4052 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
4053 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
4054 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
4055 BPF_MOV64_IMM(BPF_REG_0, 1),
4057 BPF_MOV64_IMM(BPF_REG_0, 0),
4061 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4065 "direct packet access: test13 (branches, good access)",
4067 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4068 offsetof(struct __sk_buff, data)),
4069 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4070 offsetof(struct __sk_buff, data_end)),
4071 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4072 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
4073 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
4074 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4075 offsetof(struct __sk_buff, mark)),
4076 BPF_MOV64_IMM(BPF_REG_4, 1),
4077 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
4078 BPF_MOV64_IMM(BPF_REG_3, 14),
4079 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
4080 BPF_MOV64_IMM(BPF_REG_3, 24),
4081 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
4082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
4083 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
4084 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
4085 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
4086 BPF_MOV64_IMM(BPF_REG_0, 1),
4088 BPF_MOV64_IMM(BPF_REG_0, 0),
4092 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4096 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
4098 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4099 offsetof(struct __sk_buff, data)),
4100 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4101 offsetof(struct __sk_buff, data_end)),
4102 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
4104 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
4105 BPF_MOV64_IMM(BPF_REG_5, 12),
4106 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
4107 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
4108 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
4109 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
4110 BPF_MOV64_IMM(BPF_REG_0, 1),
4112 BPF_MOV64_IMM(BPF_REG_0, 0),
4116 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4120 "direct packet access: test15 (spill with xadd)",
4122 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4123 offsetof(struct __sk_buff, data)),
4124 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4125 offsetof(struct __sk_buff, data_end)),
4126 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4128 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
4129 BPF_MOV64_IMM(BPF_REG_5, 4096),
4130 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
4131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
4132 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
4133 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
4134 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
4135 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
4136 BPF_MOV64_IMM(BPF_REG_0, 0),
4139 .errstr = "R2 invalid mem access 'inv'",
4141 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4142 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4145 "direct packet access: test16 (arith on data_end)",
4147 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4148 offsetof(struct __sk_buff, data)),
4149 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4150 offsetof(struct __sk_buff, data_end)),
4151 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4152 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
4154 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4155 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4156 BPF_MOV64_IMM(BPF_REG_0, 0),
4159 .errstr = "R3 pointer arithmetic on pkt_end",
4161 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4164 "direct packet access: test17 (pruning, alignment)",
4166 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4167 offsetof(struct __sk_buff, data)),
4168 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4169 offsetof(struct __sk_buff, data_end)),
4170 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4171 offsetof(struct __sk_buff, mark)),
4172 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
4174 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
4175 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4176 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
4177 BPF_MOV64_IMM(BPF_REG_0, 0),
4179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
4182 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
4184 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4185 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
4188 "direct packet access: test18 (imm += pkt_ptr, 1)",
4190 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4191 offsetof(struct __sk_buff, data)),
4192 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4193 offsetof(struct __sk_buff, data_end)),
4194 BPF_MOV64_IMM(BPF_REG_0, 8),
4195 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4196 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4197 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4198 BPF_MOV64_IMM(BPF_REG_0, 0),
4202 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4205 "direct packet access: test19 (imm += pkt_ptr, 2)",
4207 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4208 offsetof(struct __sk_buff, data)),
4209 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4210 offsetof(struct __sk_buff, data_end)),
4211 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4213 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
4214 BPF_MOV64_IMM(BPF_REG_4, 4),
4215 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4216 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
4217 BPF_MOV64_IMM(BPF_REG_0, 0),
4221 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4224 "direct packet access: test20 (x += pkt_ptr, 1)",
4226 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4227 offsetof(struct __sk_buff, data)),
4228 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4229 offsetof(struct __sk_buff, data_end)),
4230 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4231 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4232 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4233 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
4234 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4235 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4236 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
4238 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
4239 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
4240 BPF_MOV64_IMM(BPF_REG_0, 0),
4243 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4245 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4248 "direct packet access: test21 (x += pkt_ptr, 2)",
4250 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4251 offsetof(struct __sk_buff, data)),
4252 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4253 offsetof(struct __sk_buff, data_end)),
4254 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4255 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4256 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
4257 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
4258 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
4259 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
4260 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
4261 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4262 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
4264 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
4265 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
4266 BPF_MOV64_IMM(BPF_REG_0, 0),
4269 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4271 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4274 "direct packet access: test22 (x += pkt_ptr, 3)",
4276 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4277 offsetof(struct __sk_buff, data)),
4278 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4279 offsetof(struct __sk_buff, data_end)),
4280 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4282 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
4283 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
4284 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
4285 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
4286 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
4287 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
4288 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
4289 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
4290 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
4291 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4292 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
4293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
4294 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
4295 BPF_MOV64_IMM(BPF_REG_2, 1),
4296 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
4297 BPF_MOV64_IMM(BPF_REG_0, 0),
4300 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4302 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4305 "direct packet access: test23 (x += pkt_ptr, 4)",
4307 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4308 offsetof(struct __sk_buff, data)),
4309 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4310 offsetof(struct __sk_buff, data_end)),
4311 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4312 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4313 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4314 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
4315 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4316 BPF_MOV64_IMM(BPF_REG_0, 31),
4317 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4318 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4319 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4320 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
4321 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4322 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4323 BPF_MOV64_IMM(BPF_REG_0, 0),
4326 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4328 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
4329 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4332 "direct packet access: test24 (x += pkt_ptr, 5)",
4334 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4335 offsetof(struct __sk_buff, data)),
4336 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4337 offsetof(struct __sk_buff, data_end)),
4338 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4339 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4340 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4341 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
4342 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4343 BPF_MOV64_IMM(BPF_REG_0, 64),
4344 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4345 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4346 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
4348 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4349 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4350 BPF_MOV64_IMM(BPF_REG_0, 0),
4353 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4355 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4358 "direct packet access: test25 (marking on <, good access)",
4360 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4361 offsetof(struct __sk_buff, data)),
4362 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4363 offsetof(struct __sk_buff, data_end)),
4364 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4365 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4366 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
4367 BPF_MOV64_IMM(BPF_REG_0, 0),
4369 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4370 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4373 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4376 "direct packet access: test26 (marking on <, bad access)",
4378 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4379 offsetof(struct __sk_buff, data)),
4380 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4381 offsetof(struct __sk_buff, data_end)),
4382 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4384 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4385 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4386 BPF_MOV64_IMM(BPF_REG_0, 0),
4388 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4391 .errstr = "invalid access to packet",
4392 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4395 "direct packet access: test27 (marking on <=, good access)",
4397 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4398 offsetof(struct __sk_buff, data)),
4399 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4400 offsetof(struct __sk_buff, data_end)),
4401 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4403 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4404 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4405 BPF_MOV64_IMM(BPF_REG_0, 1),
4409 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4413 "direct packet access: test28 (marking on <=, bad access)",
4415 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4416 offsetof(struct __sk_buff, data)),
4417 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4418 offsetof(struct __sk_buff, data_end)),
4419 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4421 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4422 BPF_MOV64_IMM(BPF_REG_0, 1),
4424 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4425 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4428 .errstr = "invalid access to packet",
4429 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4432 "helper access to packet: test1, valid packet_ptr range",
4434 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4435 offsetof(struct xdp_md, data)),
4436 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4437 offsetof(struct xdp_md, data_end)),
4438 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4440 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4441 BPF_LD_MAP_FD(BPF_REG_1, 0),
4442 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4443 BPF_MOV64_IMM(BPF_REG_4, 0),
4444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4445 BPF_FUNC_map_update_elem),
4446 BPF_MOV64_IMM(BPF_REG_0, 0),
4449 .fixup_map_hash_8b = { 5 },
4450 .result_unpriv = ACCEPT,
4452 .prog_type = BPF_PROG_TYPE_XDP,
4455 "helper access to packet: test2, unchecked packet_ptr",
4457 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4458 offsetof(struct xdp_md, data)),
4459 BPF_LD_MAP_FD(BPF_REG_1, 0),
4460 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4461 BPF_FUNC_map_lookup_elem),
4462 BPF_MOV64_IMM(BPF_REG_0, 0),
4465 .fixup_map_hash_8b = { 1 },
4467 .errstr = "invalid access to packet",
4468 .prog_type = BPF_PROG_TYPE_XDP,
4471 "helper access to packet: test3, variable add",
4473 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4474 offsetof(struct xdp_md, data)),
4475 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4476 offsetof(struct xdp_md, data_end)),
4477 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4479 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4480 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4481 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4482 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4483 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4484 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4485 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4486 BPF_LD_MAP_FD(BPF_REG_1, 0),
4487 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4488 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4489 BPF_FUNC_map_lookup_elem),
4490 BPF_MOV64_IMM(BPF_REG_0, 0),
4493 .fixup_map_hash_8b = { 11 },
4495 .prog_type = BPF_PROG_TYPE_XDP,
4498 "helper access to packet: test4, packet_ptr with bad range",
4500 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4501 offsetof(struct xdp_md, data)),
4502 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4503 offsetof(struct xdp_md, data_end)),
4504 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4506 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4507 BPF_MOV64_IMM(BPF_REG_0, 0),
4509 BPF_LD_MAP_FD(BPF_REG_1, 0),
4510 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4511 BPF_FUNC_map_lookup_elem),
4512 BPF_MOV64_IMM(BPF_REG_0, 0),
4515 .fixup_map_hash_8b = { 7 },
4517 .errstr = "invalid access to packet",
4518 .prog_type = BPF_PROG_TYPE_XDP,
4521 "helper access to packet: test5, packet_ptr with too short range",
4523 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4524 offsetof(struct xdp_md, data)),
4525 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4526 offsetof(struct xdp_md, data_end)),
4527 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4528 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4530 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4531 BPF_LD_MAP_FD(BPF_REG_1, 0),
4532 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4533 BPF_FUNC_map_lookup_elem),
4534 BPF_MOV64_IMM(BPF_REG_0, 0),
4537 .fixup_map_hash_8b = { 6 },
4539 .errstr = "invalid access to packet",
4540 .prog_type = BPF_PROG_TYPE_XDP,
4543 "helper access to packet: test6, cls valid packet_ptr range",
4545 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4546 offsetof(struct __sk_buff, data)),
4547 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4548 offsetof(struct __sk_buff, data_end)),
4549 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4551 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4552 BPF_LD_MAP_FD(BPF_REG_1, 0),
4553 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4554 BPF_MOV64_IMM(BPF_REG_4, 0),
4555 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4556 BPF_FUNC_map_update_elem),
4557 BPF_MOV64_IMM(BPF_REG_0, 0),
4560 .fixup_map_hash_8b = { 5 },
4562 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4565 "helper access to packet: test7, cls unchecked packet_ptr",
4567 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4568 offsetof(struct __sk_buff, data)),
4569 BPF_LD_MAP_FD(BPF_REG_1, 0),
4570 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4571 BPF_FUNC_map_lookup_elem),
4572 BPF_MOV64_IMM(BPF_REG_0, 0),
4575 .fixup_map_hash_8b = { 1 },
4577 .errstr = "invalid access to packet",
4578 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4581 "helper access to packet: test8, cls variable add",
4583 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4584 offsetof(struct __sk_buff, data)),
4585 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4586 offsetof(struct __sk_buff, data_end)),
4587 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4589 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4590 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4591 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4592 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4593 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4595 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4596 BPF_LD_MAP_FD(BPF_REG_1, 0),
4597 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4598 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4599 BPF_FUNC_map_lookup_elem),
4600 BPF_MOV64_IMM(BPF_REG_0, 0),
4603 .fixup_map_hash_8b = { 11 },
4605 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4608 "helper access to packet: test9, cls packet_ptr with bad range",
4610 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4611 offsetof(struct __sk_buff, data)),
4612 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4613 offsetof(struct __sk_buff, data_end)),
4614 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4616 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4617 BPF_MOV64_IMM(BPF_REG_0, 0),
4619 BPF_LD_MAP_FD(BPF_REG_1, 0),
4620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4621 BPF_FUNC_map_lookup_elem),
4622 BPF_MOV64_IMM(BPF_REG_0, 0),
4625 .fixup_map_hash_8b = { 7 },
4627 .errstr = "invalid access to packet",
4628 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4631 "helper access to packet: test10, cls packet_ptr with too short range",
4633 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4634 offsetof(struct __sk_buff, data)),
4635 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4636 offsetof(struct __sk_buff, data_end)),
4637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4638 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4640 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4641 BPF_LD_MAP_FD(BPF_REG_1, 0),
4642 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4643 BPF_FUNC_map_lookup_elem),
4644 BPF_MOV64_IMM(BPF_REG_0, 0),
4647 .fixup_map_hash_8b = { 6 },
4649 .errstr = "invalid access to packet",
4650 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4653 "helper access to packet: test11, cls unsuitable helper 1",
4655 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4656 offsetof(struct __sk_buff, data)),
4657 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4658 offsetof(struct __sk_buff, data_end)),
4659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4660 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4662 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4663 BPF_MOV64_IMM(BPF_REG_2, 0),
4664 BPF_MOV64_IMM(BPF_REG_4, 42),
4665 BPF_MOV64_IMM(BPF_REG_5, 0),
4666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4667 BPF_FUNC_skb_store_bytes),
4668 BPF_MOV64_IMM(BPF_REG_0, 0),
4672 .errstr = "helper access to the packet",
4673 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4676 "helper access to packet: test12, cls unsuitable helper 2",
4678 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4679 offsetof(struct __sk_buff, data)),
4680 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4681 offsetof(struct __sk_buff, data_end)),
4682 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4684 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4685 BPF_MOV64_IMM(BPF_REG_2, 0),
4686 BPF_MOV64_IMM(BPF_REG_4, 4),
4687 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4688 BPF_FUNC_skb_load_bytes),
4689 BPF_MOV64_IMM(BPF_REG_0, 0),
4693 .errstr = "helper access to the packet",
4694 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4697 "helper access to packet: test13, cls helper ok",
4699 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4700 offsetof(struct __sk_buff, data)),
4701 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4702 offsetof(struct __sk_buff, data_end)),
4703 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4704 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4705 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4706 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4707 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4708 BPF_MOV64_IMM(BPF_REG_2, 4),
4709 BPF_MOV64_IMM(BPF_REG_3, 0),
4710 BPF_MOV64_IMM(BPF_REG_4, 0),
4711 BPF_MOV64_IMM(BPF_REG_5, 0),
4712 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4713 BPF_FUNC_csum_diff),
4714 BPF_MOV64_IMM(BPF_REG_0, 0),
4718 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4721 "helper access to packet: test14, cls helper ok sub",
4723 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4724 offsetof(struct __sk_buff, data)),
4725 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4726 offsetof(struct __sk_buff, data_end)),
4727 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4728 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4729 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4730 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4731 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4732 BPF_MOV64_IMM(BPF_REG_2, 4),
4733 BPF_MOV64_IMM(BPF_REG_3, 0),
4734 BPF_MOV64_IMM(BPF_REG_4, 0),
4735 BPF_MOV64_IMM(BPF_REG_5, 0),
4736 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4737 BPF_FUNC_csum_diff),
4738 BPF_MOV64_IMM(BPF_REG_0, 0),
4742 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4745 "helper access to packet: test15, cls helper fail sub",
4747 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4748 offsetof(struct __sk_buff, data)),
4749 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4750 offsetof(struct __sk_buff, data_end)),
4751 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4752 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4754 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4755 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4756 BPF_MOV64_IMM(BPF_REG_2, 4),
4757 BPF_MOV64_IMM(BPF_REG_3, 0),
4758 BPF_MOV64_IMM(BPF_REG_4, 0),
4759 BPF_MOV64_IMM(BPF_REG_5, 0),
4760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4761 BPF_FUNC_csum_diff),
4762 BPF_MOV64_IMM(BPF_REG_0, 0),
4766 .errstr = "invalid access to packet",
4767 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4770 "helper access to packet: test16, cls helper fail range 1",
4772 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4773 offsetof(struct __sk_buff, data)),
4774 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4775 offsetof(struct __sk_buff, data_end)),
4776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4777 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4779 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4780 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4781 BPF_MOV64_IMM(BPF_REG_2, 8),
4782 BPF_MOV64_IMM(BPF_REG_3, 0),
4783 BPF_MOV64_IMM(BPF_REG_4, 0),
4784 BPF_MOV64_IMM(BPF_REG_5, 0),
4785 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4786 BPF_FUNC_csum_diff),
4787 BPF_MOV64_IMM(BPF_REG_0, 0),
4791 .errstr = "invalid access to packet",
4792 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4795 "helper access to packet: test17, cls helper fail range 2",
4797 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4798 offsetof(struct __sk_buff, data)),
4799 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4800 offsetof(struct __sk_buff, data_end)),
4801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4802 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4803 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4804 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4805 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4806 BPF_MOV64_IMM(BPF_REG_2, -9),
4807 BPF_MOV64_IMM(BPF_REG_3, 0),
4808 BPF_MOV64_IMM(BPF_REG_4, 0),
4809 BPF_MOV64_IMM(BPF_REG_5, 0),
4810 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4811 BPF_FUNC_csum_diff),
4812 BPF_MOV64_IMM(BPF_REG_0, 0),
4816 .errstr = "R2 min value is negative",
4817 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4820 "helper access to packet: test18, cls helper fail range 3",
4822 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4823 offsetof(struct __sk_buff, data)),
4824 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4825 offsetof(struct __sk_buff, data_end)),
4826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4827 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4829 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4830 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4831 BPF_MOV64_IMM(BPF_REG_2, ~0),
4832 BPF_MOV64_IMM(BPF_REG_3, 0),
4833 BPF_MOV64_IMM(BPF_REG_4, 0),
4834 BPF_MOV64_IMM(BPF_REG_5, 0),
4835 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4836 BPF_FUNC_csum_diff),
4837 BPF_MOV64_IMM(BPF_REG_0, 0),
4841 .errstr = "R2 min value is negative",
4842 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4845 "helper access to packet: test19, cls helper range zero",
4847 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4848 offsetof(struct __sk_buff, data)),
4849 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4850 offsetof(struct __sk_buff, data_end)),
4851 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4852 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4854 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4855 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4856 BPF_MOV64_IMM(BPF_REG_2, 0),
4857 BPF_MOV64_IMM(BPF_REG_3, 0),
4858 BPF_MOV64_IMM(BPF_REG_4, 0),
4859 BPF_MOV64_IMM(BPF_REG_5, 0),
4860 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4861 BPF_FUNC_csum_diff),
4862 BPF_MOV64_IMM(BPF_REG_0, 0),
4866 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4869 "helper access to packet: test20, pkt end as input",
4871 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4872 offsetof(struct __sk_buff, data)),
4873 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4874 offsetof(struct __sk_buff, data_end)),
4875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4876 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4878 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4879 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4880 BPF_MOV64_IMM(BPF_REG_2, 4),
4881 BPF_MOV64_IMM(BPF_REG_3, 0),
4882 BPF_MOV64_IMM(BPF_REG_4, 0),
4883 BPF_MOV64_IMM(BPF_REG_5, 0),
4884 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4885 BPF_FUNC_csum_diff),
4886 BPF_MOV64_IMM(BPF_REG_0, 0),
4890 .errstr = "R1 type=pkt_end expected=fp",
4891 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4894 "helper access to packet: test21, wrong reg",
4896 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4897 offsetof(struct __sk_buff, data)),
4898 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4899 offsetof(struct __sk_buff, data_end)),
4900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4901 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4902 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4903 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4904 BPF_MOV64_IMM(BPF_REG_2, 4),
4905 BPF_MOV64_IMM(BPF_REG_3, 0),
4906 BPF_MOV64_IMM(BPF_REG_4, 0),
4907 BPF_MOV64_IMM(BPF_REG_5, 0),
4908 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4909 BPF_FUNC_csum_diff),
4910 BPF_MOV64_IMM(BPF_REG_0, 0),
4914 .errstr = "invalid access to packet",
4915 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4918 "prevent map lookup in sockmap",
4920 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4921 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4923 BPF_LD_MAP_FD(BPF_REG_1, 0),
4924 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4925 BPF_FUNC_map_lookup_elem),
4928 .fixup_map_sockmap = { 3 },
4930 .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
4931 .prog_type = BPF_PROG_TYPE_SOCK_OPS,
4934 "prevent map lookup in sockhash",
4936 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4937 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4938 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4939 BPF_LD_MAP_FD(BPF_REG_1, 0),
4940 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4941 BPF_FUNC_map_lookup_elem),
4944 .fixup_map_sockhash = { 3 },
4946 .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
4947 .prog_type = BPF_PROG_TYPE_SOCK_OPS,
4950 "prevent map lookup in xskmap",
4952 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4953 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4954 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4955 BPF_LD_MAP_FD(BPF_REG_1, 0),
4956 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4957 BPF_FUNC_map_lookup_elem),
4960 .fixup_map_xskmap = { 3 },
4962 .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
4963 .prog_type = BPF_PROG_TYPE_XDP,
4966 "prevent map lookup in stack trace",
4968 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4969 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4971 BPF_LD_MAP_FD(BPF_REG_1, 0),
4972 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4973 BPF_FUNC_map_lookup_elem),
4976 .fixup_map_stacktrace = { 3 },
4978 .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
4979 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
4982 "prevent map lookup in prog array",
4984 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4985 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4987 BPF_LD_MAP_FD(BPF_REG_1, 0),
4988 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4989 BPF_FUNC_map_lookup_elem),
4992 .fixup_prog2 = { 3 },
4994 .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
/*
 * Valid map-value accesses into a 48-byte array value (fixup_map_hash_48b).
 * All use the same prologue: zero a stack key at fp-8, load the map fd
 * (patched at insn 3), call bpf_map_lookup_elem, and NULL-check R0 before
 * dereferencing.  Unprivileged loads are rejected ("R0 leaks addr") because
 * the map-value pointer would be exposed.
 */
/* Offset is a compile-time constant (offsetof into the value). */
4997 "valid map access into an array with a constant",
4999 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5000 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5001 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5002 BPF_LD_MAP_FD(BPF_REG_1, 0),
5003 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5004 BPF_FUNC_map_lookup_elem),
5005 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5006 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5007 offsetof(struct test_val, foo)),
5010 .fixup_map_hash_48b = { 3 },
5011 .errstr_unpriv = "R0 leaks addr",
5012 .result_unpriv = REJECT,
/* Offset comes from a register holding a known constant (4 << 2 = 16). */
5016 "valid map access into an array with a register",
5018 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5019 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5020 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5021 BPF_LD_MAP_FD(BPF_REG_1, 0),
5022 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5023 BPF_FUNC_map_lookup_elem),
5024 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5025 BPF_MOV64_IMM(BPF_REG_1, 4),
5026 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5027 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5028 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5029 offsetof(struct test_val, foo)),
5032 .fixup_map_hash_48b = { 3 },
5033 .errstr_unpriv = "R0 leaks addr",
5034 .result_unpriv = REJECT,
5036 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Index loaded from the value itself, bounded above via JGE. */
5039 "valid map access into an array with a variable",
5041 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5042 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5043 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5044 BPF_LD_MAP_FD(BPF_REG_1, 0),
5045 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5046 BPF_FUNC_map_lookup_elem),
5047 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5048 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5049 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
5050 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5051 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5052 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5053 offsetof(struct test_val, foo)),
5056 .fixup_map_hash_48b = { 3 },
5057 .errstr_unpriv = "R0 leaks addr",
5058 .result_unpriv = REJECT,
5060 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * Signed bounds: JSGT against 0xffffffff (-1 as s32) floors the index at 0,
 * then a signed compare against MAX_ENTRIES caps it, so the access is safe.
 */
5063 "valid map access into an array with a signed variable",
5065 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5066 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5067 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5068 BPF_LD_MAP_FD(BPF_REG_1, 0),
5069 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5070 BPF_FUNC_map_lookup_elem),
5071 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
5072 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5073 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
5074 BPF_MOV32_IMM(BPF_REG_1, 0),
5075 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
5076 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
5077 BPF_MOV32_IMM(BPF_REG_1, 0),
5078 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
5079 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5080 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5081 offsetof(struct test_val, foo)),
5084 .fixup_map_hash_48b = { 3 },
5085 .errstr_unpriv = "R0 leaks addr",
5086 .result_unpriv = REJECT,
5088 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * Invalid map-value accesses into the 48-byte array value: each variant
 * removes or botches one of the bounds checks the "valid" tests above rely
 * on, and expects the verifier to reject with the quoted error.
 */
/* Constant offset one element past the end: off=48 into value_size=48. */
5091 "invalid map access into an array with a constant",
5093 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5094 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5095 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5096 BPF_LD_MAP_FD(BPF_REG_1, 0),
5097 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5098 BPF_FUNC_map_lookup_elem),
5099 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5100 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
5101 offsetof(struct test_val, foo)),
5104 .fixup_map_hash_48b = { 3 },
5105 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
/* Register holds a constant index one past the end. */
5109 "invalid map access into an array with a register",
5111 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5112 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5113 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5114 BPF_LD_MAP_FD(BPF_REG_1, 0),
5115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5116 BPF_FUNC_map_lookup_elem),
5117 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5118 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
5119 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5120 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5121 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5122 offsetof(struct test_val, foo)),
5125 .fixup_map_hash_48b = { 3 },
5126 .errstr = "R0 min value is outside of the array range",
5128 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Index loaded from the value with no bounds check at all. */
5131 "invalid map access into an array with a variable",
5133 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5134 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5135 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5136 BPF_LD_MAP_FD(BPF_REG_1, 0),
5137 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5138 BPF_FUNC_map_lookup_elem),
5139 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5140 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5141 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5142 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5143 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5144 offsetof(struct test_val, foo)),
5147 .fixup_map_hash_48b = { 3 },
5148 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
5150 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * 64-bit load plus a signed upper-bound check only: a negative index
 * passes JSGT, so the lower bound is unchecked.
 */
5153 "invalid map access into an array with no floor check",
5155 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5156 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5157 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5158 BPF_LD_MAP_FD(BPF_REG_1, 0),
5159 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5160 BPF_FUNC_map_lookup_elem),
5161 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5162 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5163 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
5164 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
5165 BPF_MOV32_IMM(BPF_REG_1, 0),
5166 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
5167 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5168 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5169 offsetof(struct test_val, foo)),
5172 .fixup_map_hash_48b = { 3 },
5173 .errstr_unpriv = "R0 leaks addr",
5174 .errstr = "R0 unbounded memory access",
5175 .result_unpriv = REJECT,
5177 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Upper bound off by one (JGT with MAX_ENTRIES + 1): off=44 + size=8 > 48. */
5180 "invalid map access into an array with a invalid max check",
5182 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5183 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5185 BPF_LD_MAP_FD(BPF_REG_1, 0),
5186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5187 BPF_FUNC_map_lookup_elem),
5188 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5189 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5190 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
5191 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
5192 BPF_MOV32_IMM(BPF_REG_1, 0),
5193 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
5194 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5195 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5196 offsetof(struct test_val, foo)),
5199 .fixup_map_hash_48b = { 3 },
5200 .errstr_unpriv = "R0 leaks addr",
5201 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
5202 .result_unpriv = REJECT,
5204 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * Two lookups (fds patched at insns 3 and 11); adding one map-value
 * pointer to another must be rejected as pointer += pointer.
 */
5207 "invalid map access into an array with a invalid max check",
5209 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5210 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5211 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5212 BPF_LD_MAP_FD(BPF_REG_1, 0),
5213 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5214 BPF_FUNC_map_lookup_elem),
5215 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5216 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5217 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5218 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5220 BPF_LD_MAP_FD(BPF_REG_1, 0),
5221 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5222 BPF_FUNC_map_lookup_elem),
5223 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5224 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
5225 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
5226 offsetof(struct test_val, foo)),
5229 .fixup_map_hash_48b = { 3, 11 },
5230 .errstr = "R0 pointer += pointer",
5232 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * CGROUP_SKB context-access tests: which __sk_buff fields may be read or
 * written from a BPF_PROG_TYPE_CGROUP_SKB program.  Tests #1-#4 read the
 * permitted fields; the remaining entries assert that disallowed fields
 * (tc_classid, data_meta, flow_keys, napi_id writes) are rejected with
 * "invalid bpf_context access".
 */
/*
 * Test #1: data/data_end/len/pkt_type/mark/queue_mapping/protocol/
 * vlan_present reads plus a mark write, then a bounded packet-byte load.
 * Unprivileged is rejected at off=76 (the data field).
 */
5235 "direct packet read test#1 for CGROUP_SKB",
5237 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5238 offsetof(struct __sk_buff, data)),
5239 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5240 offsetof(struct __sk_buff, data_end)),
5241 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5242 offsetof(struct __sk_buff, len)),
5243 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5244 offsetof(struct __sk_buff, pkt_type)),
5245 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5246 offsetof(struct __sk_buff, mark)),
5247 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5248 offsetof(struct __sk_buff, mark)),
5249 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5250 offsetof(struct __sk_buff, queue_mapping)),
5251 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5252 offsetof(struct __sk_buff, protocol)),
5253 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5254 offsetof(struct __sk_buff, vlan_present)),
5255 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5257 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5258 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5259 BPF_MOV64_IMM(BPF_REG_0, 0),
5263 .result_unpriv = REJECT,
5264 .errstr_unpriv = "invalid bpf_context access off=76 size=4",
5265 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* Test #2: vlan_tci/vlan_proto/priority (read+write)/tc_index/hash. */
5268 "direct packet read test#2 for CGROUP_SKB",
5270 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5271 offsetof(struct __sk_buff, vlan_tci)),
5272 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5273 offsetof(struct __sk_buff, vlan_proto)),
5274 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5275 offsetof(struct __sk_buff, priority)),
5276 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5277 offsetof(struct __sk_buff, priority)),
5278 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5279 offsetof(struct __sk_buff,
5281 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5282 offsetof(struct __sk_buff, tc_index)),
5283 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5284 offsetof(struct __sk_buff, hash)),
5285 BPF_MOV64_IMM(BPF_REG_0, 0),
5289 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* Test #3: cb[0..4] reads and writes, plus a napi_id read. */
5292 "direct packet read test#3 for CGROUP_SKB",
5294 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5295 offsetof(struct __sk_buff, cb[0])),
5296 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5297 offsetof(struct __sk_buff, cb[1])),
5298 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5299 offsetof(struct __sk_buff, cb[2])),
5300 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5301 offsetof(struct __sk_buff, cb[3])),
5302 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5303 offsetof(struct __sk_buff, cb[4])),
5304 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5305 offsetof(struct __sk_buff, napi_id)),
5306 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
5307 offsetof(struct __sk_buff, cb[0])),
5308 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
5309 offsetof(struct __sk_buff, cb[1])),
5310 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5311 offsetof(struct __sk_buff, cb[2])),
5312 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
5313 offsetof(struct __sk_buff, cb[3])),
5314 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
5315 offsetof(struct __sk_buff, cb[4])),
5316 BPF_MOV64_IMM(BPF_REG_0, 0),
5320 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* Test #4: socket-identity fields (family, addresses, ports). */
5323 "direct packet read test#4 for CGROUP_SKB",
5325 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5326 offsetof(struct __sk_buff, family)),
5327 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5328 offsetof(struct __sk_buff, remote_ip4)),
5329 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5330 offsetof(struct __sk_buff, local_ip4)),
5331 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5332 offsetof(struct __sk_buff, remote_ip6[0])),
5333 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5334 offsetof(struct __sk_buff, remote_ip6[1])),
5335 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5336 offsetof(struct __sk_buff, remote_ip6[2])),
5337 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5338 offsetof(struct __sk_buff, remote_ip6[3])),
5339 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5340 offsetof(struct __sk_buff, local_ip6[0])),
5341 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5342 offsetof(struct __sk_buff, local_ip6[1])),
5343 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5344 offsetof(struct __sk_buff, local_ip6[2])),
5345 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5346 offsetof(struct __sk_buff, local_ip6[3])),
5347 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5348 offsetof(struct __sk_buff, remote_port)),
5349 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5350 offsetof(struct __sk_buff, local_port)),
5351 BPF_MOV64_IMM(BPF_REG_0, 0),
5355 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* Disallowed field reads/writes for CGROUP_SKB below. */
5358 "invalid access of tc_classid for CGROUP_SKB",
5360 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5361 offsetof(struct __sk_buff, tc_classid)),
5362 BPF_MOV64_IMM(BPF_REG_0, 0),
5366 .errstr = "invalid bpf_context access",
5367 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5370 "invalid access of data_meta for CGROUP_SKB",
5372 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5373 offsetof(struct __sk_buff, data_meta)),
5374 BPF_MOV64_IMM(BPF_REG_0, 0),
5378 .errstr = "invalid bpf_context access",
5379 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5382 "invalid access of flow_keys for CGROUP_SKB",
5384 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5385 offsetof(struct __sk_buff, flow_keys)),
5386 BPF_MOV64_IMM(BPF_REG_0, 0),
5390 .errstr = "invalid bpf_context access",
5391 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* napi_id is readable (see test#3) but not writable. */
5394 "invalid write access to napi_id for CGROUP_SKB",
5396 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5397 offsetof(struct __sk_buff, napi_id)),
5398 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
5399 offsetof(struct __sk_buff, napi_id)),
5400 BPF_MOV64_IMM(BPF_REG_0, 0),
5404 .errstr = "invalid bpf_context access",
5405 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/*
 * bpf_get_local_storage() tests against BPF_MAP_TYPE_CGROUP_STORAGE.
 * Common shape: R2 = flags, R1 = map fd (patched at insn 1), call the
 * helper, then access the returned value.
 */
/* Flags = 0, in-bounds 4-byte read: accepted. */
5408 "valid cgroup storage access",
5410 BPF_MOV64_IMM(BPF_REG_2, 0),
5411 BPF_LD_MAP_FD(BPF_REG_1, 0),
5412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5413 BPF_FUNC_get_local_storage),
5414 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5415 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5416 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5419 .fixup_cgroup_storage = { 1 },
5421 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* Wrong map type (hash, map_type 1) passed to the helper. */
5424 "invalid cgroup storage access 1",
5426 BPF_MOV64_IMM(BPF_REG_2, 0),
5427 BPF_LD_MAP_FD(BPF_REG_1, 0),
5428 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5429 BPF_FUNC_get_local_storage),
5430 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5431 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5432 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5435 .fixup_map_hash_8b = { 1 },
5437 .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5438 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* fd 1 is not a map fd at all (no fixup patches it). */
5441 "invalid cgroup storage access 2",
5443 BPF_MOV64_IMM(BPF_REG_2, 0),
5444 BPF_LD_MAP_FD(BPF_REG_1, 1),
5445 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5446 BPF_FUNC_get_local_storage),
5447 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5451 .errstr = "fd 1 is not pointing to valid bpf_map",
5452 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* Read past the 64-byte storage value (off=256). */
5455 "invalid cgroup storage access 3",
5457 BPF_MOV64_IMM(BPF_REG_2, 0),
5458 BPF_LD_MAP_FD(BPF_REG_1, 0),
5459 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5460 BPF_FUNC_get_local_storage),
5461 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5463 BPF_MOV64_IMM(BPF_REG_0, 0),
5466 .fixup_cgroup_storage = { 1 },
5468 .errstr = "invalid access to map value, value_size=64 off=256 size=4",
5469 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* Read at a negative offset before the value start. */
5472 "invalid cgroup storage access 4",
5474 BPF_MOV64_IMM(BPF_REG_2, 0),
5475 BPF_LD_MAP_FD(BPF_REG_1, 0),
5476 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5477 BPF_FUNC_get_local_storage),
5478 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5479 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5483 .fixup_cgroup_storage = { 1 },
5485 .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5486 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5487 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Non-zero flags (7) are rejected by the helper's argument checks. */
5490 "invalid cgroup storage access 5",
5492 BPF_MOV64_IMM(BPF_REG_2, 7),
5493 BPF_LD_MAP_FD(BPF_REG_1, 0),
5494 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5495 BPF_FUNC_get_local_storage),
5496 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5497 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5498 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5501 .fixup_cgroup_storage = { 1 },
5503 .errstr = "get_local_storage() doesn't support non-zero flags",
5504 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* Flags taken from the ctx pointer: unknown value, also a leak unpriv. */
5507 "invalid cgroup storage access 6",
5509 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5510 BPF_LD_MAP_FD(BPF_REG_1, 0),
5511 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5512 BPF_FUNC_get_local_storage),
5513 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5514 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5515 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5518 .fixup_cgroup_storage = { 1 },
5520 .errstr = "get_local_storage() doesn't support non-zero flags",
5521 .errstr_unpriv = "R2 leaks addr into helper function",
5522 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/*
 * Per-cpu variants of the cgroup storage tests above, using
 * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE via .fixup_percpu_cgroup_storage.
 */
/* Flags = 0, in-bounds read: accepted. */
5525 "valid per-cpu cgroup storage access",
5527 BPF_MOV64_IMM(BPF_REG_2, 0),
5528 BPF_LD_MAP_FD(BPF_REG_1, 0),
5529 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5530 BPF_FUNC_get_local_storage),
5531 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5532 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5533 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5536 .fixup_percpu_cgroup_storage = { 1 },
5538 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* Wrong map type (hash) for bpf_get_local_storage. */
5541 "invalid per-cpu cgroup storage access 1",
5543 BPF_MOV64_IMM(BPF_REG_2, 0),
5544 BPF_LD_MAP_FD(BPF_REG_1, 0),
5545 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5546 BPF_FUNC_get_local_storage),
5547 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5548 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5549 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5552 .fixup_map_hash_8b = { 1 },
5554 .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5555 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* fd 1 is not a map fd (no fixup). */
5558 "invalid per-cpu cgroup storage access 2",
5560 BPF_MOV64_IMM(BPF_REG_2, 0),
5561 BPF_LD_MAP_FD(BPF_REG_1, 1),
5562 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5563 BPF_FUNC_get_local_storage),
5564 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5568 .errstr = "fd 1 is not pointing to valid bpf_map",
5569 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* Out-of-bounds read past the 64-byte value. */
5572 "invalid per-cpu cgroup storage access 3",
5574 BPF_MOV64_IMM(BPF_REG_2, 0),
5575 BPF_LD_MAP_FD(BPF_REG_1, 0),
5576 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5577 BPF_FUNC_get_local_storage),
5578 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5580 BPF_MOV64_IMM(BPF_REG_0, 0),
5583 .fixup_percpu_cgroup_storage = { 1 },
5585 .errstr = "invalid access to map value, value_size=64 off=256 size=4",
5586 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/*
 * Negative-offset read from the per-cpu local-storage value must be
 * rejected.  Fixed to use the per-cpu cgroup storage fixup, matching the
 * test name and the sibling per-cpu tests; the original used
 * .fixup_cgroup_storage, a copy-paste from the non-per-cpu variant.  Both
 * storage maps have value_size=64, so the expected error string and the
 * REJECT outcome are unchanged.
 */
5589 "invalid per-cpu cgroup storage access 4",
5591 BPF_MOV64_IMM(BPF_REG_2, 0),
5592 BPF_LD_MAP_FD(BPF_REG_1, 0),
5593 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5594 BPF_FUNC_get_local_storage),
5595 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5596 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5600 .fixup_percpu_cgroup_storage = { 1 },
5602 .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5603 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5604 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Non-zero flags (7) rejected by the helper's argument checks. */
5607 "invalid per-cpu cgroup storage access 5",
5609 BPF_MOV64_IMM(BPF_REG_2, 7),
5610 BPF_LD_MAP_FD(BPF_REG_1, 0),
5611 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5612 BPF_FUNC_get_local_storage),
5613 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5614 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5615 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5618 .fixup_percpu_cgroup_storage = { 1 },
5620 .errstr = "get_local_storage() doesn't support non-zero flags",
5621 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/* Flags taken from the ctx pointer: unknown value, also a leak unpriv. */
5624 "invalid per-cpu cgroup storage access 6",
5626 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5627 BPF_LD_MAP_FD(BPF_REG_1, 0),
5628 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5629 BPF_FUNC_get_local_storage),
5630 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5631 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5632 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5635 .fixup_percpu_cgroup_storage = { 1 },
5637 .errstr = "get_local_storage() doesn't support non-zero flags",
5638 .errstr_unpriv = "R2 leaks addr into helper function",
5639 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/*
 * __sk_buff.tstamp for CGROUP_SKB: writable and readable when privileged;
 * the 8-byte write at off=152 is rejected for unprivileged loaders.
 */
5642 "write tstamp from CGROUP_SKB",
5644 BPF_MOV64_IMM(BPF_REG_0, 0),
5645 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5646 offsetof(struct __sk_buff, tstamp)),
5647 BPF_MOV64_IMM(BPF_REG_0, 0),
5651 .result_unpriv = REJECT,
5652 .errstr_unpriv = "invalid bpf_context access off=152 size=8",
5653 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5656 "read tstamp from CGROUP_SKB",
5658 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5659 offsetof(struct __sk_buff, tstamp)),
5660 BPF_MOV64_IMM(BPF_REG_0, 0),
5664 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
/*
 * Tracking of bpf_map_lookup_elem's PTR_TO_MAP_VALUE_OR_NULL result: a
 * NULL check on one register must mark aliases non-NULL too, but any ALU
 * on the still-maybe-NULL pointer destroys the type and must be rejected.
 */
/* R4 aliases R0; the JEQ on R0 proves R4 non-NULL, so the store is fine. */
5667 "multiple registers share map_lookup_elem result",
5669 BPF_MOV64_IMM(BPF_REG_1, 10),
5670 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5671 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5673 BPF_LD_MAP_FD(BPF_REG_1, 0),
5674 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5675 BPF_FUNC_map_lookup_elem),
5676 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5677 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5678 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5681 .fixup_map_hash_8b = { 4 },
5683 .prog_type = BPF_PROG_TYPE_SCHED_CLS
/* ADD on the maybe-NULL copy (even net-zero -2 then +2): rejected. */
5686 "alu ops on ptr_to_map_value_or_null, 1",
5688 BPF_MOV64_IMM(BPF_REG_1, 10),
5689 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5690 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5691 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5692 BPF_LD_MAP_FD(BPF_REG_1, 0),
5693 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5694 BPF_FUNC_map_lookup_elem),
5695 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5698 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5699 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5702 .fixup_map_hash_8b = { 4 },
5703 .errstr = "R4 pointer arithmetic on map_value_or_null",
5705 .prog_type = BPF_PROG_TYPE_SCHED_CLS
/* AND with -1 (a no-op value-wise) on the maybe-NULL copy: rejected. */
5708 "alu ops on ptr_to_map_value_or_null, 2",
5710 BPF_MOV64_IMM(BPF_REG_1, 10),
5711 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5712 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5714 BPF_LD_MAP_FD(BPF_REG_1, 0),
5715 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5716 BPF_FUNC_map_lookup_elem),
5717 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5718 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5719 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5720 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5723 .fixup_map_hash_8b = { 4 },
5724 .errstr = "R4 pointer arithmetic on map_value_or_null",
5726 .prog_type = BPF_PROG_TYPE_SCHED_CLS
/* LSH on the maybe-NULL copy: rejected. */
5729 "alu ops on ptr_to_map_value_or_null, 3",
5731 BPF_MOV64_IMM(BPF_REG_1, 10),
5732 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5733 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5735 BPF_LD_MAP_FD(BPF_REG_1, 0),
5736 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5737 BPF_FUNC_map_lookup_elem),
5738 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5739 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5740 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5741 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5744 .fixup_map_hash_8b = { 4 },
5745 .errstr = "R4 pointer arithmetic on map_value_or_null",
5747 .prog_type = BPF_PROG_TYPE_SCHED_CLS
/*
 * Multiple-lookup tests: a NULL check on the result of the SECOND lookup
 * must not validate a pointer saved from the FIRST lookup.
 */
/* R4 holds lookup #1's result; only lookup #2's R0 is checked: rejected. */
5750 "invalid memory access with multiple map_lookup_elem calls",
5752 BPF_MOV64_IMM(BPF_REG_1, 10),
5753 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5754 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5756 BPF_LD_MAP_FD(BPF_REG_1, 0),
5757 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5758 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5759 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5760 BPF_FUNC_map_lookup_elem),
5761 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5762 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5763 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5765 BPF_FUNC_map_lookup_elem),
5766 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5767 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5770 .fixup_map_hash_8b = { 4 },
5772 .errstr = "R4 !read_ok",
5773 .prog_type = BPF_PROG_TYPE_SCHED_CLS
/* Second lookup sits in a (statically taken) branch; R4 is checked: ok. */
5776 "valid indirect map_lookup_elem access with 2nd lookup in branch",
5778 BPF_MOV64_IMM(BPF_REG_1, 10),
5779 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5780 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5781 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5782 BPF_LD_MAP_FD(BPF_REG_1, 0),
5783 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5784 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5785 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5786 BPF_FUNC_map_lookup_elem),
5787 BPF_MOV64_IMM(BPF_REG_2, 10),
5788 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5789 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5790 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5791 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5792 BPF_FUNC_map_lookup_elem),
5793 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5795 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5798 .fixup_map_hash_8b = { 4 },
5800 .prog_type = BPF_PROG_TYPE_SCHED_CLS
/*
 * JGE bounds the fall-through path, but the "else" (taken) path adds 1
 * to an index that may equal MAX_ENTRIES-1 before scaling: unbounded.
 */
5803 "invalid map access from else condition",
5805 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5806 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5807 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5808 BPF_LD_MAP_FD(BPF_REG_1, 0),
5809 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5810 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5811 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5812 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5813 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5814 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5815 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5816 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5819 .fixup_map_hash_48b = { 3 },
5820 .errstr = "R0 unbounded memory access",
5822 .errstr_unpriv = "R0 leaks addr",
5823 .result_unpriv = REJECT,
5824 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/*
 * OR on known constants must produce a known constant usable as a helper
 * size argument.  34|13 = 47 fits the 48-byte stack buffer at fp-48;
 * 34|24 = 58 overflows it and must be rejected.  Both immediate and
 * register-source OR forms are covered.
 */
5827 "constant register |= constant should keep constant type",
5829 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5831 BPF_MOV64_IMM(BPF_REG_2, 34),
5832 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5833 BPF_MOV64_IMM(BPF_REG_3, 0),
5834 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5838 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5841 "constant register |= constant should not bypass stack boundary checks",
5843 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5845 BPF_MOV64_IMM(BPF_REG_2, 34),
5846 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5847 BPF_MOV64_IMM(BPF_REG_3, 0),
5848 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5851 .errstr = "invalid stack type R1 off=-48 access_size=58",
5853 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5856 "constant register |= constant register should keep constant type",
5858 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5860 BPF_MOV64_IMM(BPF_REG_2, 34),
5861 BPF_MOV64_IMM(BPF_REG_4, 13),
5862 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5863 BPF_MOV64_IMM(BPF_REG_3, 0),
5864 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5868 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5871 "constant register |= constant register should not bypass stack boundary checks",
5873 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5875 BPF_MOV64_IMM(BPF_REG_2, 34),
5876 BPF_MOV64_IMM(BPF_REG_4, 24),
5877 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5878 BPF_MOV64_IMM(BPF_REG_3, 0),
5879 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5882 .errstr = "invalid stack type R1 off=-48 access_size=58",
5884 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/*
 * Direct packet access for LWT program types: reads are allowed for
 * LWT_IN/LWT_OUT/LWT_XMIT, but writes only for LWT_XMIT.  All programs
 * use the standard data/data_end bounds-check pattern before the access.
 */
5887 "invalid direct packet write for LWT_IN",
5889 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5890 offsetof(struct __sk_buff, data)),
5891 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5892 offsetof(struct __sk_buff, data_end)),
5893 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5895 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5896 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5897 BPF_MOV64_IMM(BPF_REG_0, 0),
5900 .errstr = "cannot write into packet",
5902 .prog_type = BPF_PROG_TYPE_LWT_IN,
5905 "invalid direct packet write for LWT_OUT",
5907 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5908 offsetof(struct __sk_buff, data)),
5909 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5910 offsetof(struct __sk_buff, data_end)),
5911 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5912 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5913 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5914 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5915 BPF_MOV64_IMM(BPF_REG_0, 0),
5918 .errstr = "cannot write into packet",
5920 .prog_type = BPF_PROG_TYPE_LWT_OUT,
/* Same write, but LWT_XMIT may modify the packet. */
5923 "direct packet write for LWT_XMIT",
5925 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5926 offsetof(struct __sk_buff, data)),
5927 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5928 offsetof(struct __sk_buff, data_end)),
5929 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5931 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5932 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5933 BPF_MOV64_IMM(BPF_REG_0, 0),
5937 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
/* Bounded packet reads are fine for all three LWT types. */
5940 "direct packet read for LWT_IN",
5942 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5943 offsetof(struct __sk_buff, data)),
5944 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5945 offsetof(struct __sk_buff, data_end)),
5946 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5947 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5948 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5949 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5950 BPF_MOV64_IMM(BPF_REG_0, 0),
5954 .prog_type = BPF_PROG_TYPE_LWT_IN,
5957 "direct packet read for LWT_OUT",
5959 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5960 offsetof(struct __sk_buff, data)),
5961 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5962 offsetof(struct __sk_buff, data_end)),
5963 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5964 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5965 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5966 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5967 BPF_MOV64_IMM(BPF_REG_0, 0),
5971 .prog_type = BPF_PROG_TYPE_LWT_OUT,
5974 "direct packet read for LWT_XMIT",
5976 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5977 offsetof(struct __sk_buff, data)),
5978 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5979 offsetof(struct __sk_buff, data_end)),
5980 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5982 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5983 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5984 BPF_MOV64_IMM(BPF_REG_0, 0),
5988 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
/* A stricter check (data+8) validates a looser one (data+6) afterwards. */
5991 "overlapping checks for direct packet access",
5993 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5994 offsetof(struct __sk_buff, data)),
5995 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5996 offsetof(struct __sk_buff, data_end)),
5997 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5999 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
6000 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6001 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
6002 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6003 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
6004 BPF_MOV64_IMM(BPF_REG_0, 0),
6008 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
/* Two bpf_skb_change_head calls (34 then 42 bytes of headroom). */
6011 "make headroom for LWT_XMIT",
6013 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6014 BPF_MOV64_IMM(BPF_REG_2, 34),
6015 BPF_MOV64_IMM(BPF_REG_3, 0),
6016 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
6017 /* split for s390 to succeed */
6018 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
6019 BPF_MOV64_IMM(BPF_REG_2, 42),
6020 BPF_MOV64_IMM(BPF_REG_3, 0),
6021 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
6022 BPF_MOV64_IMM(BPF_REG_0, 0),
6026 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
/* tc_classid is not part of the LWT context for any of the three types. */
6029 "invalid access of tc_classid for LWT_IN",
6031 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6032 offsetof(struct __sk_buff, tc_classid)),
6036 .errstr = "invalid bpf_context access",
6039 "invalid access of tc_classid for LWT_OUT",
6041 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6042 offsetof(struct __sk_buff, tc_classid)),
6046 .errstr = "invalid bpf_context access",
6049 "invalid access of tc_classid for LWT_XMIT",
6051 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6052 offsetof(struct __sk_buff, tc_classid)),
6056 .errstr = "invalid bpf_context access",
/*
 * Pointer-leak tests: storing a map or stack pointer into the skb ctx or
 * a map value.  Privileged loads fail on the XADD-into-ctx restriction;
 * unprivileged loads fail earlier on the address leak itself.
 */
/* XADD a map pointer into cb[0]. */
6059 "leak pointer into ctx 1",
6061 BPF_MOV64_IMM(BPF_REG_0, 0),
6062 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
6063 offsetof(struct __sk_buff, cb[0])),
6064 BPF_LD_MAP_FD(BPF_REG_2, 0),
6065 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
6066 offsetof(struct __sk_buff, cb[0])),
6069 .fixup_map_hash_8b = { 2 },
6070 .errstr_unpriv = "R2 leaks addr into mem",
6071 .result_unpriv = REJECT,
6073 .errstr = "BPF_XADD stores into R1 ctx is not allowed",
/* XADD the frame pointer (R10) into cb[0]. */
6076 "leak pointer into ctx 2",
6078 BPF_MOV64_IMM(BPF_REG_0, 0),
6079 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
6080 offsetof(struct __sk_buff, cb[0])),
6081 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
6082 offsetof(struct __sk_buff, cb[0])),
6085 .errstr_unpriv = "R10 leaks addr into mem",
6086 .result_unpriv = REJECT,
6088 .errstr = "BPF_XADD stores into R1 ctx is not allowed",
/* Plain store of a map pointer into cb[0]: unpriv-only rejection. */
6091 "leak pointer into ctx 3",
6093 BPF_MOV64_IMM(BPF_REG_0, 0),
6094 BPF_LD_MAP_FD(BPF_REG_2, 0),
6095 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
6096 offsetof(struct __sk_buff, cb[0])),
6099 .fixup_map_hash_8b = { 1 },
6100 .errstr_unpriv = "R2 leaks addr into ctx",
6101 .result_unpriv = REJECT,
/* XADD the ctx pointer (saved in R6) into a map value: unpriv-only. */
6105 "leak pointer into map val",
6107 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6108 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6109 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6110 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6111 BPF_LD_MAP_FD(BPF_REG_1, 0),
6112 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6113 BPF_FUNC_map_lookup_elem),
6114 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6115 BPF_MOV64_IMM(BPF_REG_3, 0),
6116 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
6117 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
6118 BPF_MOV64_IMM(BPF_REG_0, 0),
6121 .fixup_map_hash_8b = { 4 },
6122 .errstr_unpriv = "R6 leaks addr into mem",
6123 .result_unpriv = REJECT,
6127 "helper access to map: full range",
6129 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6131 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6132 BPF_LD_MAP_FD(BPF_REG_1, 0),
6133 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6134 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6135 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6136 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6137 BPF_MOV64_IMM(BPF_REG_3, 0),
6138 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6141 .fixup_map_hash_48b = { 3 },
6143 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6146 "helper access to map: partial range",
6148 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6149 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6150 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6151 BPF_LD_MAP_FD(BPF_REG_1, 0),
6152 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6154 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6155 BPF_MOV64_IMM(BPF_REG_2, 8),
6156 BPF_MOV64_IMM(BPF_REG_3, 0),
6157 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6160 .fixup_map_hash_48b = { 3 },
6162 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6165 "helper access to map: empty range",
6167 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6169 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6170 BPF_LD_MAP_FD(BPF_REG_1, 0),
6171 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6172 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6173 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6174 BPF_MOV64_IMM(BPF_REG_2, 0),
6175 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6178 .fixup_map_hash_48b = { 3 },
6179 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
6181 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6184 "helper access to map: out-of-bound range",
6186 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6187 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6188 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6189 BPF_LD_MAP_FD(BPF_REG_1, 0),
6190 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6191 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6192 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6193 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
6194 BPF_MOV64_IMM(BPF_REG_3, 0),
6195 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6198 .fixup_map_hash_48b = { 3 },
6199 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
6201 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6204 "helper access to map: negative range",
6206 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6207 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6208 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6209 BPF_LD_MAP_FD(BPF_REG_1, 0),
6210 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6211 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6212 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6213 BPF_MOV64_IMM(BPF_REG_2, -8),
6214 BPF_MOV64_IMM(BPF_REG_3, 0),
6215 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6218 .fixup_map_hash_48b = { 3 },
6219 .errstr = "R2 min value is negative",
6221 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6224 "helper access to adjusted map (via const imm): full range",
6226 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6228 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6229 BPF_LD_MAP_FD(BPF_REG_1, 0),
6230 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6231 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6232 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6234 offsetof(struct test_val, foo)),
6235 BPF_MOV64_IMM(BPF_REG_2,
6236 sizeof(struct test_val) -
6237 offsetof(struct test_val, foo)),
6238 BPF_MOV64_IMM(BPF_REG_3, 0),
6239 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6242 .fixup_map_hash_48b = { 3 },
6244 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6247 "helper access to adjusted map (via const imm): partial range",
6249 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6250 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6251 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6252 BPF_LD_MAP_FD(BPF_REG_1, 0),
6253 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6254 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6255 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6257 offsetof(struct test_val, foo)),
6258 BPF_MOV64_IMM(BPF_REG_2, 8),
6259 BPF_MOV64_IMM(BPF_REG_3, 0),
6260 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6263 .fixup_map_hash_48b = { 3 },
6265 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6268 "helper access to adjusted map (via const imm): empty range",
6270 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6272 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6273 BPF_LD_MAP_FD(BPF_REG_1, 0),
6274 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6275 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6276 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6277 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6278 offsetof(struct test_val, foo)),
6279 BPF_MOV64_IMM(BPF_REG_2, 0),
6280 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6283 .fixup_map_hash_48b = { 3 },
6284 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
6286 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6289 "helper access to adjusted map (via const imm): out-of-bound range",
6291 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6292 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6293 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6294 BPF_LD_MAP_FD(BPF_REG_1, 0),
6295 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6296 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6297 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6298 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6299 offsetof(struct test_val, foo)),
6300 BPF_MOV64_IMM(BPF_REG_2,
6301 sizeof(struct test_val) -
6302 offsetof(struct test_val, foo) + 8),
6303 BPF_MOV64_IMM(BPF_REG_3, 0),
6304 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6307 .fixup_map_hash_48b = { 3 },
6308 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
6310 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6313 "helper access to adjusted map (via const imm): negative range (> adjustment)",
6315 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6317 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6318 BPF_LD_MAP_FD(BPF_REG_1, 0),
6319 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6320 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6321 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6322 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6323 offsetof(struct test_val, foo)),
6324 BPF_MOV64_IMM(BPF_REG_2, -8),
6325 BPF_MOV64_IMM(BPF_REG_3, 0),
6326 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6329 .fixup_map_hash_48b = { 3 },
6330 .errstr = "R2 min value is negative",
6332 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6335 "helper access to adjusted map (via const imm): negative range (< adjustment)",
6337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6339 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6340 BPF_LD_MAP_FD(BPF_REG_1, 0),
6341 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6342 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6343 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6344 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6345 offsetof(struct test_val, foo)),
6346 BPF_MOV64_IMM(BPF_REG_2, -1),
6347 BPF_MOV64_IMM(BPF_REG_3, 0),
6348 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6351 .fixup_map_hash_48b = { 3 },
6352 .errstr = "R2 min value is negative",
6354 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6357 "helper access to adjusted map (via const reg): full range",
6359 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6361 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6362 BPF_LD_MAP_FD(BPF_REG_1, 0),
6363 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6364 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6365 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6366 BPF_MOV64_IMM(BPF_REG_3,
6367 offsetof(struct test_val, foo)),
6368 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6369 BPF_MOV64_IMM(BPF_REG_2,
6370 sizeof(struct test_val) -
6371 offsetof(struct test_val, foo)),
6372 BPF_MOV64_IMM(BPF_REG_3, 0),
6373 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6376 .fixup_map_hash_48b = { 3 },
6378 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6381 "helper access to adjusted map (via const reg): partial range",
6383 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6384 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6385 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6386 BPF_LD_MAP_FD(BPF_REG_1, 0),
6387 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6388 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6389 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6390 BPF_MOV64_IMM(BPF_REG_3,
6391 offsetof(struct test_val, foo)),
6392 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6393 BPF_MOV64_IMM(BPF_REG_2, 8),
6394 BPF_MOV64_IMM(BPF_REG_3, 0),
6395 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6398 .fixup_map_hash_48b = { 3 },
6400 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6403 "helper access to adjusted map (via const reg): empty range",
6405 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6407 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6408 BPF_LD_MAP_FD(BPF_REG_1, 0),
6409 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6410 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6412 BPF_MOV64_IMM(BPF_REG_3, 0),
6413 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6414 BPF_MOV64_IMM(BPF_REG_2, 0),
6415 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6418 .fixup_map_hash_48b = { 3 },
6419 .errstr = "R1 min value is outside of the array range",
6421 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6424 "helper access to adjusted map (via const reg): out-of-bound range",
6426 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6427 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6428 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6429 BPF_LD_MAP_FD(BPF_REG_1, 0),
6430 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6431 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6432 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6433 BPF_MOV64_IMM(BPF_REG_3,
6434 offsetof(struct test_val, foo)),
6435 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6436 BPF_MOV64_IMM(BPF_REG_2,
6437 sizeof(struct test_val) -
6438 offsetof(struct test_val, foo) + 8),
6439 BPF_MOV64_IMM(BPF_REG_3, 0),
6440 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6443 .fixup_map_hash_48b = { 3 },
6444 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
6446 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6449 "helper access to adjusted map (via const reg): negative range (> adjustment)",
6451 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6453 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6454 BPF_LD_MAP_FD(BPF_REG_1, 0),
6455 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6456 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6457 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6458 BPF_MOV64_IMM(BPF_REG_3,
6459 offsetof(struct test_val, foo)),
6460 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6461 BPF_MOV64_IMM(BPF_REG_2, -8),
6462 BPF_MOV64_IMM(BPF_REG_3, 0),
6463 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6466 .fixup_map_hash_48b = { 3 },
6467 .errstr = "R2 min value is negative",
6469 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6472 "helper access to adjusted map (via const reg): negative range (< adjustment)",
6474 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6476 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6477 BPF_LD_MAP_FD(BPF_REG_1, 0),
6478 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6479 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6480 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6481 BPF_MOV64_IMM(BPF_REG_3,
6482 offsetof(struct test_val, foo)),
6483 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6484 BPF_MOV64_IMM(BPF_REG_2, -1),
6485 BPF_MOV64_IMM(BPF_REG_3, 0),
6486 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6489 .fixup_map_hash_48b = { 3 },
6490 .errstr = "R2 min value is negative",
6492 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6495 "helper access to adjusted map (via variable): full range",
6497 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6498 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6499 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6500 BPF_LD_MAP_FD(BPF_REG_1, 0),
6501 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6502 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6503 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6504 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6505 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6506 offsetof(struct test_val, foo), 4),
6507 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6508 BPF_MOV64_IMM(BPF_REG_2,
6509 sizeof(struct test_val) -
6510 offsetof(struct test_val, foo)),
6511 BPF_MOV64_IMM(BPF_REG_3, 0),
6512 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6515 .fixup_map_hash_48b = { 3 },
6517 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6520 "helper access to adjusted map (via variable): partial range",
6522 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6523 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6524 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6525 BPF_LD_MAP_FD(BPF_REG_1, 0),
6526 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6527 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6528 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6529 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6530 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6531 offsetof(struct test_val, foo), 4),
6532 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6533 BPF_MOV64_IMM(BPF_REG_2, 8),
6534 BPF_MOV64_IMM(BPF_REG_3, 0),
6535 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6538 .fixup_map_hash_48b = { 3 },
6540 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6543 "helper access to adjusted map (via variable): empty range",
6545 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6546 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6547 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6548 BPF_LD_MAP_FD(BPF_REG_1, 0),
6549 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6550 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6551 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6552 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6553 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6554 offsetof(struct test_val, foo), 3),
6555 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6556 BPF_MOV64_IMM(BPF_REG_2, 0),
6557 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6560 .fixup_map_hash_48b = { 3 },
6561 .errstr = "R1 min value is outside of the array range",
6563 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6566 "helper access to adjusted map (via variable): no max check",
6568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6570 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6571 BPF_LD_MAP_FD(BPF_REG_1, 0),
6572 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6573 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6574 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6575 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6576 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6577 BPF_MOV64_IMM(BPF_REG_2, 1),
6578 BPF_MOV64_IMM(BPF_REG_3, 0),
6579 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6582 .fixup_map_hash_48b = { 3 },
6583 .errstr = "R1 unbounded memory access",
6585 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6588 "helper access to adjusted map (via variable): wrong max check",
6590 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6591 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6592 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6593 BPF_LD_MAP_FD(BPF_REG_1, 0),
6594 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6595 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6596 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6597 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6598 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6599 offsetof(struct test_val, foo), 4),
6600 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6601 BPF_MOV64_IMM(BPF_REG_2,
6602 sizeof(struct test_val) -
6603 offsetof(struct test_val, foo) + 1),
6604 BPF_MOV64_IMM(BPF_REG_3, 0),
6605 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6608 .fixup_map_hash_48b = { 3 },
6609 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
6611 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6614 "helper access to map: bounds check using <, good access",
6616 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6617 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6618 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6619 BPF_LD_MAP_FD(BPF_REG_1, 0),
6620 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6622 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6623 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6624 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
6625 BPF_MOV64_IMM(BPF_REG_0, 0),
6627 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6628 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6629 BPF_MOV64_IMM(BPF_REG_0, 0),
6632 .fixup_map_hash_48b = { 3 },
6634 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6637 "helper access to map: bounds check using <, bad access",
6639 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6641 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6642 BPF_LD_MAP_FD(BPF_REG_1, 0),
6643 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6644 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6645 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6646 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6647 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
6648 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6649 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6650 BPF_MOV64_IMM(BPF_REG_0, 0),
6652 BPF_MOV64_IMM(BPF_REG_0, 0),
6655 .fixup_map_hash_48b = { 3 },
6657 .errstr = "R1 unbounded memory access",
6658 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6661 "helper access to map: bounds check using <=, good access",
6663 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6665 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6666 BPF_LD_MAP_FD(BPF_REG_1, 0),
6667 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6669 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6670 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6671 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6672 BPF_MOV64_IMM(BPF_REG_0, 0),
6674 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6675 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6676 BPF_MOV64_IMM(BPF_REG_0, 0),
6679 .fixup_map_hash_48b = { 3 },
6681 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6684 "helper access to map: bounds check using <=, bad access",
6686 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6688 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6689 BPF_LD_MAP_FD(BPF_REG_1, 0),
6690 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6691 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6692 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6693 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6694 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6695 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6696 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6697 BPF_MOV64_IMM(BPF_REG_0, 0),
6699 BPF_MOV64_IMM(BPF_REG_0, 0),
6702 .fixup_map_hash_48b = { 3 },
6704 .errstr = "R1 unbounded memory access",
6705 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6708 "helper access to map: bounds check using s<, good access",
6710 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6712 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6713 BPF_LD_MAP_FD(BPF_REG_1, 0),
6714 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6715 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6716 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6717 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6718 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6719 BPF_MOV64_IMM(BPF_REG_0, 0),
6721 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6722 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6723 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6724 BPF_MOV64_IMM(BPF_REG_0, 0),
6727 .fixup_map_hash_48b = { 3 },
6729 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6732 "helper access to map: bounds check using s<, good access 2",
6734 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6736 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6737 BPF_LD_MAP_FD(BPF_REG_1, 0),
6738 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6739 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6740 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6741 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6742 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6743 BPF_MOV64_IMM(BPF_REG_0, 0),
6745 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6746 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6747 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6748 BPF_MOV64_IMM(BPF_REG_0, 0),
6751 .fixup_map_hash_48b = { 3 },
6753 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6756 "helper access to map: bounds check using s<, bad access",
6758 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6760 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6761 BPF_LD_MAP_FD(BPF_REG_1, 0),
6762 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6763 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6765 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6766 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6767 BPF_MOV64_IMM(BPF_REG_0, 0),
6769 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6770 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6771 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6772 BPF_MOV64_IMM(BPF_REG_0, 0),
6775 .fixup_map_hash_48b = { 3 },
6777 .errstr = "R1 min value is negative",
6778 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6781 "helper access to map: bounds check using s<=, good access",
6783 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6784 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6785 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6786 BPF_LD_MAP_FD(BPF_REG_1, 0),
6787 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6788 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6789 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6790 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6791 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6792 BPF_MOV64_IMM(BPF_REG_0, 0),
6794 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6795 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6796 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6797 BPF_MOV64_IMM(BPF_REG_0, 0),
6800 .fixup_map_hash_48b = { 3 },
6802 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6805 "helper access to map: bounds check using s<=, good access 2",
6807 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6808 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6809 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6810 BPF_LD_MAP_FD(BPF_REG_1, 0),
6811 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6812 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6813 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6814 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6815 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6816 BPF_MOV64_IMM(BPF_REG_0, 0),
6818 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6819 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6820 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6821 BPF_MOV64_IMM(BPF_REG_0, 0),
6824 .fixup_map_hash_48b = { 3 },
6826 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6829 "helper access to map: bounds check using s<=, bad access",
6831 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6832 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6833 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6834 BPF_LD_MAP_FD(BPF_REG_1, 0),
6835 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6836 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6837 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6838 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6839 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6840 BPF_MOV64_IMM(BPF_REG_0, 0),
6842 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6843 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6844 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6845 BPF_MOV64_IMM(BPF_REG_0, 0),
6848 .fixup_map_hash_48b = { 3 },
6850 .errstr = "R1 min value is negative",
6851 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6854 "map access: known scalar += value_ptr from different maps",
6856 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6857 offsetof(struct __sk_buff, len)),
6858 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6859 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6861 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
6862 BPF_LD_MAP_FD(BPF_REG_1, 0),
6863 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
6864 BPF_LD_MAP_FD(BPF_REG_1, 0),
6865 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6866 BPF_FUNC_map_lookup_elem),
6867 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6868 BPF_MOV64_IMM(BPF_REG_1, 4),
6869 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6870 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6871 BPF_MOV64_IMM(BPF_REG_0, 1),
6874 .fixup_map_hash_16b = { 5 },
6875 .fixup_map_array_48b = { 8 },
6877 .result_unpriv = REJECT,
6878 .errstr_unpriv = "R1 tried to add from different maps",
6882 "map access: value_ptr -= known scalar from different maps",
6884 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6885 offsetof(struct __sk_buff, len)),
6886 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6887 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6889 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
6890 BPF_LD_MAP_FD(BPF_REG_1, 0),
6891 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
6892 BPF_LD_MAP_FD(BPF_REG_1, 0),
6893 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6894 BPF_FUNC_map_lookup_elem),
6895 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6896 BPF_MOV64_IMM(BPF_REG_1, 4),
6897 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6898 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6899 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6900 BPF_MOV64_IMM(BPF_REG_0, 1),
6903 .fixup_map_hash_16b = { 5 },
6904 .fixup_map_array_48b = { 8 },
6906 .result_unpriv = REJECT,
6907 .errstr_unpriv = "R0 min value is outside of the array range",
6911 "map access: known scalar += value_ptr from different maps, but same value properties",
6913 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6914 offsetof(struct __sk_buff, len)),
6915 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6916 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
6919 BPF_LD_MAP_FD(BPF_REG_1, 0),
6920 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
6921 BPF_LD_MAP_FD(BPF_REG_1, 0),
6922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6923 BPF_FUNC_map_lookup_elem),
6924 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6925 BPF_MOV64_IMM(BPF_REG_1, 4),
6926 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6927 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6928 BPF_MOV64_IMM(BPF_REG_0, 1),
6931 .fixup_map_hash_48b = { 5 },
6932 .fixup_map_array_48b = { 8 },
6937 "map access: mixing value pointer and scalar, 1",
6939 // load map value pointer into r0 and r2
6940 BPF_MOV64_IMM(BPF_REG_0, 1),
6941 BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
6942 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
6943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
6944 BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
6945 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6946 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6948 // load some number from the map into r1
6949 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6950 // depending on r1, branch:
6951 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
6953 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6954 BPF_MOV64_IMM(BPF_REG_3, 0),
6957 BPF_MOV64_IMM(BPF_REG_2, 0),
6958 BPF_MOV64_IMM(BPF_REG_3, 0x100000),
6959 // common instruction
6960 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6961 // depending on r1, branch:
6962 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
6966 BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
6967 // verifier follows fall-through
6968 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
6969 BPF_MOV64_IMM(BPF_REG_0, 0),
6971 // fake-dead code; targeted from branch A to
6972 // prevent dead code sanitization
6973 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6974 BPF_MOV64_IMM(BPF_REG_0, 0),
6977 .fixup_map_array_48b = { 1 },
6979 .result_unpriv = REJECT,
6980 .errstr_unpriv = "R2 tried to add from different pointers or scalars",
6984 "map access: mixing value pointer and scalar, 2",
6986 // load map value pointer into r0 and r2
6987 BPF_MOV64_IMM(BPF_REG_0, 1),
6988 BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
6989 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
6990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
6991 BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
6992 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6993 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6995 // load some number from the map into r1
6996 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6997 // depending on r1, branch:
6998 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
7000 BPF_MOV64_IMM(BPF_REG_2, 0),
7001 BPF_MOV64_IMM(BPF_REG_3, 0x100000),
7004 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7005 BPF_MOV64_IMM(BPF_REG_3, 0),
7006 // common instruction
7007 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7008 // depending on r1, branch:
7009 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
7013 BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
7014 // verifier follows fall-through
7015 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
7016 BPF_MOV64_IMM(BPF_REG_0, 0),
7018 // fake-dead code; targeted from branch A to
7019 // prevent dead code sanitization
7020 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7021 BPF_MOV64_IMM(BPF_REG_0, 0),
7024 .fixup_map_array_48b = { 1 },
7026 .result_unpriv = REJECT,
7027 .errstr_unpriv = "R2 tried to add from different maps or paths",
7031 "sanitation: alu with different scalars",
7033 BPF_MOV64_IMM(BPF_REG_0, 1),
7034 BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
7035 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
7036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
7037 BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
7038 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7039 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7041 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7042 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
7043 BPF_MOV64_IMM(BPF_REG_2, 0),
7044 BPF_MOV64_IMM(BPF_REG_3, 0x100000),
7046 BPF_MOV64_IMM(BPF_REG_2, 42),
7047 BPF_MOV64_IMM(BPF_REG_3, 0x100001),
7048 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7049 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7052 .fixup_map_array_48b = { 1 },
7057 "map access: value_ptr += known scalar, upper oob arith, test 1",
7059 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7060 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7061 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7062 BPF_LD_MAP_FD(BPF_REG_1, 0),
7063 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7064 BPF_FUNC_map_lookup_elem),
7065 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7066 BPF_MOV64_IMM(BPF_REG_1, 48),
7067 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7068 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7069 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7070 BPF_MOV64_IMM(BPF_REG_0, 1),
7073 .fixup_map_array_48b = { 3 },
7075 .result_unpriv = REJECT,
7076 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7080 "map access: value_ptr += known scalar, upper oob arith, test 2",
7082 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7083 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7084 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7085 BPF_LD_MAP_FD(BPF_REG_1, 0),
7086 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7087 BPF_FUNC_map_lookup_elem),
7088 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7089 BPF_MOV64_IMM(BPF_REG_1, 49),
7090 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7091 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7092 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7093 BPF_MOV64_IMM(BPF_REG_0, 1),
7096 .fixup_map_array_48b = { 3 },
7098 .result_unpriv = REJECT,
7099 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7103 "map access: value_ptr += known scalar, upper oob arith, test 3",
7105 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7106 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7108 BPF_LD_MAP_FD(BPF_REG_1, 0),
7109 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7110 BPF_FUNC_map_lookup_elem),
7111 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7112 BPF_MOV64_IMM(BPF_REG_1, 47),
7113 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7114 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7115 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7116 BPF_MOV64_IMM(BPF_REG_0, 1),
7119 .fixup_map_array_48b = { 3 },
7121 .result_unpriv = REJECT,
7122 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7126 "map access: value_ptr -= known scalar, lower oob arith, test 1",
7128 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7129 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7131 BPF_LD_MAP_FD(BPF_REG_1, 0),
7132 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7133 BPF_FUNC_map_lookup_elem),
7134 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7135 BPF_MOV64_IMM(BPF_REG_1, 47),
7136 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7137 BPF_MOV64_IMM(BPF_REG_1, 48),
7138 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7139 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7140 BPF_MOV64_IMM(BPF_REG_0, 1),
7143 .fixup_map_array_48b = { 3 },
7145 .errstr = "R0 min value is outside of the array range",
7146 .result_unpriv = REJECT,
7147 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7150 "map access: value_ptr -= known scalar, lower oob arith, test 2",
7152 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7153 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7155 BPF_LD_MAP_FD(BPF_REG_1, 0),
7156 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7157 BPF_FUNC_map_lookup_elem),
7158 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7159 BPF_MOV64_IMM(BPF_REG_1, 47),
7160 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7161 BPF_MOV64_IMM(BPF_REG_1, 48),
7162 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7163 BPF_MOV64_IMM(BPF_REG_1, 1),
7164 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7165 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7166 BPF_MOV64_IMM(BPF_REG_0, 1),
7169 .fixup_map_array_48b = { 3 },
7171 .result_unpriv = REJECT,
7172 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7176 "map access: value_ptr -= known scalar, lower oob arith, test 3",
7178 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7179 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7181 BPF_LD_MAP_FD(BPF_REG_1, 0),
7182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7183 BPF_FUNC_map_lookup_elem),
7184 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7185 BPF_MOV64_IMM(BPF_REG_1, 47),
7186 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7187 BPF_MOV64_IMM(BPF_REG_1, 47),
7188 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7189 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7190 BPF_MOV64_IMM(BPF_REG_0, 1),
7193 .fixup_map_array_48b = { 3 },
7195 .result_unpriv = REJECT,
7196 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7200 "map access: known scalar += value_ptr",
7202 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7203 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7205 BPF_LD_MAP_FD(BPF_REG_1, 0),
7206 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7207 BPF_FUNC_map_lookup_elem),
7208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7209 BPF_MOV64_IMM(BPF_REG_1, 4),
7210 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7211 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
7212 BPF_MOV64_IMM(BPF_REG_0, 1),
7215 .fixup_map_array_48b = { 3 },
7220 "map access: value_ptr += known scalar, 1",
7222 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7223 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7224 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7225 BPF_LD_MAP_FD(BPF_REG_1, 0),
7226 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7227 BPF_FUNC_map_lookup_elem),
7228 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7229 BPF_MOV64_IMM(BPF_REG_1, 4),
7230 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7231 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7232 BPF_MOV64_IMM(BPF_REG_0, 1),
7235 .fixup_map_array_48b = { 3 },
7240 "map access: value_ptr += known scalar, 2",
7242 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7243 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7245 BPF_LD_MAP_FD(BPF_REG_1, 0),
7246 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7247 BPF_FUNC_map_lookup_elem),
7248 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7249 BPF_MOV64_IMM(BPF_REG_1, 49),
7250 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7251 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7252 BPF_MOV64_IMM(BPF_REG_0, 1),
7255 .fixup_map_array_48b = { 3 },
7257 .errstr = "invalid access to map value",
7260 "map access: value_ptr += known scalar, 3",
7262 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7263 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7265 BPF_LD_MAP_FD(BPF_REG_1, 0),
7266 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7267 BPF_FUNC_map_lookup_elem),
7268 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7269 BPF_MOV64_IMM(BPF_REG_1, -1),
7270 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7271 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7272 BPF_MOV64_IMM(BPF_REG_0, 1),
7275 .fixup_map_array_48b = { 3 },
7277 .errstr = "invalid access to map value",
7280 "map access: value_ptr += known scalar, 4",
7282 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7283 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7284 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7285 BPF_LD_MAP_FD(BPF_REG_1, 0),
7286 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7287 BPF_FUNC_map_lookup_elem),
7288 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7289 BPF_MOV64_IMM(BPF_REG_1, 5),
7290 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7291 BPF_MOV64_IMM(BPF_REG_1, -2),
7292 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7293 BPF_MOV64_IMM(BPF_REG_1, -1),
7294 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7295 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7296 BPF_MOV64_IMM(BPF_REG_0, 1),
7299 .fixup_map_array_48b = { 3 },
7301 .result_unpriv = REJECT,
7302 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7306 "map access: value_ptr += known scalar, 5",
7308 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7309 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7311 BPF_LD_MAP_FD(BPF_REG_1, 0),
7312 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7313 BPF_FUNC_map_lookup_elem),
7314 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7315 BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
7316 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7317 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7320 .fixup_map_array_48b = { 3 },
7322 .retval = 0xabcdef12,
7325 "map access: value_ptr += known scalar, 6",
7327 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7328 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7329 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7330 BPF_LD_MAP_FD(BPF_REG_1, 0),
7331 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7332 BPF_FUNC_map_lookup_elem),
7333 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7334 BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
7335 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7336 BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
7337 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7338 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
7341 .fixup_map_array_48b = { 3 },
7343 .retval = 0xabcdef12,
7346 "map access: unknown scalar += value_ptr, 1",
7348 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7349 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7351 BPF_LD_MAP_FD(BPF_REG_1, 0),
7352 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7353 BPF_FUNC_map_lookup_elem),
7354 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7355 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7356 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7357 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7358 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
7359 BPF_MOV64_IMM(BPF_REG_0, 1),
7362 .fixup_map_array_48b = { 3 },
7367 "map access: unknown scalar += value_ptr, 2",
7369 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7370 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7371 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7372 BPF_LD_MAP_FD(BPF_REG_1, 0),
7373 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7374 BPF_FUNC_map_lookup_elem),
7375 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7376 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7377 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
7378 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7379 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7382 .fixup_map_array_48b = { 3 },
7384 .retval = 0xabcdef12,
7387 "map access: unknown scalar += value_ptr, 3",
7389 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7390 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7391 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7392 BPF_LD_MAP_FD(BPF_REG_1, 0),
7393 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7394 BPF_FUNC_map_lookup_elem),
7395 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7396 BPF_MOV64_IMM(BPF_REG_1, -1),
7397 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7398 BPF_MOV64_IMM(BPF_REG_1, 1),
7399 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7400 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7401 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
7402 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7403 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7406 .fixup_map_array_48b = { 3 },
7408 .result_unpriv = REJECT,
7409 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7410 .retval = 0xabcdef12,
7413 "map access: unknown scalar += value_ptr, 4",
7415 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7416 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7418 BPF_LD_MAP_FD(BPF_REG_1, 0),
7419 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7420 BPF_FUNC_map_lookup_elem),
7421 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7422 BPF_MOV64_IMM(BPF_REG_1, 19),
7423 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7424 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7425 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
7426 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7427 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7430 .fixup_map_array_48b = { 3 },
7432 .errstr = "R1 max value is outside of the array range",
7433 .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
7436 "map access: value_ptr += unknown scalar, 1",
7438 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7439 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7441 BPF_LD_MAP_FD(BPF_REG_1, 0),
7442 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7443 BPF_FUNC_map_lookup_elem),
7444 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7445 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7446 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7447 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7448 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7449 BPF_MOV64_IMM(BPF_REG_0, 1),
7452 .fixup_map_array_48b = { 3 },
7457 "map access: value_ptr += unknown scalar, 2",
7459 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7460 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7462 BPF_LD_MAP_FD(BPF_REG_1, 0),
7463 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7464 BPF_FUNC_map_lookup_elem),
7465 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7466 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7467 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
7468 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7469 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
7472 .fixup_map_array_48b = { 3 },
7474 .retval = 0xabcdef12,
7477 "map access: value_ptr += unknown scalar, 3",
7479 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7480 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7481 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7482 BPF_LD_MAP_FD(BPF_REG_1, 0),
7483 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7484 BPF_FUNC_map_lookup_elem),
7485 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7486 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7487 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
7488 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
7489 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7490 BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
7491 BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
7492 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
7493 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7494 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7495 BPF_MOV64_IMM(BPF_REG_0, 1),
7497 BPF_MOV64_IMM(BPF_REG_0, 2),
7498 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
7500 .fixup_map_array_48b = { 3 },
7505 "map access: value_ptr += value_ptr",
7507 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7508 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7510 BPF_LD_MAP_FD(BPF_REG_1, 0),
7511 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7512 BPF_FUNC_map_lookup_elem),
7513 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7514 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
7515 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7516 BPF_MOV64_IMM(BPF_REG_0, 1),
7519 .fixup_map_array_48b = { 3 },
7521 .errstr = "R0 pointer += pointer prohibited",
7524 "map access: known scalar -= value_ptr",
7526 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7527 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7528 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7529 BPF_LD_MAP_FD(BPF_REG_1, 0),
7530 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7531 BPF_FUNC_map_lookup_elem),
7532 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7533 BPF_MOV64_IMM(BPF_REG_1, 4),
7534 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
7535 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
7536 BPF_MOV64_IMM(BPF_REG_0, 1),
7539 .fixup_map_array_48b = { 3 },
7541 .errstr = "R1 tried to subtract pointer from scalar",
7544 "map access: value_ptr -= known scalar",
7546 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7547 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7549 BPF_LD_MAP_FD(BPF_REG_1, 0),
7550 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7551 BPF_FUNC_map_lookup_elem),
7552 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7553 BPF_MOV64_IMM(BPF_REG_1, 4),
7554 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7555 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7556 BPF_MOV64_IMM(BPF_REG_0, 1),
7559 .fixup_map_array_48b = { 3 },
7561 .errstr = "R0 min value is outside of the array range",
7564 "map access: value_ptr -= known scalar, 2",
7566 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7567 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7569 BPF_LD_MAP_FD(BPF_REG_1, 0),
7570 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7571 BPF_FUNC_map_lookup_elem),
7572 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7573 BPF_MOV64_IMM(BPF_REG_1, 6),
7574 BPF_MOV64_IMM(BPF_REG_2, 4),
7575 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7576 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
7577 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7578 BPF_MOV64_IMM(BPF_REG_0, 1),
7581 .fixup_map_array_48b = { 3 },
7583 .result_unpriv = REJECT,
7584 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7588 "map access: unknown scalar -= value_ptr",
7590 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7591 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7593 BPF_LD_MAP_FD(BPF_REG_1, 0),
7594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7595 BPF_FUNC_map_lookup_elem),
7596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7597 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7598 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7599 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
7600 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
7601 BPF_MOV64_IMM(BPF_REG_0, 1),
7604 .fixup_map_array_48b = { 3 },
7606 .errstr = "R1 tried to subtract pointer from scalar",
7609 "map access: value_ptr -= unknown scalar",
7611 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7612 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7614 BPF_LD_MAP_FD(BPF_REG_1, 0),
7615 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7616 BPF_FUNC_map_lookup_elem),
7617 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7618 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7619 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7620 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7621 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7622 BPF_MOV64_IMM(BPF_REG_0, 1),
7625 .fixup_map_array_48b = { 3 },
7627 .errstr = "R0 min value is negative",
7630 "map access: value_ptr -= unknown scalar, 2",
7632 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7633 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7634 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7635 BPF_LD_MAP_FD(BPF_REG_1, 0),
7636 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7637 BPF_FUNC_map_lookup_elem),
7638 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7639 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7640 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7641 BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
7642 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7643 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7644 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
7645 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7646 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7647 BPF_MOV64_IMM(BPF_REG_0, 1),
7650 .fixup_map_array_48b = { 3 },
7652 .result_unpriv = REJECT,
7653 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7657 "map access: value_ptr -= value_ptr",
7659 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7660 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7662 BPF_LD_MAP_FD(BPF_REG_1, 0),
7663 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7664 BPF_FUNC_map_lookup_elem),
7665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7666 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
7667 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7668 BPF_MOV64_IMM(BPF_REG_0, 1),
7671 .fixup_map_array_48b = { 3 },
7673 .errstr = "R0 invalid mem access 'inv'",
7674 .errstr_unpriv = "R0 pointer -= pointer prohibited",
7677 "map lookup helper access to map",
7679 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7681 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7682 BPF_LD_MAP_FD(BPF_REG_1, 0),
7683 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7684 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7685 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7686 BPF_LD_MAP_FD(BPF_REG_1, 0),
7687 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7690 .fixup_map_hash_16b = { 3, 8 },
7692 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7695 "map update helper access to map",
7697 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7698 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7699 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7700 BPF_LD_MAP_FD(BPF_REG_1, 0),
7701 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7702 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7703 BPF_MOV64_IMM(BPF_REG_4, 0),
7704 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
7705 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7706 BPF_LD_MAP_FD(BPF_REG_1, 0),
7707 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
7710 .fixup_map_hash_16b = { 3, 10 },
7712 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7715 "map update helper access to map: wrong size",
7717 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7719 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7720 BPF_LD_MAP_FD(BPF_REG_1, 0),
7721 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7722 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7723 BPF_MOV64_IMM(BPF_REG_4, 0),
7724 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
7725 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7726 BPF_LD_MAP_FD(BPF_REG_1, 0),
7727 BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
7730 .fixup_map_hash_8b = { 3 },
7731 .fixup_map_hash_16b = { 10 },
7733 .errstr = "invalid access to map value, value_size=8 off=0 size=16",
7734 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7737 "map helper access to adjusted map (via const imm)",
7739 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7741 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7742 BPF_LD_MAP_FD(BPF_REG_1, 0),
7743 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7744 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7745 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
7747 offsetof(struct other_val, bar)),
7748 BPF_LD_MAP_FD(BPF_REG_1, 0),
7749 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7752 .fixup_map_hash_16b = { 3, 9 },
7754 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7757 "map helper access to adjusted map (via const imm): out-of-bound 1",
7759 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7761 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7762 BPF_LD_MAP_FD(BPF_REG_1, 0),
7763 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7764 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7765 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7766 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
7767 sizeof(struct other_val) - 4),
7768 BPF_LD_MAP_FD(BPF_REG_1, 0),
7769 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7772 .fixup_map_hash_16b = { 3, 9 },
7774 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
7775 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7778 "map helper access to adjusted map (via const imm): out-of-bound 2",
7780 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7781 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7782 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7783 BPF_LD_MAP_FD(BPF_REG_1, 0),
7784 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7786 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7788 BPF_LD_MAP_FD(BPF_REG_1, 0),
7789 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7792 .fixup_map_hash_16b = { 3, 9 },
7794 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
7795 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7798 "map helper access to adjusted map (via const reg)",
7800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7802 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7803 BPF_LD_MAP_FD(BPF_REG_1, 0),
7804 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7806 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7807 BPF_MOV64_IMM(BPF_REG_3,
7808 offsetof(struct other_val, bar)),
7809 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7810 BPF_LD_MAP_FD(BPF_REG_1, 0),
7811 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7814 .fixup_map_hash_16b = { 3, 10 },
7816 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7819 "map helper access to adjusted map (via const reg): out-of-bound 1",
7821 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7822 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7823 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7824 BPF_LD_MAP_FD(BPF_REG_1, 0),
7825 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7826 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7827 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7828 BPF_MOV64_IMM(BPF_REG_3,
7829 sizeof(struct other_val) - 4),
7830 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7831 BPF_LD_MAP_FD(BPF_REG_1, 0),
7832 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7835 .fixup_map_hash_16b = { 3, 10 },
7837 .errstr = "invalid access to map value, value_size=16 off=12 size=8",
7838 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7841 "map helper access to adjusted map (via const reg): out-of-bound 2",
7843 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7845 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7846 BPF_LD_MAP_FD(BPF_REG_1, 0),
7847 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7848 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7849 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7850 BPF_MOV64_IMM(BPF_REG_3, -4),
7851 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7852 BPF_LD_MAP_FD(BPF_REG_1, 0),
7853 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7856 .fixup_map_hash_16b = { 3, 10 },
7858 .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
7859 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7862 "map helper access to adjusted map (via variable)",
7864 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7866 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7867 BPF_LD_MAP_FD(BPF_REG_1, 0),
7868 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7869 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7870 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7871 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7872 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7873 offsetof(struct other_val, bar), 4),
7874 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7875 BPF_LD_MAP_FD(BPF_REG_1, 0),
7876 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7879 .fixup_map_hash_16b = { 3, 11 },
7881 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7884 "map helper access to adjusted map (via variable): no max check",
7886 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7888 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7889 BPF_LD_MAP_FD(BPF_REG_1, 0),
7890 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7891 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7892 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7893 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7894 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7895 BPF_LD_MAP_FD(BPF_REG_1, 0),
7896 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7899 .fixup_map_hash_16b = { 3, 10 },
7901 .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
7902 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7905 "map helper access to adjusted map (via variable): wrong max check",
7907 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7909 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7910 BPF_LD_MAP_FD(BPF_REG_1, 0),
7911 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7912 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7913 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7914 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7915 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7916 offsetof(struct other_val, bar) + 1, 4),
7917 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7918 BPF_LD_MAP_FD(BPF_REG_1, 0),
7919 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7922 .fixup_map_hash_16b = { 3, 11 },
7924 .errstr = "invalid access to map value, value_size=16 off=9 size=8",
7925 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
7928 "map element value is preserved across register spilling",
7930 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7931 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7932 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7933 BPF_LD_MAP_FD(BPF_REG_1, 0),
7934 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7935 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7936 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7937 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7938 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7939 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7940 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7941 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7944 .fixup_map_hash_48b = { 3 },
7945 .errstr_unpriv = "R0 leaks addr",
7947 .result_unpriv = REJECT,
7950 "map element value or null is marked on register spilling",
7952 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7954 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7955 BPF_LD_MAP_FD(BPF_REG_1, 0),
7956 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7957 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
7959 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7960 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7961 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7962 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7965 .fixup_map_hash_48b = { 3 },
7966 .errstr_unpriv = "R0 leaks addr",
7968 .result_unpriv = REJECT,
7971 "map element value store of cleared call register",
7973 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7975 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7976 BPF_LD_MAP_FD(BPF_REG_1, 0),
7977 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7978 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
7979 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
7982 .fixup_map_hash_48b = { 3 },
7983 .errstr_unpriv = "R1 !read_ok",
7984 .errstr = "R1 !read_ok",
7986 .result_unpriv = REJECT,
7989 "map element value with unaligned store",
7991 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7993 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7994 BPF_LD_MAP_FD(BPF_REG_1, 0),
7995 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7996 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
7997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7998 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7999 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
8000 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
8001 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
8002 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
8003 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
8004 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
8005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
8006 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
8007 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
8008 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
8009 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
8010 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
8011 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
8012 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
8013 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
8016 .fixup_map_hash_48b = { 3 },
8017 .errstr_unpriv = "R0 leaks addr",
8019 .result_unpriv = REJECT,
8020 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8023 "map element value with unaligned load",
8025 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8026 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8027 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8028 BPF_LD_MAP_FD(BPF_REG_1, 0),
8029 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8030 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
8031 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
8032 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
8033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
8034 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
8035 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
8036 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
8037 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
8038 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
8039 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
8040 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
8041 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
8044 .fixup_map_hash_48b = { 3 },
8045 .errstr_unpriv = "R0 leaks addr",
8047 .result_unpriv = REJECT,
8048 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8051 "map element value illegal alu op, 1",
8053 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8055 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8056 BPF_LD_MAP_FD(BPF_REG_1, 0),
8057 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8058 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8059 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
8060 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
8063 .fixup_map_hash_48b = { 3 },
8064 .errstr = "R0 bitwise operator &= on pointer",
8068 "map element value illegal alu op, 2",
8070 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8071 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8072 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8073 BPF_LD_MAP_FD(BPF_REG_1, 0),
8074 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8075 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8076 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
8077 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
8080 .fixup_map_hash_48b = { 3 },
8081 .errstr = "R0 32-bit pointer arithmetic prohibited",
8085 "map element value illegal alu op, 3",
8087 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8089 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8090 BPF_LD_MAP_FD(BPF_REG_1, 0),
8091 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8092 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8093 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
8094 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
8097 .fixup_map_hash_48b = { 3 },
8098 .errstr = "R0 pointer arithmetic with /= operator",
8102 "map element value illegal alu op, 4",
8104 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8106 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8107 BPF_LD_MAP_FD(BPF_REG_1, 0),
8108 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8109 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8110 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
8111 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
8114 .fixup_map_hash_48b = { 3 },
8115 .errstr_unpriv = "R0 pointer arithmetic prohibited",
8116 .errstr = "invalid mem access 'inv'",
8118 .result_unpriv = REJECT,
8119 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8122 "map element value illegal alu op, 5",
8124 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8126 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8127 BPF_LD_MAP_FD(BPF_REG_1, 0),
8128 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8129 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8130 BPF_MOV64_IMM(BPF_REG_3, 4096),
8131 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8133 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8134 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
8135 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
8136 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
8139 .fixup_map_hash_48b = { 3 },
8140 .errstr = "R0 invalid mem access 'inv'",
8142 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8145 "map element value is preserved across register spilling",
8147 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8149 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8150 BPF_LD_MAP_FD(BPF_REG_1, 0),
8151 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
8154 offsetof(struct test_val, foo)),
8155 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
8156 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8157 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
8158 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
8159 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
8160 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
8163 .fixup_map_hash_48b = { 3 },
8164 .errstr_unpriv = "R0 leaks addr",
8166 .result_unpriv = REJECT,
8167 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8170 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
8172 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8174 BPF_MOV64_IMM(BPF_REG_0, 0),
8175 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
8176 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
8177 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
8178 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
8179 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
8180 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
8181 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
8182 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
8183 BPF_MOV64_IMM(BPF_REG_2, 16),
8184 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8185 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8186 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
8187 BPF_MOV64_IMM(BPF_REG_4, 0),
8188 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
8189 BPF_MOV64_IMM(BPF_REG_3, 0),
8190 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8191 BPF_MOV64_IMM(BPF_REG_0, 0),
8195 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8198 "helper access to variable memory: stack, bitwise AND, zero included",
8200 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8201 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8202 BPF_MOV64_IMM(BPF_REG_2, 16),
8203 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8204 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8205 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
8206 BPF_MOV64_IMM(BPF_REG_3, 0),
8207 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8210 .errstr = "invalid indirect read from stack off -64+0 size 64",
8212 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8215 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
8217 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8219 BPF_MOV64_IMM(BPF_REG_2, 16),
8220 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8221 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8222 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
8223 BPF_MOV64_IMM(BPF_REG_4, 0),
8224 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
8225 BPF_MOV64_IMM(BPF_REG_3, 0),
8226 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8227 BPF_MOV64_IMM(BPF_REG_0, 0),
8230 .errstr = "invalid stack type R1 off=-64 access_size=65",
8232 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8235 "helper access to variable memory: stack, JMP, correct bounds",
8237 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8239 BPF_MOV64_IMM(BPF_REG_0, 0),
8240 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
8241 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
8242 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
8243 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
8244 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
8245 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
8246 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
8247 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
8248 BPF_MOV64_IMM(BPF_REG_2, 16),
8249 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8250 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8251 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
8252 BPF_MOV64_IMM(BPF_REG_4, 0),
8253 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
8254 BPF_MOV64_IMM(BPF_REG_3, 0),
8255 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8256 BPF_MOV64_IMM(BPF_REG_0, 0),
8260 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8263 "helper access to variable memory: stack, JMP (signed), correct bounds",
8265 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8267 BPF_MOV64_IMM(BPF_REG_0, 0),
8268 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
8269 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
8270 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
8271 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
8272 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
8273 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
8274 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
8275 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
8276 BPF_MOV64_IMM(BPF_REG_2, 16),
8277 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8278 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8279 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
8280 BPF_MOV64_IMM(BPF_REG_4, 0),
8281 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
8282 BPF_MOV64_IMM(BPF_REG_3, 0),
8283 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8284 BPF_MOV64_IMM(BPF_REG_0, 0),
8288 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8291 "helper access to variable memory: stack, JMP, bounds + offset",
8293 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8295 BPF_MOV64_IMM(BPF_REG_2, 16),
8296 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8297 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8298 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
8299 BPF_MOV64_IMM(BPF_REG_4, 0),
8300 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
8301 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
8302 BPF_MOV64_IMM(BPF_REG_3, 0),
8303 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8304 BPF_MOV64_IMM(BPF_REG_0, 0),
8307 .errstr = "invalid stack type R1 off=-64 access_size=65",
8309 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8312 "helper access to variable memory: stack, JMP, wrong max",
8314 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8316 BPF_MOV64_IMM(BPF_REG_2, 16),
8317 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8318 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8319 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
8320 BPF_MOV64_IMM(BPF_REG_4, 0),
8321 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
8322 BPF_MOV64_IMM(BPF_REG_3, 0),
8323 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8324 BPF_MOV64_IMM(BPF_REG_0, 0),
8327 .errstr = "invalid stack type R1 off=-64 access_size=65",
8329 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8332 "helper access to variable memory: stack, JMP, no max check",
8334 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8335 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8336 BPF_MOV64_IMM(BPF_REG_2, 16),
8337 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8338 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8339 BPF_MOV64_IMM(BPF_REG_4, 0),
8340 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
8341 BPF_MOV64_IMM(BPF_REG_3, 0),
8342 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8343 BPF_MOV64_IMM(BPF_REG_0, 0),
8346 /* because max wasn't checked, signed min is negative */
8347 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
8349 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8352 "helper access to variable memory: stack, JMP, no min check",
8354 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8356 BPF_MOV64_IMM(BPF_REG_2, 16),
8357 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8358 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8359 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
8360 BPF_MOV64_IMM(BPF_REG_3, 0),
8361 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8362 BPF_MOV64_IMM(BPF_REG_0, 0),
8365 .errstr = "invalid indirect read from stack off -64+0 size 64",
8367 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8370 "helper access to variable memory: stack, JMP (signed), no min check",
8372 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8374 BPF_MOV64_IMM(BPF_REG_2, 16),
8375 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8376 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8377 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
8378 BPF_MOV64_IMM(BPF_REG_3, 0),
8379 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8380 BPF_MOV64_IMM(BPF_REG_0, 0),
8383 .errstr = "R2 min value is negative",
8385 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8388 "helper access to variable memory: map, JMP, correct bounds",
8390 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8391 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8392 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8393 BPF_LD_MAP_FD(BPF_REG_1, 0),
8394 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8395 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8396 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8397 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
8398 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8399 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8400 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
8401 sizeof(struct test_val), 4),
8402 BPF_MOV64_IMM(BPF_REG_4, 0),
8403 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
8404 BPF_MOV64_IMM(BPF_REG_3, 0),
8405 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8406 BPF_MOV64_IMM(BPF_REG_0, 0),
8409 .fixup_map_hash_48b = { 3 },
8411 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8414 "helper access to variable memory: map, JMP, wrong max",
8416 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8418 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8419 BPF_LD_MAP_FD(BPF_REG_1, 0),
8420 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8421 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8422 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8423 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
8424 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8425 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8426 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
8427 sizeof(struct test_val) + 1, 4),
8428 BPF_MOV64_IMM(BPF_REG_4, 0),
8429 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
8430 BPF_MOV64_IMM(BPF_REG_3, 0),
8431 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8432 BPF_MOV64_IMM(BPF_REG_0, 0),
8435 .fixup_map_hash_48b = { 3 },
8436 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
8438 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8441 "helper access to variable memory: map adjusted, JMP, correct bounds",
8443 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8444 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8445 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8446 BPF_LD_MAP_FD(BPF_REG_1, 0),
8447 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8448 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
8449 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
8451 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
8452 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8453 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8454 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
8455 sizeof(struct test_val) - 20, 4),
8456 BPF_MOV64_IMM(BPF_REG_4, 0),
8457 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
8458 BPF_MOV64_IMM(BPF_REG_3, 0),
8459 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8460 BPF_MOV64_IMM(BPF_REG_0, 0),
8463 .fixup_map_hash_48b = { 3 },
8465 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8468 "helper access to variable memory: map adjusted, JMP, wrong max",
8470 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8471 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8472 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8473 BPF_LD_MAP_FD(BPF_REG_1, 0),
8474 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8475 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
8476 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
8478 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
8479 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8480 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8481 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
8482 sizeof(struct test_val) - 19, 4),
8483 BPF_MOV64_IMM(BPF_REG_4, 0),
8484 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
8485 BPF_MOV64_IMM(BPF_REG_3, 0),
8486 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8487 BPF_MOV64_IMM(BPF_REG_0, 0),
8490 .fixup_map_hash_48b = { 3 },
8491 .errstr = "R1 min value is outside of the array range",
8493 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8496 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
8498 BPF_MOV64_IMM(BPF_REG_1, 0),
8499 BPF_MOV64_IMM(BPF_REG_2, 0),
8500 BPF_MOV64_IMM(BPF_REG_3, 0),
8501 BPF_MOV64_IMM(BPF_REG_4, 0),
8502 BPF_MOV64_IMM(BPF_REG_5, 0),
8503 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8507 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8510 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
8512 BPF_MOV64_IMM(BPF_REG_1, 0),
8513 BPF_MOV64_IMM(BPF_REG_2, 1),
8514 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8515 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8516 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
8517 BPF_MOV64_IMM(BPF_REG_3, 0),
8518 BPF_MOV64_IMM(BPF_REG_4, 0),
8519 BPF_MOV64_IMM(BPF_REG_5, 0),
8520 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8523 .errstr = "R1 type=inv expected=fp",
8525 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8528 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
8530 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8532 BPF_MOV64_IMM(BPF_REG_2, 0),
8533 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
8534 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
8535 BPF_MOV64_IMM(BPF_REG_3, 0),
8536 BPF_MOV64_IMM(BPF_REG_4, 0),
8537 BPF_MOV64_IMM(BPF_REG_5, 0),
8538 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8542 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8545 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
8547 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8548 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8550 BPF_LD_MAP_FD(BPF_REG_1, 0),
8551 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8552 BPF_FUNC_map_lookup_elem),
8553 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8554 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8555 BPF_MOV64_IMM(BPF_REG_2, 0),
8556 BPF_MOV64_IMM(BPF_REG_3, 0),
8557 BPF_MOV64_IMM(BPF_REG_4, 0),
8558 BPF_MOV64_IMM(BPF_REG_5, 0),
8559 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8562 .fixup_map_hash_8b = { 3 },
8564 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8567 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
8569 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8570 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8571 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8572 BPF_LD_MAP_FD(BPF_REG_1, 0),
8573 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8574 BPF_FUNC_map_lookup_elem),
8575 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8576 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8577 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
8578 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8580 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
8581 BPF_MOV64_IMM(BPF_REG_3, 0),
8582 BPF_MOV64_IMM(BPF_REG_4, 0),
8583 BPF_MOV64_IMM(BPF_REG_5, 0),
8584 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8587 .fixup_map_hash_8b = { 3 },
8589 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8592 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
8594 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8595 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8597 BPF_LD_MAP_FD(BPF_REG_1, 0),
8598 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8599 BPF_FUNC_map_lookup_elem),
8600 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8601 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8602 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8603 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
8604 BPF_MOV64_IMM(BPF_REG_3, 0),
8605 BPF_MOV64_IMM(BPF_REG_4, 0),
8606 BPF_MOV64_IMM(BPF_REG_5, 0),
8607 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8610 .fixup_map_hash_8b = { 3 },
8612 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8615 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
8617 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8618 offsetof(struct __sk_buff, data)),
8619 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8620 offsetof(struct __sk_buff, data_end)),
8621 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
8622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8623 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
8624 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
8625 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
8626 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
8627 BPF_MOV64_IMM(BPF_REG_3, 0),
8628 BPF_MOV64_IMM(BPF_REG_4, 0),
8629 BPF_MOV64_IMM(BPF_REG_5, 0),
8630 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8634 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8635 .retval = 0 /* csum_diff of 64-byte packet */,
8636 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8639 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
8641 BPF_MOV64_IMM(BPF_REG_1, 0),
8642 BPF_MOV64_IMM(BPF_REG_2, 0),
8643 BPF_MOV64_IMM(BPF_REG_3, 0),
8644 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8647 .errstr = "R1 type=inv expected=fp",
8649 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8652 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
8654 BPF_MOV64_IMM(BPF_REG_1, 0),
8655 BPF_MOV64_IMM(BPF_REG_2, 1),
8656 BPF_MOV64_IMM(BPF_REG_3, 0),
8657 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8660 .errstr = "R1 type=inv expected=fp",
8662 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8665 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
8667 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8668 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8669 BPF_MOV64_IMM(BPF_REG_2, 0),
8670 BPF_MOV64_IMM(BPF_REG_3, 0),
8671 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8675 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8678 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
8680 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8681 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8683 BPF_LD_MAP_FD(BPF_REG_1, 0),
8684 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8685 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8686 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8687 BPF_MOV64_IMM(BPF_REG_2, 0),
8688 BPF_MOV64_IMM(BPF_REG_3, 0),
8689 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8692 .fixup_map_hash_8b = { 3 },
8694 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8697 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
8699 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8700 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8702 BPF_LD_MAP_FD(BPF_REG_1, 0),
8703 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8704 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8705 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8706 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
8707 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8709 BPF_MOV64_IMM(BPF_REG_3, 0),
8710 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8713 .fixup_map_hash_8b = { 3 },
8715 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8718 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
8720 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8721 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8722 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8723 BPF_LD_MAP_FD(BPF_REG_1, 0),
8724 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8725 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8726 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8727 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8728 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
8729 BPF_MOV64_IMM(BPF_REG_3, 0),
8730 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8733 .fixup_map_hash_8b = { 3 },
8735 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8738 "helper access to variable memory: 8 bytes leak",
8740 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8742 BPF_MOV64_IMM(BPF_REG_0, 0),
8743 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
8744 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
8745 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
8746 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
8747 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
8748 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
8749 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
8750 BPF_MOV64_IMM(BPF_REG_2, 1),
8751 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8752 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8753 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
8754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
8755 BPF_MOV64_IMM(BPF_REG_3, 0),
8756 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8757 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8760 .errstr = "invalid indirect read from stack off -64+32 size 64",
8762 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8765 "helper access to variable memory: 8 bytes no leak (init memory)",
8767 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8768 BPF_MOV64_IMM(BPF_REG_0, 0),
8769 BPF_MOV64_IMM(BPF_REG_0, 0),
8770 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
8771 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
8772 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
8773 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
8774 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
8775 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
8776 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
8777 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
8778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8779 BPF_MOV64_IMM(BPF_REG_2, 0),
8780 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
8781 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
8782 BPF_MOV64_IMM(BPF_REG_3, 0),
8783 BPF_EMIT_CALL(BPF_FUNC_probe_read),
8784 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8788 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8791 "invalid and of negative number",
8793 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8794 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8796 BPF_LD_MAP_FD(BPF_REG_1, 0),
8797 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8798 BPF_FUNC_map_lookup_elem),
8799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8800 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8801 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
8802 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
8803 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8804 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
8805 offsetof(struct test_val, foo)),
8808 .fixup_map_hash_48b = { 3 },
8809 .errstr = "R0 max value is outside of the array range",
8811 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8814 "invalid range check",
8816 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8817 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8819 BPF_LD_MAP_FD(BPF_REG_1, 0),
8820 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8821 BPF_FUNC_map_lookup_elem),
8822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
8823 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
8824 BPF_MOV64_IMM(BPF_REG_9, 1),
8825 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
8826 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
8827 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
8828 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
8829 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
8830 BPF_MOV32_IMM(BPF_REG_3, 1),
8831 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
8832 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
8833 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
8834 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
8835 BPF_MOV64_REG(BPF_REG_0, 0),
8838 .fixup_map_hash_48b = { 3 },
8839 .errstr = "R0 max value is outside of the array range",
8841 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8844 "map in map access",
8846 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8847 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8849 BPF_LD_MAP_FD(BPF_REG_1, 0),
8850 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8851 BPF_FUNC_map_lookup_elem),
8852 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8853 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8854 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8855 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8856 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8857 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8858 BPF_FUNC_map_lookup_elem),
8859 BPF_MOV64_IMM(BPF_REG_0, 0),
8862 .fixup_map_in_map = { 3 },
8866 "invalid inner map pointer",
8868 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8869 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8871 BPF_LD_MAP_FD(BPF_REG_1, 0),
8872 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8873 BPF_FUNC_map_lookup_elem),
8874 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8875 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8876 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8878 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8879 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8880 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8881 BPF_FUNC_map_lookup_elem),
8882 BPF_MOV64_IMM(BPF_REG_0, 0),
8885 .fixup_map_in_map = { 3 },
8886 .errstr = "R1 pointer arithmetic on map_ptr prohibited",
8890 "forgot null checking on the inner map pointer",
8892 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8893 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8895 BPF_LD_MAP_FD(BPF_REG_1, 0),
8896 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8897 BPF_FUNC_map_lookup_elem),
8898 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8899 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8901 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8902 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8903 BPF_FUNC_map_lookup_elem),
8904 BPF_MOV64_IMM(BPF_REG_0, 0),
8907 .fixup_map_in_map = { 3 },
8908 .errstr = "R1 type=map_value_or_null expected=map_ptr",
8912 "ld_abs: check calling conv, r1",
8914 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8915 BPF_MOV64_IMM(BPF_REG_1, 0),
8916 BPF_LD_ABS(BPF_W, -0x200000),
8917 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8920 .errstr = "R1 !read_ok",
8924 "ld_abs: check calling conv, r2",
8926 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8927 BPF_MOV64_IMM(BPF_REG_2, 0),
8928 BPF_LD_ABS(BPF_W, -0x200000),
8929 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8932 .errstr = "R2 !read_ok",
8936 "ld_abs: check calling conv, r3",
8938 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8939 BPF_MOV64_IMM(BPF_REG_3, 0),
8940 BPF_LD_ABS(BPF_W, -0x200000),
8941 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8944 .errstr = "R3 !read_ok",
8948 "ld_abs: check calling conv, r4",
8950 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8951 BPF_MOV64_IMM(BPF_REG_4, 0),
8952 BPF_LD_ABS(BPF_W, -0x200000),
8953 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8956 .errstr = "R4 !read_ok",
8960 "ld_abs: check calling conv, r5",
8962 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8963 BPF_MOV64_IMM(BPF_REG_5, 0),
8964 BPF_LD_ABS(BPF_W, -0x200000),
8965 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8968 .errstr = "R5 !read_ok",
8972 "ld_abs: check calling conv, r7",
8974 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8975 BPF_MOV64_IMM(BPF_REG_7, 0),
8976 BPF_LD_ABS(BPF_W, -0x200000),
8977 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8983 "ld_abs: tests on r6 and skb data reload helper",
8985 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8986 BPF_LD_ABS(BPF_B, 0),
8987 BPF_LD_ABS(BPF_H, 0),
8988 BPF_LD_ABS(BPF_W, 0),
8989 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
8990 BPF_MOV64_IMM(BPF_REG_6, 0),
8991 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
8992 BPF_MOV64_IMM(BPF_REG_2, 1),
8993 BPF_MOV64_IMM(BPF_REG_3, 2),
8994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8995 BPF_FUNC_skb_vlan_push),
8996 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
8997 BPF_LD_ABS(BPF_B, 0),
8998 BPF_LD_ABS(BPF_H, 0),
8999 BPF_LD_ABS(BPF_W, 0),
9000 BPF_MOV64_IMM(BPF_REG_0, 42),
9003 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9005 .retval = 42 /* ultimate return value */,
9008 "ld_ind: check calling conv, r1",
9010 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9011 BPF_MOV64_IMM(BPF_REG_1, 1),
9012 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
9013 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9016 .errstr = "R1 !read_ok",
9020 "ld_ind: check calling conv, r2",
9022 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9023 BPF_MOV64_IMM(BPF_REG_2, 1),
9024 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
9025 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9028 .errstr = "R2 !read_ok",
9032 "ld_ind: check calling conv, r3",
9034 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9035 BPF_MOV64_IMM(BPF_REG_3, 1),
9036 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
9037 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9040 .errstr = "R3 !read_ok",
9044 "ld_ind: check calling conv, r4",
9046 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9047 BPF_MOV64_IMM(BPF_REG_4, 1),
9048 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
9049 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
9052 .errstr = "R4 !read_ok",
9056 "ld_ind: check calling conv, r5",
9058 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9059 BPF_MOV64_IMM(BPF_REG_5, 1),
9060 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
9061 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
9064 .errstr = "R5 !read_ok",
9068 "ld_ind: check calling conv, r7",
9070 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9071 BPF_MOV64_IMM(BPF_REG_7, 1),
9072 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
9073 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9080 "check bpf_perf_event_data->sample_period byte load permitted",
9082 BPF_MOV64_IMM(BPF_REG_0, 0),
9083 #if __BYTE_ORDER == __LITTLE_ENDIAN
9084 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
9085 offsetof(struct bpf_perf_event_data, sample_period)),
9087 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
9088 offsetof(struct bpf_perf_event_data, sample_period) + 7),
9093 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
9096 "check bpf_perf_event_data->sample_period half load permitted",
9098 BPF_MOV64_IMM(BPF_REG_0, 0),
9099 #if __BYTE_ORDER == __LITTLE_ENDIAN
9100 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9101 offsetof(struct bpf_perf_event_data, sample_period)),
9103 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9104 offsetof(struct bpf_perf_event_data, sample_period) + 6),
9109 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
9112 "check bpf_perf_event_data->sample_period word load permitted",
9114 BPF_MOV64_IMM(BPF_REG_0, 0),
9115 #if __BYTE_ORDER == __LITTLE_ENDIAN
9116 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9117 offsetof(struct bpf_perf_event_data, sample_period)),
9119 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9120 offsetof(struct bpf_perf_event_data, sample_period) + 4),
9125 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
9128 "check bpf_perf_event_data->sample_period dword load permitted",
9130 BPF_MOV64_IMM(BPF_REG_0, 0),
9131 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
9132 offsetof(struct bpf_perf_event_data, sample_period)),
9136 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
9139 "check skb->data half load not permitted",
9141 BPF_MOV64_IMM(BPF_REG_0, 0),
9142 #if __BYTE_ORDER == __LITTLE_ENDIAN
9143 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9144 offsetof(struct __sk_buff, data)),
9146 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9147 offsetof(struct __sk_buff, data) + 2),
9152 .errstr = "invalid bpf_context access",
9155 "check skb->tc_classid half load not permitted for lwt prog",
9157 BPF_MOV64_IMM(BPF_REG_0, 0),
9158 #if __BYTE_ORDER == __LITTLE_ENDIAN
9159 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9160 offsetof(struct __sk_buff, tc_classid)),
9162 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9163 offsetof(struct __sk_buff, tc_classid) + 2),
9168 .errstr = "invalid bpf_context access",
9169 .prog_type = BPF_PROG_TYPE_LWT_IN,
9172 "bounds checks mixing signed and unsigned, positive bounds",
9174 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9175 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9177 BPF_LD_MAP_FD(BPF_REG_1, 0),
9178 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9179 BPF_FUNC_map_lookup_elem),
9180 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
9181 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9182 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9183 BPF_MOV64_IMM(BPF_REG_2, 2),
9184 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
9185 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
9186 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9187 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9188 BPF_MOV64_IMM(BPF_REG_0, 0),
9191 .fixup_map_hash_8b = { 3 },
9192 .errstr = "unbounded min value",
9193 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9197 "bounds checks mixing signed and unsigned",
9199 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9200 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9201 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9202 BPF_LD_MAP_FD(BPF_REG_1, 0),
9203 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9204 BPF_FUNC_map_lookup_elem),
9205 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
9206 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9207 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9208 BPF_MOV64_IMM(BPF_REG_2, -1),
9209 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
9210 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9211 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9212 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9213 BPF_MOV64_IMM(BPF_REG_0, 0),
9216 .fixup_map_hash_8b = { 3 },
9217 .errstr = "unbounded min value",
9218 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9222 "bounds checks mixing signed and unsigned, variant 2",
9224 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9225 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9226 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9227 BPF_LD_MAP_FD(BPF_REG_1, 0),
9228 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9229 BPF_FUNC_map_lookup_elem),
9230 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9231 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9232 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9233 BPF_MOV64_IMM(BPF_REG_2, -1),
9234 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
9235 BPF_MOV64_IMM(BPF_REG_8, 0),
9236 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
9237 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
9238 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
9239 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
9240 BPF_MOV64_IMM(BPF_REG_0, 0),
9243 .fixup_map_hash_8b = { 3 },
9244 .errstr = "unbounded min value",
9245 .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
9249 "bounds checks mixing signed and unsigned, variant 3",
9251 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9252 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9253 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9254 BPF_LD_MAP_FD(BPF_REG_1, 0),
9255 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9256 BPF_FUNC_map_lookup_elem),
9257 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9258 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9259 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9260 BPF_MOV64_IMM(BPF_REG_2, -1),
9261 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
9262 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
9263 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
9264 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
9265 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
9266 BPF_MOV64_IMM(BPF_REG_0, 0),
9269 .fixup_map_hash_8b = { 3 },
9270 .errstr = "unbounded min value",
9271 .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
9275 "bounds checks mixing signed and unsigned, variant 4",
9277 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9278 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9279 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9280 BPF_LD_MAP_FD(BPF_REG_1, 0),
9281 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9282 BPF_FUNC_map_lookup_elem),
9283 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
9284 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9285 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9286 BPF_MOV64_IMM(BPF_REG_2, 1),
9287 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
9288 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9289 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9290 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9291 BPF_MOV64_IMM(BPF_REG_0, 0),
9294 .fixup_map_hash_8b = { 3 },
9298 "bounds checks mixing signed and unsigned, variant 5",
9300 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9301 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9302 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9303 BPF_LD_MAP_FD(BPF_REG_1, 0),
9304 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9305 BPF_FUNC_map_lookup_elem),
9306 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9307 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9308 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9309 BPF_MOV64_IMM(BPF_REG_2, -1),
9310 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
9311 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
9312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
9313 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9314 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9315 BPF_MOV64_IMM(BPF_REG_0, 0),
9318 .fixup_map_hash_8b = { 3 },
9319 .errstr = "unbounded min value",
9320 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9324 "bounds checks mixing signed and unsigned, variant 6",
9326 BPF_MOV64_IMM(BPF_REG_2, 0),
9327 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
9328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
9329 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9330 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
9331 BPF_MOV64_IMM(BPF_REG_6, -1),
9332 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
9333 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
9334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
9335 BPF_MOV64_IMM(BPF_REG_5, 0),
9336 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
9337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9338 BPF_FUNC_skb_load_bytes),
9339 BPF_MOV64_IMM(BPF_REG_0, 0),
9342 .errstr = "R4 min value is negative, either use unsigned",
9346 "bounds checks mixing signed and unsigned, variant 7",
9348 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9349 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9351 BPF_LD_MAP_FD(BPF_REG_1, 0),
9352 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9353 BPF_FUNC_map_lookup_elem),
9354 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
9355 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9356 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9357 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
9358 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
9359 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9360 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9361 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9362 BPF_MOV64_IMM(BPF_REG_0, 0),
9365 .fixup_map_hash_8b = { 3 },
9369 "bounds checks mixing signed and unsigned, variant 8",
9371 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9372 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9374 BPF_LD_MAP_FD(BPF_REG_1, 0),
9375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9376 BPF_FUNC_map_lookup_elem),
9377 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9378 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9379 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9380 BPF_MOV64_IMM(BPF_REG_2, -1),
9381 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
9382 BPF_MOV64_IMM(BPF_REG_0, 0),
9384 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9385 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9386 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9387 BPF_MOV64_IMM(BPF_REG_0, 0),
9390 .fixup_map_hash_8b = { 3 },
9391 .errstr = "unbounded min value",
9392 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9396 "bounds checks mixing signed and unsigned, variant 9",
9398 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9399 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9401 BPF_LD_MAP_FD(BPF_REG_1, 0),
9402 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9403 BPF_FUNC_map_lookup_elem),
9404 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
9405 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9406 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9407 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
9408 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
9409 BPF_MOV64_IMM(BPF_REG_0, 0),
9411 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9412 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9413 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9414 BPF_MOV64_IMM(BPF_REG_0, 0),
9417 .fixup_map_hash_8b = { 3 },
9421 "bounds checks mixing signed and unsigned, variant 10",
9423 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9424 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9425 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9426 BPF_LD_MAP_FD(BPF_REG_1, 0),
9427 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9428 BPF_FUNC_map_lookup_elem),
9429 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9430 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9431 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9432 BPF_MOV64_IMM(BPF_REG_2, 0),
9433 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
9434 BPF_MOV64_IMM(BPF_REG_0, 0),
9436 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9437 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9438 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9439 BPF_MOV64_IMM(BPF_REG_0, 0),
9442 .fixup_map_hash_8b = { 3 },
9443 .errstr = "unbounded min value",
9444 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9448 "bounds checks mixing signed and unsigned, variant 11",
9450 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9451 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9453 BPF_LD_MAP_FD(BPF_REG_1, 0),
9454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9455 BPF_FUNC_map_lookup_elem),
9456 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9457 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9458 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9459 BPF_MOV64_IMM(BPF_REG_2, -1),
9460 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
9462 BPF_MOV64_IMM(BPF_REG_0, 0),
9464 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9465 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9466 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9467 BPF_MOV64_IMM(BPF_REG_0, 0),
9470 .fixup_map_hash_8b = { 3 },
9471 .errstr = "unbounded min value",
9472 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9476 "bounds checks mixing signed and unsigned, variant 12",
9478 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9479 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9481 BPF_LD_MAP_FD(BPF_REG_1, 0),
9482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9483 BPF_FUNC_map_lookup_elem),
9484 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9485 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9486 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9487 BPF_MOV64_IMM(BPF_REG_2, -6),
9488 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
9489 BPF_MOV64_IMM(BPF_REG_0, 0),
9491 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9492 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9493 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9494 BPF_MOV64_IMM(BPF_REG_0, 0),
9497 .fixup_map_hash_8b = { 3 },
9498 .errstr = "unbounded min value",
9499 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9503 "bounds checks mixing signed and unsigned, variant 13",
9505 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9506 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9508 BPF_LD_MAP_FD(BPF_REG_1, 0),
9509 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9510 BPF_FUNC_map_lookup_elem),
9511 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9512 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9513 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9514 BPF_MOV64_IMM(BPF_REG_2, 2),
9515 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
9516 BPF_MOV64_IMM(BPF_REG_7, 1),
9517 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
9518 BPF_MOV64_IMM(BPF_REG_0, 0),
9520 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
9521 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
9522 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
9523 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9524 BPF_MOV64_IMM(BPF_REG_0, 0),
9527 .fixup_map_hash_8b = { 3 },
9528 .errstr = "unbounded min value",
9529 .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
9533 "bounds checks mixing signed and unsigned, variant 14",
9535 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
9536 offsetof(struct __sk_buff, mark)),
9537 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9538 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9540 BPF_LD_MAP_FD(BPF_REG_1, 0),
9541 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9542 BPF_FUNC_map_lookup_elem),
9543 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9544 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9545 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9546 BPF_MOV64_IMM(BPF_REG_2, -1),
9547 BPF_MOV64_IMM(BPF_REG_8, 2),
9548 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
9549 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
9550 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9551 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9552 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9553 BPF_MOV64_IMM(BPF_REG_0, 0),
9555 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
9556 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
9558 .fixup_map_hash_8b = { 4 },
9559 .errstr = "unbounded min value",
9560 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9564 "bounds checks mixing signed and unsigned, variant 15",
9566 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9567 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9569 BPF_LD_MAP_FD(BPF_REG_1, 0),
9570 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9571 BPF_FUNC_map_lookup_elem),
9572 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9573 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9574 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9575 BPF_MOV64_IMM(BPF_REG_2, -6),
9576 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
9577 BPF_MOV64_IMM(BPF_REG_0, 0),
9579 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9580 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
9581 BPF_MOV64_IMM(BPF_REG_0, 0),
9583 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9584 BPF_MOV64_IMM(BPF_REG_0, 0),
9587 .fixup_map_hash_8b = { 3 },
9588 .errstr = "unbounded min value",
9589 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9591 .result_unpriv = REJECT,
9594 "subtraction bounds (map value) variant 1",
9596 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9597 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9598 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9599 BPF_LD_MAP_FD(BPF_REG_1, 0),
9600 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9601 BPF_FUNC_map_lookup_elem),
9602 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9603 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9604 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
9605 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
9606 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
9607 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
9608 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
9609 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9610 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9612 BPF_MOV64_IMM(BPF_REG_0, 0),
9615 .fixup_map_hash_8b = { 3 },
9616 .errstr = "R0 max value is outside of the array range",
9620 "subtraction bounds (map value) variant 2",
9622 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9623 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9625 BPF_LD_MAP_FD(BPF_REG_1, 0),
9626 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9627 BPF_FUNC_map_lookup_elem),
9628 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9629 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9630 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
9631 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
9632 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
9633 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
9634 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9635 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9637 BPF_MOV64_IMM(BPF_REG_0, 0),
9640 .fixup_map_hash_8b = { 3 },
9641 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
9642 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9646 "check subtraction on pointers for unpriv",
9648 BPF_MOV64_IMM(BPF_REG_0, 0),
9649 BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
9650 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
9651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
9652 BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
9653 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9654 BPF_FUNC_map_lookup_elem),
9655 BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
9656 BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
9657 BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
9658 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
9659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
9660 BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
9661 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9662 BPF_FUNC_map_lookup_elem),
9663 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9665 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
9666 BPF_MOV64_IMM(BPF_REG_0, 0),
9669 .fixup_map_hash_8b = { 1, 9 },
9671 .result_unpriv = REJECT,
9672 .errstr_unpriv = "R9 pointer -= pointer prohibited",
9675 "bounds check based on zero-extended MOV",
9677 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9678 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9680 BPF_LD_MAP_FD(BPF_REG_1, 0),
9681 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9682 BPF_FUNC_map_lookup_elem),
9683 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9684 /* r2 = 0x0000'0000'ffff'ffff */
9685 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
9687 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
9689 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9690 /* access at offset 0 */
9691 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9693 BPF_MOV64_IMM(BPF_REG_0, 0),
9696 .fixup_map_hash_8b = { 3 },
9700 "bounds check based on sign-extended MOV. test1",
9702 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9703 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9704 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9705 BPF_LD_MAP_FD(BPF_REG_1, 0),
9706 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9707 BPF_FUNC_map_lookup_elem),
9708 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9709 /* r2 = 0xffff'ffff'ffff'ffff */
9710 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
9711 /* r2 = 0xffff'ffff */
9712 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
9713 /* r0 = <oob pointer> */
9714 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9715 /* access to OOB pointer */
9716 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9718 BPF_MOV64_IMM(BPF_REG_0, 0),
9721 .fixup_map_hash_8b = { 3 },
9722 .errstr = "map_value pointer and 4294967295",
9726 "bounds check based on sign-extended MOV. test2",
9728 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9731 BPF_LD_MAP_FD(BPF_REG_1, 0),
9732 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9733 BPF_FUNC_map_lookup_elem),
9734 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9735 /* r2 = 0xffff'ffff'ffff'ffff */
9736 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
9737 /* r2 = 0xfff'ffff */
9738 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
9739 /* r0 = <oob pointer> */
9740 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9741 /* access to OOB pointer */
9742 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9744 BPF_MOV64_IMM(BPF_REG_0, 0),
9747 .fixup_map_hash_8b = { 3 },
9748 .errstr = "R0 min value is outside of the array range",
9752 "bounds check based on reg_off + var_off + insn_off. test1",
9754 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
9755 offsetof(struct __sk_buff, mark)),
9756 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9757 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9759 BPF_LD_MAP_FD(BPF_REG_1, 0),
9760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9761 BPF_FUNC_map_lookup_elem),
9762 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9763 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
9764 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
9765 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
9766 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
9767 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
9768 BPF_MOV64_IMM(BPF_REG_0, 0),
9771 .fixup_map_hash_8b = { 4 },
9772 .errstr = "value_size=8 off=1073741825",
9774 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9777 "bounds check based on reg_off + var_off + insn_off. test2",
9779 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
9780 offsetof(struct __sk_buff, mark)),
9781 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9782 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9784 BPF_LD_MAP_FD(BPF_REG_1, 0),
9785 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9786 BPF_FUNC_map_lookup_elem),
9787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9788 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
9789 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
9790 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
9791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
9792 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
9793 BPF_MOV64_IMM(BPF_REG_0, 0),
9796 .fixup_map_hash_8b = { 4 },
9797 .errstr = "value 1073741823",
9799 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9802 "bounds check after truncation of non-boundary-crossing range",
9804 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9805 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9806 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9807 BPF_LD_MAP_FD(BPF_REG_1, 0),
9808 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9809 BPF_FUNC_map_lookup_elem),
9810 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9811 /* r1 = [0x00, 0xff] */
9812 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9813 BPF_MOV64_IMM(BPF_REG_2, 1),
9814 /* r2 = 0x10'0000'0000 */
9815 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
9816 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
9817 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
9818 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
9819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9820 /* r1 = [0x00, 0xff] */
9821 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
9823 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9825 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9826 /* access at offset 0 */
9827 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9829 BPF_MOV64_IMM(BPF_REG_0, 0),
9832 .fixup_map_hash_8b = { 3 },
9836 "bounds check after truncation of boundary-crossing range (1)",
9838 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9839 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9841 BPF_LD_MAP_FD(BPF_REG_1, 0),
9842 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9843 BPF_FUNC_map_lookup_elem),
9844 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9845 /* r1 = [0x00, 0xff] */
9846 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9847 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9848 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
9849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9850 /* r1 = [0xffff'ff80, 0xffff'ffff] or
9851 * [0x0000'0000, 0x0000'007f]
9853 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
9854 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9855 /* r1 = [0x00, 0xff] or
9856 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
9858 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9860 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
9862 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9863 /* no-op or OOB pointer computation */
9864 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9865 /* potentially OOB access */
9866 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9868 BPF_MOV64_IMM(BPF_REG_0, 0),
9871 .fixup_map_hash_8b = { 3 },
9872 /* not actually fully unbounded, but the bound is very high */
9873 .errstr = "R0 unbounded memory access",
9877 "bounds check after truncation of boundary-crossing range (2)",
9879 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9880 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9881 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9882 BPF_LD_MAP_FD(BPF_REG_1, 0),
9883 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9884 BPF_FUNC_map_lookup_elem),
9885 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9886 /* r1 = [0x00, 0xff] */
9887 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9889 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
9890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9891 /* r1 = [0xffff'ff80, 0xffff'ffff] or
9892 * [0x0000'0000, 0x0000'007f]
9893 * difference to previous test: truncation via MOV32
9896 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
9897 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9898 /* r1 = [0x00, 0xff] or
9899 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
9901 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9903 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
9905 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9906 /* no-op or OOB pointer computation */
9907 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9908 /* potentially OOB access */
9909 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9911 BPF_MOV64_IMM(BPF_REG_0, 0),
9914 .fixup_map_hash_8b = { 3 },
9915 /* not actually fully unbounded, but the bound is very high */
9916 .errstr = "R0 unbounded memory access",
9920 "bounds check after wrapping 32-bit addition",
9922 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9923 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9925 BPF_LD_MAP_FD(BPF_REG_1, 0),
9926 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9927 BPF_FUNC_map_lookup_elem),
9928 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
9929 /* r1 = 0x7fff'ffff */
9930 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
9931 /* r1 = 0xffff'fffe */
9932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9934 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
9936 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9937 /* access at offset 0 */
9938 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9940 BPF_MOV64_IMM(BPF_REG_0, 0),
9943 .fixup_map_hash_8b = { 3 },
9947 "bounds check after shift with oversized count operand",
9949 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9950 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9951 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9952 BPF_LD_MAP_FD(BPF_REG_1, 0),
9953 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9954 BPF_FUNC_map_lookup_elem),
9955 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9956 BPF_MOV64_IMM(BPF_REG_2, 32),
9957 BPF_MOV64_IMM(BPF_REG_1, 1),
9958 /* r1 = (u32)1 << (u32)32 = ? */
9959 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
9960 /* r1 = [0x0000, 0xffff] */
9961 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
9962 /* computes unknown pointer, potentially OOB */
9963 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9964 /* potentially OOB access */
9965 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9967 BPF_MOV64_IMM(BPF_REG_0, 0),
9970 .fixup_map_hash_8b = { 3 },
9971 .errstr = "R0 max value is outside of the array range",
9975 "bounds check after right shift of maybe-negative number",
9977 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9978 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9979 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9980 BPF_LD_MAP_FD(BPF_REG_1, 0),
9981 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9982 BPF_FUNC_map_lookup_elem),
9983 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9984 /* r1 = [0x00, 0xff] */
9985 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9986 /* r1 = [-0x01, 0xfe] */
9987 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
9988 /* r1 = 0 or 0xff'ffff'ffff'ffff */
9989 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9990 /* r1 = 0 or 0xffff'ffff'ffff */
9991 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9992 /* computes unknown pointer, potentially OOB */
9993 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9994 /* potentially OOB access */
9995 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9997 BPF_MOV64_IMM(BPF_REG_0, 0),
10000 .fixup_map_hash_8b = { 3 },
10001 .errstr = "R0 unbounded memory access",
10005 "bounds check after 32-bit right shift with 64-bit input",
10007 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10008 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10010 BPF_LD_MAP_FD(BPF_REG_1, 0),
10011 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10012 BPF_FUNC_map_lookup_elem),
10013 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
10015 BPF_MOV64_IMM(BPF_REG_1, 2),
10017 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
10018 /* r1 = 0 (NOT 2!) */
10019 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
10020 /* r1 = 0xffff'fffe (NOT 0!) */
10021 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
10022 /* computes OOB pointer */
10023 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
10025 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10027 BPF_MOV64_IMM(BPF_REG_0, 0),
10030 .fixup_map_hash_8b = { 3 },
10031 .errstr = "R0 invalid mem access",
10035 "bounds check map access with off+size signed 32bit overflow. test1",
10037 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10038 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10039 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10040 BPF_LD_MAP_FD(BPF_REG_1, 0),
10041 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10042 BPF_FUNC_map_lookup_elem),
10043 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10045 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
10046 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
10050 .fixup_map_hash_8b = { 3 },
10051 .errstr = "map_value pointer and 2147483646",
10055 "bounds check map access with off+size signed 32bit overflow. test2",
10057 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10058 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10059 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10060 BPF_LD_MAP_FD(BPF_REG_1, 0),
10061 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10062 BPF_FUNC_map_lookup_elem),
10063 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10065 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
10066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
10067 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
10068 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
10072 .fixup_map_hash_8b = { 3 },
10073 .errstr = "pointer offset 1073741822",
10074 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
10078 "bounds check map access with off+size signed 32bit overflow. test3",
10080 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10081 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10083 BPF_LD_MAP_FD(BPF_REG_1, 0),
10084 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10085 BPF_FUNC_map_lookup_elem),
10086 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10088 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
10089 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
10090 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
10094 .fixup_map_hash_8b = { 3 },
10095 .errstr = "pointer offset -1073741822",
10096 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
10100 "bounds check map access with off+size signed 32bit overflow. test4",
10102 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10105 BPF_LD_MAP_FD(BPF_REG_1, 0),
10106 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10107 BPF_FUNC_map_lookup_elem),
10108 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10110 BPF_MOV64_IMM(BPF_REG_1, 1000000),
10111 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
10112 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
10113 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
10117 .fixup_map_hash_8b = { 3 },
10118 .errstr = "map_value pointer and 1000000000000",
10122 "pointer/scalar confusion in state equality check (way 1)",
10124 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10125 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10126 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10127 BPF_LD_MAP_FD(BPF_REG_1, 0),
10128 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10129 BPF_FUNC_map_lookup_elem),
10130 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
10131 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
10133 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
10137 .fixup_map_hash_8b = { 3 },
10139 .retval = POINTER_VALUE,
10140 .result_unpriv = REJECT,
10141 .errstr_unpriv = "R0 leaks addr as return value"
10144 "pointer/scalar confusion in state equality check (way 2)",
10146 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10147 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10149 BPF_LD_MAP_FD(BPF_REG_1, 0),
10150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10151 BPF_FUNC_map_lookup_elem),
10152 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10153 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
10155 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
10158 .fixup_map_hash_8b = { 3 },
10160 .retval = POINTER_VALUE,
10161 .result_unpriv = REJECT,
10162 .errstr_unpriv = "R0 leaks addr as return value"
10165 "variable-offset ctx access",
10167 /* Get an unknown value */
10168 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
10169 /* Make it small and 4-byte aligned */
10170 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
10171 /* add it to skb. We now have either &skb->len or
10172 * &skb->pkt_type, but we don't know which
10174 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
10175 /* dereference it */
10176 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10179 .errstr = "variable ctx access var_off=(0x0; 0x4)",
10181 .prog_type = BPF_PROG_TYPE_LWT_IN,
10184 "variable-offset stack access",
10186 /* Fill the top 8 bytes of the stack */
10187 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10188 /* Get an unknown value */
10189 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
10190 /* Make it small and 4-byte aligned */
10191 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
10192 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
10193 /* add it to fp. We now have either fp-4 or fp-8, but
10194 * we don't know which
10196 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
10197 /* dereference it */
10198 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
10201 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
10203 .prog_type = BPF_PROG_TYPE_LWT_IN,
10206 "indirect variable-offset stack access",
10208 /* Fill the top 8 bytes of the stack */
10209 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10210 /* Get an unknown value */
10211 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
10212 /* Make it small and 4-byte aligned */
10213 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
10214 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
10215 /* add it to fp. We now have either fp-4 or fp-8, but
10216 * we don't know which
10218 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
10219 /* dereference it indirectly */
10220 BPF_LD_MAP_FD(BPF_REG_1, 0),
10221 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10222 BPF_FUNC_map_lookup_elem),
10223 BPF_MOV64_IMM(BPF_REG_0, 0),
10226 .fixup_map_hash_8b = { 5 },
10227 .errstr = "variable stack read R2",
10229 .prog_type = BPF_PROG_TYPE_LWT_IN,
/* Three verifier tests: adding huge immediates to the frame pointer twice
 * so the 32-bit sum wraps (or nearly wraps).  Each variant expects the
 * pointer-arithmetic sanity check to fire with the quoted .errstr; test3
 * additionally has a distinct unprivileged error string.  NOTE(review):
 * the ".result"/".errstr_unpriv" lines of test1 and test2 are missing
 * from this listing extract (gaps at listing lines 10239-10244 and
 * 10252-10257).
 */
10232 "direct stack access with 32-bit wraparound. test1",
10234 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10235 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
10236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
10237 BPF_MOV32_IMM(BPF_REG_0, 0),
10238 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
10241 .errstr = "fp pointer and 2147483647",
/* test2: 2 * 0x3fffffff — one step smaller, same rejection pattern */
10245 "direct stack access with 32-bit wraparound. test2",
10247 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
10249 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
10250 BPF_MOV32_IMM(BPF_REG_0, 0),
10251 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
10254 .errstr = "fp pointer and 1073741823",
/* test3: 2 * 0x1fffffff — offset stays representable, but is still far
 * outside the stack; unprivileged mode reports out-of-range arithmetic */
10258 "direct stack access with 32-bit wraparound. test3",
10260 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
10262 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
10263 BPF_MOV32_IMM(BPF_REG_0, 0),
10264 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
10267 .errstr = "fp pointer offset 1073741822",
10268 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
/* Verifier test: liveness pruning must not treat R0 as initialized.  Both
 * BPF_JGE comparisons against 0 can be taken (R2 is unknown), and each
 * taken branch skips the following MOV of R0 — so there is a path that
 * reaches exit with R0 never written, which must fail with "R0 !read_ok".
 * NOTE(review): the ".result" line of this entry is absent from this
 * listing extract (gap at listing lines 10281-10284).
 */
10272 "liveness pruning and write screening",
10274 /* Get an unknown value */
10275 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
10276 /* branch conditions teach us nothing about R2 */
10277 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
10278 BPF_MOV64_IMM(BPF_REG_0, 0),
10279 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
10280 BPF_MOV64_IMM(BPF_REG_0, 0),
10283 .errstr = "R0 !read_ok",
10285 .prog_type = BPF_PROG_TYPE_LWT_IN,
/* Verifier test: variable-length map-value access with state pruning.  A
 * signed value loaded from the map element is bounds-checked with JSGT
 * against MAX_ENTRIES, shifted, and added to the element pointer; the
 * path where R1 is NOT proven small can still reach the store via the
 * pruned JA path, so privileged mode must report "R0 unbounded memory
 * access" and unprivileged mode "R0 leaks addr".  .fixup_map_hash_48b
 * patches the map fd at insn index 3.  Requires efficient unaligned
 * access per .flags.
 */
10288 "varlen_map_value_access pruning",
10290 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10291 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10292 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10293 BPF_LD_MAP_FD(BPF_REG_1, 0),
10294 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10295 BPF_FUNC_map_lookup_elem),
10296 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
10297 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
10298 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
10299 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
10300 BPF_MOV32_IMM(BPF_REG_1, 0),
10301 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
10302 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
10303 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
10304 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
10305 offsetof(struct test_val, foo)),
10308 .fixup_map_hash_48b = { 3 },
10309 .errstr_unpriv = "R0 leaks addr",
10310 .errstr = "R0 unbounded memory access",
10311 .result_unpriv = REJECT,
10313 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Verifier test: BPF_END byte-swap is only defined in the BPF_ALU
 * (32-bit) class; encoding it with BPF_ALU64 yields opcode 0xd7, which
 * the verifier must reject as "unknown opcode d7".  This entry builds the
 * raw instruction via designated initializers (.code/.dst_reg); the
 * remaining initializer lines and braces are missing from this listing
 * extract (gaps at listing lines 10317/10319/10322-10327).
 */
10316 "invalid 64-bit BPF_END",
10318 BPF_MOV32_IMM(BPF_REG_0, 0),
10320 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
10321 .dst_reg = BPF_REG_0,
10328 .errstr = "unknown opcode d7",
/* Verifier test: an XDP program may read xdp_md->ingress_ifindex; R0 is
 * set to 1 only when the ifindex is >= 1 (the JLT skips the MOV when
 * R2 < 1).  NOTE(review): the ".result"/".retval" lines of this entry
 * are absent from this listing extract (gap at listing lines 10339-10341).
 */
10332 "XDP, using ifindex from netdev",
10334 BPF_MOV64_IMM(BPF_REG_0, 0),
10335 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10336 offsetof(struct xdp_md, ingress_ifindex)),
10337 BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
10338 BPF_MOV64_IMM(BPF_REG_0, 1),
10342 .prog_type = BPF_PROG_TYPE_XDP,
/* Verifier tests "meta access, test1" .. "test12": XDP metadata-area
 * (data_meta .. data) pointer arithmetic and bounds checking.  Each test
 * loads data_meta/data (and sometimes data_end), range-checks a derived
 * pointer, then dereferences.  Variants with an .errstr expect rejection
 * (negative offset, missing/insufficient bound, pointer invalidated by a
 * helper call, off-by-one past the verified range); variants without one
 * are accept cases whose ".result = ACCEPT" lines are absent from this
 * listing extract — confirm against the full file.
 */
10346 "meta access, test1",
10348 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10349 offsetof(struct xdp_md, data_meta)),
10350 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10351 offsetof(struct xdp_md, data)),
10352 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10353 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10354 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
10355 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10356 BPF_MOV64_IMM(BPF_REG_0, 0),
10360 .prog_type = BPF_PROG_TYPE_XDP,
/* test2: meta - 8 is before the metadata area -> off=-8 rejected */
10363 "meta access, test2",
10365 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10366 offsetof(struct xdp_md, data_meta)),
10367 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10368 offsetof(struct xdp_md, data)),
10369 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10370 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
10371 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
10372 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
10373 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
10374 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10375 BPF_MOV64_IMM(BPF_REG_0, 0),
10379 .errstr = "invalid access to packet, off=-8",
10380 .prog_type = BPF_PROG_TYPE_XDP,
/* test3: meta bounded only against data_end, not data -> rejected */
10383 "meta access, test3",
10385 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10386 offsetof(struct xdp_md, data_meta)),
10387 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10388 offsetof(struct xdp_md, data_end)),
10389 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10391 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
10392 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10393 BPF_MOV64_IMM(BPF_REG_0, 0),
10397 .errstr = "invalid access to packet",
10398 .prog_type = BPF_PROG_TYPE_XDP,
/* test4: bound proven for data, then meta dereferenced -> rejected */
10401 "meta access, test4",
10403 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10404 offsetof(struct xdp_md, data_meta)),
10405 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10406 offsetof(struct xdp_md, data_end)),
10407 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
10408 offsetof(struct xdp_md, data)),
10409 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
10410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10411 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
10412 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10413 BPF_MOV64_IMM(BPF_REG_0, 0),
10417 .errstr = "invalid access to packet",
10418 .prog_type = BPF_PROG_TYPE_XDP,
/* test5: bpf_xdp_adjust_meta() invalidates packet pointers; the old
 * meta pointer in R3 must not be readable afterwards */
10421 "meta access, test5",
10423 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10424 offsetof(struct xdp_md, data_meta)),
10425 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
10426 offsetof(struct xdp_md, data)),
10427 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
10428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10429 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
10430 BPF_MOV64_IMM(BPF_REG_2, -8),
10431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10432 BPF_FUNC_xdp_adjust_meta),
10433 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
10434 BPF_MOV64_IMM(BPF_REG_0, 0),
10438 .errstr = "R3 !read_ok",
10439 .prog_type = BPF_PROG_TYPE_XDP,
/* test6: comparison against data+8 instead of data -> insufficient */
10442 "meta access, test6",
10444 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10445 offsetof(struct xdp_md, data_meta)),
10446 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10447 offsetof(struct xdp_md, data)),
10448 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
10449 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10450 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
10451 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
10452 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
10453 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10454 BPF_MOV64_IMM(BPF_REG_0, 0),
10458 .errstr = "invalid access to packet",
10459 .prog_type = BPF_PROG_TYPE_XDP,
/* test7: like test6 but the check is against data itself -> OK */
10462 "meta access, test7",
10464 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10465 offsetof(struct xdp_md, data_meta)),
10466 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10467 offsetof(struct xdp_md, data)),
10468 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
10469 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10470 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
10471 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
10472 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
10473 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10474 BPF_MOV64_IMM(BPF_REG_0, 0),
10478 .prog_type = BPF_PROG_TYPE_XDP,
/* test8: meta + 0xFFFF checked against data -> still in range */
10481 "meta access, test8",
10483 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10484 offsetof(struct xdp_md, data_meta)),
10485 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10486 offsetof(struct xdp_md, data)),
10487 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
10488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
10489 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
10490 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10491 BPF_MOV64_IMM(BPF_REG_0, 0),
10495 .prog_type = BPF_PROG_TYPE_XDP,
/* test9: 0xFFFF + 1 pushes the offset one past the limit -> rejected */
10498 "meta access, test9",
10500 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10501 offsetof(struct xdp_md, data_meta)),
10502 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10503 offsetof(struct xdp_md, data)),
10504 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
10505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
10506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
10507 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
10508 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10509 BPF_MOV64_IMM(BPF_REG_0, 0),
10513 .errstr = "invalid access to packet",
10514 .prog_type = BPF_PROG_TYPE_XDP,
/* test10: an unknown (xadd-produced) value is added to the DATA pointer
 * while the bound was derived for meta -> rejected */
10517 "meta access, test10",
10519 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10520 offsetof(struct xdp_md, data_meta)),
10521 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10522 offsetof(struct xdp_md, data)),
10523 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
10524 offsetof(struct xdp_md, data_end)),
10525 BPF_MOV64_IMM(BPF_REG_5, 42),
10526 BPF_MOV64_IMM(BPF_REG_6, 24),
10527 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
10528 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
10529 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
10530 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
10531 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
10532 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
10533 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
10534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
10535 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
10536 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
10537 BPF_MOV64_IMM(BPF_REG_0, 0),
10541 .errstr = "invalid access to packet",
10542 .prog_type = BPF_PROG_TYPE_XDP,
/* test11: same xadd-produced value added to the META pointer, bound
 * checked against data -> acceptable */
10545 "meta access, test11",
10547 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10548 offsetof(struct xdp_md, data_meta)),
10549 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10550 offsetof(struct xdp_md, data)),
10551 BPF_MOV64_IMM(BPF_REG_5, 42),
10552 BPF_MOV64_IMM(BPF_REG_6, 24),
10553 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
10554 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
10555 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
10556 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
10557 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
10558 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
10559 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
10560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
10561 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
10562 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
10563 BPF_MOV64_IMM(BPF_REG_0, 0),
10567 .prog_type = BPF_PROG_TYPE_XDP,
/* test12: separate bounds for packet (data vs data_end) and meta
 * (meta vs data) regions, each dereference inside its own range */
10570 "meta access, test12",
10572 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10573 offsetof(struct xdp_md, data_meta)),
10574 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10575 offsetof(struct xdp_md, data)),
10576 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
10577 offsetof(struct xdp_md, data_end)),
10578 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
10579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
10580 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
10581 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
10582 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
10583 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
10584 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
10585 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10586 BPF_MOV64_IMM(BPF_REG_0, 0),
10590 .prog_type = BPF_PROG_TYPE_XDP,
/* Two SCHED_CLS verifier tests:
 * 1) adding an offset to the ctx pointer (R1) makes it a "modified ctx
 *    ptr" that may no longer be dereferenced -> rejected;
 * 2) subtracting pkt_start from pkt_end yields a plain scalar (the packet
 *    length), which is allowed; the expected runtime return value is
 *    TEST_DATA_LEN.  NOTE(review): ".result" lines of both entries are
 *    absent from this listing extract.
 */
10593 "arithmetic ops make PTR_TO_CTX unusable",
10595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
10596 offsetof(struct __sk_buff, data) -
10597 offsetof(struct __sk_buff, mark)),
10598 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10599 offsetof(struct __sk_buff, mark)),
10602 .errstr = "dereference of modified ctx ptr",
10604 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10607 "pkt_end - pkt_start is allowed",
10609 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10610 offsetof(struct __sk_buff, data_end)),
10611 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10612 offsetof(struct __sk_buff, data)),
10613 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
10617 .retval = TEST_DATA_LEN,
10618 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Two XDP verifier tests: arithmetic on the pkt_end pointer (R3) itself —
 * whether adding or subtracting — must be rejected before any comparison
 * can be trusted ("R3 pointer arithmetic on pkt_end").  NOTE(review):
 * ".result" lines are absent from this listing extract.
 */
10621 "XDP pkt read, pkt_end mangling, bad access 1",
10623 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10624 offsetof(struct xdp_md, data)),
10625 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10626 offsetof(struct xdp_md, data_end)),
10627 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10629 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
10630 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10631 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10632 BPF_MOV64_IMM(BPF_REG_0, 0),
10635 .errstr = "R3 pointer arithmetic on pkt_end",
10637 .prog_type = BPF_PROG_TYPE_XDP,
/* bad access 2: same, but pkt_end is decremented instead */
10640 "XDP pkt read, pkt_end mangling, bad access 2",
10642 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10643 offsetof(struct xdp_md, data)),
10644 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10645 offsetof(struct xdp_md, data_end)),
10646 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10648 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
10649 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10650 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10651 BPF_MOV64_IMM(BPF_REG_0, 0),
10654 .errstr = "R3 pointer arithmetic on pkt_end",
10656 .prog_type = BPF_PROG_TYPE_XDP,
/* Systematic matrix of XDP packet-bounds tests for pkt_data' (= data + 8,
 * built in R1) compared against pkt_end (R3), covering every comparison
 * op (JGT/JLT/JGE/JLE) with both operand orders.  Pattern per entry:
 * load data and data_end, advance a copy of data by 8, compare, then
 * load through R1 at a negative offset.  "good access" variants read
 * within the 8 bytes proven on the in-bounds branch; "bad access"
 * variants read past that range (-4 spills over the end), read on the
 * branch where the bound does NOT hold, or omit the check (off 0 jump),
 * and expect "R1 offset is outside of the packet".  All require
 * efficient unaligned access.  NOTE(review): ".result" lines of the
 * accept cases are absent from this listing extract.
 */
10659 "XDP pkt read, pkt_data' > pkt_end, good access",
10661 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10662 offsetof(struct xdp_md, data)),
10663 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10664 offsetof(struct xdp_md, data_end)),
10665 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10666 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10667 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10668 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10669 BPF_MOV64_IMM(BPF_REG_0, 0),
10673 .prog_type = BPF_PROG_TYPE_XDP,
10674 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10677 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
10679 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10680 offsetof(struct xdp_md, data)),
10681 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10682 offsetof(struct xdp_md, data_end)),
10683 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10685 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10686 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10687 BPF_MOV64_IMM(BPF_REG_0, 0),
10690 .errstr = "R1 offset is outside of the packet",
10692 .prog_type = BPF_PROG_TYPE_XDP,
10693 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10696 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
10698 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10699 offsetof(struct xdp_md, data)),
10700 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10701 offsetof(struct xdp_md, data_end)),
10702 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10703 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10704 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
10705 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10706 BPF_MOV64_IMM(BPF_REG_0, 0),
10709 .errstr = "R1 offset is outside of the packet",
10711 .prog_type = BPF_PROG_TYPE_XDP,
10712 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10715 "XDP pkt read, pkt_end > pkt_data', good access",
10717 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10718 offsetof(struct xdp_md, data)),
10719 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10720 offsetof(struct xdp_md, data_end)),
10721 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10722 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10723 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10724 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10725 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10726 BPF_MOV64_IMM(BPF_REG_0, 0),
10730 .prog_type = BPF_PROG_TYPE_XDP,
10731 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10734 "XDP pkt read, pkt_end > pkt_data', bad access 1",
10736 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10737 offsetof(struct xdp_md, data)),
10738 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10739 offsetof(struct xdp_md, data_end)),
10740 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10742 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10743 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10744 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10745 BPF_MOV64_IMM(BPF_REG_0, 0),
10748 .errstr = "R1 offset is outside of the packet",
10750 .prog_type = BPF_PROG_TYPE_XDP,
10751 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10754 "XDP pkt read, pkt_end > pkt_data', bad access 2",
10756 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10757 offsetof(struct xdp_md, data)),
10758 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10759 offsetof(struct xdp_md, data_end)),
10760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10762 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10763 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10764 BPF_MOV64_IMM(BPF_REG_0, 0),
10767 .errstr = "R1 offset is outside of the packet",
10769 .prog_type = BPF_PROG_TYPE_XDP,
10770 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10773 "XDP pkt read, pkt_data' < pkt_end, good access",
10775 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10776 offsetof(struct xdp_md, data)),
10777 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10778 offsetof(struct xdp_md, data_end)),
10779 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10781 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10782 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10783 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10784 BPF_MOV64_IMM(BPF_REG_0, 0),
10788 .prog_type = BPF_PROG_TYPE_XDP,
10789 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10792 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
10794 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10795 offsetof(struct xdp_md, data)),
10796 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10797 offsetof(struct xdp_md, data_end)),
10798 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10799 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10800 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10801 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10802 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10803 BPF_MOV64_IMM(BPF_REG_0, 0),
10806 .errstr = "R1 offset is outside of the packet",
10808 .prog_type = BPF_PROG_TYPE_XDP,
10809 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10812 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
10814 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10815 offsetof(struct xdp_md, data)),
10816 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10817 offsetof(struct xdp_md, data_end)),
10818 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10820 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10821 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10822 BPF_MOV64_IMM(BPF_REG_0, 0),
10825 .errstr = "R1 offset is outside of the packet",
10827 .prog_type = BPF_PROG_TYPE_XDP,
10828 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10831 "XDP pkt read, pkt_end < pkt_data', good access",
10833 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10834 offsetof(struct xdp_md, data)),
10835 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10836 offsetof(struct xdp_md, data_end)),
10837 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10839 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10840 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10841 BPF_MOV64_IMM(BPF_REG_0, 0),
10845 .prog_type = BPF_PROG_TYPE_XDP,
10846 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10849 "XDP pkt read, pkt_end < pkt_data', bad access 1",
10851 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10852 offsetof(struct xdp_md, data)),
10853 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10854 offsetof(struct xdp_md, data_end)),
10855 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10856 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10857 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10858 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10859 BPF_MOV64_IMM(BPF_REG_0, 0),
10862 .errstr = "R1 offset is outside of the packet",
10864 .prog_type = BPF_PROG_TYPE_XDP,
10865 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10868 "XDP pkt read, pkt_end < pkt_data', bad access 2",
10870 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10871 offsetof(struct xdp_md, data)),
10872 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10873 offsetof(struct xdp_md, data_end)),
10874 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10876 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
10877 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10878 BPF_MOV64_IMM(BPF_REG_0, 0),
10881 .errstr = "R1 offset is outside of the packet",
10883 .prog_type = BPF_PROG_TYPE_XDP,
10884 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10887 "XDP pkt read, pkt_data' >= pkt_end, good access",
10889 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10890 offsetof(struct xdp_md, data)),
10891 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10892 offsetof(struct xdp_md, data_end)),
10893 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10895 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10896 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10897 BPF_MOV64_IMM(BPF_REG_0, 0),
10901 .prog_type = BPF_PROG_TYPE_XDP,
10902 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10905 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
10907 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10908 offsetof(struct xdp_md, data)),
10909 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10910 offsetof(struct xdp_md, data_end)),
10911 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10912 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10913 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10914 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10915 BPF_MOV64_IMM(BPF_REG_0, 0),
10918 .errstr = "R1 offset is outside of the packet",
10920 .prog_type = BPF_PROG_TYPE_XDP,
10921 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10924 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
10926 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10927 offsetof(struct xdp_md, data)),
10928 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10929 offsetof(struct xdp_md, data_end)),
10930 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10931 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10932 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10933 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10934 BPF_MOV64_IMM(BPF_REG_0, 0),
10937 .errstr = "R1 offset is outside of the packet",
10939 .prog_type = BPF_PROG_TYPE_XDP,
10940 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10943 "XDP pkt read, pkt_end >= pkt_data', good access",
10945 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10946 offsetof(struct xdp_md, data)),
10947 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10948 offsetof(struct xdp_md, data_end)),
10949 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10951 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10952 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10953 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10954 BPF_MOV64_IMM(BPF_REG_0, 0),
10958 .prog_type = BPF_PROG_TYPE_XDP,
10959 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10962 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
10964 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10965 offsetof(struct xdp_md, data)),
10966 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10967 offsetof(struct xdp_md, data_end)),
10968 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10970 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10971 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10972 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10973 BPF_MOV64_IMM(BPF_REG_0, 0),
10976 .errstr = "R1 offset is outside of the packet",
10978 .prog_type = BPF_PROG_TYPE_XDP,
10979 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10982 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
10984 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10985 offsetof(struct xdp_md, data)),
10986 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10987 offsetof(struct xdp_md, data_end)),
10988 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10989 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10990 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10991 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10992 BPF_MOV64_IMM(BPF_REG_0, 0),
10995 .errstr = "R1 offset is outside of the packet",
10997 .prog_type = BPF_PROG_TYPE_XDP,
10998 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11001 "XDP pkt read, pkt_data' <= pkt_end, good access",
11003 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11004 offsetof(struct xdp_md, data)),
11005 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11006 offsetof(struct xdp_md, data_end)),
11007 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11008 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11009 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11010 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11011 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11012 BPF_MOV64_IMM(BPF_REG_0, 0),
11016 .prog_type = BPF_PROG_TYPE_XDP,
11017 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11020 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
11022 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11023 offsetof(struct xdp_md, data)),
11024 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11025 offsetof(struct xdp_md, data_end)),
11026 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11027 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11028 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11029 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11030 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
11031 BPF_MOV64_IMM(BPF_REG_0, 0),
11034 .errstr = "R1 offset is outside of the packet",
11036 .prog_type = BPF_PROG_TYPE_XDP,
11037 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11040 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
11042 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11043 offsetof(struct xdp_md, data)),
11044 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11045 offsetof(struct xdp_md, data_end)),
11046 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11048 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11049 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11050 BPF_MOV64_IMM(BPF_REG_0, 0),
11053 .errstr = "R1 offset is outside of the packet",
11055 .prog_type = BPF_PROG_TYPE_XDP,
11056 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11059 "XDP pkt read, pkt_end <= pkt_data', good access",
11061 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11062 offsetof(struct xdp_md, data)),
11063 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11064 offsetof(struct xdp_md, data_end)),
11065 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11067 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
11068 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11069 BPF_MOV64_IMM(BPF_REG_0, 0),
11073 .prog_type = BPF_PROG_TYPE_XDP,
11074 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11077 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
11079 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11080 offsetof(struct xdp_md, data)),
11081 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11082 offsetof(struct xdp_md, data_end)),
11083 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11084 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11085 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
11086 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11087 BPF_MOV64_IMM(BPF_REG_0, 0),
11090 .errstr = "R1 offset is outside of the packet",
11092 .prog_type = BPF_PROG_TYPE_XDP,
11093 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11096 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
11098 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11099 offsetof(struct xdp_md, data)),
11100 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11101 offsetof(struct xdp_md, data_end)),
11102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11104 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
11105 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11106 BPF_MOV64_IMM(BPF_REG_0, 0),
11109 .errstr = "R1 offset is outside of the packet",
11111 .prog_type = BPF_PROG_TYPE_XDP,
11112 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Same comparison matrix as the pkt_data'/pkt_end group above, but for
 * the metadata area: pkt_meta' (= data_meta + 8, built in R1) compared
 * against pkt_data (R3) with JGT/JLT/JGE/JLE in both operand orders.
 * "good access" reads stay within the verified 8 bytes; "bad access"
 * variants read past the proven range, read on the unproven branch, or
 * omit the check, expecting "R1 offset is outside of the packet".  All
 * require efficient unaligned access.  NOTE(review): ".result" lines of
 * the accept cases are absent from this listing extract.
 */
11115 "XDP pkt read, pkt_meta' > pkt_data, good access",
11117 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11118 offsetof(struct xdp_md, data_meta)),
11119 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11120 offsetof(struct xdp_md, data)),
11121 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11123 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
11124 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11125 BPF_MOV64_IMM(BPF_REG_0, 0),
11129 .prog_type = BPF_PROG_TYPE_XDP,
11130 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11133 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
11135 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11136 offsetof(struct xdp_md, data_meta)),
11137 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11138 offsetof(struct xdp_md, data)),
11139 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11141 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
11142 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
11143 BPF_MOV64_IMM(BPF_REG_0, 0),
11146 .errstr = "R1 offset is outside of the packet",
11148 .prog_type = BPF_PROG_TYPE_XDP,
11149 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11152 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
11154 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11155 offsetof(struct xdp_md, data_meta)),
11156 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11157 offsetof(struct xdp_md, data)),
11158 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11160 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
11161 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11162 BPF_MOV64_IMM(BPF_REG_0, 0),
11165 .errstr = "R1 offset is outside of the packet",
11167 .prog_type = BPF_PROG_TYPE_XDP,
11168 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11171 "XDP pkt read, pkt_data > pkt_meta', good access",
11173 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11174 offsetof(struct xdp_md, data_meta)),
11175 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11176 offsetof(struct xdp_md, data)),
11177 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11179 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
11180 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11181 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11182 BPF_MOV64_IMM(BPF_REG_0, 0),
11186 .prog_type = BPF_PROG_TYPE_XDP,
11187 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11190 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
11192 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11193 offsetof(struct xdp_md, data_meta)),
11194 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11195 offsetof(struct xdp_md, data)),
11196 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11197 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11198 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
11199 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11200 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11201 BPF_MOV64_IMM(BPF_REG_0, 0),
11204 .errstr = "R1 offset is outside of the packet",
11206 .prog_type = BPF_PROG_TYPE_XDP,
11207 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11210 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
11212 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11213 offsetof(struct xdp_md, data_meta)),
11214 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11215 offsetof(struct xdp_md, data)),
11216 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11218 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
11219 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11220 BPF_MOV64_IMM(BPF_REG_0, 0),
11223 .errstr = "R1 offset is outside of the packet",
11225 .prog_type = BPF_PROG_TYPE_XDP,
11226 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11229 "XDP pkt read, pkt_meta' < pkt_data, good access",
11231 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11232 offsetof(struct xdp_md, data_meta)),
11233 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11234 offsetof(struct xdp_md, data)),
11235 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11237 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
11238 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11239 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11240 BPF_MOV64_IMM(BPF_REG_0, 0),
11244 .prog_type = BPF_PROG_TYPE_XDP,
11245 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11248 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
11250 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11251 offsetof(struct xdp_md, data_meta)),
11252 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11253 offsetof(struct xdp_md, data)),
11254 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11255 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11256 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
11257 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11258 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11259 BPF_MOV64_IMM(BPF_REG_0, 0),
11262 .errstr = "R1 offset is outside of the packet",
11264 .prog_type = BPF_PROG_TYPE_XDP,
11265 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11268 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
11270 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11271 offsetof(struct xdp_md, data_meta)),
11272 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11273 offsetof(struct xdp_md, data)),
11274 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11275 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11276 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
11277 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11278 BPF_MOV64_IMM(BPF_REG_0, 0),
11281 .errstr = "R1 offset is outside of the packet",
11283 .prog_type = BPF_PROG_TYPE_XDP,
11284 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11287 "XDP pkt read, pkt_data < pkt_meta', good access",
11289 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11290 offsetof(struct xdp_md, data_meta)),
11291 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11292 offsetof(struct xdp_md, data)),
11293 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11295 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
11296 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11297 BPF_MOV64_IMM(BPF_REG_0, 0),
11301 .prog_type = BPF_PROG_TYPE_XDP,
11302 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11305 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
11307 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11308 offsetof(struct xdp_md, data_meta)),
11309 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11310 offsetof(struct xdp_md, data)),
11311 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11313 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
11314 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
11315 BPF_MOV64_IMM(BPF_REG_0, 0),
11318 .errstr = "R1 offset is outside of the packet",
11320 .prog_type = BPF_PROG_TYPE_XDP,
11321 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11324 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
11326 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11327 offsetof(struct xdp_md, data_meta)),
11328 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11329 offsetof(struct xdp_md, data)),
11330 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11332 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
11333 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11334 BPF_MOV64_IMM(BPF_REG_0, 0),
11337 .errstr = "R1 offset is outside of the packet",
11339 .prog_type = BPF_PROG_TYPE_XDP,
11340 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11343 "XDP pkt read, pkt_meta' >= pkt_data, good access",
11345 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11346 offsetof(struct xdp_md, data_meta)),
11347 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11348 offsetof(struct xdp_md, data)),
11349 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11351 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
11352 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11353 BPF_MOV64_IMM(BPF_REG_0, 0),
11357 .prog_type = BPF_PROG_TYPE_XDP,
11358 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11361 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
11363 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11364 offsetof(struct xdp_md, data_meta)),
11365 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11366 offsetof(struct xdp_md, data)),
11367 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11369 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
11370 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11371 BPF_MOV64_IMM(BPF_REG_0, 0),
11374 .errstr = "R1 offset is outside of the packet",
11376 .prog_type = BPF_PROG_TYPE_XDP,
11377 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11380 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
11382 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11383 offsetof(struct xdp_md, data_meta)),
11384 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11385 offsetof(struct xdp_md, data)),
11386 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11388 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
11389 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11390 BPF_MOV64_IMM(BPF_REG_0, 0),
11393 .errstr = "R1 offset is outside of the packet",
11395 .prog_type = BPF_PROG_TYPE_XDP,
11396 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11399 "XDP pkt read, pkt_data >= pkt_meta', good access",
11401 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11402 offsetof(struct xdp_md, data_meta)),
11403 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11404 offsetof(struct xdp_md, data)),
11405 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11407 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
11408 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11409 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11410 BPF_MOV64_IMM(BPF_REG_0, 0),
11414 .prog_type = BPF_PROG_TYPE_XDP,
11415 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11418 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
11420 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11421 offsetof(struct xdp_md, data_meta)),
11422 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11423 offsetof(struct xdp_md, data)),
11424 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11425 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11426 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
11427 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11428 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
11429 BPF_MOV64_IMM(BPF_REG_0, 0),
11432 .errstr = "R1 offset is outside of the packet",
11434 .prog_type = BPF_PROG_TYPE_XDP,
11435 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11438 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
11440 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11441 offsetof(struct xdp_md, data_meta)),
11442 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11443 offsetof(struct xdp_md, data)),
11444 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11445 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11446 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
11447 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11448 BPF_MOV64_IMM(BPF_REG_0, 0),
11451 .errstr = "R1 offset is outside of the packet",
11453 .prog_type = BPF_PROG_TYPE_XDP,
11454 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11457 "XDP pkt read, pkt_meta' <= pkt_data, good access",
11459 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11460 offsetof(struct xdp_md, data_meta)),
11461 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11462 offsetof(struct xdp_md, data)),
11463 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11465 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11466 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11467 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11468 BPF_MOV64_IMM(BPF_REG_0, 0),
11472 .prog_type = BPF_PROG_TYPE_XDP,
11473 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11476 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
11478 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11479 offsetof(struct xdp_md, data_meta)),
11480 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11481 offsetof(struct xdp_md, data)),
11482 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11484 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11485 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11486 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
11487 BPF_MOV64_IMM(BPF_REG_0, 0),
11490 .errstr = "R1 offset is outside of the packet",
11492 .prog_type = BPF_PROG_TYPE_XDP,
11493 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11496 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
11498 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11499 offsetof(struct xdp_md, data_meta)),
11500 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11501 offsetof(struct xdp_md, data)),
11502 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11504 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11505 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11506 BPF_MOV64_IMM(BPF_REG_0, 0),
11509 .errstr = "R1 offset is outside of the packet",
11511 .prog_type = BPF_PROG_TYPE_XDP,
11512 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11515 "XDP pkt read, pkt_data <= pkt_meta', good access",
11517 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11518 offsetof(struct xdp_md, data_meta)),
11519 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11520 offsetof(struct xdp_md, data)),
11521 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11522 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11523 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
11524 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11525 BPF_MOV64_IMM(BPF_REG_0, 0),
11529 .prog_type = BPF_PROG_TYPE_XDP,
11530 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11533 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
11535 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11536 offsetof(struct xdp_md, data_meta)),
11537 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11538 offsetof(struct xdp_md, data)),
11539 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11540 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11541 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
11542 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11543 BPF_MOV64_IMM(BPF_REG_0, 0),
11546 .errstr = "R1 offset is outside of the packet",
11548 .prog_type = BPF_PROG_TYPE_XDP,
11549 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11552 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
11554 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11555 offsetof(struct xdp_md, data_meta)),
11556 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11557 offsetof(struct xdp_md, data)),
11558 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11559 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11560 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
11561 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11562 BPF_MOV64_IMM(BPF_REG_0, 0),
11565 .errstr = "R1 offset is outside of the packet",
11567 .prog_type = BPF_PROG_TYPE_XDP,
11568 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11571 "check deducing bounds from const, 1",
11573 BPF_MOV64_IMM(BPF_REG_0, 1),
11574 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
11575 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11579 .errstr = "R0 tried to subtract pointer from scalar",
11582 "check deducing bounds from const, 2",
11584 BPF_MOV64_IMM(BPF_REG_0, 1),
11585 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
11587 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
11589 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
11596 "check deducing bounds from const, 3",
11598 BPF_MOV64_IMM(BPF_REG_0, 0),
11599 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
11600 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11604 .errstr = "R0 tried to subtract pointer from scalar",
11607 "check deducing bounds from const, 4",
11609 BPF_MOV64_IMM(BPF_REG_0, 0),
11610 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
11612 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
11614 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
11620 "check deducing bounds from const, 5",
11622 BPF_MOV64_IMM(BPF_REG_0, 0),
11623 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
11624 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11628 .errstr = "R0 tried to subtract pointer from scalar",
11631 "check deducing bounds from const, 6",
11633 BPF_MOV64_IMM(BPF_REG_0, 0),
11634 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
11636 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11640 .errstr = "R0 tried to subtract pointer from scalar",
11643 "check deducing bounds from const, 7",
11645 BPF_MOV64_IMM(BPF_REG_0, ~0),
11646 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
11647 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
11648 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11649 offsetof(struct __sk_buff, mark)),
11653 .errstr = "dereference of modified ctx ptr",
11654 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11657 "check deducing bounds from const, 8",
11659 BPF_MOV64_IMM(BPF_REG_0, ~0),
11660 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
11661 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
11662 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11663 offsetof(struct __sk_buff, mark)),
11667 .errstr = "dereference of modified ctx ptr",
11668 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11671 "check deducing bounds from const, 9",
11673 BPF_MOV64_IMM(BPF_REG_0, 0),
11674 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
11675 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11679 .errstr = "R0 tried to subtract pointer from scalar",
11682 "check deducing bounds from const, 10",
11684 BPF_MOV64_IMM(BPF_REG_0, 0),
11685 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
11686 /* Marks reg as unknown. */
11687 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
11688 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11692 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
11695 "bpf_exit with invalid return code. test1",
11697 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11700 .errstr = "R0 has value (0x0; 0xffffffff)",
11702 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11705 "bpf_exit with invalid return code. test2",
11707 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11708 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
11712 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11715 "bpf_exit with invalid return code. test3",
11717 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11718 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
11721 .errstr = "R0 has value (0x0; 0x3)",
11723 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11726 "bpf_exit with invalid return code. test4",
11728 BPF_MOV64_IMM(BPF_REG_0, 1),
11732 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11735 "bpf_exit with invalid return code. test5",
11737 BPF_MOV64_IMM(BPF_REG_0, 2),
11740 .errstr = "R0 has value (0x2; 0x0)",
11742 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11745 "bpf_exit with invalid return code. test6",
11747 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11750 .errstr = "R0 is not a known value (ctx)",
11752 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11755 "bpf_exit with invalid return code. test7",
11757 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11758 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
11759 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
11762 .errstr = "R0 has unknown scalar value",
11764 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11767 "calls: basic sanity",
11769 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11770 BPF_MOV64_IMM(BPF_REG_0, 1),
11772 BPF_MOV64_IMM(BPF_REG_0, 2),
11775 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11779 "calls: not on unpriviledged",
11781 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11782 BPF_MOV64_IMM(BPF_REG_0, 1),
11784 BPF_MOV64_IMM(BPF_REG_0, 2),
11787 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
11788 .result_unpriv = REJECT,
11793 "calls: div by 0 in subprog",
11795 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11796 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11797 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11798 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
11799 offsetof(struct __sk_buff, data_end)),
11800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
11801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
11802 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
11803 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
11804 BPF_MOV64_IMM(BPF_REG_0, 1),
11806 BPF_MOV32_IMM(BPF_REG_2, 0),
11807 BPF_MOV32_IMM(BPF_REG_3, 1),
11808 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
11809 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11810 offsetof(struct __sk_buff, data)),
11813 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11818 "calls: multiple ret types in subprog 1",
11820 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11821 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11822 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11823 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
11824 offsetof(struct __sk_buff, data_end)),
11825 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
11826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
11827 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
11828 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
11829 BPF_MOV64_IMM(BPF_REG_0, 1),
11831 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11832 offsetof(struct __sk_buff, data)),
11833 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
11834 BPF_MOV32_IMM(BPF_REG_0, 42),
11837 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11839 .errstr = "R0 invalid mem access 'inv'",
11842 "calls: multiple ret types in subprog 2",
11844 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11846 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11847 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
11848 offsetof(struct __sk_buff, data_end)),
11849 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
11850 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
11851 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
11852 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
11853 BPF_MOV64_IMM(BPF_REG_0, 1),
11855 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11856 offsetof(struct __sk_buff, data)),
11857 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11858 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
11859 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11860 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11862 BPF_LD_MAP_FD(BPF_REG_1, 0),
11863 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11864 BPF_FUNC_map_lookup_elem),
11865 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
11866 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
11867 offsetof(struct __sk_buff, data)),
11868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
11871 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11872 .fixup_map_hash_8b = { 16 },
11874 .errstr = "R0 min value is outside of the array range",
11877 "calls: overlapping caller/callee",
11879 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
11880 BPF_MOV64_IMM(BPF_REG_0, 1),
11883 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11884 .errstr = "last insn is not an exit or jmp",
11888 "calls: wrong recursive calls",
11890 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11891 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11892 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11893 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11894 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11895 BPF_MOV64_IMM(BPF_REG_0, 1),
11898 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11899 .errstr = "jump out of range",
11903 "calls: wrong src reg",
11905 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
11906 BPF_MOV64_IMM(BPF_REG_0, 1),
11909 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11910 .errstr = "BPF_CALL uses reserved fields",
11914 "calls: wrong off value",
11916 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
11917 BPF_MOV64_IMM(BPF_REG_0, 1),
11919 BPF_MOV64_IMM(BPF_REG_0, 2),
11922 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11923 .errstr = "BPF_CALL uses reserved fields",
11927 "calls: jump back loop",
11929 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
11930 BPF_MOV64_IMM(BPF_REG_0, 1),
11933 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11934 .errstr = "back-edge from insn 0 to 0",
11938 "calls: conditional call",
11940 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11941 offsetof(struct __sk_buff, mark)),
11942 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11943 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11944 BPF_MOV64_IMM(BPF_REG_0, 1),
11946 BPF_MOV64_IMM(BPF_REG_0, 2),
11949 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11950 .errstr = "jump out of range",
11954 "calls: conditional call 2",
11956 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11957 offsetof(struct __sk_buff, mark)),
11958 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11959 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11960 BPF_MOV64_IMM(BPF_REG_0, 1),
11962 BPF_MOV64_IMM(BPF_REG_0, 2),
11964 BPF_MOV64_IMM(BPF_REG_0, 3),
11967 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11971 "calls: conditional call 3",
11973 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11974 offsetof(struct __sk_buff, mark)),
11975 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11976 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11977 BPF_MOV64_IMM(BPF_REG_0, 1),
11979 BPF_MOV64_IMM(BPF_REG_0, 1),
11980 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11981 BPF_MOV64_IMM(BPF_REG_0, 3),
11982 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11984 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11985 .errstr = "back-edge from insn",
11989 "calls: conditional call 4",
11991 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11992 offsetof(struct __sk_buff, mark)),
11993 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11995 BPF_MOV64_IMM(BPF_REG_0, 1),
11997 BPF_MOV64_IMM(BPF_REG_0, 1),
11998 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
11999 BPF_MOV64_IMM(BPF_REG_0, 3),
12002 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12006 "calls: conditional call 5",
12008 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12009 offsetof(struct __sk_buff, mark)),
12010 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
12011 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12012 BPF_MOV64_IMM(BPF_REG_0, 1),
12014 BPF_MOV64_IMM(BPF_REG_0, 1),
12015 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
12016 BPF_MOV64_IMM(BPF_REG_0, 3),
12019 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12020 .errstr = "back-edge from insn",
12024 "calls: conditional call 6",
12026 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12027 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
12029 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12030 offsetof(struct __sk_buff, mark)),
12033 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12034 .errstr = "back-edge from insn",
12038 "calls: using r0 returned by callee",
12040 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12042 BPF_MOV64_IMM(BPF_REG_0, 2),
12045 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12049 "calls: using uninit r0 from callee",
12051 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12055 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12056 .errstr = "!read_ok",
12060 "calls: callee is using r1",
12062 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12064 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12065 offsetof(struct __sk_buff, len)),
12068 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
12070 .retval = TEST_DATA_LEN,
12073 "calls: callee using args1",
12075 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12077 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
12080 .errstr_unpriv = "allowed for root only",
12081 .result_unpriv = REJECT,
12083 .retval = POINTER_VALUE,
12086 "calls: callee using wrong args2",
12088 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12090 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12093 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12094 .errstr = "R2 !read_ok",
12098 "calls: callee using two args",
12100 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12101 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
12102 offsetof(struct __sk_buff, len)),
12103 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
12104 offsetof(struct __sk_buff, len)),
12105 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12107 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
12108 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
12111 .errstr_unpriv = "allowed for root only",
12112 .result_unpriv = REJECT,
12114 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
12117 "calls: callee changing pkt pointers",
12119 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12120 offsetof(struct xdp_md, data)),
12121 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
12122 offsetof(struct xdp_md, data_end)),
12123 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
12124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
12125 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
12126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12127 /* clear_all_pkt_pointers() has to walk all frames
12128 * to make sure that pkt pointers in the caller
12129 * are cleared when callee is calling a helper that
12130 * adjusts packet size
12132 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
12133 BPF_MOV32_IMM(BPF_REG_0, 0),
12135 BPF_MOV64_IMM(BPF_REG_2, 0),
12136 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12137 BPF_FUNC_xdp_adjust_head),
12141 .errstr = "R6 invalid mem access 'inv'",
12142 .prog_type = BPF_PROG_TYPE_XDP,
12143 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12146 "calls: two calls with args",
12148 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12150 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12151 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
12152 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12153 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12154 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12155 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
12156 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
12158 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12159 offsetof(struct __sk_buff, len)),
12162 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12164 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
12167 "calls: calls with stack arith",
12169 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
12171 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
12174 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
12177 BPF_MOV64_IMM(BPF_REG_0, 42),
12178 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
12181 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12186 "calls: calls with misaligned stack access",
12188 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
12190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
12193 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
12196 BPF_MOV64_IMM(BPF_REG_0, 42),
12197 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
12200 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12201 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
12202 .errstr = "misaligned stack access",
12206 "calls: calls control flow, jump test",
12208 BPF_MOV64_IMM(BPF_REG_0, 42),
12209 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12210 BPF_MOV64_IMM(BPF_REG_0, 43),
12211 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12212 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
12215 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12220 "calls: calls control flow, jump test 2",
12222 BPF_MOV64_IMM(BPF_REG_0, 42),
12223 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12224 BPF_MOV64_IMM(BPF_REG_0, 43),
12225 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12226 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
12229 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12230 .errstr = "jump out of range from insn 1 to 4",
12234 "calls: two calls with bad jump",
12236 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12238 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12239 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
12240 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12241 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12242 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12243 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
12244 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
12246 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12247 offsetof(struct __sk_buff, len)),
12248 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
12251 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12252 .errstr = "jump out of range from insn 11 to 9",
12256 "calls: recursive call. test1",
12258 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12260 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
12263 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12264 .errstr = "back-edge",
12268 "calls: recursive call. test2",
12270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12272 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
12275 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12276 .errstr = "back-edge",
12280 "calls: unreachable code",
12282 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12284 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12286 BPF_MOV64_IMM(BPF_REG_0, 0),
12288 BPF_MOV64_IMM(BPF_REG_0, 0),
12291 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12292 .errstr = "unreachable insn 6",
12296 "calls: invalid call",
12298 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
12303 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12304 .errstr = "invalid destination",
12308 "calls: invalid call 2",
12310 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12312 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
12315 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12316 .errstr = "invalid destination",
12320 "calls: jumping across function bodies. test1",
12322 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12323 BPF_MOV64_IMM(BPF_REG_0, 0),
12325 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
12328 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12329 .errstr = "jump out of range",
12333 "calls: jumping across function bodies. test2",
12335 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
12336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12337 BPF_MOV64_IMM(BPF_REG_0, 0),
12341 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12342 .errstr = "jump out of range",
12346 "calls: call without exit",
12348 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12350 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12352 BPF_MOV64_IMM(BPF_REG_0, 0),
12353 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
12355 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12356 .errstr = "not an exit",
12360 "calls: call into middle of ld_imm64",
12362 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12363 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12364 BPF_MOV64_IMM(BPF_REG_0, 0),
12366 BPF_LD_IMM64(BPF_REG_0, 0),
12369 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12370 .errstr = "last insn",
12374 "calls: call into middle of other call",
12376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12377 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12378 BPF_MOV64_IMM(BPF_REG_0, 0),
12380 BPF_MOV64_IMM(BPF_REG_0, 0),
12381 BPF_MOV64_IMM(BPF_REG_0, 0),
12384 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12385 .errstr = "last insn",
12389 "calls: ld_abs with changing ctx data in callee",
12391 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12392 BPF_LD_ABS(BPF_B, 0),
12393 BPF_LD_ABS(BPF_H, 0),
12394 BPF_LD_ABS(BPF_W, 0),
12395 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
12396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
12397 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
12398 BPF_LD_ABS(BPF_B, 0),
12399 BPF_LD_ABS(BPF_H, 0),
12400 BPF_LD_ABS(BPF_W, 0),
12402 BPF_MOV64_IMM(BPF_REG_2, 1),
12403 BPF_MOV64_IMM(BPF_REG_3, 2),
12404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12405 BPF_FUNC_skb_vlan_push),
12408 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12409 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
12413 "calls: two calls with bad fallthrough",
12415 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12417 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12418 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
12419 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12420 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12421 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12422 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
12423 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
12424 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
12425 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12426 offsetof(struct __sk_buff, len)),
12429 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
12430 .errstr = "not an exit",
12434 "calls: two calls with stack read",
12436 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12437 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12438 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12439 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12441 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12442 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
12443 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12444 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12445 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12446 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
12447 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
12449 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
12452 .prog_type = BPF_PROG_TYPE_XDP,
12456 "calls: two calls with stack write",
12459 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12460 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12462 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12464 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12465 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
12469 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12470 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12471 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
12472 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
12473 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12474 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12475 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12476 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
12477 /* write into stack frame of main prog */
12478 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12482 /* read from stack frame of main prog */
12483 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
12486 .prog_type = BPF_PROG_TYPE_XDP,
12490 "calls: stack overflow using two frames (pre-call access)",
12493 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12494 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
12498 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12499 BPF_MOV64_IMM(BPF_REG_0, 0),
12502 .prog_type = BPF_PROG_TYPE_XDP,
12503 .errstr = "combined stack size",
12507 "calls: stack overflow using two frames (post-call access)",
12510 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
12511 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12515 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12516 BPF_MOV64_IMM(BPF_REG_0, 0),
12519 .prog_type = BPF_PROG_TYPE_XDP,
12520 .errstr = "combined stack size",
12524 "calls: stack depth check using three frames. test1",
12527 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
12528 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
12529 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
12530 BPF_MOV64_IMM(BPF_REG_0, 0),
12533 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
12536 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
12537 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
12540 .prog_type = BPF_PROG_TYPE_XDP,
12541 /* stack_main=32, stack_A=256, stack_B=64
12542 * and max(main+A, main+A+B) < 512
12547 "calls: stack depth check using three frames. test2",
12550 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
12551 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
12552 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
12553 BPF_MOV64_IMM(BPF_REG_0, 0),
12556 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
12559 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
12560 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
12563 .prog_type = BPF_PROG_TYPE_XDP,
12564 /* stack_main=32, stack_A=64, stack_B=256
12565 * and max(main+A, main+A+B) < 512
12570 "calls: stack depth check using three frames. test3",
12573 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12574 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
12575 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12576 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
12577 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
12578 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
12579 BPF_MOV64_IMM(BPF_REG_0, 0),
12582 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
12584 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
12585 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
12587 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
12588 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
12589 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
12592 .prog_type = BPF_PROG_TYPE_XDP,
12593 /* stack_main=64, stack_A=224, stack_B=256
12594 * and max(main+A, main+A+B) > 512
12596 .errstr = "combined stack",
12600 "calls: stack depth check using three frames. test4",
12601 /* void main(void) {
12606 * void func1(int alloc_or_recurse) {
12607 * if (alloc_or_recurse) {
12608 * frame_pointer[-300] = 1;
12610 * func2(alloc_or_recurse);
12613 * void func2(int alloc_or_recurse) {
12614 * if (alloc_or_recurse) {
12615 * frame_pointer[-300] = 1;
12621 BPF_MOV64_IMM(BPF_REG_1, 0),
12622 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
12623 BPF_MOV64_IMM(BPF_REG_1, 1),
12624 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
12625 BPF_MOV64_IMM(BPF_REG_1, 1),
12626 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
12627 BPF_MOV64_IMM(BPF_REG_0, 0),
12630 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
12631 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12633 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
12636 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
12637 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12640 .prog_type = BPF_PROG_TYPE_XDP,
12642 .errstr = "combined stack",
12645 "calls: stack depth check using three frames. test5",
12648 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
12651 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
12654 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
12657 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
12660 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
12663 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
12666 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
12669 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
12672 BPF_MOV64_IMM(BPF_REG_0, 0),
12675 .prog_type = BPF_PROG_TYPE_XDP,
12676 .errstr = "call stack",
12680 "calls: spill into caller stack frame",
12682 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12683 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12685 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12687 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
12688 BPF_MOV64_IMM(BPF_REG_0, 0),
12691 .prog_type = BPF_PROG_TYPE_XDP,
12692 .errstr = "cannot spill",
12696 "calls: write into caller stack frame",
12698 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12700 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12701 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12702 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
12704 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
12705 BPF_MOV64_IMM(BPF_REG_0, 0),
12708 .prog_type = BPF_PROG_TYPE_XDP,
12713 "calls: write into callee stack frame",
12715 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12716 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
12718 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
12719 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
12722 .prog_type = BPF_PROG_TYPE_XDP,
12723 .errstr = "cannot return stack pointer",
12727 "calls: two calls with stack write and void return",
12730 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12731 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12732 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12733 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12735 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12736 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
12740 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12741 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12742 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12743 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12744 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12748 /* write into stack frame of main prog */
12749 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
12750 BPF_EXIT_INSN(), /* void return */
12752 .prog_type = BPF_PROG_TYPE_XDP,
12756 "calls: ambiguous return value",
12758 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12759 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
12760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12761 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12762 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12763 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12765 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
12766 BPF_MOV64_IMM(BPF_REG_0, 0),
12769 .errstr_unpriv = "allowed for root only",
12770 .result_unpriv = REJECT,
12771 .errstr = "R0 !read_ok",
12775 "calls: two calls that return map_value",
12778 /* pass fp-16, fp-8 into a function */
12779 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12781 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12782 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12783 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
12785 /* fetch map_value_ptr from the stack of this function */
12786 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12788 /* write into map value */
12789 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12790			/* fetch second map_value_ptr from the stack */
12791 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
12792 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12793 /* write into map value */
12794 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12795 BPF_MOV64_IMM(BPF_REG_0, 0),
12799 /* call 3rd function twice */
12800 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12801 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12802 /* first time with fp-8 */
12803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12804 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12805 /* second time with fp-16 */
12806 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12810 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12811 /* lookup from map */
12812 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12813 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12815 BPF_LD_MAP_FD(BPF_REG_1, 0),
12816 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12817 BPF_FUNC_map_lookup_elem),
12818 /* write map_value_ptr into stack frame of main prog */
12819 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12820 BPF_MOV64_IMM(BPF_REG_0, 0),
12821 BPF_EXIT_INSN(), /* return 0 */
12823 .prog_type = BPF_PROG_TYPE_XDP,
12824 .fixup_map_hash_8b = { 23 },
12828 "calls: two calls that return map_value with bool condition",
12831 /* pass fp-16, fp-8 into a function */
12832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12833 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12834 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12835 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12836 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12837 BPF_MOV64_IMM(BPF_REG_0, 0),
12841 /* call 3rd function twice */
12842 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12843 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12844 /* first time with fp-8 */
12845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
12846 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
12847 /* fetch map_value_ptr from the stack of this function */
12848 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
12849 /* write into map value */
12850 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12851 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12852 /* second time with fp-16 */
12853 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12854 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
12855			/* fetch second map_value_ptr from the stack */
12856 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
12857 /* write into map value */
12858 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12862 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12863 /* lookup from map */
12864 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12865 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12866 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12867 BPF_LD_MAP_FD(BPF_REG_1, 0),
12868 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12869 BPF_FUNC_map_lookup_elem),
12870 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12871 BPF_MOV64_IMM(BPF_REG_0, 0),
12872 BPF_EXIT_INSN(), /* return 0 */
12873 /* write map_value_ptr into stack frame of main prog */
12874 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12875 BPF_MOV64_IMM(BPF_REG_0, 1),
12876 BPF_EXIT_INSN(), /* return 1 */
12878 .prog_type = BPF_PROG_TYPE_XDP,
12879 .fixup_map_hash_8b = { 23 },
12883 "calls: two calls that return map_value with incorrect bool check",
12886 /* pass fp-16, fp-8 into a function */
12887 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12889 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12891 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12892 BPF_MOV64_IMM(BPF_REG_0, 0),
12896 /* call 3rd function twice */
12897 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12898 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12899 /* first time with fp-8 */
12900 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
12901 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
12902 /* fetch map_value_ptr from the stack of this function */
12903 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
12904 /* write into map value */
12905 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12906 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12907 /* second time with fp-16 */
12908 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12909 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12910			/* fetch second map_value_ptr from the stack */
12911 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
12912 /* write into map value */
12913 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12917 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12918 /* lookup from map */
12919 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12920 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12922 BPF_LD_MAP_FD(BPF_REG_1, 0),
12923 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12924 BPF_FUNC_map_lookup_elem),
12925 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12926 BPF_MOV64_IMM(BPF_REG_0, 0),
12927 BPF_EXIT_INSN(), /* return 0 */
12928 /* write map_value_ptr into stack frame of main prog */
12929 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12930 BPF_MOV64_IMM(BPF_REG_0, 1),
12931 BPF_EXIT_INSN(), /* return 1 */
12933 .prog_type = BPF_PROG_TYPE_XDP,
12934 .fixup_map_hash_8b = { 23 },
12936 .errstr = "invalid read from stack off -16+0 size 8",
12939 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
12942 /* pass fp-16, fp-8 into a function */
12943 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12945 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12947 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12948 BPF_MOV64_IMM(BPF_REG_0, 0),
12952 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12953 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12954 /* 1st lookup from map */
12955 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12956 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12957 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12958 BPF_LD_MAP_FD(BPF_REG_1, 0),
12959 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12960 BPF_FUNC_map_lookup_elem),
12961 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12962 BPF_MOV64_IMM(BPF_REG_8, 0),
12963 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12964 /* write map_value_ptr into stack frame of main prog at fp-8 */
12965 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12966 BPF_MOV64_IMM(BPF_REG_8, 1),
12968 /* 2nd lookup from map */
12969 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12971 BPF_LD_MAP_FD(BPF_REG_1, 0),
12972 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12973 BPF_FUNC_map_lookup_elem),
12974 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12975 BPF_MOV64_IMM(BPF_REG_9, 0),
12976 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12977 /* write map_value_ptr into stack frame of main prog at fp-16 */
12978 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12979 BPF_MOV64_IMM(BPF_REG_9, 1),
12981 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12982 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12983 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12984 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12985 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12986 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
12990 /* if arg2 == 1 do *arg1 = 0 */
12991 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12992 /* fetch map_value_ptr from the stack of this function */
12993 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12994 /* write into map value */
12995 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12997 /* if arg4 == 1 do *arg3 = 0 */
12998 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12999 /* fetch map_value_ptr from the stack of this function */
13000 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
13001 /* write into map value */
13002 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
13005 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13006 .fixup_map_hash_8b = { 12, 22 },
13008 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
13009 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13012 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
13015 /* pass fp-16, fp-8 into a function */
13016 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
13017 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
13018 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13019 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
13020 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13021 BPF_MOV64_IMM(BPF_REG_0, 0),
13025 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13026 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
13027 /* 1st lookup from map */
13028 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13029 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13031 BPF_LD_MAP_FD(BPF_REG_1, 0),
13032 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13033 BPF_FUNC_map_lookup_elem),
13034 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13035 BPF_MOV64_IMM(BPF_REG_8, 0),
13036 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13037 /* write map_value_ptr into stack frame of main prog at fp-8 */
13038 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13039 BPF_MOV64_IMM(BPF_REG_8, 1),
13041 /* 2nd lookup from map */
13042 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
13043 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13044 BPF_LD_MAP_FD(BPF_REG_1, 0),
13045 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
13046 BPF_FUNC_map_lookup_elem),
13047 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13048 BPF_MOV64_IMM(BPF_REG_9, 0),
13049 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13050 /* write map_value_ptr into stack frame of main prog at fp-16 */
13051 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
13052 BPF_MOV64_IMM(BPF_REG_9, 1),
13054 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
13055 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
13056 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
13057 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
13058 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
13059 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
13063 /* if arg2 == 1 do *arg1 = 0 */
13064 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
13065 /* fetch map_value_ptr from the stack of this function */
13066 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
13067 /* write into map value */
13068 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13070 /* if arg4 == 1 do *arg3 = 0 */
13071 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
13072 /* fetch map_value_ptr from the stack of this function */
13073 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
13074 /* write into map value */
13075 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13078 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13079 .fixup_map_hash_8b = { 12, 22 },
13083 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
13086 /* pass fp-16, fp-8 into a function */
13087 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
13088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
13089 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
13091 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
13092 BPF_MOV64_IMM(BPF_REG_0, 0),
13096 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13097 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
13098 /* 1st lookup from map */
13099 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
13100 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13101 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
13102 BPF_LD_MAP_FD(BPF_REG_1, 0),
13103 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13104 BPF_FUNC_map_lookup_elem),
13105 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13106 BPF_MOV64_IMM(BPF_REG_8, 0),
13107 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13108 /* write map_value_ptr into stack frame of main prog at fp-8 */
13109 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13110 BPF_MOV64_IMM(BPF_REG_8, 1),
13112 /* 2nd lookup from map */
13113 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
13115 BPF_LD_MAP_FD(BPF_REG_1, 0),
13116 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13117 BPF_FUNC_map_lookup_elem),
13118 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13119 BPF_MOV64_IMM(BPF_REG_9, 0), // 26
13120 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13121 /* write map_value_ptr into stack frame of main prog at fp-16 */
13122 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
13123 BPF_MOV64_IMM(BPF_REG_9, 1),
13125 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
13126 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
13127 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
13128 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
13129 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
13130 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
13131 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
13134 /* if arg2 == 1 do *arg1 = 0 */
13135 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
13136 /* fetch map_value_ptr from the stack of this function */
13137 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
13138 /* write into map value */
13139 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13141 /* if arg4 == 1 do *arg3 = 0 */
13142 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
13143 /* fetch map_value_ptr from the stack of this function */
13144 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
13145 /* write into map value */
13146 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
13147 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
13149 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13150 .fixup_map_hash_8b = { 12, 22 },
13152 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
13153 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13156 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
13159 /* pass fp-16, fp-8 into a function */
13160 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
13161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
13162 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13163 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
13164 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13165 BPF_MOV64_IMM(BPF_REG_0, 0),
13169 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13170 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
13171 /* 1st lookup from map */
13172 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13173 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13175 BPF_LD_MAP_FD(BPF_REG_1, 0),
13176 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13177 BPF_FUNC_map_lookup_elem),
13178 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
13179 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13180 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13181 BPF_MOV64_IMM(BPF_REG_8, 0),
13182 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13183 BPF_MOV64_IMM(BPF_REG_8, 1),
13185 /* 2nd lookup from map */
13186 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13187 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13188 BPF_LD_MAP_FD(BPF_REG_1, 0),
13189 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13190 BPF_FUNC_map_lookup_elem),
13191 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
13192 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
13193 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13194 BPF_MOV64_IMM(BPF_REG_9, 0),
13195 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13196 BPF_MOV64_IMM(BPF_REG_9, 1),
13198 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
13199 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13200 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
13201 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
13202 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
13203 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
13207 /* if arg2 == 1 do *arg1 = 0 */
13208 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
13209 /* fetch map_value_ptr from the stack of this function */
13210 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
13211 /* write into map value */
13212 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13214 /* if arg4 == 1 do *arg3 = 0 */
13215 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
13216 /* fetch map_value_ptr from the stack of this function */
13217 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
13218 /* write into map value */
13219 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13222 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13223 .fixup_map_hash_8b = { 12, 22 },
13227 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
13230 /* pass fp-16, fp-8 into a function */
13231 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
13232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
13233 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
13235 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13236 BPF_MOV64_IMM(BPF_REG_0, 0),
13240 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13241 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
13242 /* 1st lookup from map */
13243 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13244 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13245 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13246 BPF_LD_MAP_FD(BPF_REG_1, 0),
13247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13248 BPF_FUNC_map_lookup_elem),
13249 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
13250 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13251 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13252 BPF_MOV64_IMM(BPF_REG_8, 0),
13253 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13254 BPF_MOV64_IMM(BPF_REG_8, 1),
13256 /* 2nd lookup from map */
13257 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13258 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13259 BPF_LD_MAP_FD(BPF_REG_1, 0),
13260 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13261 BPF_FUNC_map_lookup_elem),
13262 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
13263 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
13264 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13265 BPF_MOV64_IMM(BPF_REG_9, 0),
13266 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13267 BPF_MOV64_IMM(BPF_REG_9, 1),
13269 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
13270 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
13272 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
13273 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
13274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
13278 /* if arg2 == 1 do *arg1 = 0 */
13279 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
13280 /* fetch map_value_ptr from the stack of this function */
13281 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
13282 /* write into map value */
13283 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13285 /* if arg4 == 0 do *arg3 = 0 */
13286 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
13287 /* fetch map_value_ptr from the stack of this function */
13288 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
13289 /* write into map value */
13290 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13293 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13294 .fixup_map_hash_8b = { 12, 22 },
13296 .errstr = "R0 invalid mem access 'inv'",
13299 "calls: pkt_ptr spill into caller stack",
13301 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13302 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13303 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
13307 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13308 offsetof(struct __sk_buff, data)),
13309 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13310 offsetof(struct __sk_buff, data_end)),
13311 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13313 /* spill unchecked pkt_ptr into stack of caller */
13314 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13315 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
13316 /* now the pkt range is verified, read pkt_ptr from stack */
13317 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
13318 /* write 4 bytes into packet */
13319 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13323 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13324 .retval = POINTER_VALUE,
13325 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13328 "calls: pkt_ptr spill into caller stack 2",
13330 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13332 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13333 /* Marking is still kept, but not in all cases safe. */
13334 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13335 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
13339 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13340 offsetof(struct __sk_buff, data)),
13341 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13342 offsetof(struct __sk_buff, data_end)),
13343 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13344 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13345 /* spill unchecked pkt_ptr into stack of caller */
13346 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13347 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
13348 /* now the pkt range is verified, read pkt_ptr from stack */
13349 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
13350 /* write 4 bytes into packet */
13351 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13354 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13355 .errstr = "invalid access to packet",
13357 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13360 "calls: pkt_ptr spill into caller stack 3",
13362 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13364 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13365 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13366 /* Marking is still kept and safe here. */
13367 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13368 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
13372 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13373 offsetof(struct __sk_buff, data)),
13374 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13375 offsetof(struct __sk_buff, data_end)),
13376 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13378 /* spill unchecked pkt_ptr into stack of caller */
13379 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13380 BPF_MOV64_IMM(BPF_REG_5, 0),
13381 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
13382 BPF_MOV64_IMM(BPF_REG_5, 1),
13383 /* now the pkt range is verified, read pkt_ptr from stack */
13384 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
13385 /* write 4 bytes into packet */
13386 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13387 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13390 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13393 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13396 "calls: pkt_ptr spill into caller stack 4",
13398 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13400 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13401 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13402 /* Check marking propagated. */
13403 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13404 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
13408 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13409 offsetof(struct __sk_buff, data)),
13410 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13411 offsetof(struct __sk_buff, data_end)),
13412 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13413 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13414 /* spill unchecked pkt_ptr into stack of caller */
13415 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13416 BPF_MOV64_IMM(BPF_REG_5, 0),
13417 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
13418 BPF_MOV64_IMM(BPF_REG_5, 1),
13419 /* don't read back pkt_ptr from stack here */
13420 /* write 4 bytes into packet */
13421 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13422 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13425 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13428 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13431 "calls: pkt_ptr spill into caller stack 5",
13433 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13435 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
13436 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13437 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13438 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
13442 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13443 offsetof(struct __sk_buff, data)),
13444 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13445 offsetof(struct __sk_buff, data_end)),
13446 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13447 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13448 BPF_MOV64_IMM(BPF_REG_5, 0),
13449 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
13450 /* spill checked pkt_ptr into stack of caller */
13451 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13452 BPF_MOV64_IMM(BPF_REG_5, 1),
13453 /* don't read back pkt_ptr from stack here */
13454 /* write 4 bytes into packet */
13455 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13456 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13459 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13460 .errstr = "same insn cannot be used with different",
13462 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13465 "calls: pkt_ptr spill into caller stack 6",
13467 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13468 offsetof(struct __sk_buff, data_end)),
13469 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13470 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13471 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13472 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13473 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13474 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
13478 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13479 offsetof(struct __sk_buff, data)),
13480 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13481 offsetof(struct __sk_buff, data_end)),
13482 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13484 BPF_MOV64_IMM(BPF_REG_5, 0),
13485 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
13486 /* spill checked pkt_ptr into stack of caller */
13487 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13488 BPF_MOV64_IMM(BPF_REG_5, 1),
13489 /* don't read back pkt_ptr from stack here */
13490 /* write 4 bytes into packet */
13491 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13492 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13495 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13496 .errstr = "R4 invalid mem access",
13498 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13501 "calls: pkt_ptr spill into caller stack 7",
13503 BPF_MOV64_IMM(BPF_REG_2, 0),
13504 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13506 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13508 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13509 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
13513 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13514 offsetof(struct __sk_buff, data)),
13515 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13516 offsetof(struct __sk_buff, data_end)),
13517 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13518 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13519 BPF_MOV64_IMM(BPF_REG_5, 0),
13520 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
13521 /* spill checked pkt_ptr into stack of caller */
13522 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13523 BPF_MOV64_IMM(BPF_REG_5, 1),
13524 /* don't read back pkt_ptr from stack here */
13525 /* write 4 bytes into packet */
13526 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13527 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13530 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13531 .errstr = "R4 invalid mem access",
13533 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13536 "calls: pkt_ptr spill into caller stack 8",
13538 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13539 offsetof(struct __sk_buff, data)),
13540 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13541 offsetof(struct __sk_buff, data_end)),
13542 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13543 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13544 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13546 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13548 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13549 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13550 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13551 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
13555 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13556 offsetof(struct __sk_buff, data)),
13557 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13558 offsetof(struct __sk_buff, data_end)),
13559 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13561 BPF_MOV64_IMM(BPF_REG_5, 0),
13562 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
13563 /* spill checked pkt_ptr into stack of caller */
13564 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13565 BPF_MOV64_IMM(BPF_REG_5, 1),
13566 /* don't read back pkt_ptr from stack here */
13567 /* write 4 bytes into packet */
13568 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13569 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13572 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13574 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13577 "calls: pkt_ptr spill into caller stack 9",
13579 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13580 offsetof(struct __sk_buff, data)),
13581 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13582 offsetof(struct __sk_buff, data_end)),
13583 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13584 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13585 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13587 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13589 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13590 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13591 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13592 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
13596 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13597 offsetof(struct __sk_buff, data)),
13598 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13599 offsetof(struct __sk_buff, data_end)),
13600 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13602 BPF_MOV64_IMM(BPF_REG_5, 0),
13603 /* spill unchecked pkt_ptr into stack of caller */
13604 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13605 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
13606 BPF_MOV64_IMM(BPF_REG_5, 1),
13607 /* don't read back pkt_ptr from stack here */
13608 /* write 4 bytes into packet */
13609 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13610 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13613 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13614 .errstr = "invalid access to packet",
13616 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13619 "calls: caller stack init to zero or map_value_or_null",
13621 BPF_MOV64_IMM(BPF_REG_0, 0),
13622 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13623 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13625 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13626 /* fetch map_value_or_null or const_zero from stack */
13627 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13628 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13629 /* store into map_value */
13630 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
13634 /* if (ctx == 0) return; */
13635 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
13636 /* else bpf_map_lookup() and *(fp - 8) = r0 */
13637 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
13638 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13640 BPF_LD_MAP_FD(BPF_REG_1, 0),
13641 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13642 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13643 BPF_FUNC_map_lookup_elem),
13644 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
13645 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13648 .fixup_map_hash_8b = { 13 },
13650 .prog_type = BPF_PROG_TYPE_XDP,
13653 "calls: stack init to zero and pruning",
13655 /* first make allocated_stack 16 byte */
13656 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
13657 /* now fork the execution such that the false branch
13658 * of JGT insn will be verified second and it skisp zero
13659 * init of fp-8 stack slot. If stack liveness marking
13660 * is missing live_read marks from call map_lookup
13661 * processing then pruning will incorrectly assume
13662 * that fp-8 stack slot was unused in the fall-through
13663 * branch and will accept the program incorrectly
13665 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
13666 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13667 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
13668 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13670 BPF_LD_MAP_FD(BPF_REG_1, 0),
13671 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13672 BPF_FUNC_map_lookup_elem),
13675 .fixup_map_hash_48b = { 6 },
13676 .errstr = "invalid indirect read from stack off -8+0 size 8",
13678 .prog_type = BPF_PROG_TYPE_XDP,
13681 "calls: two calls returning different map pointers for lookup (hash, array)",
13684 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
13686 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13688 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13689 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13690 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13691 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13693 BPF_FUNC_map_lookup_elem),
13694 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13695 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
13696 offsetof(struct test_val, foo)),
13697 BPF_MOV64_IMM(BPF_REG_0, 1),
13700 BPF_LD_MAP_FD(BPF_REG_0, 0),
13703 BPF_LD_MAP_FD(BPF_REG_0, 0),
13706 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13707 .fixup_map_hash_48b = { 13 },
13708 .fixup_map_array_48b = { 16 },
13713 "calls: two calls returning different map pointers for lookup (hash, map in map)",
13716 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
13718 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13720 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13721 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13722 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13723 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13724 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13725 BPF_FUNC_map_lookup_elem),
13726 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13727 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
13728 offsetof(struct test_val, foo)),
13729 BPF_MOV64_IMM(BPF_REG_0, 1),
13732 BPF_LD_MAP_FD(BPF_REG_0, 0),
13735 BPF_LD_MAP_FD(BPF_REG_0, 0),
13738 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13739 .fixup_map_in_map = { 16 },
13740 .fixup_map_array_48b = { 13 },
13742 .errstr = "R0 invalid mem access 'map_ptr'",
13745 "cond: two branches returning different map pointers for lookup (tail, tail)",
13747 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
13748 offsetof(struct __sk_buff, mark)),
13749 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
13750 BPF_LD_MAP_FD(BPF_REG_2, 0),
13751 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13752 BPF_LD_MAP_FD(BPF_REG_2, 0),
13753 BPF_MOV64_IMM(BPF_REG_3, 7),
13754 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13755 BPF_FUNC_tail_call),
13756 BPF_MOV64_IMM(BPF_REG_0, 1),
13759 .fixup_prog1 = { 5 },
13760 .fixup_prog2 = { 2 },
13761 .result_unpriv = REJECT,
13762 .errstr_unpriv = "tail_call abusing map_ptr",
13767 "cond: two branches returning same map pointers for lookup (tail, tail)",
13769 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
13770 offsetof(struct __sk_buff, mark)),
13771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
13772 BPF_LD_MAP_FD(BPF_REG_2, 0),
13773 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13774 BPF_LD_MAP_FD(BPF_REG_2, 0),
13775 BPF_MOV64_IMM(BPF_REG_3, 7),
13776 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13777 BPF_FUNC_tail_call),
13778 BPF_MOV64_IMM(BPF_REG_0, 1),
13781 .fixup_prog2 = { 2, 5 },
13782 .result_unpriv = ACCEPT,
13787 "search pruning: all branches should be verified (nop operation)",
13789 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13790 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13791 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
13792 BPF_LD_MAP_FD(BPF_REG_1, 0),
13793 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
13794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
13795 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
13796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
13797 BPF_MOV64_IMM(BPF_REG_4, 0),
13799 BPF_MOV64_IMM(BPF_REG_4, 1),
13800 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
13801 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
13802 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
13803 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
13804 BPF_MOV64_IMM(BPF_REG_6, 0),
13805 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
13808 .fixup_map_hash_8b = { 3 },
13809 .errstr = "R6 invalid mem access 'inv'",
13811 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
13814 "search pruning: all branches should be verified (invalid stack access)",
13816 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13818 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
13819 BPF_LD_MAP_FD(BPF_REG_1, 0),
13820 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
13821 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
13822 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
13823 BPF_MOV64_IMM(BPF_REG_4, 0),
13824 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
13825 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
13827 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
13828 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
13829 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
13832 .fixup_map_hash_8b = { 3 },
13833 .errstr = "invalid read from stack off -16+0 size 8",
13835 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
13838 "jit: lsh, rsh, arsh by 1",
13840 BPF_MOV64_IMM(BPF_REG_0, 1),
13841 BPF_MOV64_IMM(BPF_REG_1, 0xff),
13842 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
13843 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
13844 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
13846 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
13847 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
13848 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
13850 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
13851 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
13853 BPF_MOV64_IMM(BPF_REG_0, 2),
13860 "jit: mov32 for ldimm64, 1",
13862 BPF_MOV64_IMM(BPF_REG_0, 2),
13863 BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
13864 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
13865 BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
13866 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
13867 BPF_MOV64_IMM(BPF_REG_0, 1),
13874 "jit: mov32 for ldimm64, 2",
13876 BPF_MOV64_IMM(BPF_REG_0, 1),
13877 BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
13878 BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
13879 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
13880 BPF_MOV64_IMM(BPF_REG_0, 2),
13887 "jit: various mul tests",
13889 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
13890 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
13891 BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
13892 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
13893 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
13894 BPF_MOV64_IMM(BPF_REG_0, 1),
13896 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
13897 BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
13898 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
13899 BPF_MOV64_IMM(BPF_REG_0, 1),
13901 BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
13902 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
13903 BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
13904 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
13905 BPF_MOV64_IMM(BPF_REG_0, 1),
13907 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
13908 BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
13909 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
13910 BPF_MOV64_IMM(BPF_REG_0, 1),
13912 BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
13913 BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
13914 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
13915 BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
13916 BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
13917 BPF_MOV64_IMM(BPF_REG_0, 1),
13919 BPF_MOV64_IMM(BPF_REG_0, 2),
13926 "xadd/w check unaligned stack",
13928 BPF_MOV64_IMM(BPF_REG_0, 1),
13929 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13930 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
13931 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13935 .errstr = "misaligned stack access off",
13936 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13939 "xadd/w check unaligned map",
13941 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13942 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13944 BPF_LD_MAP_FD(BPF_REG_1, 0),
13945 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13946 BPF_FUNC_map_lookup_elem),
13947 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13949 BPF_MOV64_IMM(BPF_REG_1, 1),
13950 BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
13951 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
13954 .fixup_map_hash_8b = { 3 },
13956 .errstr = "misaligned value access off",
13957 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
13960 "xadd/w check unaligned pkt",
13962 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13963 offsetof(struct xdp_md, data)),
13964 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13965 offsetof(struct xdp_md, data_end)),
13966 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
13967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
13968 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
13969 BPF_MOV64_IMM(BPF_REG_0, 99),
13970 BPF_JMP_IMM(BPF_JA, 0, 0, 6),
13971 BPF_MOV64_IMM(BPF_REG_0, 1),
13972 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13973 BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
13974 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
13975 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
13976 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
13980 .errstr = "BPF_XADD stores into R2 pkt is not allowed",
13981 .prog_type = BPF_PROG_TYPE_XDP,
13982 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13985 "xadd/w check whether src/dst got mangled, 1",
13987 BPF_MOV64_IMM(BPF_REG_0, 1),
13988 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13989 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
13990 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13991 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13992 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13993 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13994 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13995 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13997 BPF_MOV64_IMM(BPF_REG_0, 42),
14001 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14005 "xadd/w check whether src/dst got mangled, 2",
14007 BPF_MOV64_IMM(BPF_REG_0, 1),
14008 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14009 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
14010 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
14011 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
14012 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
14013 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
14014 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
14015 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
14017 BPF_MOV64_IMM(BPF_REG_0, 42),
14021 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14025 "bpf_get_stack return R0 within range",
14027 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14028 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
14029 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
14030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
14031 BPF_LD_MAP_FD(BPF_REG_1, 0),
14032 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14033 BPF_FUNC_map_lookup_elem),
14034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
14035 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
14036 BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
14037 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14038 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
14039 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
14040 BPF_MOV64_IMM(BPF_REG_4, 256),
14041 BPF_EMIT_CALL(BPF_FUNC_get_stack),
14042 BPF_MOV64_IMM(BPF_REG_1, 0),
14043 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
14044 BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
14045 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
14046 BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
14047 BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
14048 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
14049 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
14050 BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
14051 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
14052 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
14053 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
14054 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
14055 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
14056 BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
14057 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
14058 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
14059 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14060 BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
14061 BPF_MOV64_IMM(BPF_REG_4, 0),
14062 BPF_EMIT_CALL(BPF_FUNC_get_stack),
14065 .fixup_map_hash_48b = { 4 },
14067 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
14070 "ld_abs: invalid op 1",
14072 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14073 BPF_LD_ABS(BPF_DW, 0),
14076 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14078 .errstr = "unknown opcode",
14081 "ld_abs: invalid op 2",
14083 BPF_MOV32_IMM(BPF_REG_0, 256),
14084 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14085 BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
14088 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14090 .errstr = "unknown opcode",
14093 "ld_abs: nmap reduced",
14095 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14096 BPF_LD_ABS(BPF_H, 12),
14097 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
14098 BPF_LD_ABS(BPF_H, 12),
14099 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
14100 BPF_MOV32_IMM(BPF_REG_0, 18),
14101 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
14102 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
14103 BPF_LD_IND(BPF_W, BPF_REG_7, 14),
14104 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
14105 BPF_MOV32_IMM(BPF_REG_0, 280971478),
14106 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
14107 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
14108 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
14109 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
14110 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
14111 BPF_LD_ABS(BPF_H, 12),
14112 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
14113 BPF_MOV32_IMM(BPF_REG_0, 22),
14114 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
14115 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
14116 BPF_LD_IND(BPF_H, BPF_REG_7, 14),
14117 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
14118 BPF_MOV32_IMM(BPF_REG_0, 17366),
14119 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
14120 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
14121 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
14122 BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
14123 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
14124 BPF_MOV32_IMM(BPF_REG_0, 256),
14126 BPF_MOV32_IMM(BPF_REG_0, 0),
14130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
14131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
14132 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
14134 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14139 "ld_abs: div + abs, test 1",
14141 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
14142 BPF_LD_ABS(BPF_B, 3),
14143 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
14144 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
14145 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
14146 BPF_LD_ABS(BPF_B, 4),
14147 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
14148 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
14152 10, 20, 30, 40, 50,
14154 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14159 "ld_abs: div + abs, test 2",
14161 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
14162 BPF_LD_ABS(BPF_B, 3),
14163 BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
14164 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
14165 BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
14166 BPF_LD_ABS(BPF_B, 128),
14167 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
14168 BPF_LD_IND(BPF_B, BPF_REG_8, -70),
14172 10, 20, 30, 40, 50,
14174 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14179 "ld_abs: div + abs, test 3",
14181 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
14182 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
14183 BPF_LD_ABS(BPF_B, 3),
14184 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
14188 10, 20, 30, 40, 50,
14190 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14195 "ld_abs: div + abs, test 4",
14197 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
14198 BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
14199 BPF_LD_ABS(BPF_B, 256),
14200 BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
14204 10, 20, 30, 40, 50,
14206 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14211 "ld_abs: vlan + abs, test 1",
14216 .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
14217 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14222 "ld_abs: vlan + abs, test 2",
14224 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14225 BPF_LD_ABS(BPF_B, 0),
14226 BPF_LD_ABS(BPF_H, 0),
14227 BPF_LD_ABS(BPF_W, 0),
14228 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
14229 BPF_MOV64_IMM(BPF_REG_6, 0),
14230 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
14231 BPF_MOV64_IMM(BPF_REG_2, 1),
14232 BPF_MOV64_IMM(BPF_REG_3, 2),
14233 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14234 BPF_FUNC_skb_vlan_push),
14235 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
14236 BPF_LD_ABS(BPF_B, 0),
14237 BPF_LD_ABS(BPF_H, 0),
14238 BPF_LD_ABS(BPF_W, 0),
14239 BPF_MOV64_IMM(BPF_REG_0, 42),
14245 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14250 "ld_abs: jump around ld_abs",
14255 .fill_helper = bpf_fill_jump_around_ld_abs,
14256 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14261 "ld_dw: xor semi-random 64 bit imms, test 1",
14264 .fill_helper = bpf_fill_rand_ld_dw,
14265 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14270 "ld_dw: xor semi-random 64 bit imms, test 2",
14273 .fill_helper = bpf_fill_rand_ld_dw,
14274 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14279 "ld_dw: xor semi-random 64 bit imms, test 3",
14282 .fill_helper = bpf_fill_rand_ld_dw,
14283 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14288 "ld_dw: xor semi-random 64 bit imms, test 4",
14291 .fill_helper = bpf_fill_rand_ld_dw,
14292 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14297 "pass unmodified ctx pointer to helper",
14299 BPF_MOV64_IMM(BPF_REG_2, 0),
14300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14301 BPF_FUNC_csum_update),
14302 BPF_MOV64_IMM(BPF_REG_0, 0),
14305 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14309 "reference tracking: leak potential reference",
14312 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
14315 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14316 .errstr = "Unreleased reference",
14320 "reference tracking: leak potential reference on stack",
14323 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
14324 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
14325 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
14326 BPF_MOV64_IMM(BPF_REG_0, 0),
14329 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14330 .errstr = "Unreleased reference",
14334 "reference tracking: leak potential reference on stack 2",
14337 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
14338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
14339 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
14340 BPF_MOV64_IMM(BPF_REG_0, 0),
14341 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
14344 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14345 .errstr = "Unreleased reference",
14349 "reference tracking: zero potential reference",
14352 BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
14355 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14356 .errstr = "Unreleased reference",
14360 "reference tracking: copy and zero potential references",
14363 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
14364 BPF_MOV64_IMM(BPF_REG_0, 0),
14365 BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
14368 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14369 .errstr = "Unreleased reference",
14373 "reference tracking: release reference without check",
14376 /* reference in r0 may be NULL */
14377 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14378 BPF_MOV64_IMM(BPF_REG_2, 0),
14379 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14382 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14383 .errstr = "type=sock_or_null expected=sock",
14387 "reference tracking: release reference",
14390 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14391 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
14392 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14395 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14399 "reference tracking: release reference 2",
14402 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14403 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
14405 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14408 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14412 "reference tracking: release reference twice",
14415 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14416 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14417 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
14418 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14420 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14423 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14424 .errstr = "type=inv expected=sock",
14428 "reference tracking: release reference twice inside branch",
14431 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14432 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14433 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
14434 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14435 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14436 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14439 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14440 .errstr = "type=inv expected=sock",
14444 "reference tracking: alloc, check, free in one subbranch",
14446 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14447 offsetof(struct __sk_buff, data)),
14448 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14449 offsetof(struct __sk_buff, data_end)),
14450 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
14451 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
14452 /* if (offsetof(skb, mark) > data_len) exit; */
14453 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
14455 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
14456 offsetof(struct __sk_buff, mark)),
14458 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
14459 /* Leak reference in R0 */
14461 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
14462 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14463 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14466 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14467 .errstr = "Unreleased reference",
14469 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
14472 "reference tracking: alloc, check, free in both subbranches",
14474 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14475 offsetof(struct __sk_buff, data)),
14476 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14477 offsetof(struct __sk_buff, data_end)),
14478 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
14479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
14480 /* if (offsetof(skb, mark) > data_len) exit; */
14481 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
14483 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
14484 offsetof(struct __sk_buff, mark)),
14486 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
14487 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
14488 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14489 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14491 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
14492 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14493 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14496 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14498 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
14501 "reference tracking in call: free reference in subprog",
14504 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
14505 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
14506 BPF_MOV64_IMM(BPF_REG_0, 0),
14510 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
14511 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
14512 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14515 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14519 "pass modified ctx pointer to helper, 1",
14521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
14522 BPF_MOV64_IMM(BPF_REG_2, 0),
14523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14524 BPF_FUNC_csum_update),
14525 BPF_MOV64_IMM(BPF_REG_0, 0),
14528 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14530 .errstr = "dereference of modified ctx ptr",
14533 "pass modified ctx pointer to helper, 2",
14535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
14536 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14537 BPF_FUNC_get_socket_cookie),
14538 BPF_MOV64_IMM(BPF_REG_0, 0),
14541 .result_unpriv = REJECT,
14543 .errstr_unpriv = "dereference of modified ctx ptr",
14544 .errstr = "dereference of modified ctx ptr",
14547 "pass modified ctx pointer to helper, 3",
14549 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
14550 BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
14551 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
14552 BPF_MOV64_IMM(BPF_REG_2, 0),
14553 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14554 BPF_FUNC_csum_update),
14555 BPF_MOV64_IMM(BPF_REG_0, 0),
14558 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14560 .errstr = "variable ctx access var_off=(0x0; 0x4)",
14563 "mov64 src == dst",
14565 BPF_MOV64_IMM(BPF_REG_2, 0),
14566 BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
14567 // Check bounds are OK
14568 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
14569 BPF_MOV64_IMM(BPF_REG_0, 0),
14572 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14576 "mov64 src != dst",
14578 BPF_MOV64_IMM(BPF_REG_3, 0),
14579 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
14580 // Check bounds are OK
14581 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
14582 BPF_MOV64_IMM(BPF_REG_0, 0),
14585 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14591 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
14592 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
14593 BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
14594 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
14595 BPF_MOV64_IMM(BPF_REG_0, 0),
14596 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
14597 BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
14598 BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
14599 BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
14600 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
14601 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
14602 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
14603 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
14607 .result_unpriv = ACCEPT,
14608 .insn_processed = 15,
14611 "masking, test out of bounds 1",
14613 BPF_MOV32_IMM(BPF_REG_1, 5),
14614 BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
14615 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14616 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14617 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14618 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14619 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14620 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14627 "masking, test out of bounds 2",
14629 BPF_MOV32_IMM(BPF_REG_1, 1),
14630 BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14631 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14632 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14633 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14634 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14635 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14636 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14643 "masking, test out of bounds 3",
14645 BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
14646 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14647 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14648 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14649 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14650 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14651 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14652 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14659 "masking, test out of bounds 4",
14661 BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
14662 BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14663 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14664 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14665 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14666 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14667 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14668 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14675 "masking, test out of bounds 5",
14677 BPF_MOV32_IMM(BPF_REG_1, -1),
14678 BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14679 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14680 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14681 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14682 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14683 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14684 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14691 "masking, test out of bounds 6",
14693 BPF_MOV32_IMM(BPF_REG_1, -1),
14694 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14695 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14696 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14697 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14698 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14699 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14700 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14707 "masking, test out of bounds 7",
14709 BPF_MOV64_IMM(BPF_REG_1, 5),
14710 BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
14711 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14712 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14713 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14714 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14715 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14716 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14723 "masking, test out of bounds 8",
14725 BPF_MOV64_IMM(BPF_REG_1, 1),
14726 BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14727 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14728 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14729 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14730 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14731 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14732 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14739 "masking, test out of bounds 9",
14741 BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
14742 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14743 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14744 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14745 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14746 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14747 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14748 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14755 "masking, test out of bounds 10",
14757 BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
14758 BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14759 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14760 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14761 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14762 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14763 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14764 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14771 "masking, test out of bounds 11",
14773 BPF_MOV64_IMM(BPF_REG_1, -1),
14774 BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14775 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14776 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14777 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14778 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14779 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14780 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14787 "masking, test out of bounds 12",
14789 BPF_MOV64_IMM(BPF_REG_1, -1),
14790 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14791 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14792 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14793 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14794 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14795 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14796 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14803 "masking, test in bounds 1",
14805 BPF_MOV32_IMM(BPF_REG_1, 4),
14806 BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
14807 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14808 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14809 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14810 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14811 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14812 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14819 "masking, test in bounds 2",
14821 BPF_MOV32_IMM(BPF_REG_1, 0),
14822 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14823 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14824 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14825 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14826 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14827 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14828 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14835 "masking, test in bounds 3",
14837 BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
14838 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14839 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14840 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14841 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14842 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14843 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14844 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14848 .retval = 0xfffffffe,
14851 "masking, test in bounds 4",
14853 BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
14854 BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
14855 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14856 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14857 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14858 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14859 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14860 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14867 "masking, test in bounds 5",
14869 BPF_MOV32_IMM(BPF_REG_1, 0),
14870 BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14871 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14872 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14873 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14874 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14875 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14876 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14883 "masking, test in bounds 6",
14885 BPF_MOV32_IMM(BPF_REG_1, 46),
14886 BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
14887 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14888 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14889 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14890 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14891 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14892 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14899 "masking, test in bounds 7",
14901 BPF_MOV64_IMM(BPF_REG_3, -46),
14902 BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
14903 BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
14904 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
14905 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
14906 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14907 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14908 BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
14909 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
14916 "masking, test in bounds 8",
14918 BPF_MOV64_IMM(BPF_REG_3, -47),
14919 BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
14920 BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
14921 BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
14922 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
14923 BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14924 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14925 BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
14926 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
14933 "reference tracking in call: free reference in subprog and outside",
14936 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
14937 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14938 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
14939 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14940 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14944 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
14945 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
14946 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14949 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14950 .errstr = "type=inv expected=sock",
14954 "reference tracking in call: alloc & leak reference in subprog",
14956 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
14957 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
14958 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
14959 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14960 BPF_MOV64_IMM(BPF_REG_0, 0),
14964 BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
14966 /* spill unchecked sk_ptr into stack of caller */
14967 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
14968 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14971 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14972 .errstr = "Unreleased reference",
14976 "reference tracking in call: alloc in subprog, release outside",
14978 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
14979 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
14980 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14981 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
14982 BPF_EMIT_CALL(BPF_FUNC_sk_release),
14987 BPF_EXIT_INSN(), /* return sk */
14989 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14990 .retval = POINTER_VALUE,
14994 "reference tracking in call: sk_ptr leak into caller stack",
14996 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
14997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
14998 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
14999 BPF_MOV64_IMM(BPF_REG_0, 0),
15003 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
15004 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
15005 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
15006 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
15007 /* spill unchecked sk_ptr into stack of caller */
15008 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
15009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
15010 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
15011 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
15018 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15019 .errstr = "Unreleased reference",
15023 "reference tracking in call: sk_ptr spill into caller stack",
15025 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
15026 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
15027 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
15028 BPF_MOV64_IMM(BPF_REG_0, 0),
15032 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
15033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
15034 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
15035 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
15036 /* spill unchecked sk_ptr into stack of caller */
15037 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
15038 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
15039 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
15040 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
15041 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
15042 /* now the sk_ptr is verified, free the reference */
15043 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
15044 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15051 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15055 "reference tracking: allow LD_ABS",
15057 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
15059 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15060 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
15061 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15062 BPF_LD_ABS(BPF_B, 0),
15063 BPF_LD_ABS(BPF_H, 0),
15064 BPF_LD_ABS(BPF_W, 0),
15067 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15071 "reference tracking: forbid LD_ABS while holding reference",
15073 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
15075 BPF_LD_ABS(BPF_B, 0),
15076 BPF_LD_ABS(BPF_H, 0),
15077 BPF_LD_ABS(BPF_W, 0),
15078 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15079 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
15080 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15083 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15084 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
15088 "reference tracking: allow LD_IND",
15090 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
15092 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15093 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
15094 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15095 BPF_MOV64_IMM(BPF_REG_7, 1),
15096 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
15097 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
15100 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15105 "reference tracking: forbid LD_IND while holding reference",
15107 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
15109 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
15110 BPF_MOV64_IMM(BPF_REG_7, 1),
15111 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
15112 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
15113 BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
15114 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
15115 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15118 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15119 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
15123 "reference tracking: check reference or tail call",
15125 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
15127 /* if (sk) bpf_sk_release() */
15128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15129 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
15130 /* bpf_tail_call() */
15131 BPF_MOV64_IMM(BPF_REG_3, 2),
15132 BPF_LD_MAP_FD(BPF_REG_2, 0),
15133 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
15134 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15135 BPF_FUNC_tail_call),
15136 BPF_MOV64_IMM(BPF_REG_0, 0),
15138 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15141 .fixup_prog1 = { 17 },
15142 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15146 "reference tracking: release reference then tail call",
15148 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
15150 /* if (sk) bpf_sk_release() */
15151 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
15153 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15154 /* bpf_tail_call() */
15155 BPF_MOV64_IMM(BPF_REG_3, 2),
15156 BPF_LD_MAP_FD(BPF_REG_2, 0),
15157 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
15158 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15159 BPF_FUNC_tail_call),
15160 BPF_MOV64_IMM(BPF_REG_0, 0),
15163 .fixup_prog1 = { 18 },
15164 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15168 "reference tracking: leak possible reference over tail call",
15170 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
15171 /* Look up socket and store in REG_6 */
15173 /* bpf_tail_call() */
15174 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15175 BPF_MOV64_IMM(BPF_REG_3, 2),
15176 BPF_LD_MAP_FD(BPF_REG_2, 0),
15177 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
15178 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15179 BPF_FUNC_tail_call),
15180 BPF_MOV64_IMM(BPF_REG_0, 0),
15181 /* if (sk) bpf_sk_release() */
15182 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15183 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
15184 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15187 .fixup_prog1 = { 16 },
15188 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15189 .errstr = "tail_call would lead to reference leak",
15193 "reference tracking: leak checked reference over tail call",
15195 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
15196 /* Look up socket and store in REG_6 */
15198 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15199 /* if (!sk) goto end */
15200 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
15201 /* bpf_tail_call() */
15202 BPF_MOV64_IMM(BPF_REG_3, 0),
15203 BPF_LD_MAP_FD(BPF_REG_2, 0),
15204 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
15205 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15206 BPF_FUNC_tail_call),
15207 BPF_MOV64_IMM(BPF_REG_0, 0),
15208 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15209 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15212 .fixup_prog1 = { 17 },
15213 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15214 .errstr = "tail_call would lead to reference leak",
15218 "reference tracking: mangle and release sock_or_null",
15221 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
15223 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
15224 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15227 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15228 .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
15232 "reference tracking: mangle and release sock",
15235 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15236 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
15237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
15238 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15241 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15242 .errstr = "R1 pointer arithmetic on sock prohibited",
15246 "reference tracking: access member",
15249 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15250 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
15251 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
15252 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15253 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15256 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15260 "reference tracking: write to member",
15263 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15264 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
15265 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15266 BPF_LD_IMM64(BPF_REG_2, 42),
15267 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
15268 offsetof(struct bpf_sock, mark)),
15269 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15270 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15271 BPF_LD_IMM64(BPF_REG_0, 0),
15274 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15275 .errstr = "cannot write into socket",
15279 "reference tracking: invalid 64-bit access of member",
15282 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15283 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
15284 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
15285 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15286 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15289 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15290 .errstr = "invalid bpf_sock access off=0 size=8",
15294 "reference tracking: access after release",
15297 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15298 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
15299 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15300 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
15303 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15304 .errstr = "!read_ok",
15308 "reference tracking: direct access for lookup",
15310 /* Check that the packet is at least 64B long */
15311 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
15312 offsetof(struct __sk_buff, data)),
15313 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
15314 offsetof(struct __sk_buff, data_end)),
15315 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
15316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
15317 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
15318 /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
15319 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
15320 BPF_MOV64_IMM(BPF_REG_4, 0),
15321 BPF_MOV64_IMM(BPF_REG_5, 0),
15322 BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
15323 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15324 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
15325 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
15326 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15327 BPF_EMIT_CALL(BPF_FUNC_sk_release),
15330 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15334 "calls: ctx read at start of subprog",
15336 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
15337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
15338 BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
15339 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15340 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
15341 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15343 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
15344 BPF_MOV64_IMM(BPF_REG_0, 0),
15347 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15348 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
15349 .result_unpriv = REJECT,
15353 "check wire_len is not readable by sockets",
15355 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
15356 offsetof(struct __sk_buff, wire_len)),
15359 .errstr = "invalid bpf_context access",
15363 "check wire_len is readable by tc classifier",
15365 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
15366 offsetof(struct __sk_buff, wire_len)),
15369 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15373 "check wire_len is not writable by tc classifier",
15375 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
15376 offsetof(struct __sk_buff, wire_len)),
15379 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15380 .errstr = "invalid bpf_context access",
15381 .errstr_unpriv = "R1 leaks addr",
15385 "calls: cross frame pruning",
15387 /* r8 = !!random();
15390 * do something bad;
15392 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15393 BPF_FUNC_get_prandom_u32),
15394 BPF_MOV64_IMM(BPF_REG_8, 0),
15395 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
15396 BPF_MOV64_IMM(BPF_REG_8, 1),
15397 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
15398 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
15399 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
15400 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
15401 BPF_MOV64_IMM(BPF_REG_0, 0),
15403 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
15406 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15407 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
15408 .errstr = "!read_ok",
15412 "jset: functional",
15415 BPF_MOV64_IMM(BPF_REG_0, 0),
15416 /* prep for direct packet access via r2 */
15417 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
15418 offsetof(struct __sk_buff, data)),
15419 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
15420 offsetof(struct __sk_buff, data_end)),
15421 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
15422 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
15423 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
15426 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
15428 /* reg, bit 63 or bit 0 set, taken */
15429 BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
15430 BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
15433 /* reg, bit 62, not taken */
15434 BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
15435 BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
15436 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
15439 /* imm, any bit set, taken */
15440 BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
15443 /* imm, bit 31 set, taken */
15444 BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
15447 /* all good - return r0 == 2 */
15448 BPF_MOV64_IMM(BPF_REG_0, 2),
15451 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15456 .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
15459 .data64 = { (1ULL << 63) | (1U << 31), }
15462 .data64 = { (1ULL << 31) | (1U << 0), }
15465 .data64 = { (__u32)-1, }
15468 .data64 = { ~0x4000000000000000ULL, }
15474 .data64 = { ~0ULL, }
15479 "jset: sign-extend",
15482 BPF_MOV64_IMM(BPF_REG_0, 0),
15483 /* prep for direct packet access via r2 */
15484 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
15485 offsetof(struct __sk_buff, data)),
15486 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
15487 offsetof(struct __sk_buff, data_end)),
15488 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
15489 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
15490 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
15493 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
15495 BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
15498 BPF_MOV64_IMM(BPF_REG_0, 2),
15501 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
15504 .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
15507 "jset: known const compare",
15509 BPF_MOV64_IMM(BPF_REG_0, 1),
15510 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
15511 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15514 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15515 .retval_unpriv = 1,
15516 .result_unpriv = ACCEPT,
15521 "jset: known const compare bad",
15523 BPF_MOV64_IMM(BPF_REG_0, 0),
15524 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
15525 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15528 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15529 .errstr_unpriv = "!read_ok",
15530 .result_unpriv = REJECT,
15531 .errstr = "!read_ok",
15535 "jset: unknown const compare taken",
15537 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15538 BPF_FUNC_get_prandom_u32),
15539 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
15540 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
15541 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15544 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15545 .errstr_unpriv = "!read_ok",
15546 .result_unpriv = REJECT,
15547 .errstr = "!read_ok",
15551 "jset: unknown const compare not taken",
15553 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15554 BPF_FUNC_get_prandom_u32),
15555 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
15556 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15559 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15560 .errstr_unpriv = "!read_ok",
15561 .result_unpriv = REJECT,
15562 .errstr = "!read_ok",
15566 "jset: half-known const compare",
15568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15569 BPF_FUNC_get_prandom_u32),
15570 BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
15571 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
15572 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15573 BPF_MOV64_IMM(BPF_REG_0, 0),
15576 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15577 .result_unpriv = ACCEPT,
15583 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15584 BPF_FUNC_get_prandom_u32),
15585 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15586 BPF_MOV64_IMM(BPF_REG_0, 0),
15587 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
15588 BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
15589 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
15590 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15592 BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
15594 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
15595 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15598 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15599 .result_unpriv = ACCEPT,
/*
 * Scan backwards from the end of the fixed-size insns[] array for the
 * last instruction that is not all-zero; used by do_test_single() to
 * compute the effective program length passed to the verifier.
 * (Declaration of "len", the loop break and the return are elided in
 * this view.)
 */
15604 static int probe_filter_length(const struct bpf_insn *fp)
15608 	for (len = MAX_INSNS - 1; len > 0; --len)
15609 		if (fp[len].code != 0 || fp[len].imm != 0)
/*
 * Create a test map with the given type and geometry.  Hash maps get
 * BPF_F_NO_PREALLOC so elements are allocated lazily; all other types
 * pass no flags.  Returns the map fd (negative on failure; the error
 * is printed but not fatal here — callers check the fd).
 * NOTE(review): the error message says "hash map" even when a
 * non-hash map type was requested.
 */
15614 static int create_map(uint32_t type, uint32_t size_key,
15615 		      uint32_t size_value, uint32_t max_elem)
15619 	fd = bpf_create_map(type, size_key, size_value, max_elem,
15620 			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
15622 		printf("Failed to create hash map '%s'!\n", strerror(errno));
/*
 * Seed map slot "index" with a recognizable struct test_val payload
 * (.index = 28, one marker word in .foo[6]) so tests that read the
 * value back have known contents.  Asserts on update failure since
 * this is test scaffolding, not production code.
 */
15627 static void update_map(int fd, int index)
15629 	struct test_val value = {
15630 		.index = (6 + 1) * sizeof(int),
15631 		.foo[6] = 0xabcdef12,
15634 	assert(!bpf_map_update_elem(fd, &index, &value, 0));
/*
 * Load a trivial program (just "return 42") of the requested type.
 * Used as the default entry in the prog arrays built by
 * create_prog_array().  Returns the prog fd from bpf_load_program().
 */
15637 static int create_prog_dummy1(enum bpf_prog_type prog_type)
15639 	struct bpf_insn prog[] = {
15640 		BPF_MOV64_IMM(BPF_REG_0, 42),
15644 	return bpf_load_program(prog_type, prog,
15645 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
/*
 * Load a program that tail-calls slot "idx" of prog array "mfd" and
 * falls through to "return 41" if the tail call fails.  Together with
 * create_prog_dummy1() this lets tests distinguish (by retval) which
 * prog-array slot actually ran.  Returns the prog fd.
 */
15648 static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
15650 	struct bpf_insn prog[] = {
15651 		BPF_MOV64_IMM(BPF_REG_3, idx),
15652 		BPF_LD_MAP_FD(BPF_REG_2, mfd),
15653 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15654 			     BPF_FUNC_tail_call),
15655 		BPF_MOV64_IMM(BPF_REG_0, 41),
15659 	return bpf_load_program(prog_type, prog,
15660 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
/*
 * Build a BPF_MAP_TYPE_PROG_ARRAY of max_elem slots and populate it
 * with the two dummy programs: dummy1 at p1key (elided from this view)
 * and the tail-calling dummy2 at p2key.  Returns the prog-array map fd
 * on success; error/cleanup paths are elided here.
 */
15663 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
15667 	int mfd, p1fd, p2fd;
15669 	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
15670 			     sizeof(int), max_elem, 0);
15672 		printf("Failed to create prog array '%s'!\n", strerror(errno));
15676 	p1fd = create_prog_dummy1(prog_type);
15677 	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
15678 	if (p1fd < 0 || p2fd < 0)
15680 	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
15682 	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
/*
 * Create a one-element BPF_MAP_TYPE_ARRAY_OF_MAPS whose inner map
 * template is a one-element int array.  The inner fd is only needed to
 * establish the outer map's value type, so it is closed before
 * returning the outer map fd (which is negative on failure).
 */
15695 static int create_map_in_map(void)
15697 	int inner_map_fd, outer_map_fd;
15699 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
15700 				      sizeof(int), 1, 0);
15701 	if (inner_map_fd < 0) {
15702 		printf("Failed to create array '%s'!\n", strerror(errno));
15703 		return inner_map_fd;
15706 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
15707 					     sizeof(int), inner_map_fd, 1, 0);
15708 	if (outer_map_fd < 0)
15709 		printf("Failed to create array of maps '%s'!\n",
	/* inner fd no longer needed once the outer map holds the template */
15712 	close(inner_map_fd);
15714 	return outer_map_fd;
/*
 * Create a (per-cpu) cgroup-storage map keyed by
 * struct bpf_cgroup_storage_key with a TEST_DATA_LEN-byte value.
 * max_entries is 0 because cgroup storage sizes itself per attached
 * cgroup.  Returns the map fd; failures are printed, not fatal.
 */
15717 static int create_cgroup_storage(bool percpu)
15719 	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
15720 		BPF_MAP_TYPE_CGROUP_STORAGE;
15723 	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
15724 			    TEST_DATA_LEN, 0, 0);
15726 		printf("Failed to create cgroup storage '%s'!\n",
/* Shared verifier-log buffer (UINT_MAX >> 8 ≈ 16 MiB), reused per test. */
15732 static char bpf_vlog[UINT_MAX >> 8];
/*
 * Patch a test's instruction stream before loading: for each fixup
 * offset list recorded in the test case, create the matching resource
 * (hash/array map, prog array, map-in-map, cgroup storage, sockmap/
 * sockhash, xskmap, stacktrace map) and write its fd into the insn at
 * every listed offset.  Each created fd lands in a fixed map_fds[]
 * slot so the caller can close them after the run.  A zero offset
 * terminates each list, so insn 0 can never be fixed up — presumably
 * no test needs that.
 */
15734 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
15735 			  struct bpf_insn *prog, int *map_fds)
15737 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
15738 	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
15739 	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
15740 	int *fixup_map_array_48b = test->fixup_map_array_48b;
15741 	int *fixup_map_sockmap = test->fixup_map_sockmap;
15742 	int *fixup_map_sockhash = test->fixup_map_sockhash;
15743 	int *fixup_map_xskmap = test->fixup_map_xskmap;
15744 	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
15745 	int *fixup_prog1 = test->fixup_prog1;
15746 	int *fixup_prog2 = test->fixup_prog2;
15747 	int *fixup_map_in_map = test->fixup_map_in_map;
15748 	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
15749 	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
	/* some tests generate their instructions programmatically */
15751 	if (test->fill_helper)
15752 		test->fill_helper(test);
15754 	/* Allocating HTs with 1 elem is fine here, since we only test
15755 	 * for verifier and not do a runtime lookup, so the only thing
15756 	 * that really matters is value size in this case.
	 */
15758 	if (*fixup_map_hash_8b) {
15759 		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
15760 					sizeof(long long), 1);
15762 			prog[*fixup_map_hash_8b].imm = map_fds[0];
15763 			fixup_map_hash_8b++;
15764 		} while (*fixup_map_hash_8b);
15767 	if (*fixup_map_hash_48b) {
15768 		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
15769 					sizeof(struct test_val), 1);
15771 			prog[*fixup_map_hash_48b].imm = map_fds[1];
15772 			fixup_map_hash_48b++;
15773 		} while (*fixup_map_hash_48b);
15776 	if (*fixup_map_hash_16b) {
15777 		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
15778 					sizeof(struct other_val), 1);
15780 			prog[*fixup_map_hash_16b].imm = map_fds[2];
15781 			fixup_map_hash_16b++;
15782 		} while (*fixup_map_hash_16b);
	/* array map gets pre-seeded so value reads are meaningful */
15785 	if (*fixup_map_array_48b) {
15786 		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
15787 					sizeof(struct test_val), 1);
15788 		update_map(map_fds[3], 0);
15790 			prog[*fixup_map_array_48b].imm = map_fds[3];
15791 			fixup_map_array_48b++;
15792 		} while (*fixup_map_array_48b);
15795 	if (*fixup_prog1) {
15796 		map_fds[4] = create_prog_array(prog_type, 4, 0);
15798 			prog[*fixup_prog1].imm = map_fds[4];
15800 		} while (*fixup_prog1);
15803 	if (*fixup_prog2) {
15804 		map_fds[5] = create_prog_array(prog_type, 8, 7);
15806 			prog[*fixup_prog2].imm = map_fds[5];
15808 		} while (*fixup_prog2);
15811 	if (*fixup_map_in_map) {
15812 		map_fds[6] = create_map_in_map();
15814 			prog[*fixup_map_in_map].imm = map_fds[6];
15815 			fixup_map_in_map++;
15816 		} while (*fixup_map_in_map);
15819 	if (*fixup_cgroup_storage) {
15820 		map_fds[7] = create_cgroup_storage(false);
15822 			prog[*fixup_cgroup_storage].imm = map_fds[7];
15823 			fixup_cgroup_storage++;
15824 		} while (*fixup_cgroup_storage);
15827 	if (*fixup_percpu_cgroup_storage) {
15828 		map_fds[8] = create_cgroup_storage(true);
15830 			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
15831 			fixup_percpu_cgroup_storage++;
15832 		} while (*fixup_percpu_cgroup_storage);
15834 	if (*fixup_map_sockmap) {
15835 		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
15838 			prog[*fixup_map_sockmap].imm = map_fds[9];
15839 			fixup_map_sockmap++;
15840 		} while (*fixup_map_sockmap);
15842 	if (*fixup_map_sockhash) {
15843 		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
15846 			prog[*fixup_map_sockhash].imm = map_fds[10];
15847 			fixup_map_sockhash++;
15848 		} while (*fixup_map_sockhash);
15850 	if (*fixup_map_xskmap) {
15851 		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
15854 			prog[*fixup_map_xskmap].imm = map_fds[11];
15855 			fixup_map_xskmap++;
15856 		} while (*fixup_map_xskmap);
15858 	if (*fixup_map_stacktrace) {
15859 		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
15862 			prog[*fixup_map_stacktrace].imm = map_fds[12];
15863 			fixup_map_stacktrace++;
15864 		} while (*fixup_map_stacktrace);
/*
 * Raise (admin=true) or drop (admin=false) the *effective*
 * CAP_SYS_ADMIN flag of the current process via libcap.  Only the
 * effective set is touched, so a dropped capability can be re-raised
 * later for the privileged half of each test.  Errors are reported
 * with perror(); return-value handling is elided in this view.
 */
15868 static int set_admin(bool admin)
15871 	const cap_value_t cap_val = CAP_SYS_ADMIN;
15874 	caps = cap_get_proc();
15876 		perror("cap_get_proc");
15879 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
15880 				admin ? CAP_SET : CAP_CLEAR)) {
15881 		perror("cap_set_flag");
15884 	if (cap_set_proc(caps)) {
15885 		perror("cap_set_proc");
15890 	if (cap_free(caps))
15891 		perror("cap_free");
/*
 * Execute a loaded program once via BPF_PROG_TEST_RUN and compare its
 * retval with expected_val.  524 is the kernel-internal ENOTSUPP (not
 * in uapi errno.h, hence the magic number); that and EPERM (presumably
 * the unprivileged case — confirm) are tolerated as "can't run here"
 * rather than failures.  POINTER_VALUE expectations skip the retval
 * comparison since kernel pointers can't be predicted.
 */
15895 static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
15896 			    void *data, size_t size_data)
15898 	__u8 tmp[TEST_DATA_LEN << 2];
15899 	__u32 size_tmp = sizeof(tmp);
15905 	err = bpf_prog_test_run(fd_prog, 1, data, size_data,
15906 				tmp, &size_tmp, &retval, NULL);
15909 	if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
15910 		printf("Unexpected bpf_prog_test_run error ");
15913 	if (!err && retval != expected_val &&
15914 	    expected_val != POINTER_VALUE) {
15915 		printf("FAIL retval %d != %d ", retval, expected_val);
/*
 * Run one test case end to end: apply map/prog fixups, load the
 * program through the verifier with the flags the test requests, and
 * check the outcome — accept/reject, expected error substring in the
 * verifier log, optional insn_processed count, and (when loadable and
 * executable) the runtime retval(s) via do_prog_test_run().  Bumps
 * *passes or *errors.  Unprivileged expectations (result_unpriv,
 * errstr_unpriv, retval_unpriv) override the defaults when unpriv.
 */
15922 static void do_test_single(struct bpf_test *test, bool unpriv,
15923 			   int *passes, int *errors)
15925 	int fd_prog, expected_ret, alignment_prevented_execution;
15926 	int prog_len, prog_type = test->prog_type;
15927 	struct bpf_insn *prog = test->insns;
15928 	int run_errs, run_successes;
15929 	int map_fds[MAX_NR_MAPS];
15930 	const char *expected_err;
15934 	for (i = 0; i < MAX_NR_MAPS; i++)
	/* prog_type 0 means "unspecified": default to socket filter */
15938 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
15939 	do_test_fixup(test, prog_type, prog, map_fds);
15940 	prog_len = probe_filter_length(prog);
15943 	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
15944 		pflags |= BPF_F_STRICT_ALIGNMENT;
15945 	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
15946 		pflags |= BPF_F_ANY_ALIGNMENT;
15947 	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
15948 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
15950 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
15951 		       test->result_unpriv : test->result;
15952 	expected_err = unpriv && test->errstr_unpriv ?
15953 		       test->errstr_unpriv : test->errstr;
15955 	alignment_prevented_execution = 0;
15957 	if (expected_ret == ACCEPT) {
15959 			printf("FAIL\nFailed to load prog '%s'!\n",
	/* on strict-alignment arches some accepted progs can't be run */
15963 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15964 		if (fd_prog >= 0 &&
15965 		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
15966 			alignment_prevented_execution = 1;
15969 		if (fd_prog >= 0) {
15970 			printf("FAIL\nUnexpected success to load!\n");
15973 		if (!strstr(bpf_vlog, expected_err)) {
15974 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
15975 			       expected_err, bpf_vlog);
15980 	if (test->insn_processed) {
15981 		uint32_t insn_processed;
	/* NOTE(review): strstr() returns NULL if the log lacks
	 * "processed "; proc + 10 would then be undefined behavior —
	 * a NULL guard here would be safer.
	 */
15984 		proc = strstr(bpf_vlog, "processed ");
15985 		insn_processed = atoi(proc + 10);
15986 		if (test->insn_processed != insn_processed) {
15987 			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
15988 			       insn_processed, test->insn_processed);
15995 	if (!alignment_prevented_execution && fd_prog >= 0) {
15996 		uint32_t expected_val;
16000 			expected_val = unpriv && test->retval_unpriv ?
16001 				test->retval_unpriv : test->retval;
16003 			err = do_prog_test_run(fd_prog, unpriv, expected_val,
16004 					       test->data, sizeof(test->data));
	/* multi-run tests supply one data/retval pair per run */
16011 		for (i = 0; i < test->runs; i++) {
16012 			if (unpriv && test->retvals[i].retval_unpriv)
16013 				expected_val = test->retvals[i].retval_unpriv;
16015 				expected_val = test->retvals[i].retval;
16017 			err = do_prog_test_run(fd_prog, unpriv, expected_val,
16018 					       test->retvals[i].data,
16019 					       sizeof(test->retvals[i].data));
16021 				printf("(run %d/%d) ", i + 1, test->runs);
16031 	if (run_successes > 1)
16032 		printf("%d cases ", run_successes);
16034 	if (alignment_prevented_execution)
16035 		printf(" (NOTE: not executed due to unknown alignment)");
	/* release every fd created by do_test_fixup() */
16043 	for (i = 0; i < MAX_NR_MAPS; i++)
16049 	printf("%s", bpf_vlog);
/* Return true when the current process has CAP_SYS_ADMIN in its
 * effective capability set; used to decide whether tests run as
 * privileged or unprivileged.  All libcap failures are reported via
 * perror() and result in "not admin".
 * NOTE(review): several lines (braces, `cap_t caps;` declaration,
 * early `return false;` statements) are elided in this excerpt.
 */
static bool is_admin(void)
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
#ifdef CAP_IS_SUPPORTED
	/* NOTE(review): probes CAP_SETFCAP here although the flag queried
	 * below is CAP_SYS_ADMIN, and the perror tag says "cap_get_flag"
	 * for a CAP_IS_SUPPORTED failure — looks inconsistent; confirm
	 * against upstream intent before changing. */
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("cap_get_flag");
	/* snapshot this process's capability state */
	caps = cap_get_proc();
		perror("cap_get_proc");
	/* query the effective CAP_SYS_ADMIN bit into sysadmin */
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	if (cap_free(caps))
		perror("cap_free");
	return (sysadmin == CAP_SET);
/* Read the kernel.unprivileged_bpf_disabled sysctl and set the global
 * unpriv_disabled accordingly.  If the sysctl file cannot be opened,
 * conservatively assume unprivileged BPF is disabled.
 * NOTE(review): braces, the fd/buf declarations and the fclose() call
 * are elided in this excerpt.
 */
static void get_unpriv_disabled()
	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
		unpriv_disabled = true;
	/* sysctl value is a single digit; any non-zero means disabled */
	if (fgets(buf, 2, fd) == buf && atoi(buf))
		unpriv_disabled = true;
16093 static bool test_as_unpriv(struct bpf_test *test)
16095 return !test->prog_type ||
16096 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
16097 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
/* Run tests[from..to) — each test once as unprivileged (when its program
 * type allows it) and once as privileged — and print a summary.
 * Returns EXIT_FAILURE if any test failed, EXIT_SUCCESS otherwise.
 * NOTE(review): braces and several branch bodies (skip counting, the
 * privileged/unprivileged capability toggling) are elided here.
 */
static int do_test(bool unpriv, unsigned int from, unsigned int to)
	int i, passes = 0, errors = 0, skips = 0;
	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Program types that are not supported by non-root we
		 * (comment continues on an elided line) */
		if (test_as_unpriv(test) && unpriv_disabled) {
			/* sysctl forbids unprivileged BPF: skip the /u pass */
			printf("#%d/u %s SKIP\n", i, test->descr);
		} else if (test_as_unpriv(test)) {
			/* run the unprivileged variant of this test */
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
		/* privileged pass: skipped when running as non-root */
			printf("#%d/p %s SKIP\n", i, test->descr);
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
/* Entry point.  Optional argv: either two indices selecting a sub-range
 * of tests [l, u] or a single test index; defaults to the whole table.
 * Refuses to run unprivileged when the sysctl disables unprivileged BPF.
 * NOTE(review): braces, the range-assignment statements and the closing
 * of the function are elided in this excerpt.
 */
int main(int argc, char **argv)
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();
		/* two trailing args: lower and upper test index */
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);
		/* both must be in range; assignments to from/to elided */
		if (l < to && u < to) {
	} else if (argc == 2) {
		/* single trailing arg: run exactly one test */
		unsigned int t = atoi(argv[argc - 1]);
	get_unpriv_disabled();
	if (unpriv && unpriv_disabled) {
		printf("Cannot run as unprivileged user with sysctl %s.\n",
		return EXIT_FAILURE;
	/* seed the pseudo-random helper used by generated test programs */
	bpf_semi_rand_init();
	return do_test(unpriv, from, to);