1 // SPDX-License-Identifier: GPL-2.0
4 #include <bpf/bpf_helpers.h>
/* BPF map definitions.
 * NOTE(review): this file is a sampled excerpt — the opening
 * 'struct {' and closing '} map SEC(".maps");' of the first (array)
 * map are not visible here; only its type/max_entries fields are.
 */
8 __uint(type, BPF_MAP_TYPE_ARRAY);
9 __uint(max_entries, 8);
/* User ring buffer map, drained by unsafe_ringbuf_drain() below via
 * bpf_user_ringbuf_drain().
 */
15 __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
16 __uint(max_entries, 8);
17 } ringbuf SEC(".maps");
/* Forward declaration only: find_vma_cb() below receives a
 * struct vm_area_struct pointer but never dereferences it here.
 */
19 struct vm_area_struct;
/* Two-element lookup table. Several test programs below return
 * choice_arr[ctx.i]; the __failure messages mentioning
 * "value_size=2" indicate the verifier must prove the index stays
 * within [0, 1].
 */
31 __u8 choice_arr[2] = { 0, 1 };
/* bpf_loop() callback for unsafe_on_2nd_iter below. It overwrites
 * ctx->buf with the scalar 0xDEAD and then uses that value as the
 * destination of bpf_probe_read_user() — presumably so a later
 * iteration passes a scalar where a pointer is required.
 * NOTE(review): interior lines (braces, returns) are missing from
 * this excerpt.
 */
33 static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx)
36 ctx->buf = (char *)(0xDEAD);
40 if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE)))
/* Expects the verifier to reject the scalar ctx->buf (set to 0xDEAD
 * by unsafe_on_2nd_iter_cb) being used as a pointer argument:
 * "R1 type=scalar expected=fp".
 * NOTE(review): the SEC() line and the declaration of 'buf' are not
 * visible in this excerpt.
 */
47 __failure __msg("R1 type=scalar expected=fp")
48 int unsafe_on_2nd_iter(void *unused)
51 struct buf_context loop_ctx = { .buf = buf };
53 bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0);
/* bpf_loop() callback for unsafe_on_zero_iter below.
 * NOTE(review): the body is not visible in this excerpt; presumably
 * it rewrites ctx->i — verify against the full file.
 */
57 static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
/* loop_ctx.i starts at 32 — out of bounds for choice_arr[2]. The
 * verifier must not assume the callback ran and clamped it, hence
 * the expected failure at off=32.
 */
64 __failure __msg("invalid access to map value, value_size=2 off=32 size=1")
65 int unsafe_on_zero_iter(void *unused)
67 struct num_context loop_ctx = { .i = 32 };
69 bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
70 return choice_arr[loop_ctx.i];
/* bpf_loop() callback for widening below.
 * NOTE(review): body not visible; per the comment in widening(), it
 * changes ctx->i but leaves ctx->j untouched — confirm in full file.
 */
73 static int widening_cb(__u32 idx, struct num_context *ctx)
/* Checks that verifier state widening is applied only to fields the
 * callback actually modifies: .j stays 1, so choice_arr[loop_ctx.j]
 * must verify. NOTE(review): the SEC()/__success annotations are not
 * visible in this excerpt.
 */
81 int widening(void *unused)
83 struct num_context loop_ctx = { .i = 0, .j = 1 };
85 bpf_loop(100, widening_cb, &loop_ctx, 0);
86 /* loop_ctx.j is not changed during callback iteration,
87 * verifier should not apply widening to it.
89 return choice_arr[loop_ctx.j];
/* bpf_loop() callback for loop_detection below.
 * NOTE(review): body not visible; given the expected "infinite loop
 * detected" failure, it presumably keeps the verifier from proving
 * forward progress — confirm in full file.
 */
92 static int loop_detection_cb(__u32 idx, struct num_context *ctx)
/* Negative test: the verifier is expected to abort verification of
 * the callback loop with "infinite loop detected".
 */
99 __failure __msg("infinite loop detected")
100 int loop_detection(void *unused)
102 struct num_context loop_ctx = { .i = 0 };
104 bpf_loop(100, loop_detection_cb, &loop_ctx, 0);
/* Shared helper inlined into for_each_map_elem_cb, ringbuf_drain_cb
 * and find_vma_cb below. NOTE(review): body not visible; the callers'
 * expected failures (off=32) suggest it eventually drives ctx->i out
 * of bounds — confirm in full file.
 */
108 static __always_inline __u64 oob_state_machine(struct num_context *ctx)
/* bpf_for_each_map_elem() callback: delegates to the shared
 * out-of-bounds state machine on the caller-provided context.
 */
121 static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
123 return oob_state_machine(data);
/* The verifier must track that for_each_map_elem_cb can push
 * loop_ctx.i out of choice_arr's bounds and reject the final index.
 */
127 __failure __msg("invalid access to map value, value_size=2 off=32 size=1")
128 int unsafe_for_each_map_elem(void *unused)
130 struct num_context loop_ctx = { .i = 0 };
132 bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
133 return choice_arr[loop_ctx.i];
/* bpf_user_ringbuf_drain() callback: same shared out-of-bounds state
 * machine, driven per drained sample.
 */
136 static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data)
138 return oob_state_machine(data);
/* Same check as unsafe_for_each_map_elem, but the callback is invoked
 * through bpf_user_ringbuf_drain() on the 'ringbuf' map.
 */
142 __failure __msg("invalid access to map value, value_size=2 off=32 size=1")
143 int unsafe_ringbuf_drain(void *unused)
145 struct num_context loop_ctx = { .i = 0 };
147 bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);
148 return choice_arr[loop_ctx.i];
/* bpf_find_vma() callback: ignores task/vma, runs the shared
 * out-of-bounds state machine on the context.
 */
151 static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
153 return oob_state_machine(data);
/* Same check again, with the callback invoked via bpf_find_vma() on
 * the current task.
 */
157 __failure __msg("invalid access to map value, value_size=2 off=32 size=1")
158 int unsafe_find_vma(void *unused)
160 struct task_struct *task = bpf_get_current_task_btf();
161 struct num_context loop_ctx = { .i = 0 };
163 bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
164 return choice_arr[loop_ctx.i];
/* Callback shared by the iter-limit tests below.
 * NOTE(review): body not visible; the paired tests (1 iteration ok,
 * 2 iterations fail at off=2) suggest each call advances ctx->i —
 * confirm in full file.
 */
167 static int iter_limit_cb(__u32 idx, struct num_context *ctx)
/* With a single iteration the resulting ctx.i stays within
 * choice_arr's bounds, so verification should succeed.
 * NOTE(review): SEC()/__success annotations not visible here.
 */
175 int bpf_loop_iter_limit_ok(void *unused)
177 struct num_context ctx = { .i = 0 };
179 bpf_loop(1, iter_limit_cb, &ctx, 0);
180 return choice_arr[ctx.i];
/* Two iterations push ctx.i one past the end of choice_arr[2]; the
 * verifier must simulate both callback invocations and reject the
 * access at off=2.
 */
184 __failure __msg("invalid access to map value, value_size=2 off=2 size=1")
185 int bpf_loop_iter_limit_overflow(void *unused)
187 struct num_context ctx = { .i = 0 };
189 bpf_loop(2, iter_limit_cb, &ctx, 0);
190 return choice_arr[ctx.i];
/* Nested-loop callbacks for bpf_loop_iter_limit_nested below.
 * level1 runs each level2 callback once via nested bpf_loop() calls.
 * NOTE(review): the level2a/level2b bodies are not visible in this
 * excerpt; per the nested test's comment they set per-callback visit
 * markers in ctx — confirm in full file.
 */
193 static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx)
199 static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx)
205 static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx)
208 bpf_loop(1, iter_limit_level2a_cb, ctx, 0);
209 bpf_loop(1, iter_limit_level2b_cb, ctx, 0);
213 /* Check that path visiting every callback function once had been
214 * reached by verifier. Variables 'ctx{1,2}i' below serve as flags,
215 * with each decimal digit corresponding to a callback visit marker.
/* Expected runtime return value 111111 == 1000 * 111 + 111 + 0,
 * i.e. both nested loops visited all three callbacks once and c == 0.
 * NOTE(review): the declarations of 'a', 'b', 'c' (presumably reading
 * ctx1.i / ctx2.i) are not visible in this excerpt.
 */
218 __success __retval(111111)
219 int bpf_loop_iter_limit_nested(void *unused)
221 struct num_context ctx1 = { .i = 0 };
222 struct num_context ctx2 = { .i = 0 };
225 bpf_loop(1, iter_limit_level1_cb, &ctx1, 0);
226 bpf_loop(1, iter_limit_level1_cb, &ctx2, 0);
229 /* Force 'ctx1.i' and 'ctx2.i' precise. */
230 c = choice_arr[(a + b) % 2];
231 /* This makes 'c' zero, but neither clang nor verifier know it. */
233 /* Make sure that verifier does not visit 'impossible' states:
234 * enumerate all possible callback visit masks.
/* A division by zero here would fail verification, so the verifier
 * must prove the condition below is never true.
 */
236 if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 &&
237 b != 0 && b != 1 && b != 11 && b != 101 && b != 111)
238 asm volatile ("r0 /= 0;" ::: "r0");
239 return 1000 * a + b + c;
/* Context shared with iter_limit_bug_cb below.
 * NOTE(review): the member list is not visible in this excerpt; the
 * asm in the callback stores u64 values at offsets 0, 8 and 16,
 * matching the three fields ('a', 'b', 'c' per the callback comment).
 */
242 struct iter_limit_bug_ctx {
/* __naked callback used by iter_limit_bug: depending on a random
 * value, stores 42 into exactly one of the three u64 fields of the
 * context pointed to by r9 (offsets 0/8/16).
 * NOTE(review): several asm lines (register setup, 'exit'/'goto'
 * between cases, clobber list) are missing from this excerpt; the
 * visible stores match the switch in the comment below.
 */
248 static __naked void iter_limit_bug_cb(void)
250 /* This is the same as C code below, but written
251 * in assembly to control which branches are fall-through.
253 * switch (bpf_get_prandom_u32()) {
254 * case 1: ctx->a = 42; break;
255 * case 2: ctx->b = 42; break;
256 * default: ctx->c = 42; break;
261 "call %[bpf_get_prandom_u32];"
265 "if r1 == 0x1 goto 1f;"
266 "if r1 == 0x2 goto 2f;"
267 "*(u64 *)(r9 + 16) = r2;"
269 "1: *(u64 *)(r9 + 0) = r2;"
271 "2: *(u64 *)(r9 + 8) = r2;"
274 : __imm(bpf_get_prandom_u32)
/* Regression test: runs iter_limit_bug_cb twice, then (in asm, to
 * pin the order of checks) divides by zero only for the state
 * combination a==42 && b==42 && c==7 — a state the verifier must
 * prove unreachable. BPF_F_TEST_STATE_FREQ forces frequent state
 * checkpointing to expose the original bug.
 * NOTE(review): the SEC()/__success lines, asm operand bindings
 * (ctx_a/ctx_b/ctx_c) and the function tail are not visible in this
 * excerpt.
 */
281 __flag(BPF_F_TEST_STATE_FREQ)
282 int iter_limit_bug(struct __sk_buff *skb)
284 struct iter_limit_bug_ctx ctx = { 7, 7, 7 };
286 bpf_loop(2, iter_limit_bug_cb, &ctx, 0);
288 /* This is the same as C code below,
289 * written in assembly to guarantee checks order.
291 * if (ctx.a == 42 && ctx.b == 42 && ctx.c == 7)
292 * asm volatile("r1 /= 0;":::"r1");
295 "r1 = *(u64 *)%[ctx_a];"
296 "if r1 != 42 goto 1f;"
297 "r1 = *(u64 *)%[ctx_b];"
298 "if r1 != 42 goto 1f;"
299 "r1 = *(u64 *)%[ctx_c];"
300 "if r1 != 7 goto 1f;"
/* Required license declaration: GPL-only helpers are used above. */
312 char _license[] SEC("license") = "GPL";