1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
4 /* test_tailcall_1 checks basic functionality by patching multiple locations
5 * in a single program for a single tail call slot with nop->jmp, jmp->nop
6 * and jmp->jmp rewrites. Also checks for nop->nop.
8 static void test_tailcall_1(void)
10 int err, map_fd, prog_fd, main_fd, i, j;
11 struct bpf_map *prog_array;
12 struct bpf_program *prog;
13 struct bpf_object *obj;
14 __u32 retval, duration;
18 err = bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
23 prog = bpf_object__find_program_by_title(obj, "classifier");
24 if (CHECK_FAIL(!prog))
27 main_fd = bpf_program__fd(prog);
28 if (CHECK_FAIL(main_fd < 0))
31 prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
32 if (CHECK_FAIL(!prog_array))
35 map_fd = bpf_map__fd(prog_array);
36 if (CHECK_FAIL(map_fd < 0))
39 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
40 snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
42 prog = bpf_object__find_program_by_title(obj, prog_name);
43 if (CHECK_FAIL(!prog))
46 prog_fd = bpf_program__fd(prog);
47 if (CHECK_FAIL(prog_fd < 0))
50 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
55 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
56 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
57 &duration, &retval, NULL);
58 CHECK(err || retval != i, "tailcall",
59 "err %d errno %d retval %d\n", err, errno, retval);
61 err = bpf_map_delete_elem(map_fd, &i);
66 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
67 &duration, &retval, NULL);
68 CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
71 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
72 snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
74 prog = bpf_object__find_program_by_title(obj, prog_name);
75 if (CHECK_FAIL(!prog))
78 prog_fd = bpf_program__fd(prog);
79 if (CHECK_FAIL(prog_fd < 0))
82 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
87 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
88 &duration, &retval, NULL);
89 CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
92 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
93 j = bpf_map__def(prog_array)->max_entries - 1 - i;
94 snprintf(prog_name, sizeof(prog_name), "classifier/%i", j);
96 prog = bpf_object__find_program_by_title(obj, prog_name);
97 if (CHECK_FAIL(!prog))
100 prog_fd = bpf_program__fd(prog);
101 if (CHECK_FAIL(prog_fd < 0))
104 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
109 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
110 j = bpf_map__def(prog_array)->max_entries - 1 - i;
112 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
113 &duration, &retval, NULL);
114 CHECK(err || retval != j, "tailcall",
115 "err %d errno %d retval %d\n", err, errno, retval);
117 err = bpf_map_delete_elem(map_fd, &i);
122 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
123 &duration, &retval, NULL);
124 CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
127 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
128 err = bpf_map_delete_elem(map_fd, &i);
129 if (CHECK_FAIL(err >= 0 || errno != ENOENT))
132 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
133 &duration, &retval, NULL);
134 CHECK(err || retval != 3, "tailcall",
135 "err %d errno %d retval %d\n", err, errno, retval);
139 bpf_object__close(obj);
142 /* test_tailcall_2 checks that patching multiple programs for a single
143 * tail call slot works. It also jumps through several programs and tests
144 * the tail call limit counter.
146 static void test_tailcall_2(void)
148 int err, map_fd, prog_fd, main_fd, i;
149 struct bpf_map *prog_array;
150 struct bpf_program *prog;
151 struct bpf_object *obj;
152 __u32 retval, duration;
156 err = bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
161 prog = bpf_object__find_program_by_title(obj, "classifier");
162 if (CHECK_FAIL(!prog))
165 main_fd = bpf_program__fd(prog);
166 if (CHECK_FAIL(main_fd < 0))
169 prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
170 if (CHECK_FAIL(!prog_array))
173 map_fd = bpf_map__fd(prog_array);
174 if (CHECK_FAIL(map_fd < 0))
177 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
178 snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
180 prog = bpf_object__find_program_by_title(obj, prog_name);
181 if (CHECK_FAIL(!prog))
184 prog_fd = bpf_program__fd(prog);
185 if (CHECK_FAIL(prog_fd < 0))
188 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
193 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
194 &duration, &retval, NULL);
195 CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n",
199 err = bpf_map_delete_elem(map_fd, &i);
203 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
204 &duration, &retval, NULL);
205 CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
209 err = bpf_map_delete_elem(map_fd, &i);
213 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
214 &duration, &retval, NULL);
215 CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
218 bpf_object__close(obj);
221 /* test_tailcall_3 checks that the count value of the tail call limit
222 * enforcement matches with expectations.
224 static void test_tailcall_3(void)
226 int err, map_fd, prog_fd, main_fd, data_fd, i, val;
227 struct bpf_map *prog_array, *data_map;
228 struct bpf_program *prog;
229 struct bpf_object *obj;
230 __u32 retval, duration;
233 err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
238 prog = bpf_object__find_program_by_title(obj, "classifier");
239 if (CHECK_FAIL(!prog))
242 main_fd = bpf_program__fd(prog);
243 if (CHECK_FAIL(main_fd < 0))
246 prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
247 if (CHECK_FAIL(!prog_array))
250 map_fd = bpf_map__fd(prog_array);
251 if (CHECK_FAIL(map_fd < 0))
254 prog = bpf_object__find_program_by_title(obj, "classifier/0");
255 if (CHECK_FAIL(!prog))
258 prog_fd = bpf_program__fd(prog);
259 if (CHECK_FAIL(prog_fd < 0))
263 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
267 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
268 &duration, &retval, NULL);
269 CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
272 data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
273 if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
276 data_fd = bpf_map__fd(data_map);
277 if (CHECK_FAIL(map_fd < 0))
281 err = bpf_map_lookup_elem(data_fd, &i, &val);
282 CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
286 err = bpf_map_delete_elem(map_fd, &i);
290 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
291 &duration, &retval, NULL);
292 CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
295 bpf_object__close(obj);
298 /* test_tailcall_4 checks that the kernel properly selects indirect jump
299 * for the case where the key is not known. Latter is passed via global
300 * data to select different targets we can compare return value of.
302 static void test_tailcall_4(void)
304 int err, map_fd, prog_fd, main_fd, data_fd, i;
305 struct bpf_map *prog_array, *data_map;
306 struct bpf_program *prog;
307 struct bpf_object *obj;
308 __u32 retval, duration;
309 static const int zero = 0;
313 err = bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
318 prog = bpf_object__find_program_by_title(obj, "classifier");
319 if (CHECK_FAIL(!prog))
322 main_fd = bpf_program__fd(prog);
323 if (CHECK_FAIL(main_fd < 0))
326 prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
327 if (CHECK_FAIL(!prog_array))
330 map_fd = bpf_map__fd(prog_array);
331 if (CHECK_FAIL(map_fd < 0))
334 data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
335 if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
338 data_fd = bpf_map__fd(data_map);
339 if (CHECK_FAIL(map_fd < 0))
342 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
343 snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
345 prog = bpf_object__find_program_by_title(obj, prog_name);
346 if (CHECK_FAIL(!prog))
349 prog_fd = bpf_program__fd(prog);
350 if (CHECK_FAIL(prog_fd < 0))
353 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
358 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
359 err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
363 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
364 &duration, &retval, NULL);
365 CHECK(err || retval != i, "tailcall",
366 "err %d errno %d retval %d\n", err, errno, retval);
369 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
370 err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
374 err = bpf_map_delete_elem(map_fd, &i);
378 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
379 &duration, &retval, NULL);
380 CHECK(err || retval != 3, "tailcall",
381 "err %d errno %d retval %d\n", err, errno, retval);
384 bpf_object__close(obj);
387 /* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel generates
388 * an indirect jump when the keys are const but different from different branches.
390 static void test_tailcall_5(void)
392 int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
393 struct bpf_map *prog_array, *data_map;
394 struct bpf_program *prog;
395 struct bpf_object *obj;
396 __u32 retval, duration;
397 static const int zero = 0;
401 err = bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
406 prog = bpf_object__find_program_by_title(obj, "classifier");
407 if (CHECK_FAIL(!prog))
410 main_fd = bpf_program__fd(prog);
411 if (CHECK_FAIL(main_fd < 0))
414 prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
415 if (CHECK_FAIL(!prog_array))
418 map_fd = bpf_map__fd(prog_array);
419 if (CHECK_FAIL(map_fd < 0))
422 data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
423 if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
426 data_fd = bpf_map__fd(data_map);
427 if (CHECK_FAIL(map_fd < 0))
430 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
431 snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
433 prog = bpf_object__find_program_by_title(obj, prog_name);
434 if (CHECK_FAIL(!prog))
437 prog_fd = bpf_program__fd(prog);
438 if (CHECK_FAIL(prog_fd < 0))
441 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
446 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
447 err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
451 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
452 &duration, &retval, NULL);
453 CHECK(err || retval != i, "tailcall",
454 "err %d errno %d retval %d\n", err, errno, retval);
457 for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
458 err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
462 err = bpf_map_delete_elem(map_fd, &i);
466 err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
467 &duration, &retval, NULL);
468 CHECK(err || retval != 3, "tailcall",
469 "err %d errno %d retval %d\n", err, errno, retval);
472 bpf_object__close(obj);
/* Entry point dispatched by the test_progs runner; each subtest is
 * gated so it can be selected individually on the command line.
 */
void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
}