1 // SPDX-License-Identifier: GPL-2.0-only
3 * Functions to manage eBPF programs attached to cgroups
5 * Copyright (c) 2016 Daniel Mack
8 #include <linux/kernel.h>
9 #include <linux/atomic.h>
10 #include <linux/cgroup.h>
11 #include <linux/filter.h>
12 #include <linux/slab.h>
13 #include <linux/sysctl.h>
14 #include <linux/string.h>
15 #include <linux/bpf.h>
16 #include <linux/bpf-cgroup.h>
17 #include <linux/bpf_lsm.h>
18 #include <linux/bpf_verifier.h>
20 #include <net/bpf_sk_storage.h>
22 #include "../cgroup/cgroup-internal.h"
24 DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
25 EXPORT_SYMBOL(cgroup_bpf_enabled_key);
27 /* __always_inline is necessary to prevent indirect call through run_prog pointer. */
30 static __always_inline int
31 bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
32 enum cgroup_bpf_attach_type atype,
33 const void *ctx, bpf_prog_run_fn run_prog,
34 int retval, u32 *ret_flags)
36 const struct bpf_prog_array_item *item;
37 const struct bpf_prog *prog;
38 const struct bpf_prog_array *array;
39 struct bpf_run_ctx *old_run_ctx;
40 struct bpf_cg_run_ctx run_ctx;
43 run_ctx.retval = retval;
46 array = rcu_dereference(cgrp->effective[atype]);
47 item = &array->items[0];
48 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
49 while ((prog = READ_ONCE(item->prog))) {
50 run_ctx.prog_item = item;
51 func_ret = run_prog(prog, ctx);
53 *(ret_flags) |= (func_ret >> 1);
56 if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
57 run_ctx.retval = -EPERM;
60 bpf_reset_run_ctx(old_run_ctx);
63 return run_ctx.retval;
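/* Convention used by bpf_prog_run_array_cg() above: bit 0 of each program's
 * return value is the allow/deny decision; the remaining bits are OR'ed into
 * *ret_flags after a right shift by one (CGROUP_INET_EGRESS uses this to
 * carry BPF_RET_SET_CN).  A deny turns run_ctx.retval into -EPERM unless it
 * already holds an error (e.g. one set via bpf_set_retval()), and that
 * retval is what the caller ultimately sees.
 */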
66 unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
67 const struct bpf_insn *insn)
69 const struct bpf_prog *shim_prog;
76 sk = (void *)(unsigned long)args[0];
77 /*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
78 shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
80 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
82 ret = bpf_prog_run_array_cg(&cgrp->bpf,
83 shim_prog->aux->cgroup_atype,
84 ctx, bpf_prog_run, 0, NULL);
88 unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
89 const struct bpf_insn *insn)
91 const struct bpf_prog *shim_prog;
98 sock = (void *)(unsigned long)args[0];
99 /*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
100 shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
102 cgrp = sock_cgroup_ptr(&sock->sk->sk_cgrp_data);
104 ret = bpf_prog_run_array_cg(&cgrp->bpf,
105 shim_prog->aux->cgroup_atype,
106 ctx, bpf_prog_run, 0, NULL);
110 unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
111 const struct bpf_insn *insn)
113 const struct bpf_prog *shim_prog;
117 /*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
118 shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));
120 /* We rely on trampoline's __bpf_prog_enter_lsm_cgroup to grab RCU read lock. */
121 cgrp = task_dfl_cgroup(current);
123 ret = bpf_prog_run_array_cg(&cgrp->bpf,
124 shim_prog->aux->cgroup_atype,
125 ctx, bpf_prog_run, 0, NULL);
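/* The three __cgroup_bpf_run_lsm_*() helpers above are the bodies of the
 * shim programs that BPF trampolines dispatch to for BPF_LSM_CGROUP hooks.
 * They differ only in how the cgroup is located: from a struct sock
 * argument, from a struct socket argument, or from the current task.  The
 * shim's own struct bpf_prog is recovered from the &insnsi pointer it was
 * entered through by subtracting offsetof(struct bpf_prog, insnsi);
 * container_of() cannot be used because insnsi is a flexible array member.
 */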
129 #ifdef CONFIG_BPF_LSM
130 struct cgroup_lsm_atype {
135 static struct cgroup_lsm_atype cgroup_lsm_atype[CGROUP_LSM_NUM];
137 static enum cgroup_bpf_attach_type
138 bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
142 lockdep_assert_held(&cgroup_mutex);
144 if (attach_type != BPF_LSM_CGROUP)
145 return to_cgroup_bpf_attach_type(attach_type);
147 for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
148 if (cgroup_lsm_atype[i].attach_btf_id == attach_btf_id)
149 return CGROUP_LSM_START + i;
151 for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
152 if (cgroup_lsm_atype[i].attach_btf_id == 0)
153 return CGROUP_LSM_START + i;
159 void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype)
161 int i = cgroup_atype - CGROUP_LSM_START;
163 lockdep_assert_held(&cgroup_mutex);
165 WARN_ON_ONCE(cgroup_lsm_atype[i].attach_btf_id &&
166 cgroup_lsm_atype[i].attach_btf_id != attach_btf_id);
168 cgroup_lsm_atype[i].attach_btf_id = attach_btf_id;
169 cgroup_lsm_atype[i].refcnt++;
172 void bpf_cgroup_atype_put(int cgroup_atype)
174 int i = cgroup_atype - CGROUP_LSM_START;
176 mutex_lock(&cgroup_mutex);
177 if (--cgroup_lsm_atype[i].refcnt <= 0)
178 cgroup_lsm_atype[i].attach_btf_id = 0;
179 WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
180 mutex_unlock(&cgroup_mutex);
183 static enum cgroup_bpf_attach_type
184 bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
186 if (attach_type != BPF_LSM_CGROUP)
187 return to_cgroup_bpf_attach_type(attach_type);
190 #endif /* CONFIG_BPF_LSM */
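/* BPF_LSM_CGROUP attach types are not fixed constants: the range
 * CGROUP_LSM_START..CGROUP_LSM_END provides CGROUP_LSM_NUM dynamic slots,
 * and bpf_cgroup_atype_find() hands out one slot per distinct LSM hook
 * (keyed by attach_btf_id), reusing a slot while bpf_cgroup_atype_get()/_put()
 * keep its refcount elevated.  Without CONFIG_BPF_LSM only the static
 * mapping via to_cgroup_bpf_attach_type() is available.
 */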
192 void cgroup_bpf_offline(struct cgroup *cgrp)
195 percpu_ref_kill(&cgrp->bpf.refcnt);
198 static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
200 enum bpf_cgroup_storage_type stype;
202 for_each_cgroup_storage_type(stype)
203 bpf_cgroup_storage_free(storages[stype]);
206 static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
207 struct bpf_cgroup_storage *new_storages[],
208 enum bpf_attach_type type,
209 struct bpf_prog *prog,
212 enum bpf_cgroup_storage_type stype;
213 struct bpf_cgroup_storage_key key;
216 key.cgroup_inode_id = cgroup_id(cgrp);
217 key.attach_type = type;
219 for_each_cgroup_storage_type(stype) {
220 map = prog->aux->cgroup_storage[stype];
224 storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
228 storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
229 if (IS_ERR(storages[stype])) {
230 bpf_cgroup_storages_free(new_storages);
234 new_storages[stype] = storages[stype];
240 static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
241 struct bpf_cgroup_storage *src[])
243 enum bpf_cgroup_storage_type stype;
245 for_each_cgroup_storage_type(stype)
246 dst[stype] = src[stype];
249 static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
251 enum bpf_attach_type attach_type)
253 enum bpf_cgroup_storage_type stype;
255 for_each_cgroup_storage_type(stype)
256 bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
259 /* Called when bpf_cgroup_link is auto-detached from dying cgroup.
260 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
261 * doesn't free link memory, which will eventually be done by bpf_link's
262 * release() callback, when its last FD is closed.
264 static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
266 cgroup_put(link->cgroup);
271 * cgroup_bpf_release() - put references of all bpf programs and
272 * release all cgroup bpf data
273 * @work: work structure embedded into the cgroup to modify
275 static void cgroup_bpf_release(struct work_struct *work)
277 struct cgroup *p, *cgrp = container_of(work, struct cgroup,
279 struct bpf_prog_array *old_array;
280 struct list_head *storages = &cgrp->bpf.storages;
281 struct bpf_cgroup_storage *storage, *stmp;
285 mutex_lock(&cgroup_mutex);
287 for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
288 struct hlist_head *progs = &cgrp->bpf.progs[atype];
289 struct bpf_prog_list *pl;
290 struct hlist_node *pltmp;
292 hlist_for_each_entry_safe(pl, pltmp, progs, node) {
293 hlist_del(&pl->node);
295 if (pl->prog->expected_attach_type == BPF_LSM_CGROUP)
296 bpf_trampoline_unlink_cgroup_shim(pl->prog);
297 bpf_prog_put(pl->prog);
300 if (pl->link->link.prog->expected_attach_type == BPF_LSM_CGROUP)
301 bpf_trampoline_unlink_cgroup_shim(pl->link->link.prog);
302 bpf_cgroup_link_auto_detach(pl->link);
305 static_branch_dec(&cgroup_bpf_enabled_key[atype]);
307 old_array = rcu_dereference_protected(
308 cgrp->bpf.effective[atype],
309 lockdep_is_held(&cgroup_mutex));
310 bpf_prog_array_free(old_array);
313 list_for_each_entry_safe(storage, stmp, storages, list_cg) {
314 bpf_cgroup_storage_unlink(storage);
315 bpf_cgroup_storage_free(storage);
318 mutex_unlock(&cgroup_mutex);
320 for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
323 percpu_ref_exit(&cgrp->bpf.refcnt);
328 * cgroup_bpf_release_fn() - callback used to schedule releasing
330 * @ref: percpu ref counter structure
332 static void cgroup_bpf_release_fn(struct percpu_ref *ref)
334 struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);
336 INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
337 queue_work(system_wq, &cgrp->bpf.release_work);
340 /* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
341 * link or direct prog.
343 static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
348 return pl->link->link.prog;
352 /* count the number of elements in the list.
353 * it's slow but the list cannot be long
355 static u32 prog_list_length(struct hlist_head *head)
357 struct bpf_prog_list *pl;
360 hlist_for_each_entry(pl, head, node) {
361 if (!prog_list_prog(pl))
368 /* if parent has non-overridable prog attached,
369 * disallow attaching new programs to the descendant cgroup.
370 * if parent has overridable or multi-prog, allow attaching
372 static bool hierarchy_allows_attach(struct cgroup *cgrp,
373 enum cgroup_bpf_attach_type atype)
377 p = cgroup_parent(cgrp);
381 u32 flags = p->bpf.flags[atype];
384 if (flags & BPF_F_ALLOW_MULTI)
386 cnt = prog_list_length(&p->bpf.progs[atype]);
387 WARN_ON_ONCE(cnt > 1);
389 return !!(flags & BPF_F_ALLOW_OVERRIDE);
390 p = cgroup_parent(p);
395 /* compute a chain of effective programs for a given cgroup:
396 * start from the list of programs in this cgroup and add
397 * all parent programs.
398 * Note that parent's F_ALLOW_OVERRIDE-type program yields
399 * to programs in this cgroup
401 static int compute_effective_progs(struct cgroup *cgrp,
402 enum cgroup_bpf_attach_type atype,
403 struct bpf_prog_array **array)
405 struct bpf_prog_array_item *item;
406 struct bpf_prog_array *progs;
407 struct bpf_prog_list *pl;
408 struct cgroup *p = cgrp;
411 /* count number of effective programs by walking parents */
413 if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
414 cnt += prog_list_length(&p->bpf.progs[atype]);
415 p = cgroup_parent(p);
418 progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
422 /* populate the array with effective progs */
426 if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
429 hlist_for_each_entry(pl, &p->bpf.progs[atype], node) {
430 if (!prog_list_prog(pl))
433 item = &progs->items[cnt];
434 item->prog = prog_list_prog(pl);
435 bpf_cgroup_storages_assign(item->cgroup_storage,
439 } while ((p = cgroup_parent(p)));
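/* As populated here, the effective array for a cgroup starts with that
 * cgroup's own programs (in attach order), followed by its ancestors'
 * programs walking up to the root; bpf_prog_run_array_cg() then runs the
 * array from index 0.  An ancestor's programs are included only if that
 * level used BPF_F_ALLOW_MULTI or if nothing closer to the leaf contributed
 * anything (the cnt == 0 / cnt > 0 checks above).
 */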
445 static void activate_effective_progs(struct cgroup *cgrp,
446 enum cgroup_bpf_attach_type atype,
447 struct bpf_prog_array *old_array)
449 old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
450 lockdep_is_held(&cgroup_mutex));
451 /* free prog array after grace period, since __cgroup_bpf_run_*()
452 * might be still walking the array
454 bpf_prog_array_free(old_array);
458 * cgroup_bpf_inherit() - inherit effective programs from parent
459 * @cgrp: the cgroup to modify
461 int cgroup_bpf_inherit(struct cgroup *cgrp)
463 /* has to use macro instead of const int, since compiler thinks
464 * that array below is variable length
466 #define NR ARRAY_SIZE(cgrp->bpf.effective)
467 struct bpf_prog_array *arrays[NR] = {};
471 ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
476 for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
479 for (i = 0; i < NR; i++)
480 INIT_HLIST_HEAD(&cgrp->bpf.progs[i]);
482 INIT_LIST_HEAD(&cgrp->bpf.storages);
484 for (i = 0; i < NR; i++)
485 if (compute_effective_progs(cgrp, i, &arrays[i]))
488 for (i = 0; i < NR; i++)
489 activate_effective_progs(cgrp, i, arrays[i]);
493 for (i = 0; i < NR; i++)
494 bpf_prog_array_free(arrays[i]);
496 for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
499 percpu_ref_exit(&cgrp->bpf.refcnt);
504 static int update_effective_progs(struct cgroup *cgrp,
505 enum cgroup_bpf_attach_type atype)
507 struct cgroup_subsys_state *css;
510 /* allocate and recompute effective prog arrays */
511 css_for_each_descendant_pre(css, &cgrp->self) {
512 struct cgroup *desc = container_of(css, struct cgroup, self);
514 if (percpu_ref_is_zero(&desc->bpf.refcnt))
517 err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
522 /* all allocations were successful. Activate all prog arrays */
523 css_for_each_descendant_pre(css, &cgrp->self) {
524 struct cgroup *desc = container_of(css, struct cgroup, self);
526 if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
527 if (unlikely(desc->bpf.inactive)) {
528 bpf_prog_array_free(desc->bpf.inactive);
529 desc->bpf.inactive = NULL;
534 activate_effective_progs(desc, atype, desc->bpf.inactive);
535 desc->bpf.inactive = NULL;
541 /* oom while computing effective. Free all computed effective arrays
542 * since they were not activated
544 css_for_each_descendant_pre(css, &cgrp->self) {
545 struct cgroup *desc = container_of(css, struct cgroup, self);
547 bpf_prog_array_free(desc->bpf.inactive);
548 desc->bpf.inactive = NULL;
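/* update_effective_progs() is a two-phase commit: new effective arrays are
 * first computed into desc->bpf.inactive for every live descendant, and only
 * once all allocations have succeeded are they activated; if any allocation
 * fails, everything computed so far is freed here and the old arrays remain
 * in place.
 */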
554 #define BPF_CGROUP_MAX_PROGS 64
556 static struct bpf_prog_list *find_attach_entry(struct hlist_head *progs,
557 struct bpf_prog *prog,
558 struct bpf_cgroup_link *link,
559 struct bpf_prog *replace_prog,
562 struct bpf_prog_list *pl;
564 /* single-attach case */
566 if (hlist_empty(progs))
568 return hlist_entry(progs->first, typeof(*pl), node);
571 hlist_for_each_entry(pl, progs, node) {
572 if (prog && pl->prog == prog && prog != replace_prog)
573 /* disallow attaching the same prog twice */
574 return ERR_PTR(-EINVAL);
575 if (link && pl->link == link)
576 /* disallow attaching the same link twice */
577 return ERR_PTR(-EINVAL);
580 /* direct prog multi-attach w/ replacement case */
582 hlist_for_each_entry(pl, progs, node) {
583 if (pl->prog == replace_prog)
587 /* prog to replace not found for cgroup */
588 return ERR_PTR(-ENOENT);
595 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
596 * propagate the change to descendants
597 * @cgrp: The cgroup which descendants to traverse
598 * @prog: A program to attach
599 * @link: A link to attach
600 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
601 * @type: Type of attach operation
602 * @flags: Option flags
604 * Exactly one of @prog or @link can be non-null.
605 * Must be called with cgroup_mutex held.
607 static int __cgroup_bpf_attach(struct cgroup *cgrp,
608 struct bpf_prog *prog, struct bpf_prog *replace_prog,
609 struct bpf_cgroup_link *link,
610 enum bpf_attach_type type, u32 flags)
612 u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
613 struct bpf_prog *old_prog = NULL;
614 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
615 struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
616 struct bpf_prog *new_prog = prog ? : link->link.prog;
617 enum cgroup_bpf_attach_type atype;
618 struct bpf_prog_list *pl;
619 struct hlist_head *progs;
622 if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
623 ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
624 /* invalid combination */
626 if (link && (prog || replace_prog))
627 /* only either link or prog/replace_prog can be specified */
629 if (!!replace_prog != !!(flags & BPF_F_REPLACE))
630 /* replace_prog implies BPF_F_REPLACE, and vice versa */
633 atype = bpf_cgroup_atype_find(type, new_prog->aux->attach_btf_id);
637 progs = &cgrp->bpf.progs[atype];
639 if (!hierarchy_allows_attach(cgrp, atype))
642 if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
643 /* Disallow attaching non-overridable on top
644 * of existing overridable in this cgroup.
645 * Disallow attaching multi-prog if overridable or none
649 if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
652 pl = find_attach_entry(progs, prog, link, replace_prog,
653 flags & BPF_F_ALLOW_MULTI);
657 if (bpf_cgroup_storages_alloc(storage, new_storage, type,
658 prog ? : link->link.prog, cgrp))
664 struct hlist_node *last = NULL;
666 pl = kmalloc(sizeof(*pl), GFP_KERNEL);
668 bpf_cgroup_storages_free(new_storage);
671 if (hlist_empty(progs))
672 hlist_add_head(&pl->node, progs);
674 hlist_for_each(last, progs) {
677 hlist_add_behind(&pl->node, last);
684 bpf_cgroup_storages_assign(pl->storage, storage);
685 cgrp->bpf.flags[atype] = saved_flags;
687 if (type == BPF_LSM_CGROUP) {
688 err = bpf_trampoline_link_cgroup_shim(new_prog, atype);
693 err = update_effective_progs(cgrp, atype);
695 goto cleanup_trampoline;
698 if (type == BPF_LSM_CGROUP)
699 bpf_trampoline_unlink_cgroup_shim(old_prog);
700 bpf_prog_put(old_prog);
702 static_branch_inc(&cgroup_bpf_enabled_key[atype]);
704 bpf_cgroup_storages_link(new_storage, cgrp, type);
708 if (type == BPF_LSM_CGROUP)
709 bpf_trampoline_unlink_cgroup_shim(new_prog);
716 bpf_cgroup_storages_free(new_storage);
718 hlist_del(&pl->node);
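/* Userspace view (illustrative only, via libbpf): attachment typically
 * arrives here either through the legacy command
 *
 *	bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS,
 *			BPF_F_ALLOW_MULTI);
 *
 * which calls into __cgroup_bpf_attach() with @prog set, or through
 * bpf_link_create(), which goes via cgroup_bpf_link_attach() below and ends
 * up here with @link set and BPF_F_ALLOW_MULTI implied.
 */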
724 static int cgroup_bpf_attach(struct cgroup *cgrp,
725 struct bpf_prog *prog, struct bpf_prog *replace_prog,
726 struct bpf_cgroup_link *link,
727 enum bpf_attach_type type,
732 mutex_lock(&cgroup_mutex);
733 ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
734 mutex_unlock(&cgroup_mutex);
738 /* Swap updated BPF program for given link in effective program arrays across
739 * all descendant cgroups. This function is guaranteed to succeed.
741 static void replace_effective_prog(struct cgroup *cgrp,
742 enum cgroup_bpf_attach_type atype,
743 struct bpf_cgroup_link *link)
745 struct bpf_prog_array_item *item;
746 struct cgroup_subsys_state *css;
747 struct bpf_prog_array *progs;
748 struct bpf_prog_list *pl;
749 struct hlist_head *head;
753 css_for_each_descendant_pre(css, &cgrp->self) {
754 struct cgroup *desc = container_of(css, struct cgroup, self);
756 if (percpu_ref_is_zero(&desc->bpf.refcnt))
759 /* find position of link in effective progs array */
760 for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
761 if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
764 head = &cg->bpf.progs[atype];
765 hlist_for_each_entry(pl, head, node) {
766 if (!prog_list_prog(pl))
768 if (pl->link == link)
775 progs = rcu_dereference_protected(
776 desc->bpf.effective[atype],
777 lockdep_is_held(&cgroup_mutex));
778 item = &progs->items[pos];
779 WRITE_ONCE(item->prog, link->link.prog);
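/* The WRITE_ONCE() above pairs with the READ_ONCE() of item->prog in
 * bpf_prog_run_array_cg(): the new program is swapped into the already
 * published effective arrays in place, so concurrent runners pick it up
 * without any reallocation, which is why this path cannot fail.
 */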
784 * __cgroup_bpf_replace() - Replace link's program and propagate the change
786 * @cgrp: The cgroup which descendants to traverse
787 * @link: A link for which to replace BPF program
788 * @type: Type of attach operation
790 * Must be called with cgroup_mutex held.
792 static int __cgroup_bpf_replace(struct cgroup *cgrp,
793 struct bpf_cgroup_link *link,
794 struct bpf_prog *new_prog)
796 enum cgroup_bpf_attach_type atype;
797 struct bpf_prog *old_prog;
798 struct bpf_prog_list *pl;
799 struct hlist_head *progs;
802 atype = bpf_cgroup_atype_find(link->type, new_prog->aux->attach_btf_id);
806 progs = &cgrp->bpf.progs[atype];
808 if (link->link.prog->type != new_prog->type)
811 hlist_for_each_entry(pl, progs, node) {
812 if (pl->link == link) {
820 old_prog = xchg(&link->link.prog, new_prog);
821 replace_effective_prog(cgrp, atype, link);
822 bpf_prog_put(old_prog);
826 static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
827 struct bpf_prog *old_prog)
829 struct bpf_cgroup_link *cg_link;
832 cg_link = container_of(link, struct bpf_cgroup_link, link);
834 mutex_lock(&cgroup_mutex);
835 /* link might have been auto-released by dying cgroup, so fail */
836 if (!cg_link->cgroup) {
840 if (old_prog && link->prog != old_prog) {
844 ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
846 mutex_unlock(&cgroup_mutex);
850 static struct bpf_prog_list *find_detach_entry(struct hlist_head *progs,
851 struct bpf_prog *prog,
852 struct bpf_cgroup_link *link,
855 struct bpf_prog_list *pl;
858 if (hlist_empty(progs))
859 /* report error when trying to detach and nothing is attached */
860 return ERR_PTR(-ENOENT);
862 /* to maintain backward compatibility NONE and OVERRIDE cgroups
863 * allow detaching with invalid FD (prog==NULL) in legacy mode
865 return hlist_entry(progs->first, typeof(*pl), node);
869 /* to detach MULTI prog the user has to specify valid FD
870 * of the program or link to be detached
872 return ERR_PTR(-EINVAL);
874 /* find the prog or link and detach it */
875 hlist_for_each_entry(pl, progs, node) {
876 if (pl->prog == prog && pl->link == link)
879 return ERR_PTR(-ENOENT);
883 * purge_effective_progs() - After compute_effective_progs fails to alloc new
884 * cgrp->bpf.inactive table we can recover by
885 * recomputing the array in place.
887 * @cgrp: The cgroup which descendants to traverse
888 * @prog: A program to detach or NULL
889 * @link: A link to detach or NULL
890 * @atype: Type of detach operation
892 static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
893 struct bpf_cgroup_link *link,
894 enum cgroup_bpf_attach_type atype)
896 struct cgroup_subsys_state *css;
897 struct bpf_prog_array *progs;
898 struct bpf_prog_list *pl;
899 struct hlist_head *head;
903 /* recompute effective prog array in place */
904 css_for_each_descendant_pre(css, &cgrp->self) {
905 struct cgroup *desc = container_of(css, struct cgroup, self);
907 if (percpu_ref_is_zero(&desc->bpf.refcnt))
910 /* find position of link or prog in effective progs array */
911 for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
912 if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
915 head = &cg->bpf.progs[atype];
916 hlist_for_each_entry(pl, head, node) {
917 if (!prog_list_prog(pl))
919 if (pl->prog == prog && pl->link == link)
926 progs = rcu_dereference_protected(
927 desc->bpf.effective[atype],
928 lockdep_is_held(&cgroup_mutex));
930 /* Remove the program from the array */
931 WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
932 "Failed to purge a prog from array at index %d", pos);
937 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
938 * propagate the change to descendants
939 * @cgrp: The cgroup which descendants to traverse
940 * @prog: A program to detach or NULL
941 * @link: A link to detach or NULL
942 * @type: Type of detach operation
944 * At most one of @prog or @link can be non-NULL.
945 * Must be called with cgroup_mutex held.
947 static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
948 struct bpf_cgroup_link *link, enum bpf_attach_type type)
950 enum cgroup_bpf_attach_type atype;
951 struct bpf_prog *old_prog;
952 struct bpf_prog_list *pl;
953 struct hlist_head *progs;
954 u32 attach_btf_id = 0;
958 attach_btf_id = prog->aux->attach_btf_id;
960 attach_btf_id = link->link.prog->aux->attach_btf_id;
962 atype = bpf_cgroup_atype_find(type, attach_btf_id);
966 progs = &cgrp->bpf.progs[atype];
967 flags = cgrp->bpf.flags[atype];
970 /* only one of prog or link can be specified */
973 pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
977 /* mark it deleted, so it's ignored while recomputing effective */
982 if (update_effective_progs(cgrp, atype)) {
983 /* if updating the effective array failed, replace the prog with a dummy prog */
986 purge_effective_progs(cgrp, old_prog, link, atype);
989 /* now can actually delete it from this cgroup list */
990 hlist_del(&pl->node);
993 if (hlist_empty(progs))
994 /* last program was detached, reset flags to zero */
995 cgrp->bpf.flags[atype] = 0;
997 if (type == BPF_LSM_CGROUP)
998 bpf_trampoline_unlink_cgroup_shim(old_prog);
999 bpf_prog_put(old_prog);
1001 static_branch_dec(&cgroup_bpf_enabled_key[atype]);
1005 static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
1006 enum bpf_attach_type type)
1010 mutex_lock(&cgroup_mutex);
1011 ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
1012 mutex_unlock(&cgroup_mutex);
1016 /* Must be called with cgroup_mutex held to avoid races. */
1017 static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
1018 union bpf_attr __user *uattr)
1020 __u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
1021 __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
1022 enum bpf_attach_type type = attr->query.attach_type;
1023 enum cgroup_bpf_attach_type from_atype, to_atype;
1024 enum cgroup_bpf_attach_type atype;
1025 struct bpf_prog_array *effective;
1026 int cnt, ret = 0, i;
1030 if (type == BPF_LSM_CGROUP) {
1031 if (attr->query.prog_cnt && prog_ids && !prog_attach_flags)
1034 from_atype = CGROUP_LSM_START;
1035 to_atype = CGROUP_LSM_END;
1038 from_atype = to_cgroup_bpf_attach_type(type);
1041 to_atype = from_atype;
1042 flags = cgrp->bpf.flags[from_atype];
1045 for (atype = from_atype; atype <= to_atype; atype++) {
1046 if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
1047 effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
1048 lockdep_is_held(&cgroup_mutex));
1049 total_cnt += bpf_prog_array_length(effective);
1051 total_cnt += prog_list_length(&cgrp->bpf.progs[atype]);
1055 if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
1057 if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
1059 if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt)
1060 /* return early if user requested only program count + flags */
1063 if (attr->query.prog_cnt < total_cnt) {
1064 total_cnt = attr->query.prog_cnt;
1068 for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
1069 if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
1070 effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
1071 lockdep_is_held(&cgroup_mutex));
1072 cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
1073 ret = bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
1075 struct hlist_head *progs;
1076 struct bpf_prog_list *pl;
1077 struct bpf_prog *prog;
1080 progs = &cgrp->bpf.progs[atype];
1081 cnt = min_t(int, prog_list_length(progs), total_cnt);
1083 hlist_for_each_entry(pl, progs, node) {
1084 prog = prog_list_prog(pl);
1086 if (copy_to_user(prog_ids + i, &id, sizeof(id)))
1093 if (prog_attach_flags) {
1094 flags = cgrp->bpf.flags[atype];
1096 for (i = 0; i < cnt; i++)
1097 if (copy_to_user(prog_attach_flags + i, &flags, sizeof(flags)))
1099 prog_attach_flags += cnt;
1108 static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
1109 union bpf_attr __user *uattr)
1113 mutex_lock(&cgroup_mutex);
1114 ret = __cgroup_bpf_query(cgrp, attr, uattr);
1115 mutex_unlock(&cgroup_mutex);
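/* Userspace view (illustrative only, via libbpf):
 *
 *	__u32 ids[64], cnt = 64, flags;
 *
 *	bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
 *		       &flags, ids, &cnt);
 *
 * returns the effective program IDs for the cgroup; without
 * BPF_F_QUERY_EFFECTIVE only directly attached programs are reported.
 */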
1119 int cgroup_bpf_prog_attach(const union bpf_attr *attr,
1120 enum bpf_prog_type ptype, struct bpf_prog *prog)
1122 struct bpf_prog *replace_prog = NULL;
1123 struct cgroup *cgrp;
1126 cgrp = cgroup_get_from_fd(attr->target_fd);
1128 return PTR_ERR(cgrp);
1130 if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
1131 (attr->attach_flags & BPF_F_REPLACE)) {
1132 replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
1133 if (IS_ERR(replace_prog)) {
1135 return PTR_ERR(replace_prog);
1139 ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
1140 attr->attach_type, attr->attach_flags);
1143 bpf_prog_put(replace_prog);
1148 int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
1150 struct bpf_prog *prog;
1151 struct cgroup *cgrp;
1154 cgrp = cgroup_get_from_fd(attr->target_fd);
1156 return PTR_ERR(cgrp);
1158 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
1162 ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
1170 static void bpf_cgroup_link_release(struct bpf_link *link)
1172 struct bpf_cgroup_link *cg_link =
1173 container_of(link, struct bpf_cgroup_link, link);
1176 /* link might have been auto-detached by dying cgroup already,
1177 * in that case our work is done here
1179 if (!cg_link->cgroup)
1182 mutex_lock(&cgroup_mutex);
1184 /* re-check cgroup under lock again */
1185 if (!cg_link->cgroup) {
1186 mutex_unlock(&cgroup_mutex);
1190 WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
1192 if (cg_link->type == BPF_LSM_CGROUP)
1193 bpf_trampoline_unlink_cgroup_shim(cg_link->link.prog);
1195 cg = cg_link->cgroup;
1196 cg_link->cgroup = NULL;
1198 mutex_unlock(&cgroup_mutex);
1203 static void bpf_cgroup_link_dealloc(struct bpf_link *link)
1205 struct bpf_cgroup_link *cg_link =
1206 container_of(link, struct bpf_cgroup_link, link);
1211 static int bpf_cgroup_link_detach(struct bpf_link *link)
1213 bpf_cgroup_link_release(link);
1218 static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
1219 struct seq_file *seq)
1221 struct bpf_cgroup_link *cg_link =
1222 container_of(link, struct bpf_cgroup_link, link);
1225 mutex_lock(&cgroup_mutex);
1226 if (cg_link->cgroup)
1227 cg_id = cgroup_id(cg_link->cgroup);
1228 mutex_unlock(&cgroup_mutex);
1231 "cgroup_id:\t%llu\n"
1232 "attach_type:\t%d\n",
1237 static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
1238 struct bpf_link_info *info)
1240 struct bpf_cgroup_link *cg_link =
1241 container_of(link, struct bpf_cgroup_link, link);
1244 mutex_lock(&cgroup_mutex);
1245 if (cg_link->cgroup)
1246 cg_id = cgroup_id(cg_link->cgroup);
1247 mutex_unlock(&cgroup_mutex);
1249 info->cgroup.cgroup_id = cg_id;
1250 info->cgroup.attach_type = cg_link->type;
1254 static const struct bpf_link_ops bpf_cgroup_link_lops = {
1255 .release = bpf_cgroup_link_release,
1256 .dealloc = bpf_cgroup_link_dealloc,
1257 .detach = bpf_cgroup_link_detach,
1258 .update_prog = cgroup_bpf_replace,
1259 .show_fdinfo = bpf_cgroup_link_show_fdinfo,
1260 .fill_link_info = bpf_cgroup_link_fill_link_info,
1263 int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
1265 struct bpf_link_primer link_primer;
1266 struct bpf_cgroup_link *link;
1267 struct cgroup *cgrp;
1270 if (attr->link_create.flags)
1273 cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
1275 return PTR_ERR(cgrp);
1277 link = kzalloc(sizeof(*link), GFP_USER);
1280 goto out_put_cgroup;
1282 bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
1284 link->cgroup = cgrp;
1285 link->type = attr->link_create.attach_type;
1287 err = bpf_link_prime(&link->link, &link_primer);
1290 goto out_put_cgroup;
1293 err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
1294 link->type, BPF_F_ALLOW_MULTI);
1296 bpf_link_cleanup(&link_primer);
1297 goto out_put_cgroup;
1300 return bpf_link_settle(&link_primer);
1307 int cgroup_bpf_prog_query(const union bpf_attr *attr,
1308 union bpf_attr __user *uattr)
1310 struct cgroup *cgrp;
1313 cgrp = cgroup_get_from_fd(attr->query.target_fd);
1315 return PTR_ERR(cgrp);
1317 ret = cgroup_bpf_query(cgrp, attr, uattr);
1324 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
1325 * @sk: The socket sending or receiving traffic
1326 * @skb: The skb that is being sent or received
1327 * @type: The type of program to be executed
1329 * If no socket is passed, or the socket is not of type INET or INET6,
1330 * this function does nothing and returns 0.
1332 * The program type passed in via @type must be suitable for network
1333 * filtering. No further check is performed to assert that.
1335 * For egress packets, this function can return:
1336 * NET_XMIT_SUCCESS (0) - continue with packet output
1337 * NET_XMIT_DROP (1) - drop packet and notify TCP to call cwr
1338 * NET_XMIT_CN (2) - continue with packet output and notify TCP
1340 * -err - drop packet
1342 * For ingress packets, this function will return -EPERM if any
1343 * attached program was found and if it returned != 1 during execution.
1344 * Otherwise 0 is returned.
1346 int __cgroup_bpf_run_filter_skb(struct sock *sk,
1347 struct sk_buff *skb,
1348 enum cgroup_bpf_attach_type atype)
1350 unsigned int offset = skb->data - skb_network_header(skb);
1351 struct sock *save_sk;
1352 void *saved_data_end;
1353 struct cgroup *cgrp;
1356 if (!sk || !sk_fullsock(sk))
1359 if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
1362 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1365 __skb_push(skb, offset);
1367 /* compute pointers for the bpf prog */
1368 bpf_compute_and_save_data_end(skb, &saved_data_end);
1370 if (atype == CGROUP_INET_EGRESS) {
1374 ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
1375 __bpf_prog_run_save_cb, 0, &flags);
1377 /* Return values of CGROUP EGRESS BPF programs are:
1378 * 0: drop packet
1379 * 1: keep packet
1380 * 2: drop packet and cn
1381 * 3: keep packet and cn
1382 *
1383 * The returned value is then converted to one of the NET_XMIT
1384 * or an error code that is then interpreted as drop packet
1385 * (and no cn):
1386 * 0: NET_XMIT_SUCCESS skb should be transmitted
1387 * 1: NET_XMIT_DROP skb should be dropped and cn
1388 * 2: NET_XMIT_CN skb should be transmitted and cn
1389 * 3: -err skb should be dropped
1390 */
1392 cn = flags & BPF_RET_SET_CN;
1393 if (ret && !IS_ERR_VALUE((long)ret))
1396 ret = (cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);
1398 ret = (cn ? NET_XMIT_DROP : ret);
1400 ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
1401 skb, __bpf_prog_run_save_cb, 0,
1403 if (ret && !IS_ERR_VALUE((long)ret))
1406 bpf_restore_data_end(skb, saved_data_end);
1407 __skb_pull(skb, offset);
1412 EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
1415 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
1416 * @sk: sock structure to manipulate
1417 * @type: The type of program to be executed
1419 * The socket passed is expected to be of type INET or INET6.
1421 * The program type passed in via @type must be suitable for sock
1422 * filtering. No further check is performed to assert that.
1424 * This function will return %-EPERM if an attached program was found
1425 * and it returned != 1 during execution. In all other cases, 0 is returned.
1427 int __cgroup_bpf_run_filter_sk(struct sock *sk,
1428 enum cgroup_bpf_attach_type atype)
1430 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1432 return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
1435 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
1438 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
1439 * provided by user sockaddr
1440 * @sk: sock struct that will use sockaddr
1441 * @uaddr: sockaddr struct provided by user
1442 * @type: The type of program to be executed
1443 * @t_ctx: Pointer to attach type specific context
1444 * @flags: Pointer to u32 which contains higher bits of BPF program
1445 * return value (OR'ed together).
1447 * socket is expected to be of type INET or INET6.
1449 * This function will return %-EPERM if an attached program is found and
1450 * returned value != 1 during execution. In all other cases, 0 is returned.
1452 int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
1453 struct sockaddr *uaddr,
1454 enum cgroup_bpf_attach_type atype,
1458 struct bpf_sock_addr_kern ctx = {
1463 struct sockaddr_storage unspec;
1464 struct cgroup *cgrp;
1466 /* Check socket family since not all sockets represent network
1467 * endpoint (e.g. AF_UNIX).
1469 if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
1473 memset(&unspec, 0, sizeof(unspec));
1474 ctx.uaddr = (struct sockaddr *)&unspec;
1477 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1478 return bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
1481 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
1484 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
1485 * @sk: socket to get cgroup from
1486 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
1487 * sk with connection information (IP addresses, etc.) May not contain
1488 * cgroup info if it is a req sock.
1489 * @type: The type of program to be executed
1491 * The socket passed is expected to be of type INET or INET6.
1493 * The program type passed in via @type must be suitable for sock_ops
1494 * filtering. No further check is performed to assert that.
1496 * This function will return %-EPERM if an attached program was found
1497 * and it returned != 1 during execution. In all other cases, 0 is returned.
1499 int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
1500 struct bpf_sock_ops_kern *sock_ops,
1501 enum cgroup_bpf_attach_type atype)
1503 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1505 return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
1508 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
1510 int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
1511 short access, enum cgroup_bpf_attach_type atype)
1513 struct cgroup *cgrp;
1514 struct bpf_cgroup_dev_ctx ctx = {
1515 .access_type = (access << 16) | dev_type,
1522 cgrp = task_dfl_cgroup(current);
1523 ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
1530 BPF_CALL_0(bpf_get_retval)
1532 struct bpf_cg_run_ctx *ctx =
1533 container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
1538 const struct bpf_func_proto bpf_get_retval_proto = {
1539 .func = bpf_get_retval,
1541 .ret_type = RET_INTEGER,
1544 BPF_CALL_1(bpf_set_retval, int, retval)
1546 struct bpf_cg_run_ctx *ctx =
1547 container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
1549 ctx->retval = retval;
1553 const struct bpf_func_proto bpf_set_retval_proto = {
1554 .func = bpf_set_retval,
1556 .ret_type = RET_INTEGER,
1557 .arg1_type = ARG_ANYTHING,
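/* BPF side (illustrative only): a cgroup program can override the error
 * ultimately returned to userspace, e.g.
 *
 *	bpf_set_retval(-ENOENT);
 *	return 0;
 *
 * makes a rejection surface as -ENOENT instead of the default -EPERM, and
 * bpf_get_retval() reads the current value.  Both helpers operate on
 * bpf_cg_run_ctx::retval, which bpf_prog_run_array_cg() seeds with its
 * @retval argument and returns to the caller.
 */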
1560 static const struct bpf_func_proto *
1561 cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1564 case BPF_FUNC_get_current_uid_gid:
1565 return &bpf_get_current_uid_gid_proto;
1566 case BPF_FUNC_get_local_storage:
1567 return &bpf_get_local_storage_proto;
1568 case BPF_FUNC_get_current_cgroup_id:
1569 return &bpf_get_current_cgroup_id_proto;
1570 case BPF_FUNC_perf_event_output:
1571 return &bpf_event_output_data_proto;
1572 case BPF_FUNC_get_retval:
1573 return &bpf_get_retval_proto;
1574 case BPF_FUNC_set_retval:
1575 return &bpf_set_retval_proto;
1577 return bpf_base_func_proto(func_id);
1581 static const struct bpf_func_proto *
1582 cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1584 return cgroup_base_func_proto(func_id, prog);
1587 static bool cgroup_dev_is_valid_access(int off, int size,
1588 enum bpf_access_type type,
1589 const struct bpf_prog *prog,
1590 struct bpf_insn_access_aux *info)
1592 const int size_default = sizeof(__u32);
1594 if (type == BPF_WRITE)
1597 if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
1599 /* The verifier guarantees that size > 0. */
1600 if (off % size != 0)
1604 case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
1605 bpf_ctx_record_field_size(info, size_default);
1606 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
1610 if (size != size_default)
1617 const struct bpf_prog_ops cg_dev_prog_ops = {
1620 const struct bpf_verifier_ops cg_dev_verifier_ops = {
1621 .get_func_proto = cgroup_dev_func_proto,
1622 .is_valid_access = cgroup_dev_is_valid_access,
1626 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
1628 * @head: sysctl table header
1629 * @table: sysctl table
1630 * @write: sysctl is being read (= 0) or written (= 1)
1631 * @buf: pointer to buffer (in and out)
1632 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
1633 * result is size of @new_buf if program set new value, initial value otherwise
1635 * @ppos: value-result argument: value is position at which read from or write
1636 * to sysctl is happening, result is new position if program overrode it,
1637 * initial value otherwise
1638 * @type: type of program to be executed
1640 * Program is run when sysctl is being accessed, either read or written, and
1641 * can allow or deny such access.
1643 * This function will return %-EPERM if an attached program is found and
1644 * returned value != 1 during execution. In all other cases 0 is returned.
1646 int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
1647 struct ctl_table *table, int write,
1648 char **buf, size_t *pcount, loff_t *ppos,
1649 enum cgroup_bpf_attach_type atype)
1651 struct bpf_sysctl_kern ctx = {
1657 .cur_len = PAGE_SIZE,
1662 struct cgroup *cgrp;
1666 ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
1668 table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
1669 /* Let BPF program decide how to proceed. */
1673 if (write && *buf && *pcount) {
1674 /* BPF program should be able to override new value with a
1675 * buffer bigger than provided by user.
1677 ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
1678 ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
1680 memcpy(ctx.new_val, *buf, ctx.new_len);
1682 /* Let BPF program decide how to proceed. */
1688 cgrp = task_dfl_cgroup(current);
1689 ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
1695 if (ret == 1 && ctx.new_updated) {
1698 *pcount = ctx.new_len;
1707 static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
1708 struct bpf_sockopt_buf *buf)
1710 if (unlikely(max_optlen < 0))
1713 if (unlikely(max_optlen > PAGE_SIZE)) {
1714 /* We don't expose optvals that are greater than PAGE_SIZE
1715 * to the BPF program.
1717 max_optlen = PAGE_SIZE;
1720 if (max_optlen <= sizeof(buf->data)) {
1721 /* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
1722 * bytes avoid the cost of kzalloc.
1724 ctx->optval = buf->data;
1725 ctx->optval_end = ctx->optval + max_optlen;
1729 ctx->optval = kzalloc(max_optlen, GFP_USER);
1733 ctx->optval_end = ctx->optval + max_optlen;
1738 static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
1739 struct bpf_sockopt_buf *buf)
1741 if (ctx->optval == buf->data)
1746 static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
1747 struct bpf_sockopt_buf *buf)
1749 return ctx->optval != buf->data;
1752 int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
1753 int *optname, char __user *optval,
1754 int *optlen, char **kernel_optval)
1756 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1757 struct bpf_sockopt_buf buf = {};
1758 struct bpf_sockopt_kern ctx = {
1761 .optname = *optname,
1763 int ret, max_optlen;
1765 /* Allocate a bit more than the initial user buffer for
1766 * BPF program. The canonical use case is overriding
1767 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
1769 max_optlen = max_t(int, 16, *optlen);
1770 max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
1774 ctx.optlen = *optlen;
1776 if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
1782 ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
1783 &ctx, bpf_prog_run, 0, NULL);
1789 if (ctx.optlen == -1) {
1790 /* optlen set to -1, bypass kernel */
1792 } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
1793 /* optlen is out of bounds */
1796 /* optlen within bounds, run kernel handler */
1799 /* export any potential modifications */
1801 *optname = ctx.optname;
1803 /* optlen == 0 from BPF indicates that we should
1804 * use original userspace data.
1806 if (ctx.optlen != 0) {
1807 *optlen = ctx.optlen;
1808 /* We've used bpf_sockopt_kern->buf as an intermediary
1809 * storage, but the BPF program indicates that we need
1810 * to pass this data to the kernel setsockopt handler.
1811 * No way to export on-stack buf, have to allocate a new buffer and copy.
1814 if (!sockopt_buf_allocated(&ctx, &buf)) {
1815 void *p = kmalloc(ctx.optlen, GFP_USER);
1821 memcpy(p, ctx.optval, ctx.optlen);
1824 *kernel_optval = ctx.optval;
1826 /* export and don't free sockopt buf */
1832 sockopt_free_buf(&ctx, &buf);
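/* Summary of the contract implemented above: a program returning 1 allows
 * the setsockopt() to proceed; setting ctx.optlen to -1 requests bypassing
 * the kernel handler; ctx.optlen == 0 keeps the original userspace data; any
 * other in-bounds optlen exports the (possibly modified) optval to the
 * kernel handler through *kernel_optval.
 */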
1836 int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
1837 int optname, char __user *optval,
1838 int __user *optlen, int max_optlen,
1841 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1842 struct bpf_sockopt_buf buf = {};
1843 struct bpf_sockopt_kern ctx = {
1847 .current_task = current,
1851 ctx.optlen = max_optlen;
1852 max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
1857 /* If kernel getsockopt finished successfully,
1858 * copy whatever was returned to the user back
1859 * into our temporary buffer. Set optlen to the
1860 * one that kernel returned as well to let
1861 * BPF programs inspect the value.
1864 if (get_user(ctx.optlen, optlen)) {
1869 if (ctx.optlen < 0) {
1874 if (copy_from_user(ctx.optval, optval,
1875 min(ctx.optlen, max_optlen)) != 0) {
1882 ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
1883 &ctx, bpf_prog_run, retval, NULL);
1889 if (ctx.optlen > max_optlen || ctx.optlen < 0) {
1894 if (ctx.optlen != 0) {
1895 if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
1896 put_user(ctx.optlen, optlen)) {
1903 sockopt_free_buf(&ctx, &buf);
1907 int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
1908 int optname, void *optval,
1909 int *optlen, int retval)
1911 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1912 struct bpf_sockopt_kern ctx = {
1918 .optval_end = optval + *optlen,
1919 .current_task = current,
1923 /* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
1924 * user data back into BPF buffer when retval != 0. This is
1925 * done as an optimization to avoid extra copy, assuming
1926 * kernel won't populate the data in case of an error.
1927 * Here we always pass the data and memset() should
1928 * be called if that data shouldn't be "exported".
1931 ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
1932 &ctx, bpf_prog_run, retval, NULL);
1936 if (ctx.optlen > *optlen)
1939 /* BPF programs can shrink the buffer, export the modifications.
1941 if (ctx.optlen != 0)
1942 *optlen = ctx.optlen;
1948 static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
1951 ssize_t tmp_ret = 0, ret;
1953 if (dir->header.parent) {
1954 tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
1959 ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
1966 /* Avoid leading slash. */
1970 tmp_ret = strscpy(*bufp, "/", *lenp);
1976 return ret + tmp_ret;
1979 BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
1980 size_t, buf_len, u64, flags)
1982 ssize_t tmp_ret = 0, ret;
1987 if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
1990 tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
1995 ret = strscpy(buf, ctx->table->procname, buf_len);
1997 return ret < 0 ? ret : tmp_ret + ret;
2000 static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
2001 .func = bpf_sysctl_get_name,
2003 .ret_type = RET_INTEGER,
2004 .arg1_type = ARG_PTR_TO_CTX,
2005 .arg2_type = ARG_PTR_TO_MEM,
2006 .arg3_type = ARG_CONST_SIZE,
2007 .arg4_type = ARG_ANYTHING,
2010 static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
2019 if (!src || !src_len) {
2020 memset(dst, 0, dst_len);
2024 memcpy(dst, src, min(dst_len, src_len));
2026 if (dst_len > src_len) {
2027 memset(dst + src_len, '\0', dst_len - src_len);
2031 dst[dst_len - 1] = '\0';
2036 BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
2037 char *, buf, size_t, buf_len)
2039 return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
2042 static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
2043 .func = bpf_sysctl_get_current_value,
2045 .ret_type = RET_INTEGER,
2046 .arg1_type = ARG_PTR_TO_CTX,
2047 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
2048 .arg3_type = ARG_CONST_SIZE,
2051 BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
2056 memset(buf, '\0', buf_len);
2059 return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
2062 static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
2063 .func = bpf_sysctl_get_new_value,
2065 .ret_type = RET_INTEGER,
2066 .arg1_type = ARG_PTR_TO_CTX,
2067 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
2068 .arg3_type = ARG_CONST_SIZE,
2071 BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
2072 const char *, buf, size_t, buf_len)
2074 if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
2077 if (buf_len > PAGE_SIZE - 1)
2080 memcpy(ctx->new_val, buf, buf_len);
2081 ctx->new_len = buf_len;
2082 ctx->new_updated = 1;
2087 static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
2088 .func = bpf_sysctl_set_new_value,
2090 .ret_type = RET_INTEGER,
2091 .arg1_type = ARG_PTR_TO_CTX,
2092 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
2093 .arg3_type = ARG_CONST_SIZE,
2096 static const struct bpf_func_proto *
2097 sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2100 case BPF_FUNC_strtol:
2101 return &bpf_strtol_proto;
2102 case BPF_FUNC_strtoul:
2103 return &bpf_strtoul_proto;
2104 case BPF_FUNC_sysctl_get_name:
2105 return &bpf_sysctl_get_name_proto;
2106 case BPF_FUNC_sysctl_get_current_value:
2107 return &bpf_sysctl_get_current_value_proto;
2108 case BPF_FUNC_sysctl_get_new_value:
2109 return &bpf_sysctl_get_new_value_proto;
2110 case BPF_FUNC_sysctl_set_new_value:
2111 return &bpf_sysctl_set_new_value_proto;
2112 case BPF_FUNC_ktime_get_coarse_ns:
2113 return &bpf_ktime_get_coarse_ns_proto;
2115 return cgroup_base_func_proto(func_id, prog);
2119 static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
2120 const struct bpf_prog *prog,
2121 struct bpf_insn_access_aux *info)
2123 const int size_default = sizeof(__u32);
2125 if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
2129 case bpf_ctx_range(struct bpf_sysctl, write):
2130 if (type != BPF_READ)
2132 bpf_ctx_record_field_size(info, size_default);
2133 return bpf_ctx_narrow_access_ok(off, size, size_default);
2134 case bpf_ctx_range(struct bpf_sysctl, file_pos):
2135 if (type == BPF_READ) {
2136 bpf_ctx_record_field_size(info, size_default);
2137 return bpf_ctx_narrow_access_ok(off, size, size_default);
2139 return size == size_default;
2146 static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
2147 const struct bpf_insn *si,
2148 struct bpf_insn *insn_buf,
2149 struct bpf_prog *prog, u32 *target_size)
2151 struct bpf_insn *insn = insn_buf;
2155 case offsetof(struct bpf_sysctl, write):
2156 *insn++ = BPF_LDX_MEM(
2157 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
2158 bpf_target_off(struct bpf_sysctl_kern, write,
2159 sizeof_field(struct bpf_sysctl_kern,
2163 case offsetof(struct bpf_sysctl, file_pos):
2164 /* ppos is a pointer so it should be accessed via indirect
2165 * loads and stores. Also for stores additional temporary
2166 * register is used since neither src_reg nor dst_reg can be overridden.
2169 if (type == BPF_WRITE) {
2170 int treg = BPF_REG_9;
2172 if (si->src_reg == treg || si->dst_reg == treg)
2174 if (si->src_reg == treg || si->dst_reg == treg)
2176 *insn++ = BPF_STX_MEM(
2177 BPF_DW, si->dst_reg, treg,
2178 offsetof(struct bpf_sysctl_kern, tmp_reg));
2179 *insn++ = BPF_LDX_MEM(
2180 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2182 offsetof(struct bpf_sysctl_kern, ppos));
2183 *insn++ = BPF_STX_MEM(
2184 BPF_SIZEOF(u32), treg, si->src_reg,
2185 bpf_ctx_narrow_access_offset(
2186 0, sizeof(u32), sizeof(loff_t)));
2187 *insn++ = BPF_LDX_MEM(
2188 BPF_DW, treg, si->dst_reg,
2189 offsetof(struct bpf_sysctl_kern, tmp_reg));
2191 *insn++ = BPF_LDX_MEM(
2192 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2193 si->dst_reg, si->src_reg,
2194 offsetof(struct bpf_sysctl_kern, ppos));
2195 read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
2196 *insn++ = BPF_LDX_MEM(
2197 BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
2198 bpf_ctx_narrow_access_offset(
2199 0, read_size, sizeof(loff_t)));
2201 *target_size = sizeof(u32);
2205 return insn - insn_buf;
2208 const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
2209 .get_func_proto = sysctl_func_proto,
2210 .is_valid_access = sysctl_is_valid_access,
2211 .convert_ctx_access = sysctl_convert_ctx_access,
2214 const struct bpf_prog_ops cg_sysctl_prog_ops = {
2218 BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
2220 const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;
2222 return net->net_cookie;
2225 static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
2226 .func = bpf_get_netns_cookie_sockopt,
2228 .ret_type = RET_INTEGER,
2229 .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
2233 static const struct bpf_func_proto *
2234 cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2238 case BPF_FUNC_get_netns_cookie:
2239 return &bpf_get_netns_cookie_sockopt_proto;
2240 case BPF_FUNC_sk_storage_get:
2241 return &bpf_sk_storage_get_proto;
2242 case BPF_FUNC_sk_storage_delete:
2243 return &bpf_sk_storage_delete_proto;
2244 case BPF_FUNC_setsockopt:
2245 if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2246 return &bpf_sk_setsockopt_proto;
2248 case BPF_FUNC_getsockopt:
2249 if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2250 return &bpf_sk_getsockopt_proto;
2254 case BPF_FUNC_tcp_sock:
2255 return &bpf_tcp_sock_proto;
2258 return cgroup_base_func_proto(func_id, prog);
2262 static bool cg_sockopt_is_valid_access(int off, int size,
2263 enum bpf_access_type type,
2264 const struct bpf_prog *prog,
2265 struct bpf_insn_access_aux *info)
2267 const int size_default = sizeof(__u32);
2269 if (off < 0 || off >= sizeof(struct bpf_sockopt))
2272 if (off % size != 0)
2275 if (type == BPF_WRITE) {
2277 case offsetof(struct bpf_sockopt, retval):
2278 if (size != size_default)
2280 return prog->expected_attach_type ==
2281 BPF_CGROUP_GETSOCKOPT;
2282 case offsetof(struct bpf_sockopt, optname):
2284 case offsetof(struct bpf_sockopt, level):
2285 if (size != size_default)
2287 return prog->expected_attach_type ==
2288 BPF_CGROUP_SETSOCKOPT;
2289 case offsetof(struct bpf_sockopt, optlen):
2290 return size == size_default;
2297 case offsetof(struct bpf_sockopt, sk):
2298 if (size != sizeof(__u64))
2300 info->reg_type = PTR_TO_SOCKET;
2302 case offsetof(struct bpf_sockopt, optval):
2303 if (size != sizeof(__u64))
2305 info->reg_type = PTR_TO_PACKET;
2307 case offsetof(struct bpf_sockopt, optval_end):
2308 if (size != sizeof(__u64))
2310 info->reg_type = PTR_TO_PACKET_END;
2312 case offsetof(struct bpf_sockopt, retval):
2313 if (size != size_default)
2315 return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
2317 if (size != size_default)
2324 #define CG_SOCKOPT_ACCESS_FIELD(T, F) \
2325 T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F), \
2326 si->dst_reg, si->src_reg, \
2327 offsetof(struct bpf_sockopt_kern, F))
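/* For example, CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk) expands to
 *
 *	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, sk),
 *		    si->dst_reg, si->src_reg,
 *		    offsetof(struct bpf_sockopt_kern, sk))
 *
 * i.e. accesses to struct bpf_sockopt fields from the program are rewritten
 * into direct loads/stores on the kernel-side struct bpf_sockopt_kern.
 */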
2329 static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
2330 const struct bpf_insn *si,
2331 struct bpf_insn *insn_buf,
2332 struct bpf_prog *prog,
2335 struct bpf_insn *insn = insn_buf;
2338 case offsetof(struct bpf_sockopt, sk):
2339 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
2341 case offsetof(struct bpf_sockopt, level):
2342 if (type == BPF_WRITE)
2343 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
2345 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
2347 case offsetof(struct bpf_sockopt, optname):
2348 if (type == BPF_WRITE)
2349 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
2351 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
2353 case offsetof(struct bpf_sockopt, optlen):
2354 if (type == BPF_WRITE)
2355 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
2357 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
2359 case offsetof(struct bpf_sockopt, retval):
2360 BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0);
2362 if (type == BPF_WRITE) {
2363 int treg = BPF_REG_9;
2365 if (si->src_reg == treg || si->dst_reg == treg)
2367 if (si->src_reg == treg || si->dst_reg == treg)
2369 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg,
2370 offsetof(struct bpf_sockopt_kern, tmp_reg));
2371 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2373 offsetof(struct bpf_sockopt_kern, current_task));
2374 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2376 offsetof(struct task_struct, bpf_ctx));
2377 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2379 offsetof(struct bpf_cg_run_ctx, retval));
2380 *insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg,
2381 offsetof(struct bpf_sockopt_kern, tmp_reg));
2383 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2384 si->dst_reg, si->src_reg,
2385 offsetof(struct bpf_sockopt_kern, current_task));
2386 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2387 si->dst_reg, si->dst_reg,
2388 offsetof(struct task_struct, bpf_ctx));
2389 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2390 si->dst_reg, si->dst_reg,
2391 offsetof(struct bpf_cg_run_ctx, retval));
2394 case offsetof(struct bpf_sockopt, optval):
2395 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
2397 case offsetof(struct bpf_sockopt, optval_end):
2398 *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
2402 return insn - insn_buf;
2405 static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
2407 const struct bpf_prog *prog)
2409 /* Nothing to do for sockopt argument. The data is kzalloc'ated.
2414 const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
2415 .get_func_proto = cg_sockopt_func_proto,
2416 .is_valid_access = cg_sockopt_is_valid_access,
2417 .convert_ctx_access = cg_sockopt_convert_ctx_access,
2418 .gen_prologue = cg_sockopt_get_prologue,
2421 const struct bpf_prog_ops cg_sockopt_prog_ops = {