//SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
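/*
 * Per-cpu pointers to the storages of the program currently running on
 * this CPU, one slot per storage type (shared and per-cpu). They are set
 * up right before a cgroup BPF program is invoked, so the
 * bpf_get_local_storage() helper can find its data without a map lookup.
 */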
DEFINE_PER_CPU(struct bpf_cgroup_storage*,
	       bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
#ifdef CONFIG_CGROUP_BPF

#define LOCAL_STORAGE_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
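/*
 * A cgroup storage map keeps its elements in an rb-tree ordered by
 * (cgroup_inode_id, attach_type) - the layout of struct
 * bpf_cgroup_storage_key - plus a plain list used for key iteration from
 * userspace. Both are protected by 'lock'.
 */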
struct bpf_cgroup_storage_map {
	struct bpf_map map;

	spinlock_t lock;
	struct bpf_prog *prog;
	struct rb_root root;
	struct list_head list;
};
static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
{
	return container_of(map, struct bpf_cgroup_storage_map, map);
}
static int bpf_cgroup_storage_key_cmp(
	const struct bpf_cgroup_storage_key *key1,
	const struct bpf_cgroup_storage_key *key2)
{
	if (key1->cgroup_inode_id < key2->cgroup_inode_id)
		return -1;
	else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
		return 1;
	else if (key1->attach_type < key2->attach_type)
		return -1;
	else if (key1->attach_type > key2->attach_type)
		return 1;
	return 0;
}
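/*
 * Find the storage element for a given key. Takes map->lock unless the
 * caller already holds it (locked == true, e.g. get_next_key below).
 */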
static struct bpf_cgroup_storage *cgroup_storage_lookup(
	struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key,
	bool locked)
{
	struct rb_root *root = &map->root;
	struct rb_node *node;

	if (!locked)
		spin_lock_bh(&map->lock);

	node = root->rb_node;
	while (node) {
		struct bpf_cgroup_storage *storage;

		storage = container_of(node, struct bpf_cgroup_storage, node);

		switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) {
		case -1:
			node = node->rb_left;
			break;
		case 1:
			node = node->rb_right;
			break;
		default:
			if (!locked)
				spin_unlock_bh(&map->lock);
			return storage;
		}
	}

	if (!locked)
		spin_unlock_bh(&map->lock);

	return NULL;
}
static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
				 struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);

		parent = *new;
		switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) {
		case -1:
			new = &((*new)->rb_left);
			break;
		case 1:
			new = &((*new)->rb_right);
			break;
		default:
			return -EEXIST;
		}
	}

	rb_link_node(&storage->node, parent, new);
	rb_insert_color(&storage->node, root);

	return 0;
}
static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;

	storage = cgroup_storage_lookup(map, key, false);
	if (!storage)
		return NULL;

	return &READ_ONCE(storage->buf)->data[0];
}
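/*
 * Updates of the shared storage replace the whole buffer: a new
 * bpf_storage_buffer is allocated, filled from the userspace value and
 * swapped in with xchg(), while readers dereference storage->buf under
 * RCU, so the old buffer is freed through kfree_rcu().
 */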
static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
				      void *value, u64 flags)
{
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	struct bpf_storage_buffer *new;

	if (flags != BPF_ANY && flags != BPF_EXIST)
		return -EINVAL;

	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
					key, false);
	if (!storage)
		return -ENOENT;

	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
			   map->value_size,
			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
			   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);

	new = xchg(&storage->buf, new);
	kfree_rcu(new, rcu);

	return 0;
}
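/*
 * Per-cpu storage is copied to and from userspace as an array of
 * round_up(value_size, 8) byte chunks, one chunk per possible CPU, in the
 * same layout as other per-cpu maps.
 */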
int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key,
				   void *value)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key,
				     void *value, u64 map_flags)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
		return -EINVAL;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
				value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
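/*
 * Key iteration from userspace walks map->list rather than the rb-tree:
 * with no key given it starts from the first linked storage, otherwise it
 * returns the key of the element linked after the one that was found.
 */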
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key,
				       void *_next_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage_key *next = _next_key;
	struct bpf_cgroup_storage *storage;

	spin_lock_bh(&map->lock);

	if (list_empty(&map->list))
		goto enoent;

	if (key) {
		storage = cgroup_storage_lookup(map, key, true);
		if (!storage)
			goto enoent;

		storage = list_next_entry(storage, list);
		if (!storage)
			goto enoent;
	} else {
		storage = list_first_entry(&map->list,
					   struct bpf_cgroup_storage, list);
	}

	spin_unlock_bh(&map->lock);
	next->attach_type = storage->key.attach_type;
	next->cgroup_inode_id = storage->key.cgroup_inode_id;
	return 0;

enoent:
	spin_unlock_bh(&map->lock);
	return -ENOENT;
}
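/*
 * Map creation is driven entirely by the BPF syscall attributes: the key
 * must be struct bpf_cgroup_storage_key, value_size is bounded by
 * PAGE_SIZE, and max_entries must be 0, since elements are created
 * implicitly when a program using the map is attached to a cgroup.
 */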
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_cgroup_storage_map *map;

	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
		return ERR_PTR(-EINVAL);

	if (attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK)
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (attr->max_entries)
		/* max_entries is not used and enforced to be 0 */
		return ERR_PTR(-EINVAL);

	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
			   __GFP_ZERO | GFP_USER, numa_node);
	if (!map)
		return ERR_PTR(-ENOMEM);

	map->map.pages = round_up(sizeof(struct bpf_cgroup_storage_map),
				  PAGE_SIZE) >> PAGE_SHIFT;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&map->map, attr);

	spin_lock_init(&map->lock);
	map->root = RB_ROOT;
	INIT_LIST_HEAD(&map->list);

	return &map->map;
}
static void cgroup_storage_map_free(struct bpf_map *_map)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);

	WARN_ON(!RB_EMPTY_ROOT(&map->root));
	WARN_ON(!list_empty(&map->list));

	kfree(map);
}

static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}
const struct bpf_map_ops cgroup_storage_map_ops = {
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = cgroup_storage_get_next_key,
	.map_lookup_elem = cgroup_storage_lookup_elem,
	.map_update_elem = cgroup_storage_update_elem,
	.map_delete_elem = cgroup_storage_delete_elem,
	.map_check_btf = map_check_no_btf,
};
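/*
 * A cgroup storage map can be used by a single BPF program only, and a
 * program can use at most one map per storage type: assign() checks and
 * records this ownership under map->lock, release() drops it when the
 * program goes away.
 */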
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	int ret = -EBUSY;

	spin_lock_bh(&map->lock);

	if (map->prog && map->prog != prog)
		goto unlock;
	if (prog->aux->cgroup_storage[stype] &&
	    prog->aux->cgroup_storage[stype] != _map)
		goto unlock;

	map->prog = prog;
	prog->aux->cgroup_storage[stype] = _map;
	ret = 0;
unlock:
	spin_unlock_bh(&map->lock);

	return ret;
}
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);

	spin_lock_bh(&map->lock);
	if (map->prog == prog) {
		WARN_ON(prog->aux->cgroup_storage[stype] != _map);
		map->prog = NULL;
		prog->aux->cgroup_storage[stype] = NULL;
	}
	spin_unlock_bh(&map->lock);
}
static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
	size_t size;

	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
		size = sizeof(struct bpf_storage_buffer) + map->value_size;
		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
				  PAGE_SIZE) >> PAGE_SHIFT;
	} else {
		size = map->value_size;
		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
				  PAGE_SIZE) >> PAGE_SHIFT;
	}

	return size;
}
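/*
 * Storage elements are allocated when a program is attached to a cgroup,
 * charged against the map owner's memlock limit and sized according to
 * the storage type: one buffer for shared storage, a per-cpu area with
 * 8-byte-rounded value size for per-cpu storage.
 */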
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype)
{
	struct bpf_cgroup_storage *storage;
	struct bpf_map *map;
	gfp_t flags;
	size_t size;
	u32 pages;

	map = prog->aux->cgroup_storage[stype];
	if (!map)
		return NULL;

	size = bpf_cgroup_storage_calculate_size(map, &pages);

	if (bpf_map_charge_memlock(map, pages))
		return ERR_PTR(-EPERM);

	storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
			       __GFP_ZERO | GFP_USER, map->numa_node);
	if (!storage)
		goto enomem;

	flags = __GFP_ZERO | GFP_USER;

	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		storage->buf = kmalloc_node(size, flags, map->numa_node);
		if (!storage->buf)
			goto enomem;
	} else {
		storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
		if (!storage->percpu_buf)
			goto enomem;
	}

	storage->map = (struct bpf_cgroup_storage_map *)map;

	return storage;

enomem:
	bpf_map_uncharge_memlock(map, pages);
	kfree(storage);
	return ERR_PTR(-ENOMEM);
}
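/*
 * Freeing is deferred through RCU so that lookups still holding a pointer
 * to the old buffer cannot race with the release of its memory.
 */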
static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	kfree(storage->buf);
	kfree(storage);
}
static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	free_percpu(storage->percpu_buf);
	kfree(storage);
}
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_map *map;
	u32 pages;

	if (!storage)
		return;

	map = &storage->map->map;

	bpf_cgroup_storage_calculate_size(map, &pages);
	bpf_map_uncharge_memlock(map, pages);

	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED)
		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
	else
		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}
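/*
 * link/unlink tie a storage element to a concrete (cgroup, attach_type)
 * pair once the attachment succeeds, making it visible to lookups and to
 * key iteration, and remove it again on detach.
 */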
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type)
{
	struct bpf_cgroup_storage_map *map;

	if (!storage)
		return;

	storage->key.attach_type = type;
	storage->key.cgroup_inode_id = cgroup->kn->id.id;

	map = storage->map;

	spin_lock_bh(&map->lock);
	WARN_ON(cgroup_storage_insert(map, storage));
	list_add(&storage->list, &map->list);
	spin_unlock_bh(&map->lock);
}
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
	struct bpf_cgroup_storage_map *map;
	struct rb_root *root;

	if (!storage)
		return;

	map = storage->map;

	spin_lock_bh(&map->lock);
	root = &map->root;
	rb_erase(&storage->node, root);

	list_del(&storage->list);
	spin_unlock_bh(&map->lock);
}
#endif