/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have BPF programs attached to it; currently a program
 * used to parse packets and a program to provide a verdict and redirect
 * decision on the packet are supported. Any programs attached to a sock
 * map are inherited by sock objects when they are added to the map. If
 * no BPF programs are attached, the sock object may only be used for sock
 * redirect.
 *
 * A sock object may be in multiple maps, but can only inherit a single
 * parse or verdict program. If adding a sock object to a map would result
 * in having multiple parsing programs the update will return an EBUSY error.
 *
 * For reference this program is similar to devmap used in the XDP context;
 * reviewing these together may be useful. For an example please review
 * ./samples/bpf/sockmap/ and the sketch below.
 */

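/* Illustrative sketch (not part of this file): a minimal parser/verdict
 * program pair in BPF C, loosely modeled on the sockmap samples. The map
 * name, section names, and element count here are hypothetical; the parser
 * accepts each buffer as one message, and the verdict program redirects
 * every message to the socket stored at index 0.
 *
 *	struct bpf_map_def SEC("maps") sock_map = {
 *		.type = BPF_MAP_TYPE_SOCKMAP,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(int),
 *		.max_entries = 20,
 *	};
 *
 *	SEC("sk_skb/parser")
 *	int bpf_prog_parser(struct __sk_buff *skb)
 *	{
 *		return skb->len;
 *	}
 *
 *	SEC("sk_skb/verdict")
 *	int bpf_prog_verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(&sock_map, 0, 0);
 *	}
 */
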
#include <linux/bpf.h>
#include <net/sock.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <net/strparser.h>
#include <net/tcp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sock_map;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
};

enum smap_psock_state {
	SMAP_TX_RUNNING,
};

struct smap_psock_map_entry {
	struct list_head list;
	struct sock **entry;
};

struct smap_psock {
	struct rcu_head rcu;
	/* refcnt is used inside sk_callback_lock */
	u32 refcnt;

	/* datapath variables */
	struct sk_buff_head rxqueue;
	bool strp_enabled;

	/* datapath error path cache across tx work invocations */
	int save_rem;
	int save_off;
	struct sk_buff *save_skb;

	struct strparser strp;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
	struct list_head maps;

	/* Back reference used when sock callbacks trigger sockmap operations */
	struct sock *sock;
	unsigned long state;

	struct work_struct tx_work;
	struct work_struct gc_work;

	void (*save_data_ready)(struct sock *sk);
	void (*save_write_space)(struct sock *sk);
	void (*save_state_change)(struct sock *sk);
};

static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
	int rc;

	if (unlikely(!prog))
		return SK_DROP;

	skb_orphan(skb);
	skb->sk = psock->sock;
	bpf_compute_data_end(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;

	return rc;
}

static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
	struct sock *sk;
	int rc;

	/* Because we use per-cpu values to feed input from the sock redirect
	 * BPF program to the do_sk_redirect_map() call, we need to ensure we
	 * are not preempted. RCU read lock is not sufficient in this case
	 * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
	 */
	preempt_disable();
	rc = smap_verdict_func(psock, skb);
	switch (rc) {
	case SK_REDIRECT:
		sk = do_sk_redirect_map();
		preempt_enable();
		if (likely(sk)) {
			struct smap_psock *peer = smap_psock_sk(sk);

			if (likely(peer &&
				   test_bit(SMAP_TX_RUNNING, &peer->state) &&
				   !sock_flag(sk, SOCK_DEAD) &&
				   sock_writeable(sk))) {
				skb_set_owner_w(skb, sk);
				skb_queue_tail(&peer->rxqueue, skb);
				schedule_work(&peer->tx_work);
				break;
			}
		}
		/* Fall through and free skb otherwise */
	case SK_DROP:
	default:
		if (rc != SK_REDIRECT)
			preempt_enable();
		kfree_skb(skb);
	}
}

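/* For reference, the per-cpu handoff that makes the preempt_disable()
 * above necessary looks roughly like the following (a sketch of the
 * helper side, which lives in net/core/filter.c; field names are
 * approximate):
 *
 *	BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map,
 *		   u32, key, u64, flags)
 *	{
 *		struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 *
 *		ri->ifindex = key;
 *		ri->flags = flags;
 *		ri->map = map;
 *		return SK_REDIRECT;
 *	}
 *
 * do_sk_redirect_map() then reads back the same per-cpu slot, so being
 * preempted between running the program and doing the lookup could cross
 * wires between two redirects on the same CPU.
 */
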
static void smap_report_sk_error(struct smap_psock *psock, int err)
{
	struct sock *sk = psock->sock;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock);

/* Called with lock_sock(sk) held */
static void smap_state_change(struct sock *sk)
{
	struct smap_psock_map_entry *e, *tmp;
	struct smap_psock *psock;
	struct socket_wq *wq;
	struct sock *osk;

	rcu_read_lock();

	/* Allowing transitions into the established and syn_recv states
	 * allows for early binding of sockets to a smap object before the
	 * connection is established.
	 */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
	case TCP_ESTABLISHED:
		break;
	case TCP_CLOSE_WAIT:
	case TCP_CLOSING:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LISTEN:
		break;
	case TCP_CLOSE:
		/* Only release if the map entry is in fact the sock in
		 * question. There is a case where the operator deletes
		 * the sock from the map, but the TCP sock is closed before
		 * the psock is detached. Use cmpxchg to verify that the
		 * correct sock is removed.
		 */
		psock = smap_psock_sk(sk);
		if (unlikely(!psock))
			break;
		write_lock_bh(&sk->sk_callback_lock);
		list_for_each_entry_safe(e, tmp, &psock->maps, list) {
			osk = cmpxchg(e->entry, sk, NULL);
			if (osk == sk) {
				list_del(&e->list);
				smap_release_sock(psock, sk);
			}
		}
		write_unlock_bh(&sk->sk_callback_lock);
		break;
	default:
		psock = smap_psock_sk(sk);
		if (unlikely(!psock))
			break;
		smap_report_sk_error(psock, EPIPE);
		break;
	}

	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void smap_read_sock_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	smap_do_verdict(psock, skb);
	rcu_read_unlock();
}

/* Called with lock held on socket */
static void smap_data_ready(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock)) {
		write_lock_bh(&sk->sk_callback_lock);
		strp_data_ready(&psock->strp);
		write_unlock_bh(&sk->sk_callback_lock);
	}
	rcu_read_unlock();
}

static void smap_tx_work(struct work_struct *w)
{
	struct smap_psock *psock;
	struct sk_buff *skb;
	int rem, off, n;

	psock = container_of(w, struct smap_psock, tx_work);

	/* lock sock to avoid losing sk_socket at some point during loop */
	lock_sock(psock->sock);
	if (psock->save_skb) {
		skb = psock->save_skb;
		rem = psock->save_rem;
		off = psock->save_off;
		psock->save_skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->rxqueue))) {
		rem = skb->len;
		off = 0;
start:
		do {
			if (likely(psock->sock->sk_socket))
				n = skb_send_sock_locked(psock->sock,
							 skb, off, rem);
			else
				n = -EINVAL;
			if (n <= 0) {
				if (n == -EAGAIN) {
					/* Retry when space is available */
					psock->save_skb = skb;
					psock->save_rem = rem;
					psock->save_off = off;
					goto out;
				}
				/* Hard errors break pipe and stop xmit */
				smap_report_sk_error(psock, n ? -n : EPIPE);
				clear_bit(SMAP_TX_RUNNING, &psock->state);
				kfree_skb(skb);
				goto out;
			}
			rem -= n;
			off += n;
		} while (rem > 0);
		kfree_skb(skb);
	}
out:
	release_sock(psock->sock);
}

static void smap_write_space(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
		schedule_work(&psock->tx_work);
	rcu_read_unlock();
}

static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
{
	if (!psock->strp_enabled)
		return;
	sk->sk_data_ready = psock->save_data_ready;
	sk->sk_write_space = psock->save_write_space;
	sk->sk_state_change = psock->save_state_change;
	psock->save_data_ready = NULL;
	psock->save_write_space = NULL;
	psock->save_state_change = NULL;
	strp_stop(&psock->strp);
	psock->strp_enabled = false;
}

static void smap_destroy_psock(struct rcu_head *rcu)
{
	struct smap_psock *psock = container_of(rcu,
						struct smap_psock, rcu);

	/* Now that a grace period has passed there is no longer
	 * any reference to this sock in the sockmap so we can
	 * destroy the psock, strparser, and bpf programs. But,
	 * because we use workqueue sync operations we can not
	 * do it in rcu context.
	 */
	schedule_work(&psock->gc_work);
}

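/* Teardown therefore happens in two stages (a summary of the code above
 * and of smap_gc_work() below): smap_release_sock() drops the last refcnt
 * and calls call_rcu_sched(); after the grace period smap_destroy_psock()
 * runs in RCU callback context, where sleeping is not allowed, so it only
 * queues gc_work; smap_gc_work() finally runs in process context, where
 * strp_done() and cancel_work_sync() may block.
 */
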
static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
	psock->refcnt--;
	if (psock->refcnt)
		return;

	smap_stop_sock(psock, sock);
	clear_bit(SMAP_TX_RUNNING, &psock->state);
	rcu_assign_sk_user_data(sock, NULL);
	call_rcu_sched(&psock->rcu, smap_destroy_psock);
}

static int smap_parse_func_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;
	struct bpf_prog *prog;
	int rc;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	prog = READ_ONCE(psock->bpf_parse);

	if (unlikely(!prog)) {
		rcu_read_unlock();
		return skb->len;
	}

	/* Attach the socket for the bpf program to use if needed. We can
	 * do this because strparser clones the skb before handing it to an
	 * upper layer, meaning skb_orphan has been called. We NULL sk on the
	 * way out to ensure we don't trigger a BUG_ON in skb/sk operations
	 * later and because we are not charging the memory of this skb to
	 * any socket yet.
	 */
	skb->sk = psock->sock;
	bpf_compute_data_end(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;
	rcu_read_unlock();
	return rc;
}

static int smap_read_sock_done(struct strparser *strp, int err)
{
	return err;
}

static int smap_init_sock(struct smap_psock *psock,
			  struct sock *sk)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = smap_read_sock_strparser,
		.parse_msg = smap_parse_func_strparser,
		.read_sock_done = smap_read_sock_done,
	};

	return strp_init(&psock->strp, sk, &cb);
}

static void smap_init_progs(struct smap_psock *psock,
			    struct bpf_stab *stab,
			    struct bpf_prog *verdict,
			    struct bpf_prog *parse)
{
	struct bpf_prog *orig_parse, *orig_verdict;

	orig_parse = xchg(&psock->bpf_parse, parse);
	orig_verdict = xchg(&psock->bpf_verdict, verdict);

	if (orig_verdict)
		bpf_prog_put(orig_verdict);
	if (orig_parse)
		bpf_prog_put(orig_parse);
}

static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
{
	if (sk->sk_data_ready == smap_data_ready)
		return;
	psock->save_data_ready = sk->sk_data_ready;
	psock->save_write_space = sk->sk_write_space;
	psock->save_state_change = sk->sk_state_change;
	sk->sk_data_ready = smap_data_ready;
	sk->sk_write_space = smap_write_space;
	sk->sk_state_change = smap_state_change;
	psock->strp_enabled = true;
}

static void sock_map_remove_complete(struct bpf_stab *stab)
{
	bpf_map_area_free(stab->sock_map);
	kfree(stab);
}

static void smap_gc_work(struct work_struct *w)
{
	struct smap_psock_map_entry *e, *tmp;
	struct smap_psock *psock;

	psock = container_of(w, struct smap_psock, gc_work);

	/* no callback lock needed because we already detached sockmap ops */
	if (psock->strp_enabled)
		strp_done(&psock->strp);

	cancel_work_sync(&psock->tx_work);
	__skb_queue_purge(&psock->rxqueue);

	/* At this point all strparser and xmit work must be complete */
	if (psock->bpf_parse)
		bpf_prog_put(psock->bpf_parse);
	if (psock->bpf_verdict)
		bpf_prog_put(psock->bpf_verdict);

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		list_del(&e->list);
		kfree(e);
	}

	sock_put(psock->sock);
	kfree(psock);
}

static struct smap_psock *smap_init_psock(struct sock *sock,
					  struct bpf_stab *stab)
{
	struct smap_psock *psock;

	psock = kzalloc_node(sizeof(struct smap_psock),
			     GFP_ATOMIC | __GFP_NOWARN,
			     stab->map.numa_node);
	if (!psock)
		return ERR_PTR(-ENOMEM);

	psock->sock = sock;
	skb_queue_head_init(&psock->rxqueue);
	INIT_WORK(&psock->tx_work, smap_tx_work);
	INIT_WORK(&psock->gc_work, smap_gc_work);
	INIT_LIST_HEAD(&psock->maps);
	psock->refcnt = 1;

	rcu_assign_sk_user_data(sock, psock);
	sock_hold(sock);
	return psock;
}

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;
	int err = -EINVAL;
	u64 cost;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	stab = kzalloc(sizeof(*stab), GFP_USER);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	stab->map.map_type = attr->map_type;
	stab->map.key_size = attr->key_size;
	stab->map.value_size = attr->value_size;
	stab->map.max_entries = attr->max_entries;
	stab->map.map_flags = attr->map_flags;
	stab->map.numa_node = bpf_map_attr_numa_node(attr);

	/* make sure page count doesn't overflow */
	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_stab;

	stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(stab->map.pages);
	if (err)
		goto free_stab;

	err = -ENOMEM;
	stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
					    sizeof(struct sock *),
					    stab->map.numa_node);
	if (!stab->sock_map)
		goto free_stab;

	return &stab->map;
free_stab:
	kfree(stab);
	return ERR_PTR(err);
}

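/* Worked example of the cost calculation above (illustrative numbers):
 * on a 64-bit kernel with 4 KiB pages and max_entries = 1024,
 * cost = 1024 * sizeof(struct sock *) = 8192 bytes, so
 * map.pages = round_up(8192, 4096) >> 12 = 2 pages charged against the
 * memlock limit.
 */
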
static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
{
	struct smap_psock_map_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		if (e->entry == entry) {
			list_del(&e->list);
			break;
		}
	}
}

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	synchronize_rcu();

	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get socket state event updates and
	 * data ready callbacks that reference the psock from sk_user_data.
	 * Also psock worker threads are still in-flight. So smap_release_sock
	 * will only free the psock after cancel_sync on the worker threads
	 * and a grace period expires, to ensure the psock is really safe to
	 * remove.
	 */
	rcu_read_lock();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct smap_psock *psock;
		struct sock *sock;

		sock = xchg(&stab->sock_map[i], NULL);
		if (!sock)
			continue;

		write_lock_bh(&sock->sk_callback_lock);
		psock = smap_psock_sk(sock);
		smap_list_remove(psock, &stab->sock_map[i]);
		smap_release_sock(psock, sock);
		write_unlock_bh(&sock->sk_callback_lock);
	}
	rcu_read_unlock();

	if (stab->bpf_verdict)
		bpf_prog_put(stab->bpf_verdict);
	if (stab->bpf_parse)
		bpf_prog_put(stab->bpf_parse);

	sock_map_remove_complete(stab);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (i >= stab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (i == stab->map.max_entries - 1)
		return -ENOENT;

	*next = i + 1;
	return 0;
}

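/* Illustrative userspace iteration (a sketch using libbpf's
 * bpf_map_get_next_key()): a NULL or out-of-range key restarts at index 0,
 * and the final index returns -ENOENT, matching the logic above:
 *
 *	__u32 key, next;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next);
 *
 *	while (!err) {
 *		key = next;
 *		// ... use key ...
 *		err = bpf_map_get_next_key(map_fd, &key, &next);
 *	}
 */
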
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	if (key >= map->max_entries)
		return NULL;

	return READ_ONCE(stab->sock_map[key]);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock *psock;
	u32 k = *(u32 *)key;
	struct sock *sock;

	if (k >= map->max_entries)
		return -EINVAL;

	sock = xchg(&stab->sock_map[k], NULL);
	if (!sock)
		return -EINVAL;

	write_lock_bh(&sock->sk_callback_lock);
	psock = smap_psock_sk(sock);
	if (!psock)
		goto out;

	if (psock->bpf_parse)
		smap_stop_sock(psock, sock);
	smap_list_remove(psock, &stab->sock_map[k]);
	smap_release_sock(psock, sock);
out:
	write_unlock_bh(&sock->sk_callback_lock);
	return 0;
}

/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
 * done inside rcu critical sections. This ensures on updates that the psock
 * will not be released via smap_release_sock() until concurrent updates/deletes
 * complete. All operations operate on sock_map using cmpxchg and xchg
 * operations to ensure we do not get stale references. Any reads into the
 * map must be done with READ_ONCE() because of this.
 *
 * A psock is destroyed via call_rcu and after any worker threads are cancelled
 * and synced, so we are certain all references from the update/lookup/delete
 * operations as well as references in the data path are no longer in use.
 *
 * Psocks may exist in multiple maps, but only a single set of parse/verdict
 * programs may be inherited from the maps it belongs to. A reference count
 * is kept with the total number of references to the psock from all maps. The
 * psock will not be released until this reaches zero. The psock and sock
 * user data use the sk_callback_lock to protect critical data structures
 * from concurrent access. This allows us to avoid two updates from modifying
 * the user data in sock, and the lock is required anyway for modifying
 * callbacks; we simply increase its scope slightly.
 *
 * Rules to follow:
 *  - psock must always be read inside RCU critical section
 *  - sk_user_data must only be modified inside sk_callback_lock and read
 *    inside RCU critical section.
 *  - psock->maps list must only be read & modified inside sk_callback_lock
 *  - sock_map must use READ_ONCE and (cmp)xchg operations
 *  - BPF verdict/parse programs must use READ_ONCE and xchg operations
 */

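/* A condensed sketch of the rules above (illustrative only, drawn from the
 * access patterns in this file):
 *
 *	rcu_read_lock();
 *	sock = READ_ONCE(stab->sock_map[i]);	// map reads use READ_ONCE
 *	psock = smap_psock_sk(sock);		// psock only inside RCU
 *	rcu_read_unlock();
 *
 *	write_lock_bh(&sock->sk_callback_lock);	// sk_user_data writes and
 *	rcu_assign_sk_user_data(sock, psock);	// psock->maps edits happen
 *	list_add_tail(&e->list, &psock->maps);	// under the callback lock
 *	write_unlock_bh(&sock->sk_callback_lock);
 */
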
static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
				    struct bpf_map *map,
				    void *key, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock_map_entry *e = NULL;
	struct bpf_prog *verdict, *parse;
	struct sock *osock, *sock;
	struct smap_psock *psock;
	u32 i = *(u32 *)key;
	int err;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	if (unlikely(i >= stab->map.max_entries))
		return -E2BIG;

	sock = READ_ONCE(stab->sock_map[i]);
	if (flags == BPF_EXIST && !sock)
		return -ENOENT;
	else if (flags == BPF_NOEXIST && sock)
		return -EEXIST;

	sock = skops->sk;

	/* 1. If sock map has BPF programs those will be inherited by the
	 * sock being added. If the sock is already attached to BPF programs
	 * this results in an error.
	 */
	verdict = READ_ONCE(stab->bpf_verdict);
	parse = READ_ONCE(stab->bpf_parse);

	if (parse && verdict) {
		/* bpf prog refcnt may be zero if a concurrent attach operation
		 * removes the program after the above READ_ONCE() but before
		 * we increment the refcnt. If this is the case abort with an
		 * error.
		 */
		verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
		if (IS_ERR(verdict))
			return PTR_ERR(verdict);

		parse = bpf_prog_inc_not_zero(stab->bpf_parse);
		if (IS_ERR(parse)) {
			bpf_prog_put(verdict);
			return PTR_ERR(parse);
		}
	}

	write_lock_bh(&sock->sk_callback_lock);
	psock = smap_psock_sk(sock);

	/* 2. Do not allow inheriting programs if psock exists and has
	 * already inherited programs. This would create confusion on
	 * which parser/verdict program is running. If no psock exists
	 * create one. Inside sk_callback_lock to ensure concurrent create
	 * doesn't update user data.
	 */
	if (psock) {
		if (READ_ONCE(psock->bpf_parse) && parse) {
			err = -EBUSY;
			goto out_progs;
		}
		psock->refcnt++;
	} else {
		psock = smap_init_psock(sock, stab);
		if (IS_ERR(psock)) {
			err = PTR_ERR(psock);
			goto out_progs;
		}

		set_bit(SMAP_TX_RUNNING, &psock->state);
	}

	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
	if (!e) {
		err = -ENOMEM;
		goto out_progs;
	}
	e->entry = &stab->sock_map[i];

	/* 3. At this point we have a reference to a valid psock that is
	 * running. Attach any BPF programs needed.
	 */
	if (parse && verdict && !psock->strp_enabled) {
		err = smap_init_sock(psock, sock);
		if (err)
			goto out_free;
		smap_init_progs(psock, stab, verdict, parse);
		smap_start_sock(psock, sock);
	}

	/* 4. Place psock in sockmap for use and stop any programs on
	 * the old sock, assuming it's not the same sock we are replacing
	 * it with. Because we can only have a single set of programs, if
	 * old_sock has a strp we can stop it.
	 */
	list_add_tail(&e->list, &psock->maps);
	write_unlock_bh(&sock->sk_callback_lock);

	osock = xchg(&stab->sock_map[i], sock);
	if (osock) {
		struct smap_psock *opsock = smap_psock_sk(osock);

		write_lock_bh(&osock->sk_callback_lock);
		if (osock != sock && parse)
			smap_stop_sock(opsock, osock);
		smap_list_remove(opsock, &stab->sock_map[i]);
		smap_release_sock(opsock, osock);
		write_unlock_bh(&osock->sk_callback_lock);
	}
	return 0;
out_free:
	smap_release_sock(psock, sock);
out_progs:
	if (verdict)
		bpf_prog_put(verdict);
	if (parse)
		bpf_prog_put(parse);
	write_unlock_bh(&sock->sk_callback_lock);
	kfree(e);
	return err;
}

int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *orig;

	if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
		return -EINVAL;

	switch (type) {
	case BPF_SK_SKB_STREAM_PARSER:
		orig = xchg(&stab->bpf_parse, prog);
		break;
	case BPF_SK_SKB_STREAM_VERDICT:
		orig = xchg(&stab->bpf_verdict, prog);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (orig)
		bpf_prog_put(orig);

	return 0;
}

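/* Illustrative userspace attach (a libbpf-style sketch): SK_SKB programs
 * are attached to the map fd rather than to a netdev or cgroup, which is
 * what routes them into sock_map_prog() above:
 *
 *	err = bpf_prog_attach(parse_prog_fd, map_fd,
 *			      BPF_SK_SKB_STREAM_PARSER, 0);
 *	if (!err)
 *		err = bpf_prog_attach(verdict_prog_fd, map_fd,
 *				      BPF_SK_SKB_STREAM_VERDICT, 0);
 */
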
static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	return NULL;
}

static int sock_map_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_sock_ops_kern skops;
	u32 fd = *(u32 *)value;
	struct socket *socket;
	int err;

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	skops.sk = socket->sk;
	if (!skops.sk) {
		fput(socket->file);
		return -EINVAL;
	}

	err = sock_map_ctx_update_elem(&skops, map, key, flags);
	fput(socket->file);
	return err;
}

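/* Illustrative userspace update (sketch): the value written into a sockmap
 * is a socket file descriptor, which the kernel resolves via
 * sockfd_lookup() as above; fd names here are hypothetical:
 *
 *	int key = 0;
 *	int sock_fd = accept(listen_fd, NULL, NULL);
 *
 *	err = bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
 */
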
const struct bpf_map_ops sock_map_ops = {
	.map_alloc = sock_map_alloc,
	.map_free = sock_map_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_map_get_next_key,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_map_delete_elem,
};

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
}

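/* Illustrative BPF-side usage (sketch): a sock_ops program may call this
 * helper to insert established sockets into a sockmap; the map and key
 * here are hypothetical:
 *
 *	SEC("sockops")
 *	int bpf_sockmap(struct bpf_sock_ops *skops)
 *	{
 *		__u32 key = 0;
 *
 *		if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
 *			bpf_sock_map_update(skops, &sock_map, &key, BPF_ANY);
 *		return 0;
 *	}
 */
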
const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func = bpf_sock_map_update,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
};