/* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
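
/* net/core/lwt_bpf.c: BPF lightweight tunnel (LWT) encapsulation.
 *
 * Lets BPF programs of type BPF_PROG_TYPE_LWT_IN, _OUT and _XMIT be attached
 * to routes and run on the input, output and xmit paths of packets matching
 * those routes. Programs are configured over netlink as nested LWT_BPF_*
 * attributes; iproute2 exposes this via the "encap bpf" route option (exact
 * syntax may vary between iproute2 versions; object/section/device names
 * below are only examples), e.g.:
 *
 *   ip route add 192.0.2.0/24 encap bpf xmit obj prog.o section xmit dev eth0
 */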

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>
#include <net/gre.h>
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>

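/* One attached program: a reference to the loaded prog plus the name it was
 * configured with (used for dumping and comparison).
 */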
struct bpf_lwt_prog {
        struct bpf_prog *prog;
        char *name;
};

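/* Per-route LWT state: at most one program per hook (input, output, xmit). */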
struct bpf_lwt {
        struct bpf_lwt_prog in;
        struct bpf_lwt_prog out;
        struct bpf_lwt_prog xmit;
        int family;
};

#define MAX_PROG_NAME 256

static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
        return (struct bpf_lwt *)lwt->data;
}

#define NO_REDIRECT false
#define CAN_REDIRECT true

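/* Run @lwt->prog on @skb and translate its verdict: BPF_OK and
 * BPF_LWT_REROUTE are passed through, BPF_REDIRECT hands the skb to
 * skb_do_redirect() (demoted to BPF_OK if @can_redirect is false),
 * BPF_DROP frees the skb and returns -EPERM, and any other value frees
 * the skb and returns -EINVAL.
 */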
static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
                       struct dst_entry *dst, bool can_redirect)
{
        int ret;

        /* Disabling preemption is needed to protect the per-cpu redirect_info
         * between the BPF prog and skb_do_redirect(). The call_rcu in
         * bpf_prog_put() and access to maps strictly require rcu_read_lock()
         * for protection; mixing with the BH RCU lock doesn't work.
         */
        preempt_disable();
        bpf_compute_data_pointers(skb);
        ret = bpf_prog_run_save_cb(lwt->prog, skb);

        switch (ret) {
        case BPF_OK:
        case BPF_LWT_REROUTE:
                break;

        case BPF_REDIRECT:
                if (unlikely(!can_redirect)) {
                        pr_warn_once("Illegal redirect return code in prog %s\n",
                                     lwt->name ? : "<unknown>");
                        ret = BPF_OK;
                } else {
                        skb_reset_mac_header(skb);
                        ret = skb_do_redirect(skb);
                        if (ret == 0)
                                ret = BPF_REDIRECT;
                }
                break;

        case BPF_DROP:
                kfree_skb(skb);
                ret = -EPERM;
                break;

        default:
                pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
                kfree_skb(skb);
                ret = -EINVAL;
                break;
        }

        preempt_enable();

        return ret;
}

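/* Handle BPF_LWT_REROUTE on the input path: the program may have rewritten
 * or encapsulated the packet, so redo the route lookup on the (possibly new)
 * IP header and deliver via dst_input().
 */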
static int bpf_lwt_input_reroute(struct sk_buff *skb)
{
        int err = -EINVAL;

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);

                err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
                                           iph->tos, skb_dst(skb)->dev);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                err = ipv6_stub->ipv6_route_input(skb);
        } else {
                err = -EAFNOSUPPORT;
        }

        if (err)
                goto err;
        return dst_input(skb);

err:
        kfree_skb(skb);
        return err;
}

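/* lwtunnel .input hook: run the LWT_IN program, then hand the skb back to
 * the original input handler unless the program requested a reroute.
 */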
static int bpf_input(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct bpf_lwt *bpf;
        int ret;

        bpf = bpf_lwt_lwtunnel(dst->lwtstate);
        if (bpf->in.prog) {
                ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
                if (ret < 0)
                        return ret;
                if (ret == BPF_LWT_REROUTE)
                        return bpf_lwt_input_reroute(skb);
        }

        if (unlikely(!dst->lwtstate->orig_input)) {
                kfree_skb(skb);
                return -EINVAL;
        }

        return dst->lwtstate->orig_input(skb);
}

static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct bpf_lwt *bpf;
        int ret;

        bpf = bpf_lwt_lwtunnel(dst->lwtstate);
        if (bpf->out.prog) {
                ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
                if (ret < 0)
                        return ret;
        }

        if (unlikely(!dst->lwtstate->orig_output)) {
                pr_warn_once("orig_output not set on dst for prog %s\n",
                             bpf->out.name);
                kfree_skb(skb);
                return -EINVAL;
        }

        return dst->lwtstate->orig_output(net, sk, skb);
}

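/* Make sure there is enough headroom for the device's link-layer header,
 * expanding the skb head if the xmit program consumed too much of it.
 */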
static int xmit_check_hhlen(struct sk_buff *skb)
{
        int hh_len = skb_dst(skb)->dev->hard_header_len;

        if (skb_headroom(skb) < hh_len) {
                int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

                if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
                        return -ENOMEM;
        }

        return 0;
}

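/* Handle BPF_LWT_REROUTE on the xmit path: the program pushed a new IP
 * header, so perform a full route lookup for it (honouring any L3 master
 * device and socket binding), attach the new dst and send the packet via
 * dst_output().
 */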
static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
{
        struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
        int oif = l3mdev ? l3mdev->ifindex : 0;
        struct dst_entry *dst = NULL;
        int err = -EAFNOSUPPORT;
        struct sock *sk;
        struct net *net;
        bool ipv4;

        if (skb->protocol == htons(ETH_P_IP))
                ipv4 = true;
        else if (skb->protocol == htons(ETH_P_IPV6))
                ipv4 = false;
        else
                goto err;

        sk = sk_to_full_sk(skb->sk);
        if (sk) {
                if (sk->sk_bound_dev_if)
                        oif = sk->sk_bound_dev_if;
                net = sock_net(sk);
        } else {
                net = dev_net(skb_dst(skb)->dev);
        }

        if (ipv4) {
                struct iphdr *iph = ip_hdr(skb);
                struct flowi4 fl4 = {};
                struct rtable *rt;

                fl4.flowi4_oif = oif;
                fl4.flowi4_mark = skb->mark;
                fl4.flowi4_uid = sock_net_uid(net, sk);
                fl4.flowi4_tos = RT_TOS(iph->tos);
                fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
                fl4.flowi4_proto = iph->protocol;
                fl4.daddr = iph->daddr;
                fl4.saddr = iph->saddr;

                rt = ip_route_output_key(net, &fl4);
                if (IS_ERR(rt)) {
                        err = PTR_ERR(rt);
                        goto err;
                }
                dst = &rt->dst;
        } else {
                struct ipv6hdr *iph6 = ipv6_hdr(skb);
                struct flowi6 fl6 = {};

                fl6.flowi6_oif = oif;
                fl6.flowi6_mark = skb->mark;
                fl6.flowi6_uid = sock_net_uid(net, sk);
                fl6.flowlabel = ip6_flowinfo(iph6);
                fl6.flowi6_proto = iph6->nexthdr;
                fl6.daddr = iph6->daddr;
                fl6.saddr = iph6->saddr;

                err = ipv6_stub->ipv6_dst_lookup(net, skb->sk, &dst, &fl6);
                if (unlikely(err))
                        goto err;
                if (IS_ERR(dst)) {
                        err = PTR_ERR(dst);
                        goto err;
                }
        }
        if (unlikely(dst->error)) {
                err = dst->error;
                dst_release(dst);
                goto err;
        }

        /* Although headroom was reserved in bpf_lwt_push_ip_encap(), that was
         * done for the previous dst, so reserve it again here in case the new
         * dst needs more space. The call below is a no-op if the skb already
         * has enough headroom.
         */
        err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
        if (unlikely(err))
                goto err;

        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

        err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
        if (unlikely(err))
                return err;

        /* ip[6]_finish_output2 understands LWTUNNEL_XMIT_DONE */
        return LWTUNNEL_XMIT_DONE;

err:
        kfree_skb(skb);
        return err;
}

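/* lwtunnel .xmit hook: run the LWT_XMIT program and map its verdict onto
 * the lwtunnel xmit return codes (continue, done or reroute).
 */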
static int bpf_xmit(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct bpf_lwt *bpf;

        bpf = bpf_lwt_lwtunnel(dst->lwtstate);
        if (bpf->xmit.prog) {
                __be16 proto = skb->protocol;
                int ret;

                ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
                switch (ret) {
                case BPF_OK:
                        /* If the header was changed, e.g. via
                         * bpf_lwt_push_encap(), and the protocol changed with
                         * it, the program should have returned BPF_LWT_REROUTE
                         * (handled below) instead.
                         */
                        if (skb->protocol != proto) {
                                kfree_skb(skb);
                                return -EINVAL;
                        }
                        /* If the header was expanded, the headroom might now
                         * be too small for the L2 header to come, so expand it
                         * as needed.
                         */
                        ret = xmit_check_hhlen(skb);
                        if (unlikely(ret))
                                return ret;

                        return LWTUNNEL_XMIT_CONTINUE;
                case BPF_REDIRECT:
                        return LWTUNNEL_XMIT_DONE;
                case BPF_LWT_REROUTE:
                        return bpf_lwt_xmit_reroute(skb);
                default:
                        return ret;
                }
        }

        return LWTUNNEL_XMIT_CONTINUE;
}

static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
{
        if (prog->prog)
                bpf_prog_put(prog->prog);

        kfree(prog->name);
}

static void bpf_destroy_state(struct lwtunnel_state *lwt)
{
        struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

        bpf_lwt_prog_destroy(&bpf->in);
        bpf_lwt_prog_destroy(&bpf->out);
        bpf_lwt_prog_destroy(&bpf->xmit);
}

static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = {
        [LWT_BPF_PROG_FD]   = { .type = NLA_U32, },
        [LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
                                .len = MAX_PROG_NAME },
};

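/* Parse one nested LWT_BPF_{IN,OUT,XMIT} attribute: duplicate the program
 * name and take a reference on the program fd, verifying it is of @type.
 */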
static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
                          enum bpf_prog_type type)
{
        struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
        struct bpf_prog *p;
        int ret;
        u32 fd;

        ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr,
                                          bpf_prog_policy, NULL);
        if (ret < 0)
                return ret;

        if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
                return -EINVAL;

        prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
        if (!prog->name)
                return -ENOMEM;

        fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
        p = bpf_prog_get_type(fd, type);
        if (IS_ERR(p))
                return PTR_ERR(p);

        prog->prog = p;

        return 0;
}

static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
        [LWT_BPF_IN]            = { .type = NLA_NESTED, },
        [LWT_BPF_OUT]           = { .type = NLA_NESTED, },
        [LWT_BPF_XMIT]          = { .type = NLA_NESTED, },
        [LWT_BPF_XMIT_HEADROOM] = { .type = NLA_U32 },
};

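/* lwtunnel .build_state hook: allocate the per-route bpf_lwt state and
 * attach the programs supplied via netlink, rejecting families other than
 * IPv4/IPv6 and headroom values above LWT_BPF_MAX_HEADROOM.
 */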
static int bpf_build_state(struct nlattr *nla,
                           unsigned int family, const void *cfg,
                           struct lwtunnel_state **ts,
                           struct netlink_ext_ack *extack)
{
        struct nlattr *tb[LWT_BPF_MAX + 1];
        struct lwtunnel_state *newts;
        struct bpf_lwt *bpf;
        int ret;

        if (family != AF_INET && family != AF_INET6)
                return -EAFNOSUPPORT;

        ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy,
                                          extack);
        if (ret < 0)
                return ret;

        if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
                return -EINVAL;

        newts = lwtunnel_state_alloc(sizeof(*bpf));
        if (!newts)
                return -ENOMEM;

        newts->type = LWTUNNEL_ENCAP_BPF;
        bpf = bpf_lwt_lwtunnel(newts);

        if (tb[LWT_BPF_IN]) {
                newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
                ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
                                     BPF_PROG_TYPE_LWT_IN);
                if (ret < 0)
                        goto errout;
        }

        if (tb[LWT_BPF_OUT]) {
                newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
                ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
                                     BPF_PROG_TYPE_LWT_OUT);
                if (ret < 0)
                        goto errout;
        }

        if (tb[LWT_BPF_XMIT]) {
                newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
                ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
                                     BPF_PROG_TYPE_LWT_XMIT);
                if (ret < 0)
                        goto errout;
        }

        if (tb[LWT_BPF_XMIT_HEADROOM]) {
                u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);

                if (headroom > LWT_BPF_MAX_HEADROOM) {
                        ret = -ERANGE;
                        goto errout;
                }

                newts->headroom = headroom;
        }

        bpf->family = family;
        *ts = newts;

        return 0;

errout:
        bpf_destroy_state(newts);
        kfree(newts);
        return ret;
}

static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
                             struct bpf_lwt_prog *prog)
{
        struct nlattr *nest;

        if (!prog->prog)
                return 0;

        nest = nla_nest_start_noflag(skb, attr);
        if (!nest)
                return -EMSGSIZE;

        if (prog->name &&
            nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
                return -EMSGSIZE;

        return nla_nest_end(skb, nest);
}

static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
{
        struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

        if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
            bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
            bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
                return -EMSGSIZE;

        return 0;
}

static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
{
        int nest_len = nla_total_size(sizeof(struct nlattr)) +
                       nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */
                       0;

        return nest_len + /* LWT_BPF_IN */
               nest_len + /* LWT_BPF_OUT */
               nest_len + /* LWT_BPF_XMIT */
               0;
}

static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
        /* FIXME:
         * The LWT state is currently rebuilt for delete requests, which
         * results in a new bpf_prog instance. Comparing names for now.
         */
        if (!a->name && !b->name)
                return 0;

        if (!a->name || !b->name)
                return 1;

        return strcmp(a->name, b->name);
}

static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
        struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a);
        struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b);

        return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) ||
               bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) ||
               bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit);
}

static const struct lwtunnel_encap_ops bpf_encap_ops = {
        .build_state    = bpf_build_state,
        .destroy_state  = bpf_destroy_state,
        .input          = bpf_input,
        .output         = bpf_output,
        .xmit           = bpf_xmit,
        .fill_encap     = bpf_fill_encap_info,
        .get_encap_size = bpf_encap_nlsize,
        .cmp_encap      = bpf_encap_cmp,
        .owner          = THIS_MODULE,
};

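/* After an IP header has been pushed in front of a GSO packet, the GSO
 * metadata has to be adjusted: mark the segments as tunneled (SKB_GSO_*
 * plus SKB_GSO_DODGY), shrink gso_size by the encap length and force
 * gso_segs to be recomputed.
 */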
static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
                           int encap_len)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        gso_type |= SKB_GSO_DODGY;
        shinfo->gso_type |= gso_type;
        skb_decrease_gso_size(shinfo, encap_len);
        shinfo->gso_segs = 0;
        return 0;
}

static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
{
        int next_hdr_offset;
        void *next_hdr;
        __u8 protocol;

        /* SCTP and UDP_L4 GSO need more nuanced handling than what
         * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
         * So at the moment only TCP GSO packets are let through.
         */
        if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
                return -ENOTSUPP;

        if (ipv4) {
                protocol = ip_hdr(skb)->protocol;
                next_hdr_offset = sizeof(struct iphdr);
                next_hdr = skb_network_header(skb) + next_hdr_offset;
        } else {
                protocol = ipv6_hdr(skb)->nexthdr;
                next_hdr_offset = sizeof(struct ipv6hdr);
                next_hdr = skb_network_header(skb) + next_hdr_offset;
        }

        switch (protocol) {
        case IPPROTO_GRE:
                next_hdr_offset += sizeof(struct gre_base_hdr);
                if (next_hdr_offset > encap_len)
                        return -EINVAL;

                if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
                        return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
                                               encap_len);
                return handle_gso_type(skb, SKB_GSO_GRE, encap_len);

        case IPPROTO_UDP:
                next_hdr_offset += sizeof(struct udphdr);
                if (next_hdr_offset > encap_len)
                        return -EINVAL;

                if (((struct udphdr *)next_hdr)->check)
                        return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
                                               encap_len);
                return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);

        case IPPROTO_IP:
        case IPPROTO_IPV6:
                if (ipv4)
                        return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
                else
                        return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);

        default:
                return -EPROTONOSUPPORT;
        }
}

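/* Push an IPv4/IPv6 header supplied by a BPF program in front of the packet
 * (called from the bpf_lwt_push_encap() BPF helper for its IP encap modes):
 * validate the header, make room, update skb metadata and protocol, and fix
 * up GSO state if needed.
 */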
int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
{
        struct iphdr *iph;
        bool ipv4;
        int err;

        if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
                return -EINVAL;

        /* validate protocol and length */
        iph = (struct iphdr *)hdr;
        if (iph->version == 4) {
                ipv4 = true;
                if (unlikely(len < iph->ihl * 4))
                        return -EINVAL;
        } else if (iph->version == 6) {
                ipv4 = false;
                if (unlikely(len < sizeof(struct ipv6hdr)))
                        return -EINVAL;
        } else {
                return -EINVAL;
        }

        if (ingress)
                err = skb_cow_head(skb, len + skb->mac_len);
        else
                err = skb_cow_head(skb,
                                   len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
        if (unlikely(err))
                return err;

        /* push the encap headers and fix pointers */
        skb_reset_inner_headers(skb);
        skb_reset_inner_mac_header(skb);  /* mac header is not yet set */
        skb_set_inner_protocol(skb, skb->protocol);
        skb->encapsulation = 1;
        skb_push(skb, len);
        if (ingress)
                skb_postpush_rcsum(skb, iph, len);
        skb_reset_network_header(skb);
        memcpy(skb_network_header(skb), hdr, len);
        bpf_compute_data_pointers(skb);
        skb_clear_hash(skb);

        if (ipv4) {
                skb->protocol = htons(ETH_P_IP);
                iph = ip_hdr(skb);

                if (!iph->check)
                        iph->check = ip_fast_csum((unsigned char *)iph,
                                                  iph->ihl);
        } else {
                skb->protocol = htons(ETH_P_IPV6);
        }

        if (skb_is_gso(skb))
                return handle_gso_encap(skb, ipv4, len);

        return 0;
}

static int __init bpf_lwt_init(void)
{
        return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
}

subsys_initcall(bpf_lwt_init)