/*
 * GRE over IPv6 protocol decoder.
 *
 * Authors: Dmitry Kozlov (xeb@mail.ru)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/init.h>
28 #include <linux/in6.h>
29 #include <linux/inetdevice.h>
30 #include <linux/igmp.h>
31 #include <linux/netfilter_ipv4.h>
32 #include <linux/etherdevice.h>
33 #include <linux/if_ether.h>
34 #include <linux/hash.h>
35 #include <linux/if_tunnel.h>
36 #include <linux/ip6_tunnel.h>
40 #include <net/ip_tunnels.h>
42 #include <net/protocol.h>
43 #include <net/addrconf.h>
45 #include <net/checksum.h>
46 #include <net/dsfield.h>
47 #include <net/inet_ecn.h>
49 #include <net/net_namespace.h>
50 #include <net/netns/generic.h>
51 #include <net/rtnetlink.h>
54 #include <net/ip6_fib.h>
55 #include <net/ip6_route.h>
56 #include <net/ip6_tunnel.h>
58 #include <net/erspan.h>
59 #include <net/dst_metadata.h>
62 static bool log_ecn_error = true;
63 module_param(log_ecn_error, bool, 0644);
64 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
66 #define IP6_GRE_HASH_SIZE_SHIFT 5
67 #define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)
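/* Four hash tables of 1 << 5 = 32 buckets each; tunnels are chained per
 * bucket via t->next and traversed under RCU in the receive path.
 */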
69 static unsigned int ip6gre_net_id __read_mostly;
71 struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
73 struct ip6_tnl __rcu *collect_md_tun;
74 struct net_device *fb_tunnel_dev;
77 static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
78 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
79 static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
80 static int ip6gre_tunnel_init(struct net_device *dev);
81 static void ip6gre_tunnel_setup(struct net_device *dev);
82 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
83 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
/* Tunnel hash table */

/*
 * We require an exact key match, i.e. if a key is present in the packet
 * it will match only a tunnel with the same key; if no key is present,
 * it will match only a keyless tunnel.
 *
 * All keyless packets, if not matched to a configured keyless tunnel,
 * will match the fallback tunnel.
 */
103 #define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
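/* HASH_KEY folds the 32-bit GRE key with a 4-bit-shifted copy of itself and
 * masks the result into the bucket range; HASH_ADDR below reduces an IPv6
 * address to the same range via ipv6_addr_hash() and hash_32().
 */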
104 static u32 HASH_ADDR(const struct in6_addr *addr)
106 u32 hash = ipv6_addr_hash(addr);
108 return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
111 #define tunnels_r_l tunnels[3]
112 #define tunnels_r tunnels[2]
113 #define tunnels_l tunnels[1]
114 #define tunnels_wc tunnels[0]
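/* The four lists above are indexed by how specific the address match is:
 * 3 = (remote,local), 2 = (remote,*), 1 = (*,local), 0 = (*,*); the lookup
 * below scans them in that order, most specific first.
 */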
/* Given src, dst and key, find the appropriate tunnel for the incoming packet. */
118 static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
119 const struct in6_addr *remote, const struct in6_addr *local,
120 __be32 key, __be16 gre_proto)
122 struct net *net = dev_net(dev);
123 int link = dev->ifindex;
124 unsigned int h0 = HASH_ADDR(remote);
125 unsigned int h1 = HASH_KEY(key);
126 struct ip6_tnl *t, *cand = NULL;
127 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
128 int dev_type = (gre_proto == htons(ETH_P_TEB) ||
129 gre_proto == htons(ETH_P_ERSPAN) ||
130 gre_proto == htons(ETH_P_ERSPAN2)) ?
131 ARPHRD_ETHER : ARPHRD_IP6GRE;
132 int score, cand_score = 4;
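/* Candidate scoring: lower scores mean a closer match on link and device
 * type; the lowest-scoring candidate found across the four lists wins,
 * hence cand_score starts above the maximum possible score.
 */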
134 for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
135 if (!ipv6_addr_equal(local, &t->parms.laddr) ||
136 !ipv6_addr_equal(remote, &t->parms.raddr) ||
137 key != t->parms.i_key ||
138 !(t->dev->flags & IFF_UP))
141 if (t->dev->type != ARPHRD_IP6GRE &&
142 t->dev->type != dev_type)
146 if (t->parms.link != link)
148 if (t->dev->type != dev_type)
153 if (score < cand_score) {
159 for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
160 if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
161 key != t->parms.i_key ||
162 !(t->dev->flags & IFF_UP))
165 if (t->dev->type != ARPHRD_IP6GRE &&
166 t->dev->type != dev_type)
170 if (t->parms.link != link)
172 if (t->dev->type != dev_type)
177 if (score < cand_score) {
183 for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
184 if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
185 (!ipv6_addr_equal(local, &t->parms.raddr) ||
186 !ipv6_addr_is_multicast(local))) ||
187 key != t->parms.i_key ||
188 !(t->dev->flags & IFF_UP))
191 if (t->dev->type != ARPHRD_IP6GRE &&
192 t->dev->type != dev_type)
196 if (t->parms.link != link)
198 if (t->dev->type != dev_type)
203 if (score < cand_score) {
209 for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
210 if (t->parms.i_key != key ||
211 !(t->dev->flags & IFF_UP))
214 if (t->dev->type != ARPHRD_IP6GRE &&
215 t->dev->type != dev_type)
219 if (t->parms.link != link)
221 if (t->dev->type != dev_type)
226 if (score < cand_score) {
235 t = rcu_dereference(ign->collect_md_tun);
236 if (t && t->dev->flags & IFF_UP)
239 dev = ign->fb_tunnel_dev;
240 if (dev && dev->flags & IFF_UP)
241 return netdev_priv(dev);
246 static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
247 const struct __ip6_tnl_parm *p)
249 const struct in6_addr *remote = &p->raddr;
250 const struct in6_addr *local = &p->laddr;
251 unsigned int h = HASH_KEY(p->i_key);
254 if (!ipv6_addr_any(local))
256 if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
258 h ^= HASH_ADDR(remote);
261 return &ign->tunnels[prio][h];
264 static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
265 const struct ip6_tnl *t)
267 return __ip6gre_bucket(ign, &t->parms);
270 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
272 struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
274 if (t->parms.collect_md)
275 rcu_assign_pointer(ign->collect_md_tun, t);
277 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
278 rcu_assign_pointer(*tp, t);
281 static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
283 struct ip6_tnl __rcu **tp;
284 struct ip6_tnl *iter;
286 if (t->parms.collect_md)
287 rcu_assign_pointer(ign->collect_md_tun, NULL);
289 for (tp = ip6gre_bucket(ign, t);
290 (iter = rtnl_dereference(*tp)) != NULL;
293 rcu_assign_pointer(*tp, t->next);
299 static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
300 const struct __ip6_tnl_parm *parms,
303 const struct in6_addr *remote = &parms->raddr;
304 const struct in6_addr *local = &parms->laddr;
305 __be32 key = parms->i_key;
306 int link = parms->link;
308 struct ip6_tnl __rcu **tp;
309 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
311 for (tp = __ip6gre_bucket(ign, parms);
312 (t = rtnl_dereference(*tp)) != NULL;
314 if (ipv6_addr_equal(local, &t->parms.laddr) &&
315 ipv6_addr_equal(remote, &t->parms.raddr) &&
316 key == t->parms.i_key &&
317 link == t->parms.link &&
318 type == t->dev->type)
324 static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
325 const struct __ip6_tnl_parm *parms, int create)
327 struct ip6_tnl *t, *nt;
328 struct net_device *dev;
330 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
332 t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
339 strlcpy(name, parms->name, IFNAMSIZ);
341 strcpy(name, "ip6gre%d");
343 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
344 ip6gre_tunnel_setup);
348 dev_net_set(dev, net);
350 nt = netdev_priv(dev);
352 dev->rtnl_link_ops = &ip6gre_link_ops;
355 nt->net = dev_net(dev);
357 if (register_netdevice(dev) < 0)
360 ip6gre_tnl_link_config(nt, 1);
362 /* Can use a lockless transmit, unless we generate output sequences */
363 if (!(nt->parms.o_flags & TUNNEL_SEQ))
364 dev->features |= NETIF_F_LLTX;
367 ip6gre_tunnel_link(ign, nt);
375 static void ip6gre_tunnel_uninit(struct net_device *dev)
377 struct ip6_tnl *t = netdev_priv(dev);
378 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
380 ip6gre_tunnel_unlink(ign, t);
381 dst_cache_reset(&t->dst_cache);
386 static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
387 u8 type, u8 code, int offset, __be32 info)
389 struct net *net = dev_net(skb->dev);
390 const struct gre_base_hdr *greh;
391 const struct ipv6hdr *ipv6h;
392 int grehlen = sizeof(*greh);
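/* grehlen starts at the base header size and grows below as the CSUM and
 * KEY option words are discovered, so the second pskb_may_pull() covers
 * the full GRE header before the key is read.
 */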
398 if (!pskb_may_pull(skb, offset + grehlen))
400 greh = (const struct gre_base_hdr *)(skb->data + offset);
402 if (flags & (GRE_VERSION | GRE_ROUTING))
404 if (flags & GRE_CSUM)
406 if (flags & GRE_KEY) {
407 key_off = grehlen + offset;
411 if (!pskb_may_pull(skb, offset + grehlen))
413 ipv6h = (const struct ipv6hdr *)skb->data;
414 greh = (const struct gre_base_hdr *)(skb->data + offset);
415 key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
417 t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
418 key, greh->protocol);
423 struct ipv6_tlv_tnl_enc_lim *tel;
425 case ICMPV6_DEST_UNREACH:
426 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
428 if (code != ICMPV6_PORT_UNREACH)
431 case ICMPV6_TIME_EXCEED:
432 if (code == ICMPV6_EXC_HOPLIMIT) {
433 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
438 case ICMPV6_PARAMPROB:
440 if (code == ICMPV6_HDR_FIELD)
441 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
443 if (teli && teli == be32_to_cpu(info) - 2) {
444 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
445 if (tel->encap_limit == 0) {
446 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
450 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
454 case ICMPV6_PKT_TOOBIG:
455 ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
458 ip6_redirect(skb, net, skb->dev->ifindex, 0,
459 sock_net_uid(net, NULL));
463 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
467 t->err_time = jiffies;
470 static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
472 const struct ipv6hdr *ipv6h;
473 struct ip6_tnl *tunnel;
475 ipv6h = ipv6_hdr(skb);
476 tunnel = ip6gre_tunnel_lookup(skb->dev,
477 &ipv6h->saddr, &ipv6h->daddr, tpi->key,
480 if (tunnel->parms.collect_md) {
481 struct metadata_dst *tun_dst;
486 tun_id = key32_to_tunnel_id(tpi->key);
488 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
490 return PACKET_REJECT;
492 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
494 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
500 return PACKET_REJECT;
503 static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
504 struct tnl_ptk_info *tpi)
506 struct erspan_base_hdr *ershdr;
507 struct erspan_metadata *pkt_md;
508 const struct ipv6hdr *ipv6h;
509 struct erspan_md2 *md2;
510 struct ip6_tnl *tunnel;
513 if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
514 return PACKET_REJECT;
516 ipv6h = ipv6_hdr(skb);
517 ershdr = (struct erspan_base_hdr *)skb->data;
519 tpi->key = cpu_to_be32(get_session_id(ershdr));
521 tunnel = ip6gre_tunnel_lookup(skb->dev,
522 &ipv6h->saddr, &ipv6h->daddr, tpi->key,
525 int len = erspan_hdr_len(ver);
527 if (unlikely(!pskb_may_pull(skb, len)))
528 return PACKET_REJECT;
530 ershdr = (struct erspan_base_hdr *)skb->data;
531 pkt_md = (struct erspan_metadata *)(ershdr + 1);
533 if (__iptunnel_pull_header(skb, len,
536 return PACKET_REJECT;
538 if (tunnel->parms.collect_md) {
539 struct metadata_dst *tun_dst;
540 struct ip_tunnel_info *info;
541 struct erspan_metadata *md;
545 tpi->flags |= TUNNEL_KEY;
547 tun_id = key32_to_tunnel_id(tpi->key);
549 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
552 return PACKET_REJECT;
554 info = &tun_dst->u.tun_info;
555 md = ip_tunnel_info_opts(info);
558 memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
560 info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
561 info->options_len = sizeof(*md);
563 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
566 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
572 return PACKET_REJECT;
575 static int gre_rcv(struct sk_buff *skb)
577 struct tnl_ptk_info tpi;
578 bool csum_err = false;
581 hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
585 if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
588 if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
589 tpi.proto == htons(ETH_P_ERSPAN2))) {
590 if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
595 if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
599 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
605 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
607 return iptunnel_handle_offloads(skb,
608 csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
611 static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
612 struct net_device *dev,
613 struct flowi6 *fl6, __u8 *dsfield,
616 const struct iphdr *iph = ip_hdr(skb);
617 struct ip6_tnl *t = netdev_priv(dev);
619 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
620 *encap_limit = t->parms.encap_limit;
622 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
624 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
625 *dsfield = ipv4_get_dsfield(iph);
627 *dsfield = ip6_tclass(t->parms.flowinfo);
629 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
630 fl6->flowi6_mark = skb->mark;
632 fl6->flowi6_mark = t->parms.fwmark;
634 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
637 static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
638 struct net_device *dev,
639 struct flowi6 *fl6, __u8 *dsfield,
642 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
643 struct ip6_tnl *t = netdev_priv(dev);
646 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
647 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
650 struct ipv6_tlv_tnl_enc_lim *tel;
652 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
653 if (tel->encap_limit == 0) {
654 icmpv6_send(skb, ICMPV6_PARAMPROB,
655 ICMPV6_HDR_FIELD, offset + 2);
658 *encap_limit = tel->encap_limit - 1;
659 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
660 *encap_limit = t->parms.encap_limit;
663 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
665 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
666 *dsfield = ipv6_get_dsfield(ipv6h);
668 *dsfield = ip6_tclass(t->parms.flowinfo);
670 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
671 fl6->flowlabel |= ip6_flowlabel(ipv6h);
673 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
674 fl6->flowi6_mark = skb->mark;
676 fl6->flowi6_mark = t->parms.fwmark;
678 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
683 static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
684 struct net_device *dev, __u8 dsfield,
685 struct flowi6 *fl6, int encap_limit,
686 __u32 *pmtu, __be16 proto)
688 struct ip6_tnl *tunnel = netdev_priv(dev);
691 if (dev->type == ARPHRD_ETHER)
692 IPCB(skb)->flags = 0;
694 if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
695 fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
697 fl6->daddr = tunnel->parms.raddr;
699 /* Push GRE header. */
700 protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
702 if (tunnel->parms.collect_md) {
703 struct ip_tunnel_info *tun_info;
704 const struct ip_tunnel_key *key;
707 tun_info = skb_tunnel_info(skb);
708 if (unlikely(!tun_info ||
709 !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
710 ip_tunnel_info_af(tun_info) != AF_INET6))
713 key = &tun_info->key;
714 memset(fl6, 0, sizeof(*fl6));
715 fl6->flowi6_proto = IPPROTO_GRE;
716 fl6->daddr = key->u.ipv6.dst;
717 fl6->flowlabel = key->label;
718 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
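/* In collect_md mode the GRE option flags, and hence the header length,
 * come from the per-packet tunnel metadata rather than the device.
 */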
721 flags = key->tun_flags &
722 (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
723 tunnel->tun_hlen = gre_calc_hlen(flags);
725 gre_build_header(skb, tunnel->tun_hlen,
727 tunnel_id_to_key32(tun_info->key.tun_id),
728 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
732 if (tunnel->parms.o_flags & TUNNEL_SEQ)
735 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
736 protocol, tunnel->parms.o_key,
737 htonl(tunnel->o_seqno));
740 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
744 static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
746 struct ip6_tnl *t = netdev_priv(dev);
747 int encap_limit = -1;
753 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
755 if (!t->parms.collect_md)
756 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
757 &dsfield, &encap_limit);
759 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
763 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
766 /* XXX: send ICMP error even if DF is not set. */
767 if (err == -EMSGSIZE)
768 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
776 static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
778 struct ip6_tnl *t = netdev_priv(dev);
779 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
780 int encap_limit = -1;
786 if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
789 if (!t->parms.collect_md &&
790 prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
793 if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
796 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
797 &mtu, skb->protocol);
799 if (err == -EMSGSIZE)
800 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid a trivial tunneling loop by checking that the tunnel exit-point
 *   doesn't match the source of the incoming packet.
 **/
821 static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
822 const struct ipv6hdr *hdr)
824 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
827 static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
829 struct ip6_tnl *t = netdev_priv(dev);
830 int encap_limit = -1;
835 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
836 encap_limit = t->parms.encap_limit;
838 if (!t->parms.collect_md)
839 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
841 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
845 err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);
850 static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
851 struct net_device *dev)
853 struct ip6_tnl *t = netdev_priv(dev);
854 struct net_device_stats *stats = &t->dev->stats;
857 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
860 switch (skb->protocol) {
861 case htons(ETH_P_IP):
862 ret = ip6gre_xmit_ipv4(skb, dev);
864 case htons(ETH_P_IPV6):
865 ret = ip6gre_xmit_ipv6(skb, dev);
868 ret = ip6gre_xmit_other(skb, dev);
884 static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
885 struct net_device *dev)
887 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
888 struct ip6_tnl *t = netdev_priv(dev);
889 struct dst_entry *dst = skb_dst(skb);
890 struct net_device_stats *stats;
891 bool truncate = false;
892 int encap_limit = -1;
__u8 dsfield = 0;
898 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
901 if (gre_handle_offloads(skb, false))
904 if (skb->len > dev->mtu + dev->hard_header_len) {
905 pskb_trim(skb, dev->mtu + dev->hard_header_len);
909 if (skb_cow_head(skb, dev->needed_headroom))
912 t->parms.o_flags &= ~TUNNEL_KEY;
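/* The GRE key option is not emitted for ERSPAN; the session ID derived
 * from o_key travels in the ERSPAN header built further below.
 */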
913 IPCB(skb)->flags = 0;
/* For collect_md mode, derive fl6 from the tunnel key,
 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
 */
918 if (t->parms.collect_md) {
919 struct ip_tunnel_info *tun_info;
920 const struct ip_tunnel_key *key;
921 struct erspan_metadata *md;
924 tun_info = skb_tunnel_info(skb);
925 if (unlikely(!tun_info ||
926 !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
927 ip_tunnel_info_af(tun_info) != AF_INET6))
930 key = &tun_info->key;
931 memset(&fl6, 0, sizeof(fl6));
932 fl6.flowi6_proto = IPPROTO_GRE;
933 fl6.daddr = key->u.ipv6.dst;
934 fl6.flowlabel = key->label;
935 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
938 md = ip_tunnel_info_opts(tun_info);
942 tun_id = tunnel_id_to_key32(key->tun_id);
943 if (md->version == 1) {
944 erspan_build_header(skb,
946 ntohl(md->u.index), truncate,
948 } else if (md->version == 2) {
949 erspan_build_header_v2(skb,
952 get_hwid(&md->u.md2),
958 switch (skb->protocol) {
959 case htons(ETH_P_IP):
960 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
961 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
962 &dsfield, &encap_limit);
964 case htons(ETH_P_IPV6):
965 if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
967 if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
968 &dsfield, &encap_limit))
972 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
976 if (t->parms.erspan_ver == 1)
977 erspan_build_header(skb, ntohl(t->parms.o_key),
981 erspan_build_header_v2(skb, ntohl(t->parms.o_key),
985 fl6.daddr = t->parms.raddr;
988 /* Push GRE header. */
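/* The fixed length of 8 covers the 4-byte GRE base header plus the 4-byte
 * sequence number implied by TUNNEL_SEQ.
 */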
989 gre_build_header(skb, 8, TUNNEL_SEQ,
990 htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
992 /* TooBig packet may have updated dst->dev's mtu */
993 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
994 dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
996 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
999 /* XXX: send ICMP error even if DF is not set. */
1000 if (err == -EMSGSIZE) {
1001 if (skb->protocol == htons(ETH_P_IP))
1002 icmp_send(skb, ICMP_DEST_UNREACH,
1003 ICMP_FRAG_NEEDED, htonl(mtu));
1005 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1010 return NETDEV_TX_OK;
1013 stats = &t->dev->stats;
1015 stats->tx_dropped++;
1017 return NETDEV_TX_OK;
1020 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
1022 struct net_device *dev = t->dev;
1023 struct __ip6_tnl_parm *p = &t->parms;
1024 struct flowi6 *fl6 = &t->fl.u.ip6;
1027 if (dev->type != ARPHRD_ETHER) {
1028 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1029 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1032 /* Set up flowi template */
1033 fl6->saddr = p->laddr;
1034 fl6->daddr = p->raddr;
1035 fl6->flowi6_oif = p->link;
1037 fl6->flowi6_proto = IPPROTO_GRE;
1039 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1040 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1041 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1042 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1044 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1045 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1047 if (p->flags&IP6_TNL_F_CAP_XMIT &&
1048 p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
1049 dev->flags |= IFF_POINTOPOINT;
1051 dev->flags &= ~IFF_POINTOPOINT;
1053 t->tun_hlen = gre_calc_hlen(t->parms.o_flags);
1055 t->hlen = t->encap_hlen + t->tun_hlen;
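/* t_hlen is the total tunnel overhead: optional encapsulation header plus
 * GRE header plus the outer IPv6 header; it is subtracted from the lower
 * device MTU below.
 */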
1057 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1059 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1060 int strict = (ipv6_addr_type(&p->raddr) &
1061 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1063 struct rt6_info *rt = rt6_lookup(t->net,
1064 &p->raddr, &p->laddr,
1065 p->link, NULL, strict);
1071 dev->hard_header_len = rt->dst.dev->hard_header_len +
1075 dev->mtu = rt->dst.dev->mtu - t_hlen;
1076 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1078 if (dev->type == ARPHRD_ETHER)
1079 dev->mtu -= ETH_HLEN;
1081 if (dev->mtu < IPV6_MIN_MTU)
1082 dev->mtu = IPV6_MIN_MTU;
1089 static int ip6gre_tnl_change(struct ip6_tnl *t,
1090 const struct __ip6_tnl_parm *p, int set_mtu)
1092 t->parms.laddr = p->laddr;
1093 t->parms.raddr = p->raddr;
1094 t->parms.flags = p->flags;
1095 t->parms.hop_limit = p->hop_limit;
1096 t->parms.encap_limit = p->encap_limit;
1097 t->parms.flowinfo = p->flowinfo;
1098 t->parms.link = p->link;
1099 t->parms.proto = p->proto;
1100 t->parms.i_key = p->i_key;
1101 t->parms.o_key = p->o_key;
1102 t->parms.i_flags = p->i_flags;
1103 t->parms.o_flags = p->o_flags;
1104 t->parms.fwmark = p->fwmark;
1105 dst_cache_reset(&t->dst_cache);
1106 ip6gre_tnl_link_config(t, set_mtu);
1110 static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
1111 const struct ip6_tnl_parm2 *u)
1113 p->laddr = u->laddr;
1114 p->raddr = u->raddr;
1115 p->flags = u->flags;
1116 p->hop_limit = u->hop_limit;
1117 p->encap_limit = u->encap_limit;
1118 p->flowinfo = u->flowinfo;
1120 p->i_key = u->i_key;
1121 p->o_key = u->o_key;
1122 p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
1123 p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
1124 memcpy(p->name, u->name, sizeof(u->name));
1127 static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
1128 const struct __ip6_tnl_parm *p)
1130 u->proto = IPPROTO_GRE;
1131 u->laddr = p->laddr;
1132 u->raddr = p->raddr;
1133 u->flags = p->flags;
1134 u->hop_limit = p->hop_limit;
1135 u->encap_limit = p->encap_limit;
1136 u->flowinfo = p->flowinfo;
1138 u->i_key = p->i_key;
1139 u->o_key = p->o_key;
1140 u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
1141 u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
1142 memcpy(u->name, p->name, sizeof(u->name));
1145 static int ip6gre_tunnel_ioctl(struct net_device *dev,
1146 struct ifreq *ifr, int cmd)
1149 struct ip6_tnl_parm2 p;
1150 struct __ip6_tnl_parm p1;
1151 struct ip6_tnl *t = netdev_priv(dev);
1152 struct net *net = t->net;
1153 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1155 memset(&p1, 0, sizeof(p1));
1159 if (dev == ign->fb_tunnel_dev) {
1160 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1164 ip6gre_tnl_parm_from_user(&p1, &p);
1165 t = ip6gre_tunnel_locate(net, &p1, 0);
1167 t = netdev_priv(dev);
1169 memset(&p, 0, sizeof(p));
1170 ip6gre_tnl_parm_to_user(&p, &t->parms);
1171 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1178 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1182 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1186 if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
1189 if (!(p.i_flags&GRE_KEY))
1191 if (!(p.o_flags&GRE_KEY))
1194 ip6gre_tnl_parm_from_user(&p1, &p);
1195 t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
1197 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1199 if (t->dev != dev) {
1204 t = netdev_priv(dev);
1206 ip6gre_tunnel_unlink(ign, t);
1208 ip6gre_tnl_change(t, &p1, 1);
1209 ip6gre_tunnel_link(ign, t);
1210 netdev_state_change(dev);
1217 memset(&p, 0, sizeof(p));
1218 ip6gre_tnl_parm_to_user(&p, &t->parms);
1219 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1222 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1227 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1230 if (dev == ign->fb_tunnel_dev) {
1232 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1235 ip6gre_tnl_parm_from_user(&p1, &p);
1236 t = ip6gre_tunnel_locate(net, &p1, 0);
1240 if (t == netdev_priv(ign->fb_tunnel_dev))
1244 unregister_netdevice(dev);
1256 static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
1257 unsigned short type, const void *daddr,
1258 const void *saddr, unsigned int len)
1260 struct ip6_tnl *t = netdev_priv(dev);
1261 struct ipv6hdr *ipv6h;
1264 ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
1265 ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
1266 t->fl.u.ip6.flowlabel,
1267 true, &t->fl.u.ip6));
1268 ipv6h->hop_limit = t->parms.hop_limit;
1269 ipv6h->nexthdr = NEXTHDR_GRE;
1270 ipv6h->saddr = t->parms.laddr;
1271 ipv6h->daddr = t->parms.raddr;
1273 p = (__be16 *)(ipv6h + 1);
1274 p[0] = t->parms.o_flags;
/* Set the source hardware address. */
1282 memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
1284 memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
1285 if (!ipv6_addr_any(&ipv6h->daddr))
1291 static const struct header_ops ip6gre_header_ops = {
1292 .create = ip6gre_header,
1295 static const struct net_device_ops ip6gre_netdev_ops = {
1296 .ndo_init = ip6gre_tunnel_init,
1297 .ndo_uninit = ip6gre_tunnel_uninit,
1298 .ndo_start_xmit = ip6gre_tunnel_xmit,
1299 .ndo_do_ioctl = ip6gre_tunnel_ioctl,
1300 .ndo_change_mtu = ip6_tnl_change_mtu,
1301 .ndo_get_stats64 = ip_tunnel_get_stats64,
1302 .ndo_get_iflink = ip6_tnl_get_iflink,
1305 static void ip6gre_dev_free(struct net_device *dev)
1307 struct ip6_tnl *t = netdev_priv(dev);
1309 dst_cache_destroy(&t->dst_cache);
1310 free_percpu(dev->tstats);
1313 static void ip6gre_tunnel_setup(struct net_device *dev)
1315 dev->netdev_ops = &ip6gre_netdev_ops;
1316 dev->needs_free_netdev = true;
1317 dev->priv_destructor = ip6gre_dev_free;
1319 dev->type = ARPHRD_IP6GRE;
1321 dev->flags |= IFF_NOARP;
1322 dev->addr_len = sizeof(struct in6_addr);
1323 netif_keep_dst(dev);
/* This perm addr will be used as the interface identifier by IPv6 */
1325 dev->addr_assign_type = NET_ADDR_RANDOM;
1326 eth_random_addr(dev->perm_addr);
1329 #define GRE6_FEATURES (NETIF_F_SG | \
1330 NETIF_F_FRAGLIST | \
1334 static void ip6gre_tnl_init_features(struct net_device *dev)
1336 struct ip6_tnl *nt = netdev_priv(dev);
1338 dev->features |= GRE6_FEATURES;
1339 dev->hw_features |= GRE6_FEATURES;
1341 if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
/* TCP offload with GRE SEQ is not supported, nor can we
 * support two levels of outer headers requiring an update.
 */
1346 if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
1347 nt->encap.type == TUNNEL_ENCAP_NONE) {
1348 dev->features |= NETIF_F_GSO_SOFTWARE;
1349 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
/* Can use a lockless transmit, unless we generate output sequences */
1355 dev->features |= NETIF_F_LLTX;
1359 static int ip6gre_tunnel_init_common(struct net_device *dev)
1361 struct ip6_tnl *tunnel;
1365 tunnel = netdev_priv(dev);
1368 tunnel->net = dev_net(dev);
1369 strcpy(tunnel->parms.name, dev->name);
1371 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1375 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1377 free_percpu(dev->tstats);
1382 tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
1383 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
1384 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1386 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1387 dev->mtu = ETH_DATA_LEN - t_hlen;
1388 if (dev->type == ARPHRD_ETHER)
1389 dev->mtu -= ETH_HLEN;
1390 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1393 if (tunnel->parms.collect_md) {
1394 dev->features |= NETIF_F_NETNS_LOCAL;
1395 netif_keep_dst(dev);
1397 ip6gre_tnl_init_features(dev);
1402 static int ip6gre_tunnel_init(struct net_device *dev)
1404 struct ip6_tnl *tunnel;
1407 ret = ip6gre_tunnel_init_common(dev);
1411 tunnel = netdev_priv(dev);
1413 if (tunnel->parms.collect_md)
1416 memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
1417 memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
1419 if (ipv6_addr_any(&tunnel->parms.raddr))
1420 dev->header_ops = &ip6gre_header_ops;
1425 static void ip6gre_fb_tunnel_init(struct net_device *dev)
1427 struct ip6_tnl *tunnel = netdev_priv(dev);
1430 tunnel->net = dev_net(dev);
1431 strcpy(tunnel->parms.name, dev->name);
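/* The fallback device assumes the minimal encapsulation: an outer IPv6
 * header plus the 4-byte GRE base header, with no key/csum/seq options.
 */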
1433 tunnel->hlen = sizeof(struct ipv6hdr) + 4;
1438 static struct inet6_protocol ip6gre_protocol __read_mostly = {
1440 .err_handler = ip6gre_err,
1441 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1444 static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
1446 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1447 struct net_device *dev, *aux;
1450 for_each_netdev_safe(net, dev, aux)
1451 if (dev->rtnl_link_ops == &ip6gre_link_ops ||
1452 dev->rtnl_link_ops == &ip6gre_tap_ops ||
1453 dev->rtnl_link_ops == &ip6erspan_tap_ops)
1454 unregister_netdevice_queue(dev, head);
1456 for (prio = 0; prio < 4; prio++) {
1458 for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
1461 t = rtnl_dereference(ign->tunnels[prio][h]);
/* If dev is in the same netns, it has already
 * been added to the list by the previous loop.
 */
1467 if (!net_eq(dev_net(t->dev), net))
1468 unregister_netdevice_queue(t->dev,
1470 t = rtnl_dereference(t->next);
1476 static int __net_init ip6gre_init_net(struct net *net)
1478 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1481 if (!net_has_fallback_tunnels(net))
1483 ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
1485 ip6gre_tunnel_setup);
1486 if (!ign->fb_tunnel_dev) {
1490 dev_net_set(ign->fb_tunnel_dev, net);
/* FB netdevice is special: we have one, and only one per netns.
 * Allowing it to move to another netns is clearly unsafe.
 */
1494 ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
1497 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
1498 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
1500 err = register_netdev(ign->fb_tunnel_dev);
1504 rcu_assign_pointer(ign->tunnels_wc[0],
1505 netdev_priv(ign->fb_tunnel_dev));
1509 free_netdev(ign->fb_tunnel_dev);
1514 static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
1520 list_for_each_entry(net, net_list, exit_list)
1521 ip6gre_destroy_tunnels(net, &list);
1522 unregister_netdevice_many(&list);
1526 static struct pernet_operations ip6gre_net_ops = {
1527 .init = ip6gre_init_net,
1528 .exit_batch = ip6gre_exit_batch_net,
1529 .id = &ip6gre_net_id,
1530 .size = sizeof(struct ip6gre_net),
1533 static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1534 struct netlink_ext_ack *extack)
1542 if (data[IFLA_GRE_IFLAGS])
1543 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1544 if (data[IFLA_GRE_OFLAGS])
1545 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1546 if (flags & (GRE_VERSION|GRE_ROUTING))
1552 static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1553 struct netlink_ext_ack *extack)
1555 struct in6_addr daddr;
1557 if (tb[IFLA_ADDRESS]) {
1558 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1560 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1561 return -EADDRNOTAVAIL;
1567 if (data[IFLA_GRE_REMOTE]) {
1568 daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
1569 if (ipv6_addr_any(&daddr))
1574 return ip6gre_tunnel_validate(tb, data, extack);
1577 static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1578 struct netlink_ext_ack *extack)
1586 ret = ip6gre_tap_validate(tb, data, extack);
/* ERSPAN should only have the GRE sequence and key flags */
1591 if (data[IFLA_GRE_OFLAGS])
1592 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1593 if (data[IFLA_GRE_IFLAGS])
1594 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1595 if (!data[IFLA_GRE_COLLECT_METADATA] &&
1596 flags != (GRE_SEQ | GRE_KEY))
/* The ERSPAN session ID is only 10 bits. Since we reuse the
 * 32-bit key field as the ID, check its range.
 */
1602 if (data[IFLA_GRE_IKEY] &&
1603 (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1606 if (data[IFLA_GRE_OKEY] &&
1607 (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1610 if (data[IFLA_GRE_ERSPAN_VER]) {
1611 ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1612 if (ver != 1 && ver != 2)
1617 if (data[IFLA_GRE_ERSPAN_INDEX]) {
1618 u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1620 if (index & ~INDEX_MASK)
1623 } else if (ver == 2) {
1624 if (data[IFLA_GRE_ERSPAN_DIR]) {
1625 u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1627 if (dir & ~(DIR_MASK >> DIR_OFFSET))
1631 if (data[IFLA_GRE_ERSPAN_HWID]) {
1632 u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1634 if (hwid & ~(HWID_MASK >> HWID_OFFSET))
1642 static void ip6gre_netlink_parms(struct nlattr *data[],
1643 struct __ip6_tnl_parm *parms)
1645 memset(parms, 0, sizeof(*parms));
1650 if (data[IFLA_GRE_LINK])
1651 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1653 if (data[IFLA_GRE_IFLAGS])
1654 parms->i_flags = gre_flags_to_tnl_flags(
1655 nla_get_be16(data[IFLA_GRE_IFLAGS]));
1657 if (data[IFLA_GRE_OFLAGS])
1658 parms->o_flags = gre_flags_to_tnl_flags(
1659 nla_get_be16(data[IFLA_GRE_OFLAGS]));
1661 if (data[IFLA_GRE_IKEY])
1662 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1664 if (data[IFLA_GRE_OKEY])
1665 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1667 if (data[IFLA_GRE_LOCAL])
1668 parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);
1670 if (data[IFLA_GRE_REMOTE])
1671 parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
1673 if (data[IFLA_GRE_TTL])
1674 parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
1676 if (data[IFLA_GRE_ENCAP_LIMIT])
1677 parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
1679 if (data[IFLA_GRE_FLOWINFO])
1680 parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);
1682 if (data[IFLA_GRE_FLAGS])
1683 parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
1685 if (data[IFLA_GRE_FWMARK])
1686 parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1688 if (data[IFLA_GRE_COLLECT_METADATA])
1689 parms->collect_md = true;
1691 if (data[IFLA_GRE_ERSPAN_VER])
1692 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1694 if (parms->erspan_ver == 1) {
1695 if (data[IFLA_GRE_ERSPAN_INDEX])
1696 parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1697 } else if (parms->erspan_ver == 2) {
1698 if (data[IFLA_GRE_ERSPAN_DIR])
1699 parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1700 if (data[IFLA_GRE_ERSPAN_HWID])
1701 parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1705 static int ip6gre_tap_init(struct net_device *dev)
1709 ret = ip6gre_tunnel_init_common(dev);
1713 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1718 static const struct net_device_ops ip6gre_tap_netdev_ops = {
1719 .ndo_init = ip6gre_tap_init,
1720 .ndo_uninit = ip6gre_tunnel_uninit,
1721 .ndo_start_xmit = ip6gre_tunnel_xmit,
1722 .ndo_set_mac_address = eth_mac_addr,
1723 .ndo_validate_addr = eth_validate_addr,
1724 .ndo_change_mtu = ip6_tnl_change_mtu,
1725 .ndo_get_stats64 = ip_tunnel_get_stats64,
1726 .ndo_get_iflink = ip6_tnl_get_iflink,
1729 static int ip6erspan_tap_init(struct net_device *dev)
1731 struct ip6_tnl *tunnel;
1735 tunnel = netdev_priv(dev);
1738 tunnel->net = dev_net(dev);
1739 strcpy(tunnel->parms.name, dev->name);
1741 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1745 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1747 free_percpu(dev->tstats);
1752 tunnel->tun_hlen = 8;
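/* A tun_hlen of 8 is the 4-byte GRE base header plus the 4-byte sequence
 * number; ERSPAN frames are always transmitted with TUNNEL_SEQ set (see
 * ip6erspan_tunnel_xmit above).
 */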
1753 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1754 erspan_hdr_len(tunnel->parms.erspan_ver);
1755 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1757 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1758 dev->mtu = ETH_DATA_LEN - t_hlen;
1759 if (dev->type == ARPHRD_ETHER)
1760 dev->mtu -= ETH_HLEN;
1761 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1764 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1765 ip6gre_tnl_link_config(tunnel, 1);
1770 static const struct net_device_ops ip6erspan_netdev_ops = {
1771 .ndo_init = ip6erspan_tap_init,
1772 .ndo_uninit = ip6gre_tunnel_uninit,
1773 .ndo_start_xmit = ip6erspan_tunnel_xmit,
1774 .ndo_set_mac_address = eth_mac_addr,
1775 .ndo_validate_addr = eth_validate_addr,
1776 .ndo_change_mtu = ip6_tnl_change_mtu,
1777 .ndo_get_stats64 = ip_tunnel_get_stats64,
1778 .ndo_get_iflink = ip6_tnl_get_iflink,
1781 static void ip6gre_tap_setup(struct net_device *dev)
1787 dev->netdev_ops = &ip6gre_tap_netdev_ops;
1788 dev->needs_free_netdev = true;
1789 dev->priv_destructor = ip6gre_dev_free;
1791 dev->features |= NETIF_F_NETNS_LOCAL;
1792 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1793 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1794 netif_keep_dst(dev);
1797 bool is_ip6gretap_dev(const struct net_device *dev)
1799 return dev->netdev_ops == &ip6gre_tap_netdev_ops;
1801 EXPORT_SYMBOL_GPL(is_ip6gretap_dev);
1803 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
1804 struct ip_tunnel_encap *ipencap)
1808 memset(ipencap, 0, sizeof(*ipencap));
1813 if (data[IFLA_GRE_ENCAP_TYPE]) {
1815 ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1818 if (data[IFLA_GRE_ENCAP_FLAGS]) {
1820 ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1823 if (data[IFLA_GRE_ENCAP_SPORT]) {
1825 ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1828 if (data[IFLA_GRE_ENCAP_DPORT]) {
1830 ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1836 static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
1837 struct nlattr *tb[], struct nlattr *data[],
1838 struct netlink_ext_ack *extack)
1841 struct net *net = dev_net(dev);
1842 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1843 struct ip_tunnel_encap ipencap;
1846 nt = netdev_priv(dev);
1848 if (ip6gre_netlink_encap_parms(data, &ipencap)) {
1849 int err = ip6_tnl_encap_setup(nt, &ipencap);
1855 ip6gre_netlink_parms(data, &nt->parms);
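/* Only one collect_md (external metadata) tunnel is allowed per netns:
 * ign->collect_md_tun is a single slot, checked below before linking.
 */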
1857 if (nt->parms.collect_md) {
1858 if (rtnl_dereference(ign->collect_md_tun))
1861 if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
1865 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1866 eth_hw_addr_random(dev);
1869 nt->net = dev_net(dev);
1871 err = register_netdevice(dev);
1875 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
1878 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
1881 ip6gre_tunnel_link(ign, nt);
1887 static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
1888 struct nlattr *data[],
1889 struct netlink_ext_ack *extack)
1891 struct ip6_tnl *t, *nt = netdev_priv(dev);
1892 struct net *net = nt->net;
1893 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1894 struct __ip6_tnl_parm p;
1895 struct ip_tunnel_encap ipencap;
1897 if (dev == ign->fb_tunnel_dev)
1900 if (ip6gre_netlink_encap_parms(data, &ipencap)) {
1901 int err = ip6_tnl_encap_setup(nt, &ipencap);
1907 ip6gre_netlink_parms(data, &p);
1909 t = ip6gre_tunnel_locate(net, &p, 0);
1918 ip6gre_tunnel_unlink(ign, t);
1919 ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
1920 ip6gre_tunnel_link(ign, t);
1924 static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
1926 struct net *net = dev_net(dev);
1927 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1929 if (dev != ign->fb_tunnel_dev)
1930 unregister_netdevice_queue(dev, head);
1933 static size_t ip6gre_get_size(const struct net_device *dev)
1938 /* IFLA_GRE_IFLAGS */
1940 /* IFLA_GRE_OFLAGS */
1946 /* IFLA_GRE_LOCAL */
1947 nla_total_size(sizeof(struct in6_addr)) +
1948 /* IFLA_GRE_REMOTE */
1949 nla_total_size(sizeof(struct in6_addr)) +
1952 /* IFLA_GRE_ENCAP_LIMIT */
1954 /* IFLA_GRE_FLOWINFO */
1956 /* IFLA_GRE_FLAGS */
1958 /* IFLA_GRE_ENCAP_TYPE */
1960 /* IFLA_GRE_ENCAP_FLAGS */
1962 /* IFLA_GRE_ENCAP_SPORT */
1964 /* IFLA_GRE_ENCAP_DPORT */
1966 /* IFLA_GRE_COLLECT_METADATA */
1968 /* IFLA_GRE_FWMARK */
1970 /* IFLA_GRE_ERSPAN_INDEX */
1975 static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1977 struct ip6_tnl *t = netdev_priv(dev);
1978 struct __ip6_tnl_parm *p = &t->parms;
1980 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1981 nla_put_be16(skb, IFLA_GRE_IFLAGS,
1982 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1983 nla_put_be16(skb, IFLA_GRE_OFLAGS,
1984 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
1985 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1986 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1987 nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
1988 nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
1989 nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
1990 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
1991 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
1992 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
1993 nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) ||
1994 nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
1995 goto nla_put_failure;
1997 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1999 nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
2001 nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
2003 nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
2005 goto nla_put_failure;
2007 if (p->collect_md) {
2008 if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
2009 goto nla_put_failure;
2012 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
2013 goto nla_put_failure;
2015 if (p->erspan_ver == 1) {
2016 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2017 goto nla_put_failure;
2018 } else if (p->erspan_ver == 2) {
2019 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
2020 goto nla_put_failure;
2021 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
2022 goto nla_put_failure;
2031 static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
2032 [IFLA_GRE_LINK] = { .type = NLA_U32 },
2033 [IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
2034 [IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
2035 [IFLA_GRE_IKEY] = { .type = NLA_U32 },
2036 [IFLA_GRE_OKEY] = { .type = NLA_U32 },
2037 [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
2038 [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
2039 [IFLA_GRE_TTL] = { .type = NLA_U8 },
2040 [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
2041 [IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
2042 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
2043 [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
2044 [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
2045 [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
2046 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
2047 [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
2048 [IFLA_GRE_FWMARK] = { .type = NLA_U32 },
2049 [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
2050 [IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 },
2051 [IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 },
2052 [IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
2055 static void ip6erspan_tap_setup(struct net_device *dev)
2059 dev->netdev_ops = &ip6erspan_netdev_ops;
2060 dev->needs_free_netdev = true;
2061 dev->priv_destructor = ip6gre_dev_free;
2063 dev->features |= NETIF_F_NETNS_LOCAL;
2064 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2065 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2066 netif_keep_dst(dev);
2069 static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
2071 .maxtype = IFLA_GRE_MAX,
2072 .policy = ip6gre_policy,
2073 .priv_size = sizeof(struct ip6_tnl),
2074 .setup = ip6gre_tunnel_setup,
2075 .validate = ip6gre_tunnel_validate,
2076 .newlink = ip6gre_newlink,
2077 .changelink = ip6gre_changelink,
2078 .dellink = ip6gre_dellink,
2079 .get_size = ip6gre_get_size,
2080 .fill_info = ip6gre_fill_info,
2081 .get_link_net = ip6_tnl_get_link_net,
2084 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
2085 .kind = "ip6gretap",
2086 .maxtype = IFLA_GRE_MAX,
2087 .policy = ip6gre_policy,
2088 .priv_size = sizeof(struct ip6_tnl),
2089 .setup = ip6gre_tap_setup,
2090 .validate = ip6gre_tap_validate,
2091 .newlink = ip6gre_newlink,
2092 .changelink = ip6gre_changelink,
2093 .get_size = ip6gre_get_size,
2094 .fill_info = ip6gre_fill_info,
2095 .get_link_net = ip6_tnl_get_link_net,
2098 static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
2099 .kind = "ip6erspan",
2100 .maxtype = IFLA_GRE_MAX,
2101 .policy = ip6gre_policy,
2102 .priv_size = sizeof(struct ip6_tnl),
2103 .setup = ip6erspan_tap_setup,
2104 .validate = ip6erspan_tap_validate,
2105 .newlink = ip6gre_newlink,
2106 .changelink = ip6gre_changelink,
2107 .get_size = ip6gre_get_size,
2108 .fill_info = ip6gre_fill_info,
2109 .get_link_net = ip6_tnl_get_link_net,
/* And now the module's code and kernel interface. */
2116 static int __init ip6gre_init(void)
2120 pr_info("GRE over IPv6 tunneling driver\n");
2122 err = register_pernet_device(&ip6gre_net_ops);
2126 err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
2128 pr_info("%s: can't add protocol\n", __func__);
2129 goto add_proto_failed;
2132 err = rtnl_link_register(&ip6gre_link_ops);
2134 goto rtnl_link_failed;
2136 err = rtnl_link_register(&ip6gre_tap_ops);
2138 goto tap_ops_failed;
2140 err = rtnl_link_register(&ip6erspan_tap_ops);
2142 goto erspan_link_failed;
2148 rtnl_link_unregister(&ip6gre_tap_ops);
2150 rtnl_link_unregister(&ip6gre_link_ops);
2152 inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
2154 unregister_pernet_device(&ip6gre_net_ops);
2158 static void __exit ip6gre_fini(void)
2160 rtnl_link_unregister(&ip6gre_tap_ops);
2161 rtnl_link_unregister(&ip6gre_link_ops);
2162 rtnl_link_unregister(&ip6erspan_tap_ops);
2163 inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
2164 unregister_pernet_device(&ip6gre_net_ops);
2167 module_init(ip6gre_init);
2168 module_exit(ip6gre_fini);
2169 MODULE_LICENSE("GPL");
2170 MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
2171 MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
2172 MODULE_ALIAS_RTNL_LINK("ip6gre");
2173 MODULE_ALIAS_RTNL_LINK("ip6gretap");
2174 MODULE_ALIAS_RTNL_LINK("ip6erspan");
2175 MODULE_ALIAS_NETDEV("ip6gre0");