/*
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  Otherwise, round-robin the list.
 *
 *	Fixed routing subtrees.
 */
#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <linux/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <linux/uaccess.h>

#include <linux/sysctl.h>
static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS
enum rt6_nud_state {
        RT6_NUD_FAIL_HARD = -3,
        RT6_NUD_FAIL_PROBE = -2,
        RT6_NUD_FAIL_DO_RR = -1,
        RT6_NUD_SUCCEED = 1
};
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
                           struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                               struct sk_buff *skb, u32 mtu);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
                            struct sk_buff *skb);
static int rt6_score_route(struct fib6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct fib6_info *rt);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                         struct fib6_info *rt, struct dst_entry *dst,
                         struct in6_addr *dest, struct in6_addr *src,
                         int iif, int type, u32 portid, u32 seq,
                         unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
                                           struct in6_addr *daddr,
                                           struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct fib6_info *rt6_add_route_info(struct net *net,
                                            const struct in6_addr *prefix, int prefixlen,
                                            const struct in6_addr *gwaddr,
                                            struct net_device *dev,
                                            unsigned int pref);
static struct fib6_info *rt6_get_route_info(struct net *net,
                                            const struct in6_addr *prefix, int prefixlen,
                                            const struct in6_addr *gwaddr,
                                            struct net_device *dev);
#endif
struct uncached_list {
        spinlock_t lock;
        struct list_head head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

void rt6_uncached_list_add(struct rt6_info *rt)
{
        struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

        rt->rt6i_uncached_list = ul;

        spin_lock_bh(&ul->lock);
        list_add_tail(&rt->rt6i_uncached, &ul->head);
        spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
        if (!list_empty(&rt->rt6i_uncached)) {
                struct uncached_list *ul = rt->rt6i_uncached_list;
                struct net *net = dev_net(rt->dst.dev);

                spin_lock_bh(&ul->lock);
                list_del(&rt->rt6i_uncached);
                atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
                spin_unlock_bh(&ul->lock);
        }
}

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
        struct net_device *loopback_dev = net->loopback_dev;
        int cpu;

        if (dev == loopback_dev)
                return;

        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
                struct rt6_info *rt;

                spin_lock_bh(&ul->lock);
                list_for_each_entry(rt, &ul->head, rt6i_uncached) {
                        struct inet6_dev *rt_idev = rt->rt6i_idev;
                        struct net_device *rt_dev = rt->dst.dev;

                        if (rt_idev->dev == dev) {
                                rt->rt6i_idev = in6_dev_get(loopback_dev);
                                in6_dev_put(rt_idev);
                        }

                        if (rt_dev == dev) {
                                rt->dst.dev = loopback_dev;
                                dev_hold(rt->dst.dev);
                                dev_put(rt_dev);
                        }
                }
                spin_unlock_bh(&ul->lock);
        }
}
static inline const void *choose_neigh_daddr(const struct in6_addr *p,
                                             struct sk_buff *skb,
                                             const void *daddr)
{
        if (!ipv6_addr_any(p))
                return (const void *) p;
        else if (skb)
                return &ipv6_hdr(skb)->daddr;
        return daddr;
}

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
                                   struct net_device *dev,
                                   struct sk_buff *skb,
                                   const void *daddr)
{
        struct neighbour *n;

        daddr = choose_neigh_daddr(gw, skb, daddr);
        n = __ipv6_neigh_lookup(dev, daddr);
        if (n)
                return n;
        return neigh_create(&nd_tbl, daddr, dev);
}

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
                                              struct sk_buff *skb,
                                              const void *daddr)
{
        const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

        return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
        struct net_device *dev = dst->dev;
        struct rt6_info *rt = (struct rt6_info *)dst;

        daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr);
        if (!daddr)
                return;
        if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
                return;
        if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
                return;
        __ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
        .family                 = AF_INET6,
        .gc                     = ip6_dst_gc,
        .gc_thresh              = 1024,
        .check                  = ip6_dst_check,
        .default_advmss         = ip6_default_advmss,
        .mtu                    = ip6_mtu,
        .cow_metrics            = dst_cow_metrics_generic,
        .destroy                = ip6_dst_destroy,
        .ifdown                 = ip6_dst_ifdown,
        .negative_advice        = ip6_negative_advice,
        .link_failure           = ip6_link_failure,
        .update_pmtu            = ip6_rt_update_pmtu,
        .redirect               = rt6_do_redirect,
        .local_out              = __ip6_local_out,
        .neigh_lookup           = ip6_dst_neigh_lookup,
        .confirm_neigh          = ip6_confirm_neigh,
};
static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

        return mtu ? : dst->dev->mtu;
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                         struct sk_buff *skb, u32 mtu)
{
}

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
                                      struct sk_buff *skb)
{
}

static struct dst_ops ip6_dst_blackhole_ops = {
        .family                 = AF_INET6,
        .destroy                = ip6_dst_destroy,
        .check                  = ip6_dst_check,
        .mtu                    = ip6_blackhole_mtu,
        .default_advmss         = ip6_default_advmss,
        .update_pmtu            = ip6_rt_blackhole_update_pmtu,
        .redirect               = ip6_rt_blackhole_redirect,
        .cow_metrics            = dst_cow_metrics_generic,
        .neigh_lookup           = ip6_dst_neigh_lookup,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
        [RTAX_HOPLIMIT - 1] = 0,
};

static const struct fib6_info fib6_null_entry_template = {
        .fib6_flags     = (RTF_REJECT | RTF_NONEXTHOP),
        .fib6_protocol  = RTPROT_KERNEL,
        .fib6_metric    = ~(u32)0,
        .fib6_ref       = ATOMIC_INIT(1),
        .fib6_type      = RTN_UNREACHABLE,
        .fib6_metrics   = (struct dst_metrics *)&dst_default_metrics,
};

static const struct rt6_info ip6_null_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -ENETUNREACH,
                .input          = ip6_pkt_discard,
                .output         = ip6_pkt_discard_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -EACCES,
                .input          = ip6_pkt_prohibit,
                .output         = ip6_pkt_prohibit_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -EINVAL,
                .input          = dst_discard,
                .output         = dst_discard_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
};

#endif
static void rt6_info_init(struct rt6_info *rt)
{
        struct dst_entry *dst = &rt->dst;

        memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
        INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
                               int flags)
{
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        1, DST_OBSOLETE_FORCE_CHK, flags);

        if (rt) {
                rt6_info_init(rt);
                atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
        }

        return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct fib6_info *from;
        struct inet6_dev *idev;

        ip_dst_metrics_put(dst);
        rt6_uncached_list_del(rt);

        idev = rt->rt6i_idev;
        if (idev) {
                rt->rt6i_idev = NULL;
                in6_dev_put(idev);
        }

        rcu_read_lock();
        from = rcu_dereference(rt->from);
        rcu_assign_pointer(rt->from, NULL);
        fib6_info_release(from);
        rcu_read_unlock();
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                           int how)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct inet6_dev *idev = rt->rt6i_idev;
        struct net_device *loopback_dev =
                dev_net(dev)->loopback_dev;

        if (idev && idev->dev != loopback_dev) {
                struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);

                if (loopback_idev) {
                        rt->rt6i_idev = loopback_idev;
                        in6_dev_put(idev);
                }
        }
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
        if (rt->rt6i_flags & RTF_EXPIRES)
                return time_after(jiffies, rt->dst.expires);
        else
                return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
        struct fib6_info *from;

        from = rcu_dereference(rt->from);

        if (rt->rt6i_flags & RTF_EXPIRES) {
                if (time_after(jiffies, rt->dst.expires))
                        return true;
        } else if (from) {
                return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
                        fib6_check_expired(from);
        }
        return false;
}

struct fib6_info *fib6_multipath_select(const struct net *net,
                                        struct fib6_info *match,
                                        struct flowi6 *fl6, int oif,
                                        const struct sk_buff *skb,
                                        int strict)
{
        struct fib6_info *sibling, *next_sibling;

        /* We might have already computed the hash for ICMPv6 errors. In such
         * case it will always be non-zero. Otherwise now is the time to do it.
         */
        if (!fl6->mp_hash)
                fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

        if (fl6->mp_hash <= atomic_read(&match->fib6_nh.nh_upper_bound))
                return match;

        list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
                                 fib6_siblings) {
                int nh_upper_bound;

                nh_upper_bound = atomic_read(&sibling->fib6_nh.nh_upper_bound);
                if (fl6->mp_hash > nh_upper_bound)
                        continue;
                if (rt6_score_route(sibling, oif, strict) < 0)
                        break;
                match = sibling;
                break;
        }

        return match;
}
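
/* Example (illustrative sketch, not part of the original file): with three
 * siblings carrying weights 1/1/2, the rebalancing logic assigns upper
 * bounds that split the 31-bit hash space roughly 25%/25%/50%, e.g.:
 *
 *      nh_upper_bound:  0x1fffffff, 0x3fffffff, 0x7fffffff
 *
 * A flow whose mp_hash is 0x2abcdef0 skips sibling 0 (hash > bound), lands
 * on sibling 1 (hash <= bound), and stays there for its lifetime, because
 * the hash is a pure function of the flow keys.  The bound values above
 * are hypothetical, chosen only for the example.
 */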
/*
 *      Route lookup. rcu_read_lock() should be held.
 */

static inline struct fib6_info *rt6_device_match(struct net *net,
                                                 struct fib6_info *rt,
                                                 const struct in6_addr *saddr,
                                                 int oif,
                                                 int flags)
{
        struct fib6_info *sprt;

        if (!oif && ipv6_addr_any(saddr) &&
            !(rt->fib6_nh.nh_flags & RTNH_F_DEAD))
                return rt;

        for (sprt = rt; sprt; sprt = rcu_dereference(sprt->fib6_next)) {
                const struct net_device *dev = sprt->fib6_nh.nh_dev;

                if (sprt->fib6_nh.nh_flags & RTNH_F_DEAD)
                        continue;

                if (oif) {
                        if (dev->ifindex == oif)
                                return sprt;
                } else {
                        if (ipv6_chk_addr(net, saddr, dev,
                                          flags & RT6_LOOKUP_F_IFACE))
                                return sprt;
                }
        }

        if (oif && flags & RT6_LOOKUP_F_IFACE)
                return net->ipv6.fib6_null_entry;

        return rt->fib6_nh.nh_flags & RTNH_F_DEAD ? net->ipv6.fib6_null_entry : rt;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
        struct work_struct work;
        struct in6_addr target;
        struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
        struct in6_addr mcaddr;
        struct __rt6_probe_work *work =
                container_of(w, struct __rt6_probe_work, work);

        addrconf_addr_solict_mult(&work->target, &mcaddr);
        ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
        dev_put(work->dev);
        kfree(work);
}

static void rt6_probe(struct fib6_info *rt)
{
        struct __rt6_probe_work *work;
        const struct in6_addr *nh_gw;
        struct neighbour *neigh;
        struct net_device *dev;

        /*
         * Okay, this does not seem to be appropriate
         * for now, however, we need to check if it
         * is really so; aka Router Reachability Probing.
         */

        /*
         * Router Reachability Probe MUST be rate-limited
         * to no more than one per minute.
         */
        if (!rt || !(rt->fib6_flags & RTF_GATEWAY))
                return;

        nh_gw = &rt->fib6_nh.nh_gw;
        dev = rt->fib6_nh.nh_dev;
        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
        if (neigh) {
                struct inet6_dev *idev;

                if (neigh->nud_state & NUD_VALID)
                        goto out;

                idev = __in6_dev_get(dev);
                work = NULL;
                write_lock(&neigh->lock);
                if (!(neigh->nud_state & NUD_VALID) &&
                    time_after(jiffies,
                               neigh->updated + idev->cnf.rtr_probe_interval)) {
                        work = kmalloc(sizeof(*work), GFP_ATOMIC);
                        if (work)
                                __neigh_set_probe_once(neigh);
                }
                write_unlock(&neigh->lock);
        } else {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
        }

        if (work) {
                INIT_WORK(&work->work, rt6_probe_deferred);
                work->target = *nh_gw;
                dev_hold(dev);
                work->dev = dev;
                schedule_work(&work->work);
        }

out:
        rcu_read_unlock_bh();
}
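
/* Example (illustrative, not from the original file): with the default
 * rtr_probe_interval of 60 seconds (60 * HZ jiffies), a gateway whose
 * neighbour entry was last updated at jiffies == J only becomes eligible
 * for another probe once
 *
 *      time_after(jiffies, J + idev->cnf.rtr_probe_interval)
 *
 * turns true, i.e. at most one deferred NS probe per minute per gateway,
 * matching the rate limit quoted in the comment above.  The probe itself
 * runs from rt6_probe_deferred(), so the lookup fast path never sleeps.
 */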
#else
static inline void rt6_probe(struct fib6_info *rt)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct fib6_info *rt, int oif)
{
        const struct net_device *dev = rt->fib6_nh.nh_dev;

        if (!oif || dev->ifindex == oif)
                return 2;
        return 0;
}

static inline enum rt6_nud_state rt6_check_neigh(struct fib6_info *rt)
{
        enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
        struct neighbour *neigh;

        if (rt->fib6_flags & RTF_NONEXTHOP ||
            !(rt->fib6_flags & RTF_GATEWAY))
                return RT6_NUD_SUCCEED;

        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(rt->fib6_nh.nh_dev,
                                          &rt->fib6_nh.nh_gw);
        if (neigh) {
                read_lock(&neigh->lock);
                if (neigh->nud_state & NUD_VALID)
                        ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
                else if (!(neigh->nud_state & NUD_FAILED))
                        ret = RT6_NUD_SUCCEED;
                else
                        ret = RT6_NUD_FAIL_PROBE;
#endif
                read_unlock(&neigh->lock);
        } else {
                ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
                      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
        }
        rcu_read_unlock_bh();

        return ret;
}

static int rt6_score_route(struct fib6_info *rt, int oif, int strict)
{
        int m;

        m = rt6_check_dev(rt, oif);
        if (!m && (strict & RT6_LOOKUP_F_IFACE))
                return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
        m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->fib6_flags)) << 2;
#endif
        if (strict & RT6_LOOKUP_F_REACHABLE) {
                int n = rt6_check_neigh(rt);

                if (n < 0)
                        return n;
        }
        return m;
}
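
/* Example (illustrative, not from the original file): the score packs
 * several criteria into one small integer.  A route whose device matches
 * the requested oif scores m = 2; with CONFIG_IPV6_ROUTER_PREF a high
 * router preference decodes to 3 (1:low, 2:medium, 3:high) and lands above
 * the device bits:
 *
 *      int m = 2;              // rt6_check_dev() matched oif
 *      m |= 3 << 2;            // high preference -> m == 14
 *
 * A negative return (RT6_NUD_FAIL_*) is not a score at all: find_match()
 * treats it as "round-robin" or "skip" instead of comparing it against
 * *mpri.
 */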
/* called with rcu_read_lock held */
static inline bool fib6_ignore_linkdown(const struct fib6_info *f6i)
{
        const struct net_device *dev = fib6_info_nh_dev(f6i);
        bool rc = false;

        if (dev) {
                const struct inet6_dev *idev = __in6_dev_get(dev);

                rc = !!idev->cnf.ignore_routes_with_linkdown;
        }

        return rc;
}

static struct fib6_info *find_match(struct fib6_info *rt, int oif, int strict,
                                    int *mpri, struct fib6_info *match,
                                    bool *do_rr)
{
        int m;
        bool match_do_rr = false;

        if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
                goto out;

        if (fib6_ignore_linkdown(rt) &&
            rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
            !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
                goto out;

        if (fib6_check_expired(rt))
                goto out;

        m = rt6_score_route(rt, oif, strict);
        if (m == RT6_NUD_FAIL_DO_RR) {
                match_do_rr = true;
                m = 0; /* lowest valid score */
        } else if (m == RT6_NUD_FAIL_HARD) {
                goto out;
        }

        if (strict & RT6_LOOKUP_F_REACHABLE)
                rt6_probe(rt);

        /* note that m can be RT6_NUD_FAIL_PROBE at this point */
        if (m > *mpri) {
                *do_rr = match_do_rr;
                *mpri = m;
                match = rt;
        }
out:
        return match;
}

static struct fib6_info *find_rr_leaf(struct fib6_node *fn,
                                      struct fib6_info *leaf,
                                      struct fib6_info *rr_head,
                                      u32 metric, int oif, int strict,
                                      bool *do_rr)
{
        struct fib6_info *rt, *match, *cont;
        int mpri = -1;

        match = NULL;
        cont = NULL;
        for (rt = rr_head; rt; rt = rcu_dereference(rt->fib6_next)) {
                if (rt->fib6_metric != metric) {
                        cont = rt;
                        break;
                }

                match = find_match(rt, oif, strict, &mpri, match, do_rr);
        }

        for (rt = leaf; rt && rt != rr_head;
             rt = rcu_dereference(rt->fib6_next)) {
                if (rt->fib6_metric != metric) {
                        cont = rt;
                        break;
                }

                match = find_match(rt, oif, strict, &mpri, match, do_rr);
        }

        if (match || !cont)
                return match;

        for (rt = cont; rt; rt = rcu_dereference(rt->fib6_next))
                match = find_match(rt, oif, strict, &mpri, match, do_rr);

        return match;
}

static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn,
                                    int oif, int strict)
{
        struct fib6_info *leaf = rcu_dereference(fn->leaf);
        struct fib6_info *match, *rt0;
        bool do_rr = false;
        int key_plen;

        if (!leaf || leaf == net->ipv6.fib6_null_entry)
                return net->ipv6.fib6_null_entry;

        rt0 = rcu_dereference(fn->rr_ptr);
        if (!rt0)
                rt0 = leaf;

        /* Double check to make sure fn is not an intermediate node
         * and fn->leaf does not point to its child's leaf
         * (This might happen if all routes under fn are deleted from
         * the tree and fib6_repair_tree() is called on the node.)
         */
        key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
        if (rt0->fib6_src.plen)
                key_plen = rt0->fib6_src.plen;
#endif
        if (fn->fn_bit != key_plen)
                return net->ipv6.fib6_null_entry;

        match = find_rr_leaf(fn, leaf, rt0, rt0->fib6_metric, oif, strict,
                             &do_rr);

        if (do_rr) {
                struct fib6_info *next = rcu_dereference(rt0->fib6_next);

                /* no entries matched; do round-robin */
                if (!next || next->fib6_metric != rt0->fib6_metric)
                        next = leaf;

                if (next != rt0) {
                        spin_lock_bh(&leaf->fib6_table->tb6_lock);
                        /* make sure next is not being deleted from the tree */
                        if (next->fib6_node)
                                rcu_assign_pointer(fn->rr_ptr, next);
                        spin_unlock_bh(&leaf->fib6_table->tb6_lock);
                }
        }

        return match ? match : net->ipv6.fib6_null_entry;
}
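
/* Example (illustrative, not from the original file): three equal-metric
 * default routes A -> B -> C hang off one fib6_node.  While A's gateway is
 * (probably) reachable every lookup keeps returning A.  Once find_match()
 * reports RT6_NUD_FAIL_DO_RR for A, do_rr is set and rr_ptr advances:
 *
 *      lookup 1: rr_ptr = A, A fails -> B is chosen, rr_ptr := B
 *      lookup 2: rr_ptr = B, B fails -> C is chosen, rr_ptr := C
 *      lookup 3: rr_ptr = C, C fails -> wraps back to the leaf (A)
 *
 * This is the "always select the same router if it is (probably)
 * reachable, otherwise round-robin the list" policy from the file header.
 */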
static bool rt6_is_gw_or_nonexthop(const struct fib6_info *rt)
{
        return (rt->fib6_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                  const struct in6_addr *gwaddr)
{
        struct net *net = dev_net(dev);
        struct route_info *rinfo = (struct route_info *) opt;
        struct in6_addr prefix_buf, *prefix;
        unsigned int pref;
        unsigned long lifetime;
        struct fib6_info *rt;

        if (len < sizeof(struct route_info)) {
                return -EINVAL;
        }

        /* Sanity check for prefix_len and length */
        if (rinfo->length > 3) {
                return -EINVAL;
        } else if (rinfo->prefix_len > 128) {
                return -EINVAL;
        } else if (rinfo->prefix_len > 64) {
                if (rinfo->length < 2) {
                        return -EINVAL;
                }
        } else if (rinfo->prefix_len > 0) {
                if (rinfo->length < 1) {
                        return -EINVAL;
                }
        }

        pref = rinfo->route_pref;
        if (pref == ICMPV6_ROUTER_PREF_INVALID)
                return -EINVAL;

        lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

        if (rinfo->length == 3)
                prefix = (struct in6_addr *)rinfo->prefix;
        else {
                /* this function is safe */
                ipv6_addr_prefix(&prefix_buf,
                                 (struct in6_addr *)rinfo->prefix,
                                 rinfo->prefix_len);
                prefix = &prefix_buf;
        }

        if (rinfo->prefix_len == 0)
                rt = rt6_get_dflt_router(net, gwaddr, dev);
        else
                rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
                                        gwaddr, dev);

        if (rt && !lifetime) {
                ip6_del_rt(net, rt);
                rt = NULL;
        }

        if (!rt && lifetime)
                rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
                                        dev, pref);
        else if (rt)
                rt->fib6_flags = RTF_ROUTEINFO |
                                 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

        if (rt) {
                if (!addrconf_finite_timeout(lifetime))
                        fib6_clean_expires(rt);
                else
                        fib6_set_expires(rt, jiffies + HZ * lifetime);

                fib6_info_release(rt);
        }
        return 0;
}
#endif
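
/* Example (illustrative, not from the original file): the RIO length field
 * counts 8-octet units, so it also encodes how many prefix bytes are
 * actually present in the option:
 *
 *      length 1 ->  8-byte option, 0 prefix bytes
 *      length 2 -> 16-byte option, 8 prefix bytes
 *      length 3 -> 24-byte option, 16 prefix bytes
 *
 * A router advertising 2001:db8::/32 with a 1800s lifetime typically sends
 * length 2; ipv6_addr_prefix() above then zero-fills everything past
 * prefix_len before the route is added or refreshed, and a zero lifetime
 * deletes the entry instead.
 */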
/*
 *      Misc support functions
 */

/* called with rcu_read_lock held */
static struct net_device *ip6_rt_get_dev_rcu(struct fib6_info *rt)
{
        struct net_device *dev = rt->fib6_nh.nh_dev;

        if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
                /* for copies of local routes, dst->dev needs to be the
                 * device if it is a master device, the master device if
                 * device is enslaved, and the loopback as the default
                 */
                if (netif_is_l3_slave(dev) &&
                    !rt6_need_strict(&rt->fib6_dst.addr))
                        dev = l3mdev_master_dev_rcu(dev);
                else if (!netif_is_l3_master(dev))
                        dev = dev_net(dev)->loopback_dev;
                /* last case is netif_is_l3_master(dev) is true in which
                 * case we want dev returned to be dev
                 */
        }

        return dev;
}

static const int fib6_prop[RTN_MAX + 1] = {
        [RTN_UNSPEC]    = 0,
        [RTN_UNICAST]   = 0,
        [RTN_LOCAL]     = 0,
        [RTN_BROADCAST] = 0,
        [RTN_ANYCAST]   = 0,
        [RTN_MULTICAST] = 0,
        [RTN_BLACKHOLE] = -EINVAL,
        [RTN_UNREACHABLE] = -EHOSTUNREACH,
        [RTN_PROHIBIT]  = -EACCES,
        [RTN_THROW]     = -EAGAIN,
        [RTN_NAT]       = -EINVAL,
        [RTN_XRESOLVE]  = -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
{
        return fib6_prop[fib6_type];
}

static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
        unsigned short flags = 0;

        if (rt->dst_nocount)
                flags |= DST_NOCOUNT;
        if (rt->dst_nopolicy)
                flags |= DST_NOPOLICY;
        if (rt->dst_host)
                flags |= DST_HOST;

        return flags;
}

static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
{
        rt->dst.error = ip6_rt_type_to_error(ort->fib6_type);

        switch (ort->fib6_type) {
        case RTN_BLACKHOLE:
                rt->dst.output = dst_discard_out;
                rt->dst.input = dst_discard;
                break;
        case RTN_PROHIBIT:
                rt->dst.output = ip6_pkt_prohibit_out;
                rt->dst.input = ip6_pkt_prohibit;
                break;
        case RTN_THROW:
        case RTN_UNREACHABLE:
        default:
                rt->dst.output = ip6_pkt_discard_out;
                rt->dst.input = ip6_pkt_discard;
                break;
        }
}

static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
{
        if (ort->fib6_flags & RTF_REJECT) {
                ip6_rt_init_dst_reject(rt, ort);
                return;
        }

        rt->dst.error = 0;
        rt->dst.output = ip6_output;

        if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
                rt->dst.input = ip6_input;
        } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
                rt->dst.input = ip6_mc_input;
        } else {
                rt->dst.input = ip6_forward;
        }

        if (ort->fib6_nh.nh_lwtstate) {
                rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
                lwtunnel_set_redirect(&rt->dst);
        }

        rt->dst.lastuse = jiffies;
}

/* Caller must already hold reference to @from */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
        rt->rt6i_flags &= ~RTF_EXPIRES;
        rcu_assign_pointer(rt->from, from);
        ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}

/* Caller must already hold reference to @ort */
static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
{
        struct net_device *dev = fib6_info_nh_dev(ort);

        ip6_rt_init_dst(rt, ort);

        rt->rt6i_dst = ort->fib6_dst;
        rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
        rt->rt6i_gateway = ort->fib6_nh.nh_gw;
        rt->rt6i_flags = ort->fib6_flags;
        rt6_set_from(rt, ort);
#ifdef CONFIG_IPV6_SUBTREES
        rt->rt6i_src = ort->fib6_src;
#endif
}

static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
                                        struct in6_addr *saddr)
{
        struct fib6_node *pn, *sn;

        while (1) {
                if (fn->fn_flags & RTN_TL_ROOT)
                        return NULL;
                pn = rcu_dereference(fn->parent);
                sn = FIB6_SUBTREE(pn);
                if (sn && sn != fn)
                        fn = fib6_node_lookup(sn, NULL, saddr);
                else
                        fn = pn;
                if (fn->fn_flags & RTN_RTINFO)
                        return fn;
        }
}

static bool ip6_hold_safe(struct net *net, struct rt6_info **prt,
                          bool null_fallback)
{
        struct rt6_info *rt = *prt;

        if (dst_hold_safe(&rt->dst))
                return true;
        if (null_fallback) {
                rt = net->ipv6.ip6_null_entry;
                dst_hold(&rt->dst);
        } else {
                rt = NULL;
        }
        *prt = rt;
        return false;
}

/* called with rcu_read_lock held */
static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
{
        unsigned short flags = fib6_info_dst_flags(rt);
        struct net_device *dev = rt->fib6_nh.nh_dev;
        struct rt6_info *nrt;

        if (!fib6_info_hold_safe(rt))
                return NULL;

        nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
        if (nrt)
                ip6_rt_copy_init(nrt, rt);
        else
                fib6_info_release(rt);

        return nrt;
}
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
                                             struct fib6_table *table,
                                             struct flowi6 *fl6,
                                             const struct sk_buff *skb,
                                             int flags)
{
        struct fib6_info *f6i;
        struct fib6_node *fn;
        struct rt6_info *rt;

        if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
                flags &= ~RT6_LOOKUP_F_IFACE;

        rcu_read_lock();
        fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
        f6i = rcu_dereference(fn->leaf);
        if (!f6i) {
                f6i = net->ipv6.fib6_null_entry;
        } else {
                f6i = rt6_device_match(net, f6i, &fl6->saddr,
                                       fl6->flowi6_oif, flags);
                if (f6i->fib6_nsiblings && fl6->flowi6_oif == 0)
                        f6i = fib6_multipath_select(net, f6i, fl6,
                                                    fl6->flowi6_oif, skb,
                                                    flags);
        }
        if (f6i == net->ipv6.fib6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto restart;
        }

        trace_fib6_table_lookup(net, f6i, table, fl6);

        /* Search through exception table */
        rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr);
        if (rt) {
                if (ip6_hold_safe(net, &rt, true))
                        dst_use_noref(&rt->dst, jiffies);
        } else if (f6i == net->ipv6.fib6_null_entry) {
                rt = net->ipv6.ip6_null_entry;
                dst_hold(&rt->dst);
        } else {
                rt = ip6_create_rt_rcu(f6i);
                if (!rt) {
                        rt = net->ipv6.ip6_null_entry;
                        dst_hold(&rt->dst);
                }
        }

        rcu_read_unlock();

        return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
                                   const struct sk_buff *skb, int flags)
{
        return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
                            const struct in6_addr *saddr, int oif,
                            const struct sk_buff *skb, int strict)
{
        struct flowi6 fl6 = {
                .flowi6_oif = oif,
                .daddr = *daddr,
        };
        struct dst_entry *dst;
        int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

        if (saddr) {
                memcpy(&fl6.saddr, saddr, sizeof(*saddr));
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        }

        dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
        if (dst->error == 0)
                return (struct rt6_info *) dst;

        dst_release(dst);

        return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason, the
 * route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
                        struct netlink_ext_ack *extack)
{
        int err;
        struct fib6_table *table;

        table = rt->fib6_table;
        spin_lock_bh(&table->tb6_lock);
        err = fib6_add(&table->tb6_root, rt, info, extack);
        spin_unlock_bh(&table->tb6_lock);

        return err;
}

int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
        struct nl_info info = { .nl_net = net, };

        return __ip6_ins_rt(rt, &info, NULL);
}
static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
                                           const struct in6_addr *daddr,
                                           const struct in6_addr *saddr)
{
        struct net_device *dev;
        struct rt6_info *rt;

        /*
         *      Clone the route.
         */

        if (!fib6_info_hold_safe(ort))
                return NULL;

        dev = ip6_rt_get_dev_rcu(ort);
        rt = ip6_dst_alloc(dev_net(dev), dev, 0);
        if (!rt) {
                fib6_info_release(ort);
                return NULL;
        }

        ip6_rt_copy_init(rt, ort);
        rt->rt6i_flags |= RTF_CACHE;
        rt->dst.flags |= DST_HOST;
        rt->rt6i_dst.addr = *daddr;
        rt->rt6i_dst.plen = 128;

        if (!rt6_is_gw_or_nonexthop(ort)) {
                if (ort->fib6_dst.plen != 128 &&
                    ipv6_addr_equal(&ort->fib6_dst.addr, daddr))
                        rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
                        rt->rt6i_src.addr = *saddr;
                        rt->rt6i_src.plen = 128;
                }
#endif
        }

        return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
{
        unsigned short flags = fib6_info_dst_flags(rt);
        struct net_device *dev;
        struct rt6_info *pcpu_rt;

        if (!fib6_info_hold_safe(rt))
                return NULL;

        rcu_read_lock();
        dev = ip6_rt_get_dev_rcu(rt);
        pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
        rcu_read_unlock();
        if (!pcpu_rt) {
                fib6_info_release(rt);
                return NULL;
        }
        ip6_rt_copy_init(pcpu_rt, rt);
        pcpu_rt->rt6i_flags |= RTF_PCPU;
        return pcpu_rt;
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(struct fib6_info *rt)
{
        struct rt6_info *pcpu_rt, **p;

        p = this_cpu_ptr(rt->rt6i_pcpu);
        pcpu_rt = *p;

        if (pcpu_rt)
                ip6_hold_safe(NULL, &pcpu_rt, false);

        return pcpu_rt;
}

static struct rt6_info *rt6_make_pcpu_route(struct net *net,
                                            struct fib6_info *rt)
{
        struct rt6_info *pcpu_rt, *prev, **p;

        pcpu_rt = ip6_rt_pcpu_alloc(rt);
        if (!pcpu_rt) {
                dst_hold(&net->ipv6.ip6_null_entry->dst);
                return net->ipv6.ip6_null_entry;
        }

        dst_hold(&pcpu_rt->dst);
        p = this_cpu_ptr(rt->rt6i_pcpu);
        prev = cmpxchg(p, NULL, pcpu_rt);
        BUG_ON(prev);

        return pcpu_rt;
}

/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
                                 struct rt6_exception *rt6_ex)
{
        struct net *net;

        if (!bucket || !rt6_ex)
                return;

        net = dev_net(rt6_ex->rt6i->dst.dev);
        hlist_del_rcu(&rt6_ex->hlist);
        dst_release(&rt6_ex->rt6i->dst);
        kfree_rcu(rt6_ex, rcu);
        WARN_ON_ONCE(!bucket->depth);
        bucket->depth--;
        net->ipv6.rt6_stats->fib_rt_cache--;
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
        struct rt6_exception *rt6_ex, *oldest = NULL;

        if (!bucket)
                return;

        hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
                        oldest = rt6_ex;
        }
        rt6_remove_exception(bucket, oldest);
}

static u32 rt6_exception_hash(const struct in6_addr *dst,
                              const struct in6_addr *src)
{
        static u32 seed __read_mostly;
        u32 val;

        net_get_random_once(&seed, sizeof(seed));
        val = jhash(dst, sizeof(*dst), seed);

#ifdef CONFIG_IPV6_SUBTREES
        if (src)
                val = jhash(src, sizeof(*src), val);
#endif
        return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
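
/* Example (illustrative, not from the original file): with
 * FIB6_EXCEPTION_BUCKET_SIZE_SHIFT == 10 the exception table has 1024
 * buckets and hash_32() keeps the top 10 bits of the jhash value:
 *
 *      u32 val = jhash(dst, sizeof(*dst), seed);       // e.g. 0xdeadbeef
 *      u32 idx = hash_32(val, 10);                     // 0 <= idx < 1024
 *
 * Without CONFIG_IPV6_SUBTREES the source address never enters the hash,
 * which is why callers pass src_key == NULL in that configuration.
 */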
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
                              const struct in6_addr *daddr,
                              const struct in6_addr *saddr)
{
        struct rt6_exception *rt6_ex;
        u32 hval;

        if (!(*bucket) || !daddr)
                return NULL;

        hval = rt6_exception_hash(daddr, saddr);
        *bucket += hval;

        hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
                struct rt6_info *rt6 = rt6_ex->rt6i;
                bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
                if (matched && saddr)
                        matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
                if (matched)
                        return rt6_ex;
        }
        return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr)
{
        struct rt6_exception *rt6_ex;
        u32 hval;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (!(*bucket) || !daddr)
                return NULL;

        hval = rt6_exception_hash(daddr, saddr);
        *bucket += hval;

        hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
                struct rt6_info *rt6 = rt6_ex->rt6i;
                bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
                if (matched && saddr)
                        matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
                if (matched)
                        return rt6_ex;
        }
        return NULL;
}

static unsigned int fib6_mtu(const struct fib6_info *rt)
{
        unsigned int mtu;

        if (rt->fib6_pmtu) {
                mtu = rt->fib6_pmtu;
        } else {
                struct net_device *dev = fib6_info_nh_dev(rt);
                struct inet6_dev *idev;

                rcu_read_lock();
                idev = __in6_dev_get(dev);
                mtu = idev->cnf.mtu6;
                rcu_read_unlock();
        }

        mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

        return mtu - lwtunnel_headroom(rt->fib6_nh.nh_lwtstate, mtu);
}
static int rt6_insert_exception(struct rt6_info *nrt,
                                struct fib6_info *ort)
{
        struct net *net = dev_net(nrt->dst.dev);
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        int err = 0;

        spin_lock_bh(&rt6_exception_lock);

        if (ort->exception_bucket_flushed) {
                err = -EINVAL;
                goto out;
        }

        bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));
        if (!bucket) {
                bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
                                 GFP_ATOMIC);
                if (!bucket) {
                        err = -ENOMEM;
                        goto out;
                }
                rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
        }

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates ort is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (ort->fib6_src.plen)
                src_key = &nrt->rt6i_src.addr;
#endif
        /* rt6_mtu_change() might lower mtu on ort.
         * Only insert this exception route if its mtu
         * is less than ort's mtu value.
         */
        if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(ort)) {
                err = -EINVAL;
                goto out;
        }

        rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
                                               src_key);
        if (rt6_ex)
                rt6_remove_exception(bucket, rt6_ex);

        rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
        if (!rt6_ex) {
                err = -ENOMEM;
                goto out;
        }
        rt6_ex->rt6i = nrt;
        rt6_ex->stamp = jiffies;
        hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
        bucket->depth++;
        net->ipv6.rt6_stats->fib_rt_cache++;

        if (bucket->depth > FIB6_MAX_DEPTH)
                rt6_exception_remove_oldest(bucket);

out:
        spin_unlock_bh(&rt6_exception_lock);

        /* Update fn->fn_sernum to invalidate all cached dst */
        if (!err) {
                spin_lock_bh(&ort->fib6_table->tb6_lock);
                fib6_update_sernum(net, ort);
                spin_unlock_bh(&ort->fib6_table->tb6_lock);
                fib6_force_start_gc(net);
        }

        return err;
}

void rt6_flush_exceptions(struct fib6_info *rt)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        spin_lock_bh(&rt6_exception_lock);
        /* Prevent rt6_insert_exception() from recreating the bucket list */
        rt->exception_bucket_flushed = 1;

        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));
        if (!bucket)
                goto out;

        for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
                        rt6_remove_exception(bucket, rt6_ex);
                WARN_ON_ONCE(bucket->depth);
                bucket++;
        }

out:
        spin_unlock_bh(&rt6_exception_lock);
}
/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
                                           struct in6_addr *daddr,
                                           struct in6_addr *saddr)
{
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        struct rt6_info *res = NULL;

        bucket = rcu_dereference(rt->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates rt is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (rt->fib6_src.plen)
                src_key = saddr;
#endif
        rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

        if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
                res = rt6_ex->rt6i;

        return res;
}

/* Remove the passed in cached rt from the hash table that contains it */
static int rt6_remove_exception_rt(struct rt6_info *rt)
{
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        struct fib6_info *from;
        int err;

        from = rcu_dereference(rt->from);
        if (!from ||
            !(rt->rt6i_flags & RTF_CACHE))
                return -EINVAL;

        if (!rcu_access_pointer(from->rt6i_exception_bucket))
                return -ENOENT;

        spin_lock_bh(&rt6_exception_lock);
        bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));
#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates 'from' is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (from->fib6_src.plen)
                src_key = &rt->rt6i_src.addr;
#endif
        rt6_ex = __rt6_find_exception_spinlock(&bucket,
                                               &rt->rt6i_dst.addr,
                                               src_key);
        if (rt6_ex) {
                rt6_remove_exception(bucket, rt6_ex);
                err = 0;
        } else {
                err = -ENOENT;
        }

        spin_unlock_bh(&rt6_exception_lock);
        return err;
}

/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
        struct rt6_exception_bucket *bucket;
        struct fib6_info *from = rt->from;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;

        if (!from ||
            !(rt->rt6i_flags & RTF_CACHE))
                return;

        rcu_read_lock();
        bucket = rcu_dereference(from->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates 'from' is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (from->fib6_src.plen)
                src_key = &rt->rt6i_src.addr;
#endif
        rt6_ex = __rt6_find_exception_rcu(&bucket,
                                          &rt->rt6i_dst.addr,
                                          src_key);
        if (rt6_ex)
                rt6_ex->stamp = jiffies;

        rcu_read_unlock();
}
static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
                                         struct rt6_info *rt, int mtu)
{
        /* If the new MTU is lower than the route PMTU, this new MTU will be the
         * lowest MTU in the path: always allow updating the route PMTU to
         * reflect PMTU decreases.
         *
         * If the new MTU is higher, and the route PMTU is equal to the local
         * MTU, this means the old MTU is the lowest in the path, so allow
         * updating it: if other nodes now have lower MTUs, PMTU discovery will
         * handle this.
         */

        if (dst_mtu(&rt->dst) >= mtu)
                return true;

        if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
                return true;

        return false;
}
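
/* Example (illustrative, not from the original file): device MTU 1500,
 * exception route PMTU currently 1400 from an earlier Packet Too Big:
 *
 *      new mtu 1280: 1400 >= 1280               -> allowed (decrease)
 *      new mtu 1500: 1400 < 1500, 1400 != 1500  -> refused; some other
 *                    hop set 1400, raising it here would break PMTUD
 *
 * If instead the route PMTU equals the old link MTU (both 1400) and the
 * link is raised to 1500, the update is allowed: this hop was the path
 * minimum, and peers with smaller MTUs will trigger rediscovery.
 */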
static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
                                       struct fib6_info *rt, int mtu)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        int i;

        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));
        if (!bucket)
                return;

        for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                        struct rt6_info *entry = rt6_ex->rt6i;

                        /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
                         * route), the metrics of its rt->from have already
                         * been updated.
                         */
                        if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
                            rt6_mtu_change_route_allowed(idev, entry, mtu))
                                dst_metric_set(&entry->dst, RTAX_MTU, mtu);
                }
                bucket++;
        }
}

#define RTF_CACHE_GATEWAY       (RTF_GATEWAY | RTF_CACHE)

static void rt6_exceptions_clean_tohost(struct fib6_info *rt,
                                        struct in6_addr *gateway)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        if (!rcu_access_pointer(rt->rt6i_exception_bucket))
                return;

        spin_lock_bh(&rt6_exception_lock);
        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));
        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry_safe(rt6_ex, tmp,
                                                  &bucket->chain, hlist) {
                                struct rt6_info *entry = rt6_ex->rt6i;

                                if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
                                    RTF_CACHE_GATEWAY &&
                                    ipv6_addr_equal(gateway,
                                                    &entry->rt6i_gateway)) {
                                        rt6_remove_exception(bucket, rt6_ex);
                                }
                        }
                        bucket++;
                }
        }

        spin_unlock_bh(&rt6_exception_lock);
}

static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
                                      struct rt6_exception *rt6_ex,
                                      struct fib6_gc_args *gc_args,
                                      unsigned long now)
{
        struct rt6_info *rt = rt6_ex->rt6i;

        /* we are pruning and obsoleting aged-out and non gateway exceptions
         * even if others have still references to them, so that on next
         * dst_check() such references can be dropped.
         * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
         * expired, independently from their aging, as per RFC 8201 section 4
         */
        if (!(rt->rt6i_flags & RTF_EXPIRES)) {
                if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
                        RT6_TRACE("aging clone %p\n", rt);
                        rt6_remove_exception(bucket, rt6_ex);
                        return;
                }
        } else if (time_after(jiffies, rt->dst.expires)) {
                RT6_TRACE("purging expired route %p\n", rt);
                rt6_remove_exception(bucket, rt6_ex);
                return;
        }

        if (rt->rt6i_flags & RTF_GATEWAY) {
                struct neighbour *neigh;
                __u8 neigh_flags = 0;

                neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
                if (neigh)
                        neigh_flags = neigh->flags;

                if (!(neigh_flags & NTF_ROUTER)) {
                        RT6_TRACE("purging route %p via non-router but gateway\n",
                                  rt);
                        rt6_remove_exception(bucket, rt6_ex);
                        return;
                }
        }

        gc_args->more++;
}

void rt6_age_exceptions(struct fib6_info *rt,
                        struct fib6_gc_args *gc_args,
                        unsigned long now)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        if (!rcu_access_pointer(rt->rt6i_exception_bucket))
                return;

        rcu_read_lock_bh();
        spin_lock(&rt6_exception_lock);
        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                           lockdep_is_held(&rt6_exception_lock));
        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry_safe(rt6_ex, tmp,
                                                  &bucket->chain, hlist) {
                                rt6_age_examine_exception(bucket, rt6_ex,
                                                          gc_args, now);
                        }
                        bucket++;
                }
        }
        spin_unlock(&rt6_exception_lock);
        rcu_read_unlock_bh();
}
/* must be called with rcu lock held */
struct fib6_info *fib6_table_lookup(struct net *net, struct fib6_table *table,
                                    int oif, struct flowi6 *fl6, int strict)
{
        struct fib6_node *fn, *saved_fn;
        struct fib6_info *f6i;

        fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
        saved_fn = fn;

        if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
                oif = 0;

redo_rt6_select:
        f6i = rt6_select(net, fn, oif, strict);
        if (f6i == net->ipv6.fib6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto redo_rt6_select;
                else if (strict & RT6_LOOKUP_F_REACHABLE) {
                        /* also consider unreachable route */
                        strict &= ~RT6_LOOKUP_F_REACHABLE;
                        fn = saved_fn;
                        goto redo_rt6_select;
                }
        }

        trace_fib6_table_lookup(net, f6i, table, fl6);

        return f6i;
}

struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
                               int oif, struct flowi6 *fl6,
                               const struct sk_buff *skb, int flags)
{
        struct fib6_info *f6i;
        struct rt6_info *rt;
        int strict = 0;

        strict |= flags & RT6_LOOKUP_F_IFACE;
        strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
        if (net->ipv6.devconf_all->forwarding == 0)
                strict |= RT6_LOOKUP_F_REACHABLE;

        rcu_read_lock();

        f6i = fib6_table_lookup(net, table, oif, fl6, strict);
        if (f6i->fib6_nsiblings)
                f6i = fib6_multipath_select(net, f6i, fl6, oif, skb, strict);

        if (f6i == net->ipv6.fib6_null_entry) {
                rt = net->ipv6.ip6_null_entry;
                rcu_read_unlock();
                dst_hold(&rt->dst);
                return rt;
        }

        /* Search through exception table */
        rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr);
        if (rt) {
                if (ip6_hold_safe(net, &rt, true))
                        dst_use_noref(&rt->dst, jiffies);

                rcu_read_unlock();
                return rt;
        } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
                            !(f6i->fib6_flags & RTF_GATEWAY))) {
                /* Create a RTF_CACHE clone which will not be
                 * owned by the fib6 tree.  It is for the special case where
                 * the daddr in the skb during the neighbor look-up is different
                 * from the fl6->daddr used to look-up route here.
                 */
                struct rt6_info *uncached_rt;

                uncached_rt = ip6_rt_cache_alloc(f6i, &fl6->daddr, NULL);

                rcu_read_unlock();

                if (uncached_rt) {
                        /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
                         * No need for another dst_hold()
                         */
                        rt6_uncached_list_add(uncached_rt);
                        atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
                } else {
                        uncached_rt = net->ipv6.ip6_null_entry;
                        dst_hold(&uncached_rt->dst);
                }

                return uncached_rt;
        } else {
                /* Get a percpu copy */
                struct rt6_info *pcpu_rt;

                local_bh_disable();
                pcpu_rt = rt6_get_pcpu_route(f6i);

                if (!pcpu_rt)
                        pcpu_rt = rt6_make_pcpu_route(net, f6i);

                local_bh_enable();
                rcu_read_unlock();

                return pcpu_rt;
        }
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
static struct rt6_info *ip6_pol_route_input(struct net *net,
                                            struct fib6_table *table,
                                            struct flowi6 *fl6,
                                            const struct sk_buff *skb,
                                            int flags)
{
        return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
                                         struct net_device *dev,
                                         struct flowi6 *fl6,
                                         const struct sk_buff *skb,
                                         int flags)
{
        if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
                flags |= RT6_LOOKUP_F_IFACE;

        return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);

static void ip6_multipath_l3_keys(const struct sk_buff *skb,
                                  struct flow_keys *keys,
                                  struct flow_keys *flkeys)
{
        const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
        const struct ipv6hdr *key_iph = outer_iph;
        struct flow_keys *_flkeys = flkeys;
        const struct ipv6hdr *inner_iph;
        const struct icmp6hdr *icmph;
        struct ipv6hdr _inner_iph;
        struct icmp6hdr _icmph;

        if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
                goto out;

        icmph = skb_header_pointer(skb, skb_transport_offset(skb),
                                   sizeof(_icmph), &_icmph);
        if (!icmph)
                goto out;

        if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
            icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
            icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
            icmph->icmp6_type != ICMPV6_PARAMPROB)
                goto out;

        inner_iph = skb_header_pointer(skb,
                                       skb_transport_offset(skb) + sizeof(*icmph),
                                       sizeof(_inner_iph), &_inner_iph);
        if (!inner_iph)
                goto out;

        key_iph = inner_iph;
        _flkeys = NULL;
out:
        if (_flkeys) {
                keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
                keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
                keys->tags.flow_label = _flkeys->tags.flow_label;
                keys->basic.ip_proto = _flkeys->basic.ip_proto;
        } else {
                keys->addrs.v6addrs.src = key_iph->saddr;
                keys->addrs.v6addrs.dst = key_iph->daddr;
                keys->tags.flow_label = ip6_flowlabel(key_iph);
                keys->basic.ip_proto = key_iph->nexthdr;
        }
}

/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
                       const struct sk_buff *skb, struct flow_keys *flkeys)
{
        struct flow_keys hash_keys;
        u32 mhash;

        switch (ip6_multipath_hash_policy(net)) {
        case 0:
                memset(&hash_keys, 0, sizeof(hash_keys));
                hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                if (skb) {
                        ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
                } else {
                        hash_keys.addrs.v6addrs.src = fl6->saddr;
                        hash_keys.addrs.v6addrs.dst = fl6->daddr;
                        hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
                        hash_keys.basic.ip_proto = fl6->flowi6_proto;
                }
                break;
        case 1:
                if (skb) {
                        unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
                        struct flow_keys keys;

                        /* short-circuit if we already have L4 hash present */
                        if (skb->l4_hash)
                                return skb_get_hash_raw(skb) >> 1;

                        memset(&hash_keys, 0, sizeof(hash_keys));

                        if (!flkeys) {
                                skb_flow_dissect_flow_keys(skb, &keys, flag);
                                flkeys = &keys;
                        }
                        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                        hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
                        hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
                        hash_keys.ports.src = flkeys->ports.src;
                        hash_keys.ports.dst = flkeys->ports.dst;
                        hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
                } else {
                        memset(&hash_keys, 0, sizeof(hash_keys));
                        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                        hash_keys.addrs.v6addrs.src = fl6->saddr;
                        hash_keys.addrs.v6addrs.dst = fl6->daddr;
                        hash_keys.ports.src = fl6->fl6_sport;
                        hash_keys.ports.dst = fl6->fl6_dport;
                        hash_keys.basic.ip_proto = fl6->flowi6_proto;
                }
                break;
        }
        mhash = flow_hash_from_keys(&hash_keys);

        return mhash >> 1;
}
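
/* Example (illustrative, not from the original file): the two policies key
 * the same TCP flow [2001:db8::1]:40000 -> [2001:db8::2]:443 differently:
 *
 *      policy 0 (L3): {saddr, daddr, flow label, nexthdr}
 *      policy 1 (L4): {saddr, daddr, sport, dport, ip_proto}
 *
 * Under policy 0 all connections between the two hosts that share a flow
 * label take the same sibling; under policy 1 each port pair can hash to a
 * different one.  The final ">> 1" keeps the result in 31 bits so it
 * compares safely against the signed nh_upper_bound values.
 */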
void ip6_route_input(struct sk_buff *skb)
{
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
        int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct ip_tunnel_info *tun_info;
        struct flowi6 fl6 = {
                .flowi6_iif = skb->dev->ifindex,
                .daddr = iph->daddr,
                .saddr = iph->saddr,
                .flowlabel = ip6_flowinfo(iph),
                .flowi6_mark = skb->mark,
                .flowi6_proto = iph->nexthdr,
        };
        struct flow_keys *flkeys = NULL, _flkeys;

        tun_info = skb_tunnel_info(skb);
        if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
                fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

        if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
                flkeys = &_flkeys;

        if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
                fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);

        skb_dst_drop(skb);
        skb_dst_set(skb,
                    ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags));
}

static struct rt6_info *ip6_pol_route_output(struct net *net,
                                             struct fib6_table *table,
                                             struct flowi6 *fl6,
                                             const struct sk_buff *skb,
                                             int flags)
{
        return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
}

struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
                                         struct flowi6 *fl6, int flags)
{
        bool any_src;

        if (ipv6_addr_type(&fl6->daddr) &
            (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
                struct dst_entry *dst;

                dst = l3mdev_link_scope_lookup(net, fl6);
                if (dst)
                        return dst;
        }

        fl6->flowi6_iif = LOOPBACK_IFINDEX;

        any_src = ipv6_addr_any(&fl6->saddr);
        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
            (fl6->flowi6_oif && any_src))
                flags |= RT6_LOOKUP_F_IFACE;

        if (!any_src)
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        else if (sk)
                flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

        return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
        struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
        struct net_device *loopback_dev = net->loopback_dev;
        struct dst_entry *new = NULL;

        rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
                       DST_OBSOLETE_DEAD, 0);
        if (rt) {
                rt6_info_init(rt);
                atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

                new = &rt->dst;
                new->__use = 1;
                new->input = dst_discard;
                new->output = dst_discard_out;

                dst_copy_metrics(new, &ort->dst);

                rt->rt6i_idev = in6_dev_get(loopback_dev);
                rt->rt6i_gateway = ort->rt6i_gateway;
                rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;

                memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
                memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
        }

        dst_release(dst_orig);
        return new ? new : ERR_PTR(-ENOMEM);
}

/*
 *      Destination cache support functions
 */

static bool fib6_check(struct fib6_info *f6i, u32 cookie)
{
        u32 rt_cookie = 0;

        if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
                return false;

        if (fib6_check_expired(f6i))
                return false;

        return true;
}

static struct dst_entry *rt6_check(struct rt6_info *rt,
                                   struct fib6_info *from,
                                   u32 cookie)
{
        u32 rt_cookie = 0;

        if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
            rt_cookie != cookie)
                return NULL;

        if (rt6_check_expired(rt))
                return NULL;

        return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
                                            struct fib6_info *from,
                                            u32 cookie)
{
        if (!__rt6_check_expired(rt) &&
            rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
            fib6_check(from, cookie))
                return &rt->dst;
        else
                return NULL;
}

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
        struct dst_entry *dst_ret;
        struct fib6_info *from;
        struct rt6_info *rt;

        rt = container_of(dst, struct rt6_info, dst);

        rcu_read_lock();

        /* All IPV6 dsts are created with ->obsolete set to the value
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         */

        from = rcu_dereference(rt->from);

        if (from && (rt->rt6i_flags & RTF_PCPU ||
            unlikely(!list_empty(&rt->rt6i_uncached))))
                dst_ret = rt6_dst_from_check(rt, from, cookie);
        else
                dst_ret = rt6_check(rt, from, cookie);

        rcu_read_unlock();

        return dst_ret;
}
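
/* Example (illustrative, not from the original file): a socket's cached
 * route is revalidated through ->check() before reuse, e.g.:
 *
 *      struct dst_entry *dst = __sk_dst_get(sk);
 *      if (dst && !dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
 *              ;       // stale: cookie mismatch or expired, re-lookup
 *
 * Since every rt6_info is created with DST_OBSOLETE_FORCE_CHK, the generic
 * dst layer cannot skip this call, so bumping fn_sernum (as
 * rt6_insert_exception() does) invalidates every cached copy at once;
 * ip6_sk_update_pmtu() below uses exactly this pattern.
 */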
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
        struct rt6_info *rt = (struct rt6_info *) dst;

        if (rt) {
                if (rt->rt6i_flags & RTF_CACHE) {
                        rcu_read_lock();
                        if (rt6_check_expired(rt)) {
                                rt6_remove_exception_rt(rt);
                                dst = NULL;
                        }
                        rcu_read_unlock();
                } else {
                        dst_release(dst);
                        dst = NULL;
                }
        }
        return dst;
}

static void ip6_link_failure(struct sk_buff *skb)
{
        struct rt6_info *rt;

        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

        rt = (struct rt6_info *) skb_dst(skb);
        if (rt) {
                rcu_read_lock();
                if (rt->rt6i_flags & RTF_CACHE) {
                        if (dst_hold_safe(&rt->dst))
                                rt6_remove_exception_rt(rt);
                } else {
                        struct fib6_info *from;
                        struct fib6_node *fn;

                        from = rcu_dereference(rt->from);
                        if (from) {
                                fn = rcu_dereference(from->fib6_node);
                                if (fn && (rt->rt6i_flags & RTF_DEFAULT))
                                        fn->fn_sernum = -1;
                        }
                }
                rcu_read_unlock();
        }
}

static void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
        if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
                struct fib6_info *from;

                rcu_read_lock();
                from = rcu_dereference(rt0->from);
                if (from)
                        rt0->dst.expires = from->expires;
                rcu_read_unlock();
        }

        dst_set_expires(&rt0->dst, timeout);
        rt0->rt6i_flags |= RTF_EXPIRES;
}

static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
        struct net *net = dev_net(rt->dst.dev);

        dst_metric_set(&rt->dst, RTAX_MTU, mtu);
        rt->rt6i_flags |= RTF_MODIFIED;
        rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
        bool from_set;

        rcu_read_lock();
        from_set = !!rcu_dereference(rt->from);
        rcu_read_unlock();

        return !(rt->rt6i_flags & RTF_CACHE) &&
                (rt->rt6i_flags & RTF_PCPU || from_set);
}

static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
                                 const struct ipv6hdr *iph, u32 mtu)
{
        const struct in6_addr *daddr, *saddr;
        struct rt6_info *rt6 = (struct rt6_info *)dst;

        if (dst_metric_locked(dst, RTAX_MTU))
                return;

        if (iph) {
                daddr = &iph->daddr;
                saddr = &iph->saddr;
        } else if (sk) {
                daddr = &sk->sk_v6_daddr;
                saddr = &inet6_sk(sk)->saddr;
        } else {
                daddr = NULL;
                saddr = NULL;
        }
        dst_confirm_neigh(dst, daddr);
        mtu = max_t(u32, mtu, IPV6_MIN_MTU);
        if (mtu >= dst_mtu(dst))
                return;

        if (!rt6_cache_allowed_for_pmtu(rt6)) {
                rt6_do_update_pmtu(rt6, mtu);
                /* update rt6_ex->stamp for cache */
                if (rt6->rt6i_flags & RTF_CACHE)
                        rt6_update_exception_stamp_rt(rt6);
        } else if (daddr) {
                struct fib6_info *from;
                struct rt6_info *nrt6;

                rcu_read_lock();
                from = rcu_dereference(rt6->from);
                nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
                if (nrt6) {
                        rt6_do_update_pmtu(nrt6, mtu);
                        if (rt6_insert_exception(nrt6, from))
                                dst_release_immediate(&nrt6->dst);
                }
                rcu_read_unlock();
        }
}
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                               struct sk_buff *skb, u32 mtu)
{
        __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
}

void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
                     int oif, u32 mark, kuid_t uid)
{
        const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
        struct dst_entry *dst;
        struct flowi6 fl6 = {
                .flowi6_oif = oif,
                .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
                .daddr = iph->daddr,
                .saddr = iph->saddr,
                .flowlabel = ip6_flowinfo(iph),
                .flowi6_uid = uid,
        };

        dst = ip6_route_output(net, NULL, &fl6);
        if (!dst->error)
                __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
        dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
        struct dst_entry *dst;

        ip6_update_pmtu(skb, sock_net(sk), mtu,
                        sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);

        dst = __sk_dst_get(sk);
        if (!dst || !dst->obsolete ||
            dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
                return;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
                ip6_datagram_dst_update(sk, false);
        bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
                           const struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_SUBTREES
        struct ipv6_pinfo *np = inet6_sk(sk);
#endif

        ip6_dst_store(sk, dst,
                      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
                      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
                      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
                      &np->saddr :
#endif
                      NULL);
}

/* Handle redirects */
struct ip6rd_flowi {
        struct flowi6 fl6;
        struct in6_addr gateway;
};

static struct rt6_info *__ip6_route_redirect(struct net *net,
                                             struct fib6_table *table,
                                             struct flowi6 *fl6,
                                             const struct sk_buff *skb,
                                             int flags)
{
        struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
        struct rt6_info *ret = NULL, *rt_cache;
        struct fib6_info *rt;
        struct fib6_node *fn;

        /* Get the "current" route for this destination and
         * check if the redirect has come from appropriate router.
         *
         * RFC 4861 specifies that redirects should only be
         * accepted if they come from the nexthop to the target.
         * Due to the way the routes are chosen, this notion
         * is a bit fuzzy and one might need to check all possible
         * routes.
         */

        rcu_read_lock();
        fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
        for_each_fib6_node_rt_rcu(fn) {
                if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
                        continue;
                if (fib6_check_expired(rt))
                        continue;
                if (rt->fib6_flags & RTF_REJECT)
                        break;
                if (!(rt->fib6_flags & RTF_GATEWAY))
                        continue;
                if (fl6->flowi6_oif != rt->fib6_nh.nh_dev->ifindex)
                        continue;
                /* rt_cache's gateway might be different from its 'parent'
                 * in the case of an ip redirect.
                 * So we keep searching in the exception table if the gateway
                 * is different.
                 */
                if (!ipv6_addr_equal(&rdfl->gateway, &rt->fib6_nh.nh_gw)) {
                        rt_cache = rt6_find_cached_rt(rt,
                                                      &fl6->daddr,
                                                      &fl6->saddr);
                        if (rt_cache &&
                            ipv6_addr_equal(&rdfl->gateway,
                                            &rt_cache->rt6i_gateway)) {
                                ret = rt_cache;
                                break;
                        }
                        continue;
                }
                break;
        }

        if (!rt)
                rt = net->ipv6.fib6_null_entry;
        else if (rt->fib6_flags & RTF_REJECT) {
                ret = net->ipv6.ip6_null_entry;
                goto out;
        }

        if (rt == net->ipv6.fib6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto restart;
        }

out:
        if (ret)
                ip6_hold_safe(net, &ret, true);
        else
                ret = ip6_create_rt_rcu(rt);

        rcu_read_unlock();

        trace_fib6_table_lookup(net, rt, table, fl6);
        return ret;
}
2483 static struct dst_entry *ip6_route_redirect(struct net *net,
2484 const struct flowi6 *fl6,
2485 const struct sk_buff *skb,
2486 const struct in6_addr *gateway)
2488 int flags = RT6_LOOKUP_F_HAS_SADDR;
2489 struct ip6rd_flowi rdfl;
2492 rdfl.gateway = *gateway;
2494 return fib6_rule_lookup(net, &rdfl.fl6, skb,
2495 flags, __ip6_route_redirect);
2498 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
2501 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2502 struct dst_entry *dst;
2503 struct flowi6 fl6 = {
2504 .flowi6_iif = LOOPBACK_IFINDEX,
2506 .flowi6_mark = mark,
2507 .daddr = iph->daddr,
2508 .saddr = iph->saddr,
2509 .flowlabel = ip6_flowinfo(iph),
2513 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
2514 rt6_do_redirect(dst, NULL, skb);
2517 EXPORT_SYMBOL_GPL(ip6_redirect);
2519 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
2521 const struct ipv6hdr *iph = ipv6_hdr(skb);
2522 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
2523 struct dst_entry *dst;
2524 struct flowi6 fl6 = {
2525 .flowi6_iif = LOOPBACK_IFINDEX,
2528 .saddr = iph->daddr,
2529 .flowi6_uid = sock_net_uid(net, NULL),
2532 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
2533 rt6_do_redirect(dst, NULL, skb);
2537 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
2539 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
2542 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
2544 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
2546 struct net_device *dev = dst->dev;
2547 unsigned int mtu = dst_mtu(dst);
2548 struct net *net = dev_net(dev);
2550 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
2552 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
2553 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
2556 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
2557 * the corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
2558 * IPV6_MAXPLEN is also valid and means: "any MSS,
2559 * rely only on pmtu discovery"
2560 */
2561 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
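/* Worked example (illustrative): on an Ethernet link with a 1500-byte MTU
 * the advertised MSS comes out as 1500 - 40 (IPv6 header) - 20 (TCP
 * header) = 1440 bytes, well above the default ip6_rt_min_advmss floor of
 * 1220 (IPV6_MIN_MTU - 20 - 40). Anything above IPV6_MAXPLEN - 20 = 65515
 * is reported as IPV6_MAXPLEN itself, i.e. the "any MSS, rely only on
 * pmtu discovery" encoding described above.
 */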
2566 static unsigned int ip6_mtu(const struct dst_entry *dst)
2568 struct inet6_dev *idev;
2571 mtu = dst_metric_raw(dst, RTAX_MTU);
2578 idev = __in6_dev_get(dst->dev);
2580 mtu = idev->cnf.mtu6;
2584 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
2586 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
2589 /* MTU selection:
2590 * 1. mtu on route is locked - use it
2591 * 2. mtu from nexthop exception
2592 * 3. mtu from egress device
2593 *
2594 * based on ip6_dst_mtu_forward and exception logic of
2595 * rt6_find_cached_rt; called with rcu_read_lock
2596 */
2597 u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
2598 struct in6_addr *saddr)
2600 struct rt6_exception_bucket *bucket;
2601 struct rt6_exception *rt6_ex;
2602 struct in6_addr *src_key;
2603 struct inet6_dev *idev;
2606 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
2607 mtu = f6i->fib6_pmtu;
2613 #ifdef CONFIG_IPV6_SUBTREES
2614 if (f6i->fib6_src.plen)
2618 bucket = rcu_dereference(f6i->rt6i_exception_bucket);
2619 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
2620 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
2621 mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU);
2624 struct net_device *dev = fib6_info_nh_dev(f6i);
2627 idev = __in6_dev_get(dev);
2628 if (idev && idev->cnf.mtu6 > mtu)
2629 mtu = idev->cnf.mtu6;
2632 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
2634 return mtu - lwtunnel_headroom(fib6_info_nh_lwt(f6i), mtu);
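/* Illustrative scenario: with a cached PMTU exception of 1280 for daddr,
 * the exception wins over an egress device mtu6 of 1500; without an
 * exception the device value is used; and a locked RTAX_MTU metric on the
 * route overrides both, following the 1-2-3 precedence listed above.
 */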
2637 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
2640 struct dst_entry *dst;
2641 struct rt6_info *rt;
2642 struct inet6_dev *idev = in6_dev_get(dev);
2643 struct net *net = dev_net(dev);
2645 if (unlikely(!idev))
2646 return ERR_PTR(-ENODEV);
2648 rt = ip6_dst_alloc(net, dev, 0);
2649 if (unlikely(!rt)) {
2651 dst = ERR_PTR(-ENOMEM);
2655 rt->dst.flags |= DST_HOST;
2656 rt->dst.input = ip6_input;
2657 rt->dst.output = ip6_output;
2658 rt->rt6i_gateway = fl6->daddr;
2659 rt->rt6i_dst.addr = fl6->daddr;
2660 rt->rt6i_dst.plen = 128;
2661 rt->rt6i_idev = idev;
2662 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
2664 /* Add this dst into uncached_list so that rt6_disable_ip() can
2665 * properly release the net_device
2666 */
2667 rt6_uncached_list_add(rt);
2668 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
2670 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
2676 static int ip6_dst_gc(struct dst_ops *ops)
2678 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
2679 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
2680 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
2681 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
2682 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
2683 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
2686 entries = dst_entries_get_fast(ops);
2687 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
2688 entries <= rt_max_size)
2691 net->ipv6.ip6_rt_gc_expire++;
2692 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
2693 entries = dst_entries_get_slow(ops);
2694 if (entries < ops->gc_thresh)
2695 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
2697 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
2698 return entries > rt_max_size;
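/* Worked numbers (illustrative): with the default ip6_rt_gc_elasticity of
 * 9, every invocation shrinks ip6_rt_gc_expire by expire >> 9, i.e. by
 * roughly 0.2%, so GC aggressiveness decays slowly between bursts; once
 * the slow-path entry count falls below gc_thresh the expiry is reset to
 * half of ip6_rt_gc_timeout (30s with the 60s default).
 */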
2701 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
2702 struct fib6_config *cfg,
2703 const struct in6_addr *gw_addr,
2704 u32 tbid, int flags)
2706 struct flowi6 fl6 = {
2707 .flowi6_oif = cfg->fc_ifindex,
2709 .saddr = cfg->fc_prefsrc,
2711 struct fib6_table *table;
2712 struct rt6_info *rt;
2714 table = fib6_get_table(net, tbid);
2718 if (!ipv6_addr_any(&cfg->fc_prefsrc))
2719 flags |= RT6_LOOKUP_F_HAS_SADDR;
2721 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
2722 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);
2724 /* if table lookup failed, fall back to full lookup */
2725 if (rt == net->ipv6.ip6_null_entry) {
2733 static int ip6_route_check_nh_onlink(struct net *net,
2734 struct fib6_config *cfg,
2735 const struct net_device *dev,
2736 struct netlink_ext_ack *extack)
2738 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
2739 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2740 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
2741 struct rt6_info *grt;
2745 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
2747 if (!grt->dst.error &&
2748 (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
2749 NL_SET_ERR_MSG(extack,
2750 "Nexthop has invalid gateway or device mismatch");
2760 static int ip6_route_check_nh(struct net *net,
2761 struct fib6_config *cfg,
2762 struct net_device **_dev,
2763 struct inet6_dev **idev)
2765 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2766 struct net_device *dev = _dev ? *_dev : NULL;
2767 struct rt6_info *grt = NULL;
2768 int err = -EHOSTUNREACH;
2770 if (cfg->fc_table) {
2771 int flags = RT6_LOOKUP_F_IFACE;
2773 grt = ip6_nh_lookup_table(net, cfg, gw_addr,
2774 cfg->fc_table, flags);
2776 if (grt->rt6i_flags & RTF_GATEWAY ||
2777 (dev && dev != grt->dst.dev)) {
2785 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);
2791 if (dev != grt->dst.dev) {
2796 *_dev = dev = grt->dst.dev;
2797 *idev = grt->rt6i_idev;
2799 in6_dev_hold(grt->rt6i_idev);
2802 if (!(grt->rt6i_flags & RTF_GATEWAY))
2811 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
2812 struct net_device **_dev, struct inet6_dev **idev,
2813 struct netlink_ext_ack *extack)
2815 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2816 int gwa_type = ipv6_addr_type(gw_addr);
2817 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
2818 const struct net_device *dev = *_dev;
2819 bool need_addr_check = !dev;
2822 /* if gw_addr is local we will fail to detect this in case
2823 * the address is still TENTATIVE (DAD in progress). rt6_lookup()
2824 * will return the already-added prefix route via the interface that
2825 * the prefix route was assigned to, which might be non-loopback.
2826 */
2827 if (dev &&
2828 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
2829 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
2833 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
2834 /* IPv6 strictly inhibits using non-link-local
2835 * addresses as a nexthop address.
2836 * Otherwise, the router will not be able to send redirects.
2837 * It is very good, but in some (rare!) circumstances
2838 * (SIT, PtP, NBMA NOARP links) it is handy to allow
2839 * some exceptions. --ANK
2840 * We allow IPv4-mapped nexthops to support RFC4798-type
2841 * addressing.
2842 */
2843 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
2844 NL_SET_ERR_MSG(extack, "Invalid gateway address");
2848 if (cfg->fc_flags & RTNH_F_ONLINK)
2849 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
2851 err = ip6_route_check_nh(net, cfg, _dev, idev);
2857 /* reload in case device was changed */
2862 NL_SET_ERR_MSG(extack, "Egress device not specified");
2864 } else if (dev->flags & IFF_LOOPBACK) {
2865 NL_SET_ERR_MSG(extack,
2866 "Egress device can not be loopback device for this route");
2870 /* if we did not check gw_addr above, do so now that the
2871 * egress device has been resolved.
2873 if (need_addr_check &&
2874 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
2875 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
2884 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
2886 struct netlink_ext_ack *extack)
2888 struct net *net = cfg->fc_nlinfo.nl_net;
2889 struct fib6_info *rt = NULL;
2890 struct net_device *dev = NULL;
2891 struct inet6_dev *idev = NULL;
2892 struct fib6_table *table;
2896 /* RTF_PCPU is an internal flag; can not be set by userspace */
2897 if (cfg->fc_flags & RTF_PCPU) {
2898 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
2902 /* RTF_CACHE is an internal flag; can not be set by userspace */
2903 if (cfg->fc_flags & RTF_CACHE) {
2904 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
2908 if (cfg->fc_type > RTN_MAX) {
2909 NL_SET_ERR_MSG(extack, "Invalid route type");
2913 if (cfg->fc_dst_len > 128) {
2914 NL_SET_ERR_MSG(extack, "Invalid prefix length");
2917 if (cfg->fc_src_len > 128) {
2918 NL_SET_ERR_MSG(extack, "Invalid source address length");
2921 #ifndef CONFIG_IPV6_SUBTREES
2922 if (cfg->fc_src_len) {
2923 NL_SET_ERR_MSG(extack,
2924 "Specifying source address requires IPV6_SUBTREES to be enabled");
2928 if (cfg->fc_ifindex) {
2930 dev = dev_get_by_index(net, cfg->fc_ifindex);
2933 idev = in6_dev_get(dev);
2938 if (cfg->fc_metric == 0)
2939 cfg->fc_metric = IP6_RT_PRIO_USER;
2941 if (cfg->fc_flags & RTNH_F_ONLINK) {
2943 NL_SET_ERR_MSG(extack,
2944 "Nexthop device required for onlink");
2949 if (!(dev->flags & IFF_UP)) {
2950 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
2957 if (cfg->fc_nlinfo.nlh &&
2958 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
2959 table = fib6_get_table(net, cfg->fc_table);
2961 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
2962 table = fib6_new_table(net, cfg->fc_table);
2965 table = fib6_new_table(net, cfg->fc_table);
2972 rt = fib6_info_alloc(gfp_flags);
2976 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len);
2977 if (IS_ERR(rt->fib6_metrics)) {
2978 err = PTR_ERR(rt->fib6_metrics);
2982 if (cfg->fc_flags & RTF_ADDRCONF)
2983 rt->dst_nocount = true;
2985 if (cfg->fc_flags & RTF_EXPIRES)
2986 fib6_set_expires(rt, jiffies +
2987 clock_t_to_jiffies(cfg->fc_expires));
2989 fib6_clean_expires(rt);
2991 if (cfg->fc_protocol == RTPROT_UNSPEC)
2992 cfg->fc_protocol = RTPROT_BOOT;
2993 rt->fib6_protocol = cfg->fc_protocol;
2995 addr_type = ipv6_addr_type(&cfg->fc_dst);
2997 if (cfg->fc_encap) {
2998 struct lwtunnel_state *lwtstate;
3000 err = lwtunnel_build_state(cfg->fc_encap_type,
3001 cfg->fc_encap, AF_INET6, cfg,
3005 rt->fib6_nh.nh_lwtstate = lwtstate_get(lwtstate);
3008 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3009 rt->fib6_dst.plen = cfg->fc_dst_len;
3010 if (rt->fib6_dst.plen == 128)
3011 rt->dst_host = true;
3013 #ifdef CONFIG_IPV6_SUBTREES
3014 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3015 rt->fib6_src.plen = cfg->fc_src_len;
3018 rt->fib6_metric = cfg->fc_metric;
3019 rt->fib6_nh.nh_weight = 1;
3021 rt->fib6_type = cfg->fc_type;
3023 /* We cannot add true routes via loopback here;
3024 they would result in kernel looping, so promote them to reject routes
3025 */
3026 if ((cfg->fc_flags & RTF_REJECT) ||
3027 (dev && (dev->flags & IFF_LOOPBACK) &&
3028 !(addr_type & IPV6_ADDR_LOOPBACK) &&
3029 !(cfg->fc_flags & RTF_LOCAL))) {
3030 /* hold loopback dev/idev if we haven't done so. */
3031 if (dev != net->loopback_dev) {
3036 dev = net->loopback_dev;
3038 idev = in6_dev_get(dev);
3044 rt->fib6_flags = RTF_REJECT|RTF_NONEXTHOP;
3048 if (cfg->fc_flags & RTF_GATEWAY) {
3049 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3053 rt->fib6_nh.nh_gw = cfg->fc_gateway;
3060 if (idev->cnf.disable_ipv6) {
3061 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3066 if (!(dev->flags & IFF_UP)) {
3067 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3072 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3073 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3074 NL_SET_ERR_MSG(extack, "Invalid source address");
3078 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3079 rt->fib6_prefsrc.plen = 128;
3081 rt->fib6_prefsrc.plen = 0;
3083 rt->fib6_flags = cfg->fc_flags;
3086 if (!(rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3087 !netif_carrier_ok(dev))
3088 rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
3089 rt->fib6_nh.nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK);
3090 rt->fib6_nh.nh_dev = dev;
3091 rt->fib6_table = table;
3103 fib6_info_release(rt);
3104 return ERR_PTR(err);
3107 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3108 struct netlink_ext_ack *extack)
3110 struct fib6_info *rt;
3113 rt = ip6_route_info_create(cfg, gfp_flags, extack);
3117 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3118 fib6_info_release(rt);
3123 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3125 struct net *net = info->nl_net;
3126 struct fib6_table *table;
3129 if (rt == net->ipv6.fib6_null_entry) {
3134 table = rt->fib6_table;
3135 spin_lock_bh(&table->tb6_lock);
3136 err = fib6_del(rt, info);
3137 spin_unlock_bh(&table->tb6_lock);
3140 fib6_info_release(rt);
3144 int ip6_del_rt(struct net *net, struct fib6_info *rt)
3146 struct nl_info info = { .nl_net = net };
3148 return __ip6_del_rt(rt, &info);
3151 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3153 struct nl_info *info = &cfg->fc_nlinfo;
3154 struct net *net = info->nl_net;
3155 struct sk_buff *skb = NULL;
3156 struct fib6_table *table;
3159 if (rt == net->ipv6.fib6_null_entry)
3161 table = rt->fib6_table;
3162 spin_lock_bh(&table->tb6_lock);
3164 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3165 struct fib6_info *sibling, *next_sibling;
3167 /* prefer to send a single notification with all hops */
3168 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3170 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3172 if (rt6_fill_node(net, skb, rt, NULL,
3173 NULL, NULL, 0, RTM_DELROUTE,
3174 info->portid, seq, 0) < 0) {
3178 info->skip_notify = 1;
3181 list_for_each_entry_safe(sibling, next_sibling,
3184 err = fib6_del(sibling, info);
3190 err = fib6_del(rt, info);
3192 spin_unlock_bh(&table->tb6_lock);
3194 fib6_info_release(rt);
3197 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3198 info->nlh, gfp_any());
3203 static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3207 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3210 if (cfg->fc_flags & RTF_GATEWAY &&
3211 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3213 if (dst_hold_safe(&rt->dst))
3214 rc = rt6_remove_exception_rt(rt);
3219 static int ip6_route_del(struct fib6_config *cfg,
3220 struct netlink_ext_ack *extack)
3222 struct rt6_info *rt_cache;
3223 struct fib6_table *table;
3224 struct fib6_info *rt;
3225 struct fib6_node *fn;
3228 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3230 NL_SET_ERR_MSG(extack, "FIB table does not exist");
3236 fn = fib6_locate(&table->tb6_root,
3237 &cfg->fc_dst, cfg->fc_dst_len,
3238 &cfg->fc_src, cfg->fc_src_len,
3239 !(cfg->fc_flags & RTF_CACHE));
3242 for_each_fib6_node_rt_rcu(fn) {
3243 if (cfg->fc_flags & RTF_CACHE) {
3246 rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
3249 rc = ip6_del_cached_rt(rt_cache, cfg);
3257 if (cfg->fc_ifindex &&
3258 (!rt->fib6_nh.nh_dev ||
3259 rt->fib6_nh.nh_dev->ifindex != cfg->fc_ifindex))
3261 if (cfg->fc_flags & RTF_GATEWAY &&
3262 !ipv6_addr_equal(&cfg->fc_gateway, &rt->fib6_nh.nh_gw))
3264 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3266 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
3268 if (!fib6_info_hold_safe(rt))
3272 /* if a gateway was specified, only delete the one hop */
3273 if (cfg->fc_flags & RTF_GATEWAY)
3274 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3276 return __ip6_del_rt_siblings(rt, cfg);
3284 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
3286 struct netevent_redirect netevent;
3287 struct rt6_info *rt, *nrt = NULL;
3288 struct ndisc_options ndopts;
3289 struct inet6_dev *in6_dev;
3290 struct neighbour *neigh;
3291 struct fib6_info *from;
3293 int optlen, on_link;
3296 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
3297 optlen -= sizeof(*msg);
3300 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
3304 msg = (struct rd_msg *)icmp6_hdr(skb);
3306 if (ipv6_addr_is_multicast(&msg->dest)) {
3307 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
3312 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
3314 } else if (ipv6_addr_type(&msg->target) !=
3315 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
3316 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
3320 in6_dev = __in6_dev_get(skb->dev);
3323 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
3327 * The IP source address of the Redirect MUST be the same as the current
3328 * first-hop router for the specified ICMP Destination Address.
3331 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
3332 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
3337 if (ndopts.nd_opts_tgt_lladdr) {
3338 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
3341 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
3346 rt = (struct rt6_info *) dst;
3347 if (rt->rt6i_flags & RTF_REJECT) {
3348 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
3352 /* Redirect received -> path was valid.
3353 * Look, redirects are sent only in response to data packets,
3354 * so this nexthop apparently is reachable. --ANK
3355 */
3356 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
3358 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
3363 * We have finally decided to accept it.
3366 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
3367 NEIGH_UPDATE_F_WEAK_OVERRIDE|
3368 NEIGH_UPDATE_F_OVERRIDE|
3369 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
3370 NEIGH_UPDATE_F_ISROUTER)),
3371 NDISC_REDIRECT, &ndopts);
3374 from = rcu_dereference(rt->from);
3375 /* This fib6_info_hold() is safe here because we hold reference to rt
3376 * and rt already holds reference to fib6_info.
3378 fib6_info_hold(from);
3381 nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL);
3385 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
3387 nrt->rt6i_flags &= ~RTF_GATEWAY;
3389 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
3391 /* No need to remove rt from the exception table if rt is
3392 * a cached route because rt6_insert_exception() will
3393 * take care of duplicated exceptions.
3394 */
3395 if (rt6_insert_exception(nrt, from)) {
3396 dst_release_immediate(&nrt->dst);
3400 netevent.old = &rt->dst;
3401 netevent.new = &nrt->dst;
3402 netevent.daddr = &msg->dest;
3403 netevent.neigh = neigh;
3404 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
3407 fib6_info_release(from);
3408 neigh_release(neigh);
3411 #ifdef CONFIG_IPV6_ROUTE_INFO
3412 static struct fib6_info *rt6_get_route_info(struct net *net,
3413 const struct in6_addr *prefix, int prefixlen,
3414 const struct in6_addr *gwaddr,
3415 struct net_device *dev)
3417 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3418 int ifindex = dev->ifindex;
3419 struct fib6_node *fn;
3420 struct fib6_info *rt = NULL;
3421 struct fib6_table *table;
3423 table = fib6_get_table(net, tb_id);
3428 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
3432 for_each_fib6_node_rt_rcu(fn) {
3433 if (rt->fib6_nh.nh_dev->ifindex != ifindex)
3435 if ((rt->fib6_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
3437 if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
3439 if (!fib6_info_hold_safe(rt))
3448 static struct fib6_info *rt6_add_route_info(struct net *net,
3449 const struct in6_addr *prefix, int prefixlen,
3450 const struct in6_addr *gwaddr,
3451 struct net_device *dev,
3454 struct fib6_config cfg = {
3455 .fc_metric = IP6_RT_PRIO_USER,
3456 .fc_ifindex = dev->ifindex,
3457 .fc_dst_len = prefixlen,
3458 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
3459 RTF_UP | RTF_PREF(pref),
3460 .fc_protocol = RTPROT_RA,
3461 .fc_type = RTN_UNICAST,
3462 .fc_nlinfo.portid = 0,
3463 .fc_nlinfo.nlh = NULL,
3464 .fc_nlinfo.nl_net = net,
3467 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
3468 cfg.fc_dst = *prefix;
3469 cfg.fc_gateway = *gwaddr;
3471 /* We should treat it as a default route if prefix length is 0. */
3473 cfg.fc_flags |= RTF_DEFAULT;
3475 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
3477 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
3481 struct fib6_info *rt6_get_dflt_router(struct net *net,
3482 const struct in6_addr *addr,
3483 struct net_device *dev)
3485 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
3486 struct fib6_info *rt;
3487 struct fib6_table *table;
3489 table = fib6_get_table(net, tb_id);
3494 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3495 if (dev == rt->fib6_nh.nh_dev &&
3496 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
3497 ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
3500 if (rt && !fib6_info_hold_safe(rt))
3506 struct fib6_info *rt6_add_dflt_router(struct net *net,
3507 const struct in6_addr *gwaddr,
3508 struct net_device *dev,
3511 struct fib6_config cfg = {
3512 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
3513 .fc_metric = IP6_RT_PRIO_USER,
3514 .fc_ifindex = dev->ifindex,
3515 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
3516 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
3517 .fc_protocol = RTPROT_RA,
3518 .fc_type = RTN_UNICAST,
3519 .fc_nlinfo.portid = 0,
3520 .fc_nlinfo.nlh = NULL,
3521 .fc_nlinfo.nl_net = net,
3524 cfg.fc_gateway = *gwaddr;
3526 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
3527 struct fib6_table *table;
3529 table = fib6_get_table(dev_net(dev), cfg.fc_table);
3531 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
3534 return rt6_get_dflt_router(net, gwaddr, dev);
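/* Illustrative call path: ndisc router discovery invokes
 * rt6_add_dflt_router() for an RA with a non-zero router lifetime,
 * creating a ::/0 entry flagged RTF_ADDRCONF | RTF_DEFAULT | RTF_EXPIRES
 * in RT6_TABLE_DFLT; rt6_purge_dflt_routers() below later sweeps such
 * entries, skipping interfaces where accept_ra == 2.
 */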
3537 static void __rt6_purge_dflt_routers(struct net *net,
3538 struct fib6_table *table)
3540 struct fib6_info *rt;
3544 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3545 struct net_device *dev = fib6_info_nh_dev(rt);
3546 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
3548 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
3549 (!idev || idev->cnf.accept_ra != 2) &&
3550 fib6_info_hold_safe(rt)) {
3552 ip6_del_rt(net, rt);
3558 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
3561 void rt6_purge_dflt_routers(struct net *net)
3563 struct fib6_table *table;
3564 struct hlist_head *head;
3569 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
3570 head = &net->ipv6.fib_table_hash[h];
3571 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
3572 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
3573 __rt6_purge_dflt_routers(net, table);
3580 static void rtmsg_to_fib6_config(struct net *net,
3581 struct in6_rtmsg *rtmsg,
3582 struct fib6_config *cfg)
3584 *cfg = (struct fib6_config){
3585 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
3587 .fc_ifindex = rtmsg->rtmsg_ifindex,
3588 .fc_metric = rtmsg->rtmsg_metric,
3589 .fc_expires = rtmsg->rtmsg_info,
3590 .fc_dst_len = rtmsg->rtmsg_dst_len,
3591 .fc_src_len = rtmsg->rtmsg_src_len,
3592 .fc_flags = rtmsg->rtmsg_flags,
3593 .fc_type = rtmsg->rtmsg_type,
3595 .fc_nlinfo.nl_net = net,
3597 .fc_dst = rtmsg->rtmsg_dst,
3598 .fc_src = rtmsg->rtmsg_src,
3599 .fc_gateway = rtmsg->rtmsg_gateway,
3603 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3605 struct fib6_config cfg;
3606 struct in6_rtmsg rtmsg;
3610 case SIOCADDRT: /* Add a route */
3611 case SIOCDELRT: /* Delete a route */
3612 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3614 err = copy_from_user(&rtmsg, arg,
3615 sizeof(struct in6_rtmsg));
3619 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
3624 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
3627 err = ip6_route_del(&cfg, NULL);
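/* A minimal userspace sketch of the legacy ioctl path handled above
 * (names and addresses are illustrative; route(8) does the equivalent,
 * CAP_NET_ADMIN is required, and struct in6_rtmsg comes from
 * <linux/ipv6_route.h>):
 *
 *	struct in6_rtmsg rt = { 0 };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	inet_pton(AF_INET6, "2001:db8::", &rt.rtmsg_dst);
 *	rt.rtmsg_dst_len = 64;
 *	inet_pton(AF_INET6, "fe80::1", &rt.rtmsg_gateway);
 *	rt.rtmsg_ifindex = if_nametoindex("eth0");
 *	rt.rtmsg_metric = 1;
 *	rt.rtmsg_flags = RTF_UP | RTF_GATEWAY;
 *	ioctl(fd, SIOCADDRT, &rt);
 */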
3641 * Drop the packet on the floor
3644 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
3647 struct dst_entry *dst = skb_dst(skb);
3648 switch (ipstats_mib_noroutes) {
3649 case IPSTATS_MIB_INNOROUTES:
3650 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
3651 if (type == IPV6_ADDR_ANY) {
3652 IP6_INC_STATS(dev_net(dst->dev),
3653 __in6_dev_get_safely(skb->dev),
3654 IPSTATS_MIB_INADDRERRORS);
3658 case IPSTATS_MIB_OUTNOROUTES:
3659 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
3660 ipstats_mib_noroutes);
3663 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
3668 static int ip6_pkt_discard(struct sk_buff *skb)
3670 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
3673 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3675 skb->dev = skb_dst(skb)->dev;
3676 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
3679 static int ip6_pkt_prohibit(struct sk_buff *skb)
3681 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
3684 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3686 skb->dev = skb_dst(skb)->dev;
3687 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
3691 * Allocate a dst for local (unicast / anycast) address.
3694 struct fib6_info *addrconf_f6i_alloc(struct net *net,
3695 struct inet6_dev *idev,
3696 const struct in6_addr *addr,
3697 bool anycast, gfp_t gfp_flags)
3700 struct net_device *dev = idev->dev;
3701 struct fib6_info *f6i;
3703 f6i = fib6_info_alloc(gfp_flags);
3705 return ERR_PTR(-ENOMEM);
3707 f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0);
3708 f6i->dst_nocount = true;
3709 f6i->dst_host = true;
3710 f6i->fib6_protocol = RTPROT_KERNEL;
3711 f6i->fib6_flags = RTF_UP | RTF_NONEXTHOP;
3713 f6i->fib6_type = RTN_ANYCAST;
3714 f6i->fib6_flags |= RTF_ANYCAST;
3716 f6i->fib6_type = RTN_LOCAL;
3717 f6i->fib6_flags |= RTF_LOCAL;
3720 f6i->fib6_nh.nh_gw = *addr;
3722 f6i->fib6_nh.nh_dev = dev;
3723 f6i->fib6_dst.addr = *addr;
3724 f6i->fib6_dst.plen = 128;
3725 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
3726 f6i->fib6_table = fib6_get_table(net, tb_id);
3731 /* remove deleted ip from prefsrc entries */
3732 struct arg_dev_net_ip {
3733 struct net_device *dev;
3735 struct in6_addr *addr;
3738 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
3740 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
3741 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
3742 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
3744 if (((void *)rt->fib6_nh.nh_dev == dev || !dev) &&
3745 rt != net->ipv6.fib6_null_entry &&
3746 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
3747 spin_lock_bh(&rt6_exception_lock);
3748 /* remove prefsrc entry */
3749 rt->fib6_prefsrc.plen = 0;
3750 spin_unlock_bh(&rt6_exception_lock);
3755 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
3757 struct net *net = dev_net(ifp->idev->dev);
3758 struct arg_dev_net_ip adni = {
3759 .dev = ifp->idev->dev,
3763 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
3766 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
3768 /* Remove routers and update dst entries when a gateway turns into a host. */
3769 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
3771 struct in6_addr *gateway = (struct in6_addr *)arg;
3773 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
3774 ipv6_addr_equal(gateway, &rt->fib6_nh.nh_gw)) {
3778 /* Further clean up cached routes in the exception table.
3779 * This is needed because a cached route may have a different
3780 * gateway than its 'parent' in the case of an IP redirect.
3781 */
3782 rt6_exceptions_clean_tohost(rt, gateway);
3787 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
3789 fib6_clean_all(net, fib6_clean_tohost, gateway);
3792 struct arg_netdev_event {
3793 const struct net_device *dev;
3795 unsigned int nh_flags;
3796 unsigned long event;
3800 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
3802 struct fib6_info *iter;
3803 struct fib6_node *fn;
3805 fn = rcu_dereference_protected(rt->fib6_node,
3806 lockdep_is_held(&rt->fib6_table->tb6_lock));
3807 iter = rcu_dereference_protected(fn->leaf,
3808 lockdep_is_held(&rt->fib6_table->tb6_lock));
3810 if (iter->fib6_metric == rt->fib6_metric &&
3811 rt6_qualify_for_ecmp(iter))
3813 iter = rcu_dereference_protected(iter->fib6_next,
3814 lockdep_is_held(&rt->fib6_table->tb6_lock));
3820 static bool rt6_is_dead(const struct fib6_info *rt)
3822 if (rt->fib6_nh.nh_flags & RTNH_F_DEAD ||
3823 (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
3824 fib6_ignore_linkdown(rt)))
3830 static int rt6_multipath_total_weight(const struct fib6_info *rt)
3832 struct fib6_info *iter;
3835 if (!rt6_is_dead(rt))
3836 total += rt->fib6_nh.nh_weight;
3838 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
3839 if (!rt6_is_dead(iter))
3840 total += iter->fib6_nh.nh_weight;
3846 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
3848 int upper_bound = -1;
3850 if (!rt6_is_dead(rt)) {
3851 *weight += rt->fib6_nh.nh_weight;
3852 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
3855 atomic_set(&rt->fib6_nh.nh_upper_bound, upper_bound);
3858 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
3860 struct fib6_info *iter;
3863 rt6_upper_bound_set(rt, &weight, total);
3865 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
3866 rt6_upper_bound_set(iter, &weight, total);
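/* Worked example (illustrative): for two siblings with weights 1 and 2,
 * total = 3, so the cumulative upper bounds carve the 2^31-wide hash
 * space into roughly the first third for the first nexthop and the rest
 * for the second; during lookup a 31-bit flow hash is compared against
 * nh_upper_bound in list order to pick the nexthop.
 */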
3869 void rt6_multipath_rebalance(struct fib6_info *rt)
3871 struct fib6_info *first;
3874 /* If the entire multipath route was marked for flushing,
3875 * there is no need to rebalance upon the removal of every
3876 * sibling route.
3877 */
3878 if (!rt->fib6_nsiblings || rt->should_flush)
3881 /* During lookup routes are evaluated in order, so we need to
3882 * make sure upper bounds are assigned from the first sibling
3883 * onwards.
3884 */
3885 first = rt6_multipath_first_sibling(rt);
3886 if (WARN_ON_ONCE(!first))
3889 total = rt6_multipath_total_weight(first);
3890 rt6_multipath_upper_bound_set(first, total);
3893 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
3895 const struct arg_netdev_event *arg = p_arg;
3896 struct net *net = dev_net(arg->dev);
3898 if (rt != net->ipv6.fib6_null_entry && rt->fib6_nh.nh_dev == arg->dev) {
3899 rt->fib6_nh.nh_flags &= ~arg->nh_flags;
3900 fib6_update_sernum_upto_root(net, rt);
3901 rt6_multipath_rebalance(rt);
3907 void rt6_sync_up(struct net_device *dev, unsigned int nh_flags)
3909 struct arg_netdev_event arg = {
3912 .nh_flags = nh_flags,
3916 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
3917 arg.nh_flags |= RTNH_F_LINKDOWN;
3919 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
3922 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
3923 const struct net_device *dev)
3925 struct fib6_info *iter;
3927 if (rt->fib6_nh.nh_dev == dev)
3929 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
3930 if (iter->fib6_nh.nh_dev == dev)
3936 static void rt6_multipath_flush(struct fib6_info *rt)
3938 struct fib6_info *iter;
3940 rt->should_flush = 1;
3941 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
3942 iter->should_flush = 1;
3945 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
3946 const struct net_device *down_dev)
3948 struct fib6_info *iter;
3949 unsigned int dead = 0;
3951 if (rt->fib6_nh.nh_dev == down_dev ||
3952 rt->fib6_nh.nh_flags & RTNH_F_DEAD)
3954 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
3955 if (iter->fib6_nh.nh_dev == down_dev ||
3956 iter->fib6_nh.nh_flags & RTNH_F_DEAD)
3962 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
3963 const struct net_device *dev,
3964 unsigned int nh_flags)
3966 struct fib6_info *iter;
3968 if (rt->fib6_nh.nh_dev == dev)
3969 rt->fib6_nh.nh_flags |= nh_flags;
3970 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
3971 if (iter->fib6_nh.nh_dev == dev)
3972 iter->fib6_nh.nh_flags |= nh_flags;
3975 /* called with write lock held for table with rt */
3976 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
3978 const struct arg_netdev_event *arg = p_arg;
3979 const struct net_device *dev = arg->dev;
3980 struct net *net = dev_net(dev);
3982 if (rt == net->ipv6.fib6_null_entry)
3985 switch (arg->event) {
3986 case NETDEV_UNREGISTER:
3987 return rt->fib6_nh.nh_dev == dev ? -1 : 0;
3989 if (rt->should_flush)
3991 if (!rt->fib6_nsiblings)
3992 return rt->fib6_nh.nh_dev == dev ? -1 : 0;
3993 if (rt6_multipath_uses_dev(rt, dev)) {
3996 count = rt6_multipath_dead_count(rt, dev);
3997 if (rt->fib6_nsiblings + 1 == count) {
3998 rt6_multipath_flush(rt);
4001 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4003 fib6_update_sernum(net, rt);
4004 rt6_multipath_rebalance(rt);
4008 if (rt->fib6_nh.nh_dev != dev ||
4009 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4011 rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
4012 rt6_multipath_rebalance(rt);
4019 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4021 struct arg_netdev_event arg = {
4028 fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
4031 void rt6_disable_ip(struct net_device *dev, unsigned long event)
4033 rt6_sync_down_dev(dev, event);
4034 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4035 neigh_ifdown(&nd_tbl, dev);
4038 struct rt6_mtu_change_arg {
4039 struct net_device *dev;
4043 static int rt6_mtu_change_route(struct fib6_info *rt, void *p_arg)
4045 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4046 struct inet6_dev *idev;
4048 /* In IPv6, PMTU discovery is not optional,
4049 so the RTAX_MTU lock cannot disable it.
4050 We still use this lock to block changes
4051 caused by addrconf/ndisc.
4052 */
4054 idev = __in6_dev_get(arg->dev);
4058 /* For an administrative MTU increase, there is no way to discover
4059 the IPv6 PMTU increase, so the PMTU increase should be updated here.
4060 Since RFC 1981 doesn't cover administrative MTU increases,
4061 updating the PMTU on increase is a MUST (e.g. for jumbo frames).
4062 */
4063 if (rt->fib6_nh.nh_dev == arg->dev &&
4064 !fib6_metric_locked(rt, RTAX_MTU)) {
4065 u32 mtu = rt->fib6_pmtu;
4067 if (mtu >= arg->mtu ||
4068 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4069 fib6_metric_set(rt, RTAX_MTU, arg->mtu);
4071 spin_lock_bh(&rt6_exception_lock);
4072 rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
4073 spin_unlock_bh(&rt6_exception_lock);
4078 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4080 struct rt6_mtu_change_arg arg = {
4085 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
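/* Illustrative trigger: `ip link set eth0 mtu 9000` fires this walker via
 * the NETDEV_CHANGEMTU notifier (assuming addrconf updates cnf.mtu6 only
 * after the walk); unlocked routes on eth0 whose stored MTU exceeds 9000,
 * or which still equal the old per-device mtu6, are rewritten to 9000,
 * and cached exceptions are updated under rt6_exception_lock.
 */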
4088 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4089 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4090 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4091 [RTA_OIF] = { .type = NLA_U32 },
4092 [RTA_IIF] = { .type = NLA_U32 },
4093 [RTA_PRIORITY] = { .type = NLA_U32 },
4094 [RTA_METRICS] = { .type = NLA_NESTED },
4095 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4096 [RTA_PREF] = { .type = NLA_U8 },
4097 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4098 [RTA_ENCAP] = { .type = NLA_NESTED },
4099 [RTA_EXPIRES] = { .type = NLA_U32 },
4100 [RTA_UID] = { .type = NLA_U32 },
4101 [RTA_MARK] = { .type = NLA_U32 },
4102 [RTA_TABLE] = { .type = NLA_U32 },
4103 [RTA_IP_PROTO] = { .type = NLA_U8 },
4104 [RTA_SPORT] = { .type = NLA_U16 },
4105 [RTA_DPORT] = { .type = NLA_U16 },
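/* Mapping example (illustrative): an iproute2 command such as
 *
 *	ip -6 route add 2001:db8::/64 via fe80::1 dev eth0 metric 1024
 *
 * arrives as RTM_NEWROUTE carrying RTA_DST, RTA_GATEWAY, RTA_OIF and
 * RTA_PRIORITY, each validated against this policy before
 * rtm_to_fib6_config() copies it into a struct fib6_config.
 */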
4108 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4109 struct fib6_config *cfg,
4110 struct netlink_ext_ack *extack)
4113 struct nlattr *tb[RTA_MAX+1];
4117 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
4123 rtm = nlmsg_data(nlh);
4125 *cfg = (struct fib6_config){
4126 .fc_table = rtm->rtm_table,
4127 .fc_dst_len = rtm->rtm_dst_len,
4128 .fc_src_len = rtm->rtm_src_len,
4130 .fc_protocol = rtm->rtm_protocol,
4131 .fc_type = rtm->rtm_type,
4133 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
4134 .fc_nlinfo.nlh = nlh,
4135 .fc_nlinfo.nl_net = sock_net(skb->sk),
4138 if (rtm->rtm_type == RTN_UNREACHABLE ||
4139 rtm->rtm_type == RTN_BLACKHOLE ||
4140 rtm->rtm_type == RTN_PROHIBIT ||
4141 rtm->rtm_type == RTN_THROW)
4142 cfg->fc_flags |= RTF_REJECT;
4144 if (rtm->rtm_type == RTN_LOCAL)
4145 cfg->fc_flags |= RTF_LOCAL;
4147 if (rtm->rtm_flags & RTM_F_CLONED)
4148 cfg->fc_flags |= RTF_CACHE;
4150 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4152 if (tb[RTA_GATEWAY]) {
4153 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4154 cfg->fc_flags |= RTF_GATEWAY;
4158 int plen = (rtm->rtm_dst_len + 7) >> 3;
4160 if (nla_len(tb[RTA_DST]) < plen)
4163 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
4167 int plen = (rtm->rtm_src_len + 7) >> 3;
4169 if (nla_len(tb[RTA_SRC]) < plen)
4172 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
4175 if (tb[RTA_PREFSRC])
4176 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
4179 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4181 if (tb[RTA_PRIORITY])
4182 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
4184 if (tb[RTA_METRICS]) {
4185 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
4186 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
4190 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
4192 if (tb[RTA_MULTIPATH]) {
4193 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
4194 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
4196 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
4197 cfg->fc_mp_len, extack);
4203 pref = nla_get_u8(tb[RTA_PREF]);
4204 if (pref != ICMPV6_ROUTER_PREF_LOW &&
4205 pref != ICMPV6_ROUTER_PREF_HIGH)
4206 pref = ICMPV6_ROUTER_PREF_MEDIUM;
4207 cfg->fc_flags |= RTF_PREF(pref);
4211 cfg->fc_encap = tb[RTA_ENCAP];
4213 if (tb[RTA_ENCAP_TYPE]) {
4214 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
4216 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
4221 if (tb[RTA_EXPIRES]) {
4222 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
4224 if (addrconf_finite_timeout(timeout)) {
4225 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
4226 cfg->fc_flags |= RTF_EXPIRES;
4236 struct fib6_info *fib6_info;
4237 struct fib6_config r_cfg;
4238 struct list_head next;
4241 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
4245 list_for_each_entry(nh, rt6_nh_list, next) {
4246 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
4247 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
4248 nh->r_cfg.fc_ifindex);
4252 static int ip6_route_info_append(struct net *net,
4253 struct list_head *rt6_nh_list,
4254 struct fib6_info *rt,
4255 struct fib6_config *r_cfg)
4260 list_for_each_entry(nh, rt6_nh_list, next) {
4261 /* check if fib6_info already exists */
4262 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
4266 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
4270 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
4271 list_add_tail(&nh->next, rt6_nh_list);
4276 static void ip6_route_mpath_notify(struct fib6_info *rt,
4277 struct fib6_info *rt_last,
4278 struct nl_info *info,
4281 /* if this is an APPEND route, then rt points to the first route
4282 * inserted and rt_last points to the last route inserted. Userspace
4283 * wants a consistent dump of the route which starts at the first
4284 * nexthop. Since sibling routes are always added at the end of
4285 * the list, find the first sibling of the last route appended.
4286 */
4287 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
4288 rt = list_first_entry(&rt_last->fib6_siblings,
4294 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
4297 static int ip6_route_multipath_add(struct fib6_config *cfg,
4298 struct netlink_ext_ack *extack)
4300 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
4301 struct nl_info *info = &cfg->fc_nlinfo;
4302 struct fib6_config r_cfg;
4303 struct rtnexthop *rtnh;
4304 struct fib6_info *rt;
4305 struct rt6_nh *err_nh;
4306 struct rt6_nh *nh, *nh_safe;
4312 int replace = (cfg->fc_nlinfo.nlh &&
4313 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
4314 LIST_HEAD(rt6_nh_list);
4316 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
4317 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
4318 nlflags |= NLM_F_APPEND;
4320 remaining = cfg->fc_mp_len;
4321 rtnh = (struct rtnexthop *)cfg->fc_mp;
4323 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
4324 * fib6_info structs per nexthop
4326 while (rtnh_ok(rtnh, remaining)) {
4327 memcpy(&r_cfg, cfg, sizeof(*cfg));
4328 if (rtnh->rtnh_ifindex)
4329 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4331 attrlen = rtnh_attrlen(rtnh);
4333 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4335 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4337 r_cfg.fc_gateway = nla_get_in6_addr(nla);
4338 r_cfg.fc_flags |= RTF_GATEWAY;
4340 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
4341 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
4343 r_cfg.fc_encap_type = nla_get_u16(nla);
4346 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
4347 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
4353 if (!rt6_qualify_for_ecmp(rt)) {
4355 NL_SET_ERR_MSG(extack,
4356 "Device only routes can not be added for IPv6 using the multipath API.");
4357 fib6_info_release(rt);
4361 rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1;
4363 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
4366 fib6_info_release(rt);
4370 rtnh = rtnh_next(rtnh, &remaining);
4373 /* For add and replace, send one notification with all nexthops.
4374 * Skip the notification in fib6_add_rt2node and send one with
4375 * the full route when done.
4376 */
4377 info->skip_notify = 1;
4380 list_for_each_entry(nh, &rt6_nh_list, next) {
4381 err = __ip6_ins_rt(nh->fib6_info, info, extack);
4382 fib6_info_release(nh->fib6_info);
4385 /* save reference to last route successfully inserted */
4386 rt_last = nh->fib6_info;
4388 /* save reference to first route for notification */
4390 rt_notif = nh->fib6_info;
4393 /* nh->fib6_info is used or freed at this point, reset to NULL */
4394 nh->fib6_info = NULL;
4397 ip6_print_replace_route_err(&rt6_nh_list);
4402 /* Because each route is added like a single route we remove
4403 * these flags after the first nexthop: if there is a collision,
4404 * we have already failed to add the first nexthop:
4405 * fib6_add_rt2node() has rejected it; when replacing, old
4406 * nexthops have been replaced by the first new one, and the rest should
4407 * be added to it.
4408 */
4409 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
4414 /* success ... tell user about new route */
4415 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
4419 /* send notification for routes that were added so that
4420 * the delete notifications sent by ip6_route_del are
4421 * coherent
4422 */
4423 if (rt_notif)
4424 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
4426 /* Delete routes that were already added */
4427 list_for_each_entry(nh, &rt6_nh_list, next) {
4430 ip6_route_del(&nh->r_cfg, extack);
4434 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
4436 fib6_info_release(nh->fib6_info);
4437 list_del(&nh->next);
4444 static int ip6_route_multipath_del(struct fib6_config *cfg,
4445 struct netlink_ext_ack *extack)
4447 struct fib6_config r_cfg;
4448 struct rtnexthop *rtnh;
4451 int err = 1, last_err = 0;
4453 remaining = cfg->fc_mp_len;
4454 rtnh = (struct rtnexthop *)cfg->fc_mp;
4456 /* Parse a Multipath Entry */
4457 while (rtnh_ok(rtnh, remaining)) {
4458 memcpy(&r_cfg, cfg, sizeof(*cfg));
4459 if (rtnh->rtnh_ifindex)
4460 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4462 attrlen = rtnh_attrlen(rtnh);
4464 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4466 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4468 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
4469 r_cfg.fc_flags |= RTF_GATEWAY;
4472 err = ip6_route_del(&r_cfg, extack);
4476 rtnh = rtnh_next(rtnh, &remaining);
4482 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4483 struct netlink_ext_ack *extack)
4485 struct fib6_config cfg;
4488 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4493 return ip6_route_multipath_del(&cfg, extack);
4495 cfg.fc_delete_all_nh = 1;
4496 return ip6_route_del(&cfg, extack);
4500 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4501 struct netlink_ext_ack *extack)
4503 struct fib6_config cfg;
4506 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4511 return ip6_route_multipath_add(&cfg, extack);
4513 return ip6_route_add(&cfg, GFP_KERNEL, extack);
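/* Illustrative trigger for the multipath branch above:
 *
 *	ip -6 route add 2001:db8::/64 \
 *		nexthop via fe80::1 dev eth0 \
 *		nexthop via fe80::2 dev eth1
 *
 * encodes both hops in one RTA_MULTIPATH attribute, so cfg.fc_mp is set
 * and ip6_route_multipath_add() inserts them as sibling routes.
 */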
4516 static size_t rt6_nlmsg_size(struct fib6_info *rt)
4518 int nexthop_len = 0;
4520 if (rt->fib6_nsiblings) {
4521 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
4522 + NLA_ALIGN(sizeof(struct rtnexthop))
4523 + nla_total_size(16) /* RTA_GATEWAY */
4524 + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate);
4526 nexthop_len *= rt->fib6_nsiblings;
4529 return NLMSG_ALIGN(sizeof(struct rtmsg))
4530 + nla_total_size(16) /* RTA_SRC */
4531 + nla_total_size(16) /* RTA_DST */
4532 + nla_total_size(16) /* RTA_GATEWAY */
4533 + nla_total_size(16) /* RTA_PREFSRC */
4534 + nla_total_size(4) /* RTA_TABLE */
4535 + nla_total_size(4) /* RTA_IIF */
4536 + nla_total_size(4) /* RTA_OIF */
4537 + nla_total_size(4) /* RTA_PRIORITY */
4538 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
4539 + nla_total_size(sizeof(struct rta_cacheinfo))
4540 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
4541 + nla_total_size(1) /* RTA_PREF */
4542 + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate)
4546 static int rt6_nexthop_info(struct sk_buff *skb, struct fib6_info *rt,
4547 unsigned int *flags, bool skip_oif)
4549 if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
4550 *flags |= RTNH_F_DEAD;
4552 if (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN) {
4553 *flags |= RTNH_F_LINKDOWN;
4556 if (fib6_ignore_linkdown(rt))
4557 *flags |= RTNH_F_DEAD;
4561 if (rt->fib6_flags & RTF_GATEWAY) {
4562 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->fib6_nh.nh_gw) < 0)
4563 goto nla_put_failure;
4566 *flags |= (rt->fib6_nh.nh_flags & RTNH_F_ONLINK);
4567 if (rt->fib6_nh.nh_flags & RTNH_F_OFFLOAD)
4568 *flags |= RTNH_F_OFFLOAD;
4570 /* not needed for multipath encoding b/c it has a rtnexthop struct */
4571 if (!skip_oif && rt->fib6_nh.nh_dev &&
4572 nla_put_u32(skb, RTA_OIF, rt->fib6_nh.nh_dev->ifindex))
4573 goto nla_put_failure;
4575 if (rt->fib6_nh.nh_lwtstate &&
4576 lwtunnel_fill_encap(skb, rt->fib6_nh.nh_lwtstate) < 0)
4577 goto nla_put_failure;
4585 /* add multipath next hop */
4586 static int rt6_add_nexthop(struct sk_buff *skb, struct fib6_info *rt)
4588 const struct net_device *dev = rt->fib6_nh.nh_dev;
4589 struct rtnexthop *rtnh;
4590 unsigned int flags = 0;
4592 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
4594 goto nla_put_failure;
4596 rtnh->rtnh_hops = rt->fib6_nh.nh_weight - 1;
4597 rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
4599 if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
4600 goto nla_put_failure;
4602 rtnh->rtnh_flags = flags;
4604 /* length of rtnetlink header + attributes */
4605 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
4613 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4614 struct fib6_info *rt, struct dst_entry *dst,
4615 struct in6_addr *dest, struct in6_addr *src,
4616 int iif, int type, u32 portid, u32 seq,
4619 struct rt6_info *rt6 = (struct rt6_info *)dst;
4620 struct rt6key *rt6_dst, *rt6_src;
4621 u32 *pmetrics, table, rt6_flags;
4622 struct nlmsghdr *nlh;
4626 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
4631 rt6_dst = &rt6->rt6i_dst;
4632 rt6_src = &rt6->rt6i_src;
4633 rt6_flags = rt6->rt6i_flags;
4635 rt6_dst = &rt->fib6_dst;
4636 rt6_src = &rt->fib6_src;
4637 rt6_flags = rt->fib6_flags;
4640 rtm = nlmsg_data(nlh);
4641 rtm->rtm_family = AF_INET6;
4642 rtm->rtm_dst_len = rt6_dst->plen;
4643 rtm->rtm_src_len = rt6_src->plen;
4646 table = rt->fib6_table->tb6_id;
4648 table = RT6_TABLE_UNSPEC;
4649 rtm->rtm_table = table;
4650 if (nla_put_u32(skb, RTA_TABLE, table))
4651 goto nla_put_failure;
4653 rtm->rtm_type = rt->fib6_type;
4655 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
4656 rtm->rtm_protocol = rt->fib6_protocol;
4658 if (rt6_flags & RTF_CACHE)
4659 rtm->rtm_flags |= RTM_F_CLONED;
4662 if (nla_put_in6_addr(skb, RTA_DST, dest))
4663 goto nla_put_failure;
4664 rtm->rtm_dst_len = 128;
4665 } else if (rtm->rtm_dst_len)
4666 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
4667 goto nla_put_failure;
4668 #ifdef CONFIG_IPV6_SUBTREES
4670 if (nla_put_in6_addr(skb, RTA_SRC, src))
4671 goto nla_put_failure;
4672 rtm->rtm_src_len = 128;
4673 } else if (rtm->rtm_src_len &&
4674 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
4675 goto nla_put_failure;
4678 #ifdef CONFIG_IPV6_MROUTE
4679 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
4680 int err = ip6mr_get_route(net, skb, rtm, portid);
4685 goto nla_put_failure;
4688 if (nla_put_u32(skb, RTA_IIF, iif))
4689 goto nla_put_failure;
4691 struct in6_addr saddr_buf;
4692 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
4693 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4694 goto nla_put_failure;
4697 if (rt->fib6_prefsrc.plen) {
4698 struct in6_addr saddr_buf;
4699 saddr_buf = rt->fib6_prefsrc.addr;
4700 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4701 goto nla_put_failure;
4704 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
4705 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
4706 goto nla_put_failure;
4708 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
4709 goto nla_put_failure;
4711 /* For multipath routes, walk the siblings list and add
4712 * each as a nexthop within RTA_MULTIPATH.
4715 if (rt6_flags & RTF_GATEWAY &&
4716 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
4717 goto nla_put_failure;
4719 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
4720 goto nla_put_failure;
4721 } else if (rt->fib6_nsiblings) {
4722 struct fib6_info *sibling, *next_sibling;
4725 mp = nla_nest_start(skb, RTA_MULTIPATH);
4727 goto nla_put_failure;
4729 if (rt6_add_nexthop(skb, rt) < 0)
4730 goto nla_put_failure;
4732 list_for_each_entry_safe(sibling, next_sibling,
4733 &rt->fib6_siblings, fib6_siblings) {
4734 if (rt6_add_nexthop(skb, sibling) < 0)
4735 goto nla_put_failure;
4738 nla_nest_end(skb, mp);
4740 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
4741 goto nla_put_failure;
4744 if (rt6_flags & RTF_EXPIRES) {
4745 expires = dst ? dst->expires : rt->expires;
4749 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
4750 goto nla_put_failure;
4752 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
4753 goto nla_put_failure;
4756 nlmsg_end(skb, nlh);
4760 nlmsg_cancel(skb, nlh);
4764 int rt6_dump_route(struct fib6_info *rt, void *p_arg)
4766 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
4767 struct net *net = arg->net;
4769 if (rt == net->ipv6.fib6_null_entry)
4772 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
4773 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
4775 /* user wants prefix routes only */
4776 if (rtm->rtm_flags & RTM_F_PREFIX &&
4777 !(rt->fib6_flags & RTF_PREFIX_RT)) {
4778 /* success since this is not a prefix route */
4783 return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
4784 RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
4785 arg->cb->nlh->nlmsg_seq, NLM_F_MULTI);
4788 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4789 struct netlink_ext_ack *extack)
4791 struct net *net = sock_net(in_skb->sk);
4792 struct nlattr *tb[RTA_MAX+1];
4793 int err, iif = 0, oif = 0;
4794 struct fib6_info *from;
4795 struct dst_entry *dst;
4796 struct rt6_info *rt;
4797 struct sk_buff *skb;
4799 struct flowi6 fl6 = {};
4802 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
4808 rtm = nlmsg_data(nlh);
4809 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
4810 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
4813 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
4816 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
4820 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
4823 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
4827 iif = nla_get_u32(tb[RTA_IIF]);
4830 oif = nla_get_u32(tb[RTA_OIF]);
4833 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
4836 fl6.flowi6_uid = make_kuid(current_user_ns(),
4837 nla_get_u32(tb[RTA_UID]));
4839 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
4842 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
4845 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
4847 if (tb[RTA_IP_PROTO]) {
4848 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
4849 &fl6.flowi6_proto, extack);
4855 struct net_device *dev;
4860 dev = dev_get_by_index_rcu(net, iif);
4867 fl6.flowi6_iif = iif;
4869 if (!ipv6_addr_any(&fl6.saddr))
4870 flags |= RT6_LOOKUP_F_HAS_SADDR;
4872 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
4876 fl6.flowi6_oif = oif;
4878 dst = ip6_route_output(net, NULL, &fl6);
4882 rt = container_of(dst, struct rt6_info, dst);
4883 if (rt->dst.error) {
4884 err = rt->dst.error;
4889 if (rt == net->ipv6.ip6_null_entry) {
4890 err = rt->dst.error;
4895 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
4902 skb_dst_set(skb, &rt->dst);
4905 from = rcu_dereference(rt->from);
4908 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif,
4909 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
4912 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
4913 &fl6.saddr, iif, RTM_NEWROUTE,
4914 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
4923 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4928 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
4929 unsigned int nlm_flags)
4931 struct sk_buff *skb;
4932 struct net *net = info->nl_net;
4937 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
4939 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
4943 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
4944 event, info->portid, seq, nlm_flags);
4946 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
4947 WARN_ON(err == -EMSGSIZE);
4951 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
4952 info->nlh, gfp_any());
4956 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
4959 static int ip6_route_dev_notify(struct notifier_block *this,
4960 unsigned long event, void *ptr)
4962 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4963 struct net *net = dev_net(dev);
4965 if (!(dev->flags & IFF_LOOPBACK))
4968 if (event == NETDEV_REGISTER) {
4969 net->ipv6.fib6_null_entry->fib6_nh.nh_dev = dev;
4970 net->ipv6.ip6_null_entry->dst.dev = dev;
4971 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
4972 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4973 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
4974 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
4975 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
4976 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
4978 } else if (event == NETDEV_UNREGISTER &&
4979 dev->reg_state != NETREG_UNREGISTERED) {
4980 /* NETDEV_UNREGISTER could be fired multiple times by
4981 * netdev_wait_allrefs(). Make sure we only call this once.
4982 */
4983 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
4984 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4985 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
4986 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
4997 #ifdef CONFIG_PROC_FS
4998 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
5000 struct net *net = (struct net *)seq->private;
5001 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
5002 net->ipv6.rt6_stats->fib_nodes,
5003 net->ipv6.rt6_stats->fib_route_nodes,
5004 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
5005 net->ipv6.rt6_stats->fib_rt_entries,
5006 net->ipv6.rt6_stats->fib_rt_cache,
5007 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
5008 net->ipv6.rt6_stats->fib_discarded_routes);
5012 #endif /* CONFIG_PROC_FS */
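/* The seq_file above backs /proc/net/rt6_stats; a sample reading (values
 * made up for illustration) looks like
 *
 *	0045 002a 0000 0030 0002 0200 0017
 *
 * i.e. fib nodes, route nodes, allocated rt entries, route entries,
 * cached routes, slow-path dst entries and discarded routes, all in hex.
 */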
5014 #ifdef CONFIG_SYSCTL
5017 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
5018 void __user *buffer, size_t *lenp, loff_t *ppos)
5025 net = (struct net *)ctl->extra1;
5026 delay = net->ipv6.sysctl.flush_delay;
5027 proc_dointvec(ctl, write, buffer, lenp, ppos);
5028 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
struct ctl_table ipv6_route_table_template[] = {
	{
		.procname	= "flush",
		.data		= &init_net.ipv6.sysctl.flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv6_sysctl_rtcache_flush
	},
	{
		.procname	= "gc_thresh",
		.data		= &ip6_dst_ops_template.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{ }
};
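/* Clone the template for each new netns and repoint every entry at the
 * namespace's own copy of the variable.  The table[N] indices below
 * must stay in sync with the entry order in ipv6_route_table_template.
 */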
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
#endif	/* CONFIG_SYSCTL */
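/* Per-netns setup: each namespace gets its own dst_ops and its own
 * copies of the special null/prohibit/blackhole entries, duplicated
 * from the static templates defined earlier in this file.
 */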
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.fib6_null_entry = kmemdup(&fib6_null_entry_template,
					    sizeof(*net->ipv6.fib6_null_entry),
					    GFP_KERNEL);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_fib6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.fib6_null_entry);
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}
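/* The proc entries are created in a separate, late pernet op so that
 * they are only registered once the fib tables they walk have been set
 * up by the earlier pernet init hooks.
 */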
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
			sizeof(struct ipv6_route_iter));
	proc_create_net_single("rt6_stats", 0444, net->proc_net,
			       rt6_stats_seq_show, NULL);
#endif
	return 0;
}
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};
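/* Each namespace also carries its own inetpeer base for IPv6, used for
 * per-peer state such as ICMPv6 rate limiting.
 */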
static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}
static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}
static struct pernet_operations ipv6_inetpeer_ops = {
	.init = ipv6_inetpeer_init,
	.exit = ipv6_inetpeer_exit,
};
static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};
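/* Run after the addrconf notifier (ADDRCONF_NOTIFY_PRIORITY), so that
 * the inet6_dev looked up via in6_dev_get() in ip6_route_dev_notify()
 * already exists when NETDEV_REGISTER is handled.
 */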
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code,
	 * so the loopback reference in rt6_info has not been taken yet;
	 * do it manually for init_net here.
	 */
	init_net.ipv6.fib6_null_entry->fib6_nh.nh_dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
}
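/* Subsystem init: ordering matters.  The dst kmem cache and blackhole
 * dst counters must exist before the pernet subsystems that use them;
 * the netlink doit handlers and the netdevice notifier come last, once
 * everything they may dereference is in place.  Each error label below
 * unwinds exactly the steps that had already succeeded.
 */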
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;
	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;
	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;
	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;
	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;
out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
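/* Module unload: tear everything down in (roughly) the reverse order of
 * ip6_route_init().
 */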
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}