2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * ROUTE - implementation of the IP router.
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
21 * Alan Cox : Super /proc >4K
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD;
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after a year in a coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
65 #define pr_fmt(fmt) "IPv4: " fmt
67 #include <linux/module.h>
68 #include <linux/uaccess.h>
69 #include <linux/bitops.h>
70 #include <linux/types.h>
71 #include <linux/kernel.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/skbuff.h>
83 #include <linux/inetdevice.h>
84 #include <linux/igmp.h>
85 #include <linux/pkt_sched.h>
86 #include <linux/mroute.h>
87 #include <linux/netfilter_ipv4.h>
88 #include <linux/random.h>
89 #include <linux/rcupdate.h>
90 #include <linux/times.h>
91 #include <linux/slab.h>
92 #include <linux/jhash.h>
94 #include <net/dst_metadata.h>
95 #include <net/net_namespace.h>
96 #include <net/protocol.h>
98 #include <net/route.h>
99 #include <net/inetpeer.h>
100 #include <net/sock.h>
101 #include <net/ip_fib.h>
104 #include <net/icmp.h>
105 #include <net/xfrm.h>
106 #include <net/lwtunnel.h>
107 #include <net/netevent.h>
108 #include <net/rtnetlink.h>
110 #include <linux/sysctl.h>
111 #include <linux/kmemleak.h>
113 #include <net/secure_seq.h>
114 #include <net/ip_tunnels.h>
115 #include <net/l3mdev.h>
117 #include "fib_lookup.h"
119 #define RT_FL_TOS(oldflp4) \
120 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
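/* RT_FL_TOS() keeps both the routable TOS bits and the RTO_ONLINK hint
 * callers may encode into flowi4_tos; ip_route_output_key_hash() later
 * turns that hint into RT_SCOPE_LINK.
 */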
122 #define RT_GC_TIMEOUT (300*HZ)
124 static int ip_rt_max_size;
125 static int ip_rt_redirect_number __read_mostly = 9;
126 static int ip_rt_redirect_load __read_mostly = HZ / 50;
127 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
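/* A worked example with HZ=1000: ip_rt_redirect_load is 20 jiffies, and
 * ip_rt_redirect_silence is (HZ / 50) << (9 + 1) = 20480 jiffies, i.e.
 * the silence window is roughly 20 seconds.
 */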
128 static int ip_rt_error_cost __read_mostly = HZ;
129 static int ip_rt_error_burst __read_mostly = 5 * HZ;
130 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
131 static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
132 static int ip_rt_min_advmss __read_mostly = 256;
134 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
137 * Interface to generic destination cache.
140 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
141 static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
142 static unsigned int ipv4_mtu(const struct dst_entry *dst);
143 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
144 static void ipv4_link_failure(struct sk_buff *skb);
145 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
146 struct sk_buff *skb, u32 mtu);
147 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
148 struct sk_buff *skb);
149 static void ipv4_dst_destroy(struct dst_entry *dst);
151 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
157 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
160 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
162 static struct dst_ops ipv4_dst_ops = {
164 .check = ipv4_dst_check,
165 .default_advmss = ipv4_default_advmss,
167 .cow_metrics = ipv4_cow_metrics,
168 .destroy = ipv4_dst_destroy,
169 .negative_advice = ipv4_negative_advice,
170 .link_failure = ipv4_link_failure,
171 .update_pmtu = ip_rt_update_pmtu,
172 .redirect = ip_do_redirect,
173 .local_out = __ip_local_out,
174 .neigh_lookup = ipv4_neigh_lookup,
175 .confirm_neigh = ipv4_confirm_neigh,
178 #define ECN_OR_COST(class) TC_PRIO_##class
180 const __u8 ip_tos2prio[16] = {
182 ECN_OR_COST(BESTEFFORT),
184 ECN_OR_COST(BESTEFFORT),
190 ECN_OR_COST(INTERACTIVE),
192 ECN_OR_COST(INTERACTIVE),
193 TC_PRIO_INTERACTIVE_BULK,
194 ECN_OR_COST(INTERACTIVE_BULK),
195 TC_PRIO_INTERACTIVE_BULK,
196 ECN_OR_COST(INTERACTIVE_BULK)
198 EXPORT_SYMBOL(ip_tos2prio);
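/* ip_tos2prio[] is indexed by the legacy 4-bit TOS field,
 * (tos & IPTOS_TOS_MASK) >> 1: even slots carry the base TC_PRIO_*
 * class, while odd slots (selected by the historical "minimise monetary
 * cost" bit, since reclaimed by ECN) go through ECN_OR_COST().
 */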
200 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
201 #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
203 #ifdef CONFIG_PROC_FS
204 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
208 return SEQ_START_TOKEN;
211 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
217 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
221 static int rt_cache_seq_show(struct seq_file *seq, void *v)
223 if (v == SEQ_START_TOKEN)
224 seq_printf(seq, "%-127s\n",
225 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
226 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
231 static const struct seq_operations rt_cache_seq_ops = {
232 .start = rt_cache_seq_start,
233 .next = rt_cache_seq_next,
234 .stop = rt_cache_seq_stop,
235 .show = rt_cache_seq_show,
238 static int rt_cache_seq_open(struct inode *inode, struct file *file)
240 return seq_open(file, &rt_cache_seq_ops);
243 static const struct file_operations rt_cache_seq_fops = {
244 .open = rt_cache_seq_open,
247 .release = seq_release,
251 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
256 return SEQ_START_TOKEN;
258 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
259 if (!cpu_possible(cpu))
262 return &per_cpu(rt_cache_stat, cpu);
267 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
271 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
272 if (!cpu_possible(cpu))
275 return &per_cpu(rt_cache_stat, cpu);
281 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
286 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
288 struct rt_cache_stat *st = v;
290 if (v == SEQ_START_TOKEN) {
291 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
295 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
296 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
297 dst_entries_get_slow(&ipv4_dst_ops),
310 0, /* st->gc_total */
311 0, /* st->gc_ignored */
312 0, /* st->gc_goal_miss */
313 0, /* st->gc_dst_overflow */
314 0, /* st->in_hlist_search */
315 0 /* st->out_hlist_search */
320 static const struct seq_operations rt_cpu_seq_ops = {
321 .start = rt_cpu_seq_start,
322 .next = rt_cpu_seq_next,
323 .stop = rt_cpu_seq_stop,
324 .show = rt_cpu_seq_show,
328 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
330 return seq_open(file, &rt_cpu_seq_ops);
333 static const struct file_operations rt_cpu_seq_fops = {
334 .open = rt_cpu_seq_open,
337 .release = seq_release,
340 #ifdef CONFIG_IP_ROUTE_CLASSID
341 static int rt_acct_proc_show(struct seq_file *m, void *v)
343 struct ip_rt_acct *dst, *src;
346 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
350 for_each_possible_cpu(i) {
351 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
352 for (j = 0; j < 256; j++) {
353 dst[j].o_bytes += src[j].o_bytes;
354 dst[j].o_packets += src[j].o_packets;
355 dst[j].i_bytes += src[j].i_bytes;
356 dst[j].i_packets += src[j].i_packets;
360 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
365 static int rt_acct_proc_open(struct inode *inode, struct file *file)
367 return single_open(file, rt_acct_proc_show, NULL);
370 static const struct file_operations rt_acct_proc_fops = {
371 .open = rt_acct_proc_open,
374 .release = single_release,
378 static int __net_init ip_rt_do_proc_init(struct net *net)
380 struct proc_dir_entry *pde;
382 pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
387 pde = proc_create("rt_cache", S_IRUGO,
388 net->proc_net_stat, &rt_cpu_seq_fops);
392 #ifdef CONFIG_IP_ROUTE_CLASSID
393 pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
399 #ifdef CONFIG_IP_ROUTE_CLASSID
401 remove_proc_entry("rt_cache", net->proc_net_stat);
404 remove_proc_entry("rt_cache", net->proc_net);
409 static void __net_exit ip_rt_do_proc_exit(struct net *net)
411 remove_proc_entry("rt_cache", net->proc_net_stat);
412 remove_proc_entry("rt_cache", net->proc_net);
413 #ifdef CONFIG_IP_ROUTE_CLASSID
414 remove_proc_entry("rt_acct", net->proc_net);
418 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
419 .init = ip_rt_do_proc_init,
420 .exit = ip_rt_do_proc_exit,
423 static int __init ip_rt_proc_init(void)
425 return register_pernet_subsys(&ip_rt_proc_ops);
429 static inline int ip_rt_proc_init(void)
433 #endif /* CONFIG_PROC_FS */
435 static inline bool rt_is_expired(const struct rtable *rth)
437 return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
440 void rt_cache_flush(struct net *net)
442 rt_genid_bump_ipv4(net);
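	/* Bumping the per-netns generation id is all a flush needs:
	 * rt_is_expired() compares each cached rtable's genid on its next
	 * use, so stale entries die lazily without walking any list.
	 */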
445 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
449 struct net_device *dev = dst->dev;
450 const __be32 *pkey = daddr;
451 const struct rtable *rt;
454 rt = (const struct rtable *) dst;
456 pkey = (const __be32 *) &rt->rt_gateway;
458 pkey = &ip_hdr(skb)->daddr;
460 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
463 return neigh_create(&arp_tbl, pkey, dev);
466 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
468 struct net_device *dev = dst->dev;
469 const __be32 *pkey = daddr;
470 const struct rtable *rt;
472 rt = (const struct rtable *)dst;
474 pkey = (const __be32 *)&rt->rt_gateway;
477 (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
480 __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
483 #define IP_IDENTS_SZ 2048u
485 static atomic_t *ip_idents __read_mostly;
486 static u32 *ip_tstamps __read_mostly;
488 /* In order to protect privacy, we add a perturbation to identifiers
489 * if one generator is seldom used. This makes it hard for an attacker
490 * to infer how many packets were sent between two points in time.
492 u32 ip_idents_reserve(u32 hash, int segs)
494 u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
495 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
496 u32 old = READ_ONCE(*p_tstamp);
497 u32 now = (u32)jiffies;
500 if (old != now && cmpxchg(p_tstamp, old, now) == old)
501 delta = prandom_u32_max(now - old);
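	/* Only the CPU that wins the cmpxchg on the timestamp applies the
	 * perturbation, and the random delta is bounded by the idle time,
	 * so a busy generator still produces increasing IDs.
	 */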
503 /* Do not use atomic_add_return() as it makes UBSAN unhappy */
505 old = (u32)atomic_read(p_id);
506 new = old + delta + segs;
507 } while (atomic_cmpxchg(p_id, old, new) != old);
511 EXPORT_SYMBOL(ip_idents_reserve);
513 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
515 static u32 ip_idents_hashrnd __read_mostly;
518 net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
520 hash = jhash_3words((__force u32)iph->daddr,
521 (__force u32)iph->saddr,
522 iph->protocol ^ net_hash_mix(net),
524 id = ip_idents_reserve(hash, segs);
527 EXPORT_SYMBOL(__ip_select_ident);
529 static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
530 const struct sock *sk,
531 const struct iphdr *iph,
533 u8 prot, u32 mark, int flow_flags)
536 const struct inet_sock *inet = inet_sk(sk);
538 oif = sk->sk_bound_dev_if;
540 tos = RT_CONN_FLAGS(sk);
541 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
543 flowi4_init_output(fl4, oif, mark, tos,
544 RT_SCOPE_UNIVERSE, prot,
546 iph->daddr, iph->saddr, 0, 0,
547 sock_net_uid(net, sk));
550 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
551 const struct sock *sk)
553 const struct net *net = dev_net(skb->dev);
554 const struct iphdr *iph = ip_hdr(skb);
555 int oif = skb->dev->ifindex;
556 u8 tos = RT_TOS(iph->tos);
557 u8 prot = iph->protocol;
558 u32 mark = skb->mark;
560 __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
563 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
565 const struct inet_sock *inet = inet_sk(sk);
566 const struct ip_options_rcu *inet_opt;
567 __be32 daddr = inet->inet_daddr;
570 inet_opt = rcu_dereference(inet->inet_opt);
571 if (inet_opt && inet_opt->opt.srr)
572 daddr = inet_opt->opt.faddr;
573 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
574 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
575 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
576 inet_sk_flowi_flags(sk),
577 daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
581 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
582 const struct sk_buff *skb)
585 build_skb_flow_key(fl4, skb, sk);
587 build_sk_flow_key(fl4, sk);
590 static DEFINE_SPINLOCK(fnhe_lock);
592 static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
596 rt = rcu_dereference(fnhe->fnhe_rth_input);
598 RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
599 dst_dev_put(&rt->dst);
600 dst_release(&rt->dst);
602 rt = rcu_dereference(fnhe->fnhe_rth_output);
604 RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
605 dst_dev_put(&rt->dst);
606 dst_release(&rt->dst);
610 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
612 struct fib_nh_exception *fnhe, *oldest;
614 oldest = rcu_dereference(hash->chain);
615 for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
616 fnhe = rcu_dereference(fnhe->fnhe_next)) {
617 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
620 fnhe_flush_routes(oldest);
624 static inline u32 fnhe_hashfun(__be32 daddr)
626 static u32 fnhe_hashrnd __read_mostly;
629 net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
630 hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
631 return hash_32(hval, FNHE_HASH_SHIFT);
634 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
636 rt->rt_pmtu = fnhe->fnhe_pmtu;
637 rt->dst.expires = fnhe->fnhe_expires;
640 rt->rt_flags |= RTCF_REDIRECTED;
641 rt->rt_gateway = fnhe->fnhe_gw;
642 rt->rt_uses_gateway = 1;
646 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
647 u32 pmtu, unsigned long expires)
649 struct fnhe_hash_bucket *hash;
650 struct fib_nh_exception *fnhe;
656 genid = fnhe_genid(dev_net(nh->nh_dev));
657 hval = fnhe_hashfun(daddr);
659 spin_lock_bh(&fnhe_lock);
661 hash = rcu_dereference(nh->nh_exceptions);
663 hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
666 rcu_assign_pointer(nh->nh_exceptions, hash);
672 for (fnhe = rcu_dereference(hash->chain); fnhe;
673 fnhe = rcu_dereference(fnhe->fnhe_next)) {
674 if (fnhe->fnhe_daddr == daddr)
680 if (fnhe->fnhe_genid != genid)
681 fnhe->fnhe_genid = genid;
685 fnhe->fnhe_pmtu = pmtu;
686 fnhe->fnhe_expires = max(1UL, expires);
687 /* Update all cached dsts too */
688 rt = rcu_dereference(fnhe->fnhe_rth_input);
690 fill_route_from_fnhe(rt, fnhe);
691 rt = rcu_dereference(fnhe->fnhe_rth_output);
693 fill_route_from_fnhe(rt, fnhe);
695 if (depth > FNHE_RECLAIM_DEPTH)
696 fnhe = fnhe_oldest(hash);
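		/* fnhe_oldest() flushes and returns the stalest entry so it
		 * can be reused in place, keeping each bucket's chain from
		 * growing beyond FNHE_RECLAIM_DEPTH.
		 */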
698 fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
702 fnhe->fnhe_next = hash->chain;
703 rcu_assign_pointer(hash->chain, fnhe);
705 fnhe->fnhe_genid = genid;
706 fnhe->fnhe_daddr = daddr;
708 fnhe->fnhe_pmtu = pmtu;
709 fnhe->fnhe_expires = expires;
711 /* Exception created; mark the cached routes for the nexthop
712 * stale, so anyone caching it rechecks if this exception
715 rt = rcu_dereference(nh->nh_rth_input);
717 rt->dst.obsolete = DST_OBSOLETE_KILL;
719 for_each_possible_cpu(i) {
720 struct rtable __rcu **prt;
721 prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
722 rt = rcu_dereference(*prt);
724 rt->dst.obsolete = DST_OBSOLETE_KILL;
728 fnhe->fnhe_stamp = jiffies;
731 spin_unlock_bh(&fnhe_lock);
734 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
737 __be32 new_gw = icmp_hdr(skb)->un.gateway;
738 __be32 old_gw = ip_hdr(skb)->saddr;
739 struct net_device *dev = skb->dev;
740 struct in_device *in_dev;
741 struct fib_result res;
745 switch (icmp_hdr(skb)->code & 7) {
747 case ICMP_REDIR_NETTOS:
748 case ICMP_REDIR_HOST:
749 case ICMP_REDIR_HOSTTOS:
756 if (rt->rt_gateway != old_gw)
759 in_dev = __in_dev_get_rcu(dev);
764 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
765 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
766 ipv4_is_zeronet(new_gw))
767 goto reject_redirect;
769 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
770 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
771 goto reject_redirect;
772 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
773 goto reject_redirect;
775 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
776 goto reject_redirect;
779 n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
781 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
783 if (!(n->nud_state & NUD_VALID)) {
784 neigh_event_send(n, NULL);
786 if (fib_lookup(net, fl4, &res, 0) == 0) {
787 struct fib_nh *nh = &FIB_RES_NH(res);
789 update_or_create_fnhe(nh, fl4->daddr, new_gw,
790 0, jiffies + ip_rt_gc_timeout);
793 rt->dst.obsolete = DST_OBSOLETE_KILL;
794 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
801 #ifdef CONFIG_IP_ROUTE_VERBOSE
802 if (IN_DEV_LOG_MARTIANS(in_dev)) {
803 const struct iphdr *iph = (const struct iphdr *) skb->data;
804 __be32 daddr = iph->daddr;
805 __be32 saddr = iph->saddr;
807 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
808 " Advised path = %pI4 -> %pI4\n",
809 &old_gw, dev->name, &new_gw,
816 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
820 const struct iphdr *iph = (const struct iphdr *) skb->data;
821 struct net *net = dev_net(skb->dev);
822 int oif = skb->dev->ifindex;
823 u8 tos = RT_TOS(iph->tos);
824 u8 prot = iph->protocol;
825 u32 mark = skb->mark;
827 rt = (struct rtable *) dst;
829 __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
830 __ip_do_redirect(rt, skb, &fl4, true);
833 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
835 struct rtable *rt = (struct rtable *)dst;
836 struct dst_entry *ret = dst;
839 if (dst->obsolete > 0) {
842 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
853 * 1. The first ip_rt_redirect_number redirects are sent
854 * with exponential backoff, then we stop sending them at all,
855 * assuming that the host ignores our redirects.
856 * 2. If we did not see packets requiring redirects
857 * during ip_rt_redirect_silence, we assume that the host
858 * forgot the redirected route, and we start sending redirects again.
860 * This algorithm is much cheaper and more intelligent than dumb load limiting
863 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
864 * and "frag. need" (breaks PMTU discovery) in icmp.c.
867 void ip_rt_send_redirect(struct sk_buff *skb)
869 struct rtable *rt = skb_rtable(skb);
870 struct in_device *in_dev;
871 struct inet_peer *peer;
877 in_dev = __in_dev_get_rcu(rt->dst.dev);
878 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
882 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
883 vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
886 net = dev_net(rt->dst.dev);
887 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
889 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
890 rt_nexthop(rt, ip_hdr(skb)->daddr));
894 /* No redirected packets during ip_rt_redirect_silence;
895 * reset the algorithm.
897 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
898 peer->rate_tokens = 0;
900 /* Too many ignored redirects; do not send anything,
901 * just set peer->rate_last to the last seen redirected packet.
903 if (peer->rate_tokens >= ip_rt_redirect_number) {
904 peer->rate_last = jiffies;
908 /* Check for load limit; set rate_last to the latest sent
911 if (peer->rate_tokens == 0 ||
914 (ip_rt_redirect_load << peer->rate_tokens)))) {
915 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
917 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
918 peer->rate_last = jiffies;
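		/* The gap required before the next redirect is
		 * ip_rt_redirect_load << rate_tokens, so each redirect sent
		 * doubles the back-off until ip_rt_redirect_number is hit.
		 */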
920 #ifdef CONFIG_IP_ROUTE_VERBOSE
922 peer->rate_tokens == ip_rt_redirect_number)
923 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
924 &ip_hdr(skb)->saddr, inet_iif(skb),
925 &ip_hdr(skb)->daddr, &gw);
932 static int ip_error(struct sk_buff *skb)
934 struct rtable *rt = skb_rtable(skb);
935 struct net_device *dev = skb->dev;
936 struct in_device *in_dev;
937 struct inet_peer *peer;
943 if (netif_is_l3_master(skb->dev)) {
944 dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
949 in_dev = __in_dev_get_rcu(dev);
951 /* IP on this device is disabled. */
955 net = dev_net(rt->dst.dev);
956 if (!IN_DEV_FORWARD(in_dev)) {
957 switch (rt->dst.error) {
959 __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
963 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
969 switch (rt->dst.error) {
974 code = ICMP_HOST_UNREACH;
977 code = ICMP_NET_UNREACH;
978 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
981 code = ICMP_PKT_FILTERED;
985 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
986 l3mdev_master_ifindex(skb->dev), 1);
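	/* Classic token bucket: tokens accrue one per jiffy, capped at
	 * ip_rt_error_burst, and each ICMP error sent spends
	 * ip_rt_error_cost tokens; with the defaults above that is roughly
	 * one error per second, with bursts of up to five.
	 */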
991 peer->rate_tokens += now - peer->rate_last;
992 if (peer->rate_tokens > ip_rt_error_burst)
993 peer->rate_tokens = ip_rt_error_burst;
994 peer->rate_last = now;
995 if (peer->rate_tokens >= ip_rt_error_cost)
996 peer->rate_tokens -= ip_rt_error_cost;
1002 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1004 out: kfree_skb(skb);
1008 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1010 struct dst_entry *dst = &rt->dst;
1011 struct fib_result res;
1013 if (dst_metric_locked(dst, RTAX_MTU))
1016 if (ipv4_mtu(dst) < mtu)
1019 if (mtu < ip_rt_min_pmtu)
1020 mtu = ip_rt_min_pmtu;
1022 if (rt->rt_pmtu == mtu &&
1023 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
1027 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
1028 struct fib_nh *nh = &FIB_RES_NH(res);
1030 update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
1031 jiffies + ip_rt_mtu_expires);
1036 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1037 struct sk_buff *skb, u32 mtu)
1039 struct rtable *rt = (struct rtable *) dst;
1042 ip_rt_build_flow_key(&fl4, sk, skb);
1043 __ip_rt_update_pmtu(rt, &fl4, mtu);
1046 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1047 int oif, u32 mark, u8 protocol, int flow_flags)
1049 const struct iphdr *iph = (const struct iphdr *) skb->data;
1054 mark = IP4_REPLY_MARK(net, skb->mark);
1056 __build_flow_key(net, &fl4, NULL, iph, oif,
1057 RT_TOS(iph->tos), protocol, mark, flow_flags);
1058 rt = __ip_route_output_key(net, &fl4);
1060 __ip_rt_update_pmtu(rt, &fl4, mtu);
1064 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1066 static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1068 const struct iphdr *iph = (const struct iphdr *) skb->data;
1072 __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1074 if (!fl4.flowi4_mark)
1075 fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1077 rt = __ip_route_output_key(sock_net(sk), &fl4);
1079 __ip_rt_update_pmtu(rt, &fl4, mtu);
1084 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1086 const struct iphdr *iph = (const struct iphdr *) skb->data;
1089 struct dst_entry *odst = NULL;
1091 struct net *net = sock_net(sk);
1095 if (!ip_sk_accept_pmtu(sk))
1098 odst = sk_dst_get(sk);
1100 if (sock_owned_by_user(sk) || !odst) {
1101 __ipv4_sk_update_pmtu(skb, sk, mtu);
1105 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1107 rt = (struct rtable *)odst;
1108 if (odst->obsolete && !odst->ops->check(odst, 0)) {
1109 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1116 __ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);
1118 if (!dst_check(&rt->dst, 0)) {
1120 dst_release(&rt->dst);
1122 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1130 sk_dst_set(sk, &rt->dst);
1136 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1138 void ipv4_redirect(struct sk_buff *skb, struct net *net,
1139 int oif, u32 mark, u8 protocol, int flow_flags)
1141 const struct iphdr *iph = (const struct iphdr *) skb->data;
1145 __build_flow_key(net, &fl4, NULL, iph, oif,
1146 RT_TOS(iph->tos), protocol, mark, flow_flags);
1147 rt = __ip_route_output_key(net, &fl4);
1149 __ip_do_redirect(rt, skb, &fl4, false);
1153 EXPORT_SYMBOL_GPL(ipv4_redirect);
1155 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1157 const struct iphdr *iph = (const struct iphdr *) skb->data;
1160 struct net *net = sock_net(sk);
1162 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1163 rt = __ip_route_output_key(net, &fl4);
1165 __ip_do_redirect(rt, skb, &fl4, false);
1169 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1171 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1173 struct rtable *rt = (struct rtable *) dst;
1175 /* All IPV4 dsts are created with ->obsolete set to the value
1176 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1177 * into this function always.
1179 * When a PMTU/redirect information update invalidates a route,
1180 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1181 * DST_OBSOLETE_DEAD by dst_free().
1183 if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1188 static void ipv4_link_failure(struct sk_buff *skb)
1192 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1194 rt = skb_rtable(skb);
1196 dst_set_expires(&rt->dst, 0);
1199 static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
1201 pr_debug("%s: %pI4 -> %pI4, %s\n",
1202 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1203 skb->dev ? skb->dev->name : "?");
1210 We do not cache the source address of the outgoing interface,
1211 because it is used only by the IP RR, TS and SRR options,
1212 so it stays out of the fast path.
1214 BTW remember: "addr" is allowed to be unaligned
1218 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1222 if (rt_is_output_route(rt))
1223 src = ip_hdr(skb)->saddr;
1225 struct fib_result res;
1231 memset(&fl4, 0, sizeof(fl4));
1232 fl4.daddr = iph->daddr;
1233 fl4.saddr = iph->saddr;
1234 fl4.flowi4_tos = RT_TOS(iph->tos);
1235 fl4.flowi4_oif = rt->dst.dev->ifindex;
1236 fl4.flowi4_iif = skb->dev->ifindex;
1237 fl4.flowi4_mark = skb->mark;
1240 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1241 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1243 src = inet_select_addr(rt->dst.dev,
1244 rt_nexthop(rt, iph->daddr),
1248 memcpy(addr, &src, 4);
1251 #ifdef CONFIG_IP_ROUTE_CLASSID
1252 static void set_class_tag(struct rtable *rt, u32 tag)
1254 if (!(rt->dst.tclassid & 0xFFFF))
1255 rt->dst.tclassid |= tag & 0xFFFF;
1256 if (!(rt->dst.tclassid & 0xFFFF0000))
1257 rt->dst.tclassid |= tag & 0xFFFF0000;
1261 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1263 unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
1264 unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
1267 return min(advmss, IPV4_MAX_PMTU - header_size);
1270 static unsigned int ipv4_mtu(const struct dst_entry *dst)
1272 const struct rtable *rt = (const struct rtable *) dst;
1273 unsigned int mtu = rt->rt_pmtu;
1275 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1276 mtu = dst_metric_raw(dst, RTAX_MTU);
1281 mtu = READ_ONCE(dst->dev->mtu);
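	/* When the MTU metric is locked, a route via a gateway falls back
	 * to 576 below: the minimum datagram size every IPv4 host must be
	 * able to accept (RFC 791).
	 */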
1283 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1284 if (rt->rt_uses_gateway && mtu > 576)
1288 mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
1290 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
1293 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1295 struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
1296 struct fib_nh_exception *fnhe;
1302 hval = fnhe_hashfun(daddr);
1304 for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1305 fnhe = rcu_dereference(fnhe->fnhe_next)) {
1306 if (fnhe->fnhe_daddr == daddr)
1312 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1313 __be32 daddr, const bool do_cache)
1317 spin_lock_bh(&fnhe_lock);
1319 if (daddr == fnhe->fnhe_daddr) {
1320 struct rtable __rcu **porig;
1321 struct rtable *orig;
1322 int genid = fnhe_genid(dev_net(rt->dst.dev));
1324 if (rt_is_input_route(rt))
1325 porig = &fnhe->fnhe_rth_input;
1327 porig = &fnhe->fnhe_rth_output;
1328 orig = rcu_dereference(*porig);
1330 if (fnhe->fnhe_genid != genid) {
1331 fnhe->fnhe_genid = genid;
1333 fnhe->fnhe_pmtu = 0;
1334 fnhe->fnhe_expires = 0;
1335 fnhe_flush_routes(fnhe);
1338 fill_route_from_fnhe(rt, fnhe);
1339 if (!rt->rt_gateway)
1340 rt->rt_gateway = daddr;
1344 rcu_assign_pointer(*porig, rt);
1346 dst_dev_put(&orig->dst);
1347 dst_release(&orig->dst);
1352 fnhe->fnhe_stamp = jiffies;
1354 spin_unlock_bh(&fnhe_lock);
1359 static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
1361 struct rtable *orig, *prev, **p;
1364 if (rt_is_input_route(rt)) {
1365 p = (struct rtable **)&nh->nh_rth_input;
1367 p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
1371 /* hold dst before doing cmpxchg() to avoid race condition
1375 prev = cmpxchg(p, orig, rt);
1378 dst_dev_put(&orig->dst);
1379 dst_release(&orig->dst);
1382 dst_release(&rt->dst);
1389 struct uncached_list {
1391 struct list_head head;
1394 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1396 void rt_add_uncached_list(struct rtable *rt)
1398 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1400 rt->rt_uncached_list = ul;
1402 spin_lock_bh(&ul->lock);
1403 list_add_tail(&rt->rt_uncached, &ul->head);
1404 spin_unlock_bh(&ul->lock);
1407 void rt_del_uncached_list(struct rtable *rt)
1409 if (!list_empty(&rt->rt_uncached)) {
1410 struct uncached_list *ul = rt->rt_uncached_list;
1412 spin_lock_bh(&ul->lock);
1413 list_del(&rt->rt_uncached);
1414 spin_unlock_bh(&ul->lock);
1418 static void ipv4_dst_destroy(struct dst_entry *dst)
1420 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1421 struct rtable *rt = (struct rtable *)dst;
1423 if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
1426 rt_del_uncached_list(rt);
1429 void rt_flush_dev(struct net_device *dev)
1431 struct net *net = dev_net(dev);
1435 for_each_possible_cpu(cpu) {
1436 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1438 spin_lock_bh(&ul->lock);
1439 list_for_each_entry(rt, &ul->head, rt_uncached) {
1440 if (rt->dst.dev != dev)
1442 rt->dst.dev = net->loopback_dev;
1443 dev_hold(rt->dst.dev);
1446 spin_unlock_bh(&ul->lock);
1450 static bool rt_cache_valid(const struct rtable *rt)
1453 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1457 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1458 const struct fib_result *res,
1459 struct fib_nh_exception *fnhe,
1460 struct fib_info *fi, u16 type, u32 itag,
1461 const bool do_cache)
1463 bool cached = false;
1466 struct fib_nh *nh = &FIB_RES_NH(*res);
1468 if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
1469 rt->rt_gateway = nh->nh_gw;
1470 rt->rt_uses_gateway = 1;
1472 dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
1473 if (fi->fib_metrics != &dst_default_metrics) {
1474 rt->dst._metrics |= DST_METRICS_REFCOUNTED;
1475 refcount_inc(&fi->fib_metrics->refcnt);
1477 #ifdef CONFIG_IP_ROUTE_CLASSID
1478 rt->dst.tclassid = nh->nh_tclassid;
1480 rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
1482 cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1484 cached = rt_cache_route(nh, rt);
1485 if (unlikely(!cached)) {
1486 /* Routes we intend to cache in nexthop exception or
1487 * FIB nexthop have the DST_NOCACHE bit clear.
1488 * However, if we are unsuccessful at storing this
1489 * route into the cache, we really need to set it.
1491 if (!rt->rt_gateway)
1492 rt->rt_gateway = daddr;
1493 rt_add_uncached_list(rt);
1496 rt_add_uncached_list(rt);
1498 #ifdef CONFIG_IP_ROUTE_CLASSID
1499 #ifdef CONFIG_IP_MULTIPLE_TABLES
1500 set_class_tag(rt, res->tclassid);
1502 set_class_tag(rt, itag);
1506 struct rtable *rt_dst_alloc(struct net_device *dev,
1507 unsigned int flags, u16 type,
1508 bool nopolicy, bool noxfrm, bool will_cache)
1512 rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1513 (will_cache ? 0 : DST_HOST) |
1514 (nopolicy ? DST_NOPOLICY : 0) |
1515 (noxfrm ? DST_NOXFRM : 0));
1518 rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1519 rt->rt_flags = flags;
1521 rt->rt_is_input = 0;
1525 rt->rt_uses_gateway = 0;
1526 rt->rt_table_id = 0;
1527 INIT_LIST_HEAD(&rt->rt_uncached);
1529 rt->dst.output = ip_output;
1530 if (flags & RTCF_LOCAL)
1531 rt->dst.input = ip_local_deliver;
1536 EXPORT_SYMBOL(rt_dst_alloc);
1538 /* called in rcu_read_lock() section */
1539 int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1540 u8 tos, struct net_device *dev,
1541 struct in_device *in_dev, u32 *itag)
1545 /* Primary sanity checks. */
1549 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1550 skb->protocol != htons(ETH_P_IP))
1553 if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1556 if (ipv4_is_zeronet(saddr)) {
1557 if (!ipv4_is_local_multicast(daddr))
1560 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1568 /* called in rcu_read_lock() section */
1569 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1570 u8 tos, struct net_device *dev, int our)
1572 struct in_device *in_dev = __in_dev_get_rcu(dev);
1573 unsigned int flags = RTCF_MULTICAST;
1578 err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
1583 flags |= RTCF_LOCAL;
1585 rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1586 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1590 #ifdef CONFIG_IP_ROUTE_CLASSID
1591 rth->dst.tclassid = itag;
1593 rth->dst.output = ip_rt_bug;
1594 rth->rt_is_input = 1;
1596 #ifdef CONFIG_IP_MROUTE
1597 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1598 rth->dst.input = ip_mr_input;
1600 RT_CACHE_STAT_INC(in_slow_mc);
1602 skb_dst_set(skb, &rth->dst);
1607 static void ip_handle_martian_source(struct net_device *dev,
1608 struct in_device *in_dev,
1609 struct sk_buff *skb,
1613 RT_CACHE_STAT_INC(in_martian_src);
1614 #ifdef CONFIG_IP_ROUTE_VERBOSE
1615 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1617 * RFC1812 recommendation: if the source is martian,
1618 * the only hint we have is the MAC header.
1620 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1621 &daddr, &saddr, dev->name);
1622 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1623 print_hex_dump(KERN_WARNING, "ll header: ",
1624 DUMP_PREFIX_OFFSET, 16, 1,
1625 skb_mac_header(skb),
1626 dev->hard_header_len, true);
1632 static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
1634 struct fnhe_hash_bucket *hash;
1635 struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1636 u32 hval = fnhe_hashfun(daddr);
1638 spin_lock_bh(&fnhe_lock);
1640 hash = rcu_dereference_protected(nh->nh_exceptions,
1641 lockdep_is_held(&fnhe_lock));
1644 fnhe_p = &hash->chain;
1645 fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1647 if (fnhe->fnhe_daddr == daddr) {
1648 rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1649 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1650 fnhe_flush_routes(fnhe);
1651 kfree_rcu(fnhe, rcu);
1654 fnhe_p = &fnhe->fnhe_next;
1655 fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1656 lockdep_is_held(&fnhe_lock));
1659 spin_unlock_bh(&fnhe_lock);
1662 static void set_lwt_redirect(struct rtable *rth)
1664 if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
1665 rth->dst.lwtstate->orig_output = rth->dst.output;
1666 rth->dst.output = lwtunnel_output;
1669 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
1670 rth->dst.lwtstate->orig_input = rth->dst.input;
1671 rth->dst.input = lwtunnel_input;
1675 /* called in rcu_read_lock() section */
1676 static int __mkroute_input(struct sk_buff *skb,
1677 const struct fib_result *res,
1678 struct in_device *in_dev,
1679 __be32 daddr, __be32 saddr, u32 tos)
1681 struct fib_nh_exception *fnhe;
1684 struct in_device *out_dev;
1688 /* get a working reference to the output device */
1689 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1691 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1695 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1696 in_dev->dev, in_dev, &itag);
1698 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1704 do_cache = res->fi && !itag;
1705 if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1706 skb->protocol == htons(ETH_P_IP) &&
1707 (IN_DEV_SHARED_MEDIA(out_dev) ||
1708 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1709 IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1711 if (skb->protocol != htons(ETH_P_IP)) {
1712 /* Not IP (i.e. ARP). Do not create a route if it is
1713 * invalid for proxy arp. DNAT routes are always valid.
1715 * The proxy arp feature has been extended to allow ARP
1716 * replies back on the same interface, to support
1717 * Private VLAN switch technologies. See arp.c.
1719 if (out_dev == in_dev &&
1720 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1726 fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1729 rth = rcu_dereference(fnhe->fnhe_rth_input);
1730 if (rth && rth->dst.expires &&
1731 time_after(jiffies, rth->dst.expires)) {
1732 ip_del_fnhe(&FIB_RES_NH(*res), daddr);
1739 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1742 if (rt_cache_valid(rth)) {
1743 skb_dst_set_noref(skb, &rth->dst);
1748 rth = rt_dst_alloc(out_dev->dev, 0, res->type,
1749 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1750 IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1756 rth->rt_is_input = 1;
1758 rth->rt_table_id = res->table->tb_id;
1759 RT_CACHE_STAT_INC(in_slow_tot);
1761 rth->dst.input = ip_forward;
1763 rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
1765 set_lwt_redirect(rth);
1766 skb_dst_set(skb, &rth->dst);
1773 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1774 /* To make ICMP packets follow the right flow, the multipath hash is
1775 * calculated from the inner IP addresses.
1777 static void ip_multipath_l3_keys(const struct sk_buff *skb,
1778 struct flow_keys *hash_keys)
1780 const struct iphdr *outer_iph = ip_hdr(skb);
1781 const struct iphdr *inner_iph;
1782 const struct icmphdr *icmph;
1783 struct iphdr _inner_iph;
1784 struct icmphdr _icmph;
1786 hash_keys->addrs.v4addrs.src = outer_iph->saddr;
1787 hash_keys->addrs.v4addrs.dst = outer_iph->daddr;
1788 if (likely(outer_iph->protocol != IPPROTO_ICMP))
1791 if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1794 icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1799 if (icmph->type != ICMP_DEST_UNREACH &&
1800 icmph->type != ICMP_REDIRECT &&
1801 icmph->type != ICMP_TIME_EXCEEDED &&
1802 icmph->type != ICMP_PARAMETERPROB)
1805 inner_iph = skb_header_pointer(skb,
1806 outer_iph->ihl * 4 + sizeof(_icmph),
1807 sizeof(_inner_iph), &_inner_iph);
1810 hash_keys->addrs.v4addrs.src = inner_iph->saddr;
1811 hash_keys->addrs.v4addrs.dst = inner_iph->daddr;
1814 /* if skb is set it will be used and fl4 can be NULL */
1815 int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
1816 const struct sk_buff *skb)
1818 struct net *net = fi->fib_net;
1819 struct flow_keys hash_keys;
1822 switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
1824 memset(&hash_keys, 0, sizeof(hash_keys));
1825 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1827 ip_multipath_l3_keys(skb, &hash_keys);
1829 hash_keys.addrs.v4addrs.src = fl4->saddr;
1830 hash_keys.addrs.v4addrs.dst = fl4->daddr;
1834 /* skb is currently provided only when forwarding */
1836 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
1837 struct flow_keys keys;
1839 /* short-circuit if we already have L4 hash present */
1841 return skb_get_hash_raw(skb) >> 1;
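		/* The >> 1 above keeps the multipath hash non-negative when
		 * callers store it in an int (see ip_mkroute_input()).
		 */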
1842 memset(&hash_keys, 0, sizeof(hash_keys));
1843 skb_flow_dissect_flow_keys(skb, &keys, flag);
1845 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1846 hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1847 hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1848 hash_keys.ports.src = keys.ports.src;
1849 hash_keys.ports.dst = keys.ports.dst;
1850 hash_keys.basic.ip_proto = keys.basic.ip_proto;
1852 memset(&hash_keys, 0, sizeof(hash_keys));
1853 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1854 hash_keys.addrs.v4addrs.src = fl4->saddr;
1855 hash_keys.addrs.v4addrs.dst = fl4->daddr;
1856 hash_keys.ports.src = fl4->fl4_sport;
1857 hash_keys.ports.dst = fl4->fl4_dport;
1858 hash_keys.basic.ip_proto = fl4->flowi4_proto;
1862 mhash = flow_hash_from_keys(&hash_keys);
1866 EXPORT_SYMBOL_GPL(fib_multipath_hash);
1867 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
1869 static int ip_mkroute_input(struct sk_buff *skb,
1870 struct fib_result *res,
1871 struct in_device *in_dev,
1872 __be32 daddr, __be32 saddr, u32 tos)
1874 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1875 if (res->fi && res->fi->fib_nhs > 1) {
1876 int h = fib_multipath_hash(res->fi, NULL, skb);
1878 fib_select_multipath(res, h);
1882 /* create a routing cache entry */
1883 return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1887 * NOTE. We drop all packets that have a local source
1888 * address, because every properly looped-back packet
1889 * must have the correct destination already attached by the output routine.
1891 * This approach solves two big problems:
1892 * 1. Non-simplex devices are handled properly.
1893 * 2. IP spoofing attempts are filtered with a 100% guarantee.
1894 * called with rcu_read_lock()
1897 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1898 u8 tos, struct net_device *dev,
1899 struct fib_result *res)
1901 struct in_device *in_dev = __in_dev_get_rcu(dev);
1902 struct ip_tunnel_info *tun_info;
1904 unsigned int flags = 0;
1908 struct net *net = dev_net(dev);
1911 /* IP on this device is disabled. */
1916 /* Check for the most weird martians, which cannot be detected
1920 tun_info = skb_tunnel_info(skb);
1921 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1922 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
1924 fl4.flowi4_tun_key.tun_id = 0;
1927 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1928 goto martian_source;
1932 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1935 /* Accept zero addresses only to limited broadcast;
1936 * I do not even know whether to fix it or not. Waiting for complaints :-)
1938 if (ipv4_is_zeronet(saddr))
1939 goto martian_source;
1941 if (ipv4_is_zeronet(daddr))
1942 goto martian_destination;
1944 /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
1945 * calling it at most once when daddr and/or saddr are loopback addresses
1947 if (ipv4_is_loopback(daddr)) {
1948 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1949 goto martian_destination;
1950 } else if (ipv4_is_loopback(saddr)) {
1951 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1952 goto martian_source;
1956 * Now we are ready to route packet.
1959 fl4.flowi4_iif = dev->ifindex;
1960 fl4.flowi4_mark = skb->mark;
1961 fl4.flowi4_tos = tos;
1962 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
1963 fl4.flowi4_flags = 0;
1966 fl4.flowi4_uid = sock_net_uid(net, NULL);
1967 err = fib_lookup(net, &fl4, res, 0);
1969 if (!IN_DEV_FORWARD(in_dev))
1970 err = -EHOSTUNREACH;
1974 if (res->type == RTN_BROADCAST)
1977 if (res->type == RTN_LOCAL) {
1978 err = fib_validate_source(skb, saddr, daddr, tos,
1979 0, dev, in_dev, &itag);
1981 goto martian_source;
1985 if (!IN_DEV_FORWARD(in_dev)) {
1986 err = -EHOSTUNREACH;
1989 if (res->type != RTN_UNICAST)
1990 goto martian_destination;
1992 err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1996 if (skb->protocol != htons(ETH_P_IP))
1999 if (!ipv4_is_zeronet(saddr)) {
2000 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2003 goto martian_source;
2005 flags |= RTCF_BROADCAST;
2006 res->type = RTN_BROADCAST;
2007 RT_CACHE_STAT_INC(in_brd);
2013 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
2014 if (rt_cache_valid(rth)) {
2015 skb_dst_set_noref(skb, &rth->dst);
2023 rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
2024 flags | RTCF_LOCAL, res->type,
2025 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
2029 rth->dst.output = ip_rt_bug;
2030 #ifdef CONFIG_IP_ROUTE_CLASSID
2031 rth->dst.tclassid = itag;
2033 rth->rt_is_input = 1;
2035 rth->rt_table_id = res->table->tb_id;
2037 RT_CACHE_STAT_INC(in_slow_tot);
2038 if (res->type == RTN_UNREACHABLE) {
2039 rth->dst.input = ip_error;
2040 rth->dst.error = -err;
2041 rth->rt_flags &= ~RTCF_LOCAL;
2045 struct fib_nh *nh = &FIB_RES_NH(*res);
2047 rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
2048 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2049 WARN_ON(rth->dst.input == lwtunnel_input);
2050 rth->dst.lwtstate->orig_input = rth->dst.input;
2051 rth->dst.input = lwtunnel_input;
2054 if (unlikely(!rt_cache_route(nh, rth)))
2055 rt_add_uncached_list(rth);
2057 skb_dst_set(skb, &rth->dst);
2062 RT_CACHE_STAT_INC(in_no_route);
2063 res->type = RTN_UNREACHABLE;
2069 * Do not cache martian addresses: they should be logged (RFC1812)
2071 martian_destination:
2072 RT_CACHE_STAT_INC(in_martian_dst);
2073 #ifdef CONFIG_IP_ROUTE_VERBOSE
2074 if (IN_DEV_LOG_MARTIANS(in_dev))
2075 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2076 &daddr, &saddr, dev->name);
2088 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2092 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2093 u8 tos, struct net_device *dev)
2095 struct fib_result res;
2098 tos &= IPTOS_RT_MASK;
2100 err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2105 EXPORT_SYMBOL(ip_route_input_noref);
2107 /* called with rcu_read_lock held */
2108 int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2109 u8 tos, struct net_device *dev, struct fib_result *res)
2111 /* Multicast recognition logic was moved from the route cache to here.
2112 The problem was that too many Ethernet cards have broken/missing
2113 hardware multicast filters :-( As a result, a host on a multicast
2114 network acquires a lot of useless route cache entries, e.g. for
2115 SDR messages from all over the world. Now we try to get rid of them.
2116 Really, provided the software IP multicast filter is organized
2117 reasonably (at least, hashed), it does not result in a slowdown
2118 compared with route cache reject entries.
2119 Note that multicast routers are not affected, because a
2120 route cache entry is created eventually.
2122 if (ipv4_is_multicast(daddr)) {
2123 struct in_device *in_dev = __in_dev_get_rcu(dev);
2128 our = ip_check_mc_rcu(in_dev, daddr, saddr,
2129 ip_hdr(skb)->protocol);
2131 /* check l3 master if no match yet */
2132 if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
2133 struct in_device *l3_in_dev;
2135 l3_in_dev = __in_dev_get_rcu(skb->dev);
2137 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2138 ip_hdr(skb)->protocol);
2142 #ifdef CONFIG_IP_MROUTE
2144 (!ipv4_is_local_multicast(daddr) &&
2145 IN_DEV_MFORWARD(in_dev))
2148 err = ip_route_input_mc(skb, daddr, saddr,
2154 return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2157 /* called with rcu_read_lock() */
2158 static struct rtable *__mkroute_output(const struct fib_result *res,
2159 const struct flowi4 *fl4, int orig_oif,
2160 struct net_device *dev_out,
2163 struct fib_info *fi = res->fi;
2164 struct fib_nh_exception *fnhe;
2165 struct in_device *in_dev;
2166 u16 type = res->type;
2170 in_dev = __in_dev_get_rcu(dev_out);
2172 return ERR_PTR(-EINVAL);
2174 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2175 if (ipv4_is_loopback(fl4->saddr) &&
2176 !(dev_out->flags & IFF_LOOPBACK) &&
2177 !netif_is_l3_master(dev_out))
2178 return ERR_PTR(-EINVAL);
2180 if (ipv4_is_lbcast(fl4->daddr))
2181 type = RTN_BROADCAST;
2182 else if (ipv4_is_multicast(fl4->daddr))
2183 type = RTN_MULTICAST;
2184 else if (ipv4_is_zeronet(fl4->daddr))
2185 return ERR_PTR(-EINVAL);
2187 if (dev_out->flags & IFF_LOOPBACK)
2188 flags |= RTCF_LOCAL;
2191 if (type == RTN_BROADCAST) {
2192 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2194 } else if (type == RTN_MULTICAST) {
2195 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2196 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2198 flags &= ~RTCF_LOCAL;
2201 /* If a multicast route does not exist, use
2202 * the default one, but do not use a gateway in this case.
2205 if (fi && res->prefixlen < 4)
2207 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2208 (orig_oif != dev_out->ifindex)) {
2209 /* For local routes that require a particular output interface
2210 * we do not want to cache the result. Caching the result
2211 * causes incorrect behaviour when there are multiple source
2212 * addresses on the interface, the end result being that if the
2213 * intended recipient is waiting on that interface for the
2214 * packet, they won't receive it because it will be delivered on
2215 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2216 * be set to the loopback interface as well.
2222 do_cache &= fi != NULL;
2224 struct rtable __rcu **prth;
2225 struct fib_nh *nh = &FIB_RES_NH(*res);
2227 fnhe = find_exception(nh, fl4->daddr);
2229 prth = &fnhe->fnhe_rth_output;
2230 rth = rcu_dereference(*prth);
2231 if (rth && rth->dst.expires &&
2232 time_after(jiffies, rth->dst.expires)) {
2233 ip_del_fnhe(nh, fl4->daddr);
2240 if (unlikely(fl4->flowi4_flags &
2241 FLOWI_FLAG_KNOWN_NH &&
2243 nh->nh_scope == RT_SCOPE_LINK))) {
2247 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2248 rth = rcu_dereference(*prth);
2251 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2256 rth = rt_dst_alloc(dev_out, flags, type,
2257 IN_DEV_CONF_GET(in_dev, NOPOLICY),
2258 IN_DEV_CONF_GET(in_dev, NOXFRM),
2261 return ERR_PTR(-ENOBUFS);
2263 rth->rt_iif = orig_oif;
2265 rth->rt_table_id = res->table->tb_id;
2267 RT_CACHE_STAT_INC(out_slow_tot);
2269 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2270 if (flags & RTCF_LOCAL &&
2271 !(dev_out->flags & IFF_LOOPBACK)) {
2272 rth->dst.output = ip_mc_output;
2273 RT_CACHE_STAT_INC(out_slow_mc);
2275 #ifdef CONFIG_IP_MROUTE
2276 if (type == RTN_MULTICAST) {
2277 if (IN_DEV_MFORWARD(in_dev) &&
2278 !ipv4_is_local_multicast(fl4->daddr)) {
2279 rth->dst.input = ip_mr_input;
2280 rth->dst.output = ip_mc_output;
2286 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2287 set_lwt_redirect(rth);
2293 * Major route resolver routine.
2296 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2297 const struct sk_buff *skb)
2299 __u8 tos = RT_FL_TOS(fl4);
2300 struct fib_result res;
2307 fl4->flowi4_iif = LOOPBACK_IFINDEX;
2308 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2309 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2310 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2313 rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2318 EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
2320 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2321 struct fib_result *res,
2322 const struct sk_buff *skb)
2324 struct net_device *dev_out = NULL;
2325 int orig_oif = fl4->flowi4_oif;
2326 unsigned int flags = 0;
2328 int err = -ENETUNREACH;
2331 rth = ERR_PTR(-EINVAL);
2332 if (ipv4_is_multicast(fl4->saddr) ||
2333 ipv4_is_lbcast(fl4->saddr) ||
2334 ipv4_is_zeronet(fl4->saddr))
2337 /* I removed the check for oif == dev_out->oif here.
2338 It was wrong for two reasons:
2339 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2340 is assigned to multiple interfaces.
2341 2. Moreover, we are allowed to send packets with the saddr
2342 of another iface. --ANK
2345 if (fl4->flowi4_oif == 0 &&
2346 (ipv4_is_multicast(fl4->daddr) ||
2347 ipv4_is_lbcast(fl4->daddr))) {
2348 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2349 dev_out = __ip_dev_find(net, fl4->saddr, false);
2353 /* Special hack: the user can direct multicasts
2354 and limited broadcast via the necessary interface
2355 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2356 This hack is not just for fun, it allows
2357 vic, vat and friends to work.
2358 They bind a socket to loopback, set ttl to zero
2359 and expect that it will work.
2360 From the viewpoint of the routing cache they are broken,
2361 because we are not allowed to build a multicast path
2362 with a loopback source addr (look, the routing cache
2363 cannot know that ttl is zero, so the packet
2364 will not leave this host and the route is valid).
2365 Luckily, this hack is a good workaround.
2368 fl4->flowi4_oif = dev_out->ifindex;
2372 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2373 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2374 if (!__ip_dev_find(net, fl4->saddr, false))
2380 if (fl4->flowi4_oif) {
2381 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2382 rth = ERR_PTR(-ENODEV);
2386 /* RACE: Check return value of inet_select_addr instead. */
2387 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2388 rth = ERR_PTR(-ENETUNREACH);
2391 if (ipv4_is_local_multicast(fl4->daddr) ||
2392 ipv4_is_lbcast(fl4->daddr) ||
2393 fl4->flowi4_proto == IPPROTO_IGMP) {
2395 fl4->saddr = inet_select_addr(dev_out, 0,
2400 if (ipv4_is_multicast(fl4->daddr))
2401 fl4->saddr = inet_select_addr(dev_out, 0,
2403 else if (!fl4->daddr)
2404 fl4->saddr = inet_select_addr(dev_out, 0,
2410 fl4->daddr = fl4->saddr;
2412 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2413 dev_out = net->loopback_dev;
2414 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2415 res->type = RTN_LOCAL;
2416 flags |= RTCF_LOCAL;
2420 err = fib_lookup(net, fl4, res, 0);
2424 if (fl4->flowi4_oif &&
2425 (ipv4_is_multicast(fl4->daddr) ||
2426 !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
2427 /* Apparently, the routing tables are wrong. Assume
2428 that the destination is on-link.
2431 We are allowed to send to an iface
2432 even if it has NO routes and NO assigned
2433 addresses. When oif is specified, the routing
2434 tables are looked up with only one purpose:
2435 to catch whether the destination is gatewayed, rather than
2436 direct. Moreover, if MSG_DONTROUTE is set,
2437 we send the packet, ignoring both the routing tables
2438 and the ifaddr state. --ANK
2441 We could do this even when oif is unknown,
2442 as IPv6 likely does, but we do not.
2445 if (fl4->saddr == 0)
2446 fl4->saddr = inet_select_addr(dev_out, 0,
2448 res->type = RTN_UNICAST;
2455 if (res->type == RTN_LOCAL) {
2457 if (res->fi->fib_prefsrc)
2458 fl4->saddr = res->fi->fib_prefsrc;
2460 fl4->saddr = fl4->daddr;
2463 /* L3 master device is the loopback for that domain */
2464 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2467 /* make sure orig_oif points to fib result device even
2468 * though packet rx/tx happens over loopback or l3mdev
2470 orig_oif = FIB_RES_OIF(*res);
2472 fl4->flowi4_oif = dev_out->ifindex;
2473 flags |= RTCF_LOCAL;
2477 fib_select_path(net, res, fl4, skb);
2479 dev_out = FIB_RES_DEV(*res);
2480 fl4->flowi4_oif = dev_out->ifindex;
2484 rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2490 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2495 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2497 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2499 return mtu ? : dst->dev->mtu;
2502 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2503 struct sk_buff *skb, u32 mtu)
2507 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2508 struct sk_buff *skb)
2512 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
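/* A blackhole dst absorbs traffic without side effects: PMTU updates,
 * redirects and metric copy-on-write are all deliberately no-ops, which
 * is why the handlers above do nothing.
 */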
2518 static struct dst_ops ipv4_dst_blackhole_ops = {
2520 .check = ipv4_blackhole_dst_check,
2521 .mtu = ipv4_blackhole_mtu,
2522 .default_advmss = ipv4_default_advmss,
2523 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2524 .redirect = ipv4_rt_blackhole_redirect,
2525 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
2526 .neigh_lookup = ipv4_neigh_lookup,
2529 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2531 struct rtable *ort = (struct rtable *) dst_orig;
2534 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2536 struct dst_entry *new = &rt->dst;
2539 new->input = dst_discard;
2540 new->output = dst_discard_out;
2542 new->dev = net->loopback_dev;
2546 rt->rt_is_input = ort->rt_is_input;
2547 rt->rt_iif = ort->rt_iif;
2548 rt->rt_pmtu = ort->rt_pmtu;
2550 rt->rt_genid = rt_genid_ipv4(net);
2551 rt->rt_flags = ort->rt_flags;
2552 rt->rt_type = ort->rt_type;
2553 rt->rt_gateway = ort->rt_gateway;
2554 rt->rt_uses_gateway = ort->rt_uses_gateway;
2556 INIT_LIST_HEAD(&rt->rt_uncached);
2559 dst_release(dst_orig);
2561 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
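/* One caller's note: the xfrm code uses ipv4_blackhole_route() to stand
 * in for a route whose IPsec state is still being resolved, so packets
 * are quietly discarded rather than sent out unprotected.
 */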
2564 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2565 const struct sock *sk)
2567 struct rtable *rt = __ip_route_output_key(net, flp4);
2572 if (flp4->flowi4_proto)
2573 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2574 flowi4_to_flowi(flp4), sk, 0);
2579 EXPORT_SYMBOL_GPL(ip_route_output_flow);
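/* Usage sketch (illustrative only; net, sk, daddr and saddr stand for
 * the caller's own variables, and error handling is trimmed):
 *
 *	struct flowi4 fl4 = {
 *		.daddr = daddr,
 *		.saddr = saddr,
 *		.flowi4_proto = IPPROTO_UDP,
 *	};
 *	struct rtable *rt = ip_route_output_flow(net, &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 */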
2581 /* called with rcu_read_lock held */
2582 static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
2583 struct flowi4 *fl4, struct sk_buff *skb, u32 portid, u32 seq)
2586 struct rtable *rt = skb_rtable(skb);
2588 struct nlmsghdr *nlh;
2589 unsigned long expires = 0;
2591 u32 metrics[RTAX_MAX];
2593 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
2597 r = nlmsg_data(nlh);
2598 r->rtm_family = AF_INET;
2599 r->rtm_dst_len = 32;
2601 r->rtm_tos = fl4->flowi4_tos;
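/* rtm_table is only eight bits wide, so table ids of 256 and above are
 * reported as RT_TABLE_COMPAT there; the RTA_TABLE attribute that
 * follows carries the full 32-bit id.
 */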
2602 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2603 if (nla_put_u32(skb, RTA_TABLE, table_id))
2604 goto nla_put_failure;
2605 r->rtm_type = rt->rt_type;
2606 r->rtm_scope = RT_SCOPE_UNIVERSE;
2607 r->rtm_protocol = RTPROT_UNSPEC;
2608 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2609 if (rt->rt_flags & RTCF_NOTIFY)
2610 r->rtm_flags |= RTM_F_NOTIFY;
2611 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2612 r->rtm_flags |= RTCF_DOREDIRECT;
2614 if (nla_put_in_addr(skb, RTA_DST, dst))
2615 goto nla_put_failure;
2617 r->rtm_src_len = 32;
2618 if (nla_put_in_addr(skb, RTA_SRC, src))
2619 goto nla_put_failure;
2622 if (rt->dst.dev && nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2623 goto nla_put_failure;
2624 #ifdef CONFIG_IP_ROUTE_CLASSID
2625 if (rt->dst.tclassid &&
2626 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2627 goto nla_put_failure;
2629 if (!rt_is_input_route(rt) &&
2630 fl4->saddr != src) {
2631 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2632 goto nla_put_failure;
2634 if (rt->rt_uses_gateway &&
2635 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
2636 goto nla_put_failure;
2638 expires = rt->dst.expires;
2640 unsigned long now = jiffies;
2642 if (time_before(now, expires))
2648 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
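/* The RTAX_* constants are 1-based, hence the "- 1" below when patching
 * the MTU slot with a learned PMTU that has not yet expired.
 */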
2649 if (rt->rt_pmtu && expires)
2650 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2651 if (rtnetlink_put_metrics(skb, metrics) < 0)
2652 goto nla_put_failure;
2654 if (fl4->flowi4_mark &&
2655 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2656 goto nla_put_failure;
2658 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2659 nla_put_u32(skb, RTA_UID,
2660 from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
2661 goto nla_put_failure;
2663 error = rt->dst.error;
2665 if (rt_is_input_route(rt)) {
2666 #ifdef CONFIG_IP_MROUTE
2667 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2668 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2669 int err = ipmr_get_route(net, skb,
2670 fl4->saddr, fl4->daddr, r, portid);
2676 goto nla_put_failure;
2680 if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
2681 goto nla_put_failure;
2684 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2685 goto nla_put_failure;
2687 nlmsg_end(skb, nlh);
2691 nlmsg_cancel(skb, nlh);
2695 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2696 struct netlink_ext_ack *extack)
2698 struct net *net = sock_net(in_skb->sk);
2700 struct nlattr *tb[RTA_MAX+1];
2701 struct fib_result res = {};
2702 struct rtable *rt = NULL;
2709 struct sk_buff *skb;
2710 u32 table_id = RT_TABLE_MAIN;
2713 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy,
2718 rtm = nlmsg_data(nlh);
2720 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2726 /* Reserve room for dummy headers; this skb can pass
2727 through a good chunk of the routing engine.
2729 skb_reset_mac_header(skb);
2730 skb_reset_network_header(skb);
2732 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
2733 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
2734 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2735 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2737 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
2739 uid = (iif ? INVALID_UID : current_uid());
2741 /* Bugfix: need to give ip_route_input enough of an IP header to satisfy it. */
2744 ip_hdr(skb)->protocol = IPPROTO_UDP;
2745 ip_hdr(skb)->saddr = src;
2746 ip_hdr(skb)->daddr = dst;
2748 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2750 memset(&fl4, 0, sizeof(fl4));
2753 fl4.flowi4_tos = rtm->rtm_tos;
2754 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2755 fl4.flowi4_mark = mark;
2756 fl4.flowi4_uid = uid;
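/* The same flow key drives both directions: with an input interface the
 * packet is replayed through ip_route_input_rcu() below, while otherwise
 * an output route is resolved from the key alone.
 */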
2761 struct net_device *dev;
2763 dev = dev_get_by_index_rcu(net, iif);
2769 skb->protocol = htons(ETH_P_IP);
2772 err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos, dev, &res);
2775 rt = skb_rtable(skb);
2776 if (err == 0 && rt->dst.error)
2777 err = -rt->dst.error;
2779 fl4.flowi4_iif = LOOPBACK_IFINDEX;
2780 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
2785 skb_dst_set(skb, &rt->dst);
2791 if (rtm->rtm_flags & RTM_F_NOTIFY)
2792 rt->rt_flags |= RTCF_NOTIFY;
2794 if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
2795 table_id = rt->rt_table_id;
2797 if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
2799 err = fib_props[res.type].error;
2801 err = -EHOSTUNREACH;
2804 err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
2805 nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
2806 rt->rt_type, res.prefix, res.prefixlen,
2807 fl4.flowi4_tos, res.fi, 0);
2809 err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
2810 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
2817 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2827 void ip_rt_multicast_event(struct in_device *in_dev)
2829 rt_cache_flush(dev_net(in_dev->dev));
2832 #ifdef CONFIG_SYSCTL
2833 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2834 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2835 static int ip_rt_gc_elasticity __read_mostly = 8;
2836 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
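/* Writing any value to /proc/sys/net/ipv4/route/flush invalidates the
 * cached routes, e.g. from the shell:
 *
 *	echo 1 > /proc/sys/net/ipv4/route/flush
 *
 * The handler below also bumps the fnhe genid, so cached next-hop
 * exceptions (learned PMTU, redirects) are dropped as well.
 */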
2838 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
2839 void __user *buffer,
2840 size_t *lenp, loff_t *ppos)
2842 struct net *net = (struct net *)__ctl->extra1;
2845 rt_cache_flush(net);
2846 fnhe_genid_bump(net);
2853 static struct ctl_table ipv4_route_table[] = {
2855 .procname = "gc_thresh",
2856 .data = &ipv4_dst_ops.gc_thresh,
2857 .maxlen = sizeof(int),
2859 .proc_handler = proc_dointvec,
2862 .procname = "max_size",
2863 .data = &ip_rt_max_size,
2864 .maxlen = sizeof(int),
2866 .proc_handler = proc_dointvec,
2869 /* Deprecated. Use gc_min_interval_ms */
2871 .procname = "gc_min_interval",
2872 .data = &ip_rt_gc_min_interval,
2873 .maxlen = sizeof(int),
2875 .proc_handler = proc_dointvec_jiffies,
2878 .procname = "gc_min_interval_ms",
2879 .data = &ip_rt_gc_min_interval,
2880 .maxlen = sizeof(int),
2882 .proc_handler = proc_dointvec_ms_jiffies,
2885 .procname = "gc_timeout",
2886 .data = &ip_rt_gc_timeout,
2887 .maxlen = sizeof(int),
2889 .proc_handler = proc_dointvec_jiffies,
2892 .procname = "gc_interval",
2893 .data = &ip_rt_gc_interval,
2894 .maxlen = sizeof(int),
2896 .proc_handler = proc_dointvec_jiffies,
2899 .procname = "redirect_load",
2900 .data = &ip_rt_redirect_load,
2901 .maxlen = sizeof(int),
2903 .proc_handler = proc_dointvec,
2906 .procname = "redirect_number",
2907 .data = &ip_rt_redirect_number,
2908 .maxlen = sizeof(int),
2910 .proc_handler = proc_dointvec,
2913 .procname = "redirect_silence",
2914 .data = &ip_rt_redirect_silence,
2915 .maxlen = sizeof(int),
2917 .proc_handler = proc_dointvec,
2920 .procname = "error_cost",
2921 .data = &ip_rt_error_cost,
2922 .maxlen = sizeof(int),
2924 .proc_handler = proc_dointvec,
2927 .procname = "error_burst",
2928 .data = &ip_rt_error_burst,
2929 .maxlen = sizeof(int),
2931 .proc_handler = proc_dointvec,
2934 .procname = "gc_elasticity",
2935 .data = &ip_rt_gc_elasticity,
2936 .maxlen = sizeof(int),
2938 .proc_handler = proc_dointvec,
2941 .procname = "mtu_expires",
2942 .data = &ip_rt_mtu_expires,
2943 .maxlen = sizeof(int),
2945 .proc_handler = proc_dointvec_jiffies,
2948 .procname = "min_pmtu",
2949 .data = &ip_rt_min_pmtu,
2950 .maxlen = sizeof(int),
2952 .proc_handler = proc_dointvec_minmax,
2953 .extra1 = &ip_min_valid_pmtu,
2956 .procname = "min_adv_mss",
2957 .data = &ip_rt_min_advmss,
2958 .maxlen = sizeof(int),
2960 .proc_handler = proc_dointvec,
2965 static struct ctl_table ipv4_route_flush_table[] = {
2967 .procname = "flush",
2968 .maxlen = sizeof(int),
2970 .proc_handler = ipv4_sysctl_rtcache_flush,
2975 static __net_init int sysctl_route_net_init(struct net *net)
2977 struct ctl_table *tbl;
2979 tbl = ipv4_route_flush_table;
2980 if (!net_eq(net, &init_net)) {
2981 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
2985 /* Don't export sysctls to unprivileged users */
2986 if (net->user_ns != &init_user_ns)
2987 tbl[0].procname = NULL;
2989 tbl[0].extra1 = net;
2991 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2992 if (!net->ipv4.route_hdr)
2997 if (tbl != ipv4_route_flush_table)
3003 static __net_exit void sysctl_route_net_exit(struct net *net)
3005 struct ctl_table *tbl;
3007 tbl = net->ipv4.route_hdr->ctl_table_arg;
3008 unregister_net_sysctl_table(net->ipv4.route_hdr);
3009 BUG_ON(tbl == ipv4_route_flush_table);
3013 static __net_initdata struct pernet_operations sysctl_route_ops = {
3014 .init = sysctl_route_net_init,
3015 .exit = sysctl_route_net_exit,
3019 static __net_init int rt_genid_init(struct net *net)
3021 atomic_set(&net->ipv4.rt_genid, 0);
3022 atomic_set(&net->fnhe_genid, 0);
3023 atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
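/* The generation counters above version the per-netns routing state:
 * bumping rt_genid lazily invalidates every cached dst, while
 * dev_addr_genid starts at a random value so it differs across boots.
 */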
3027 static __net_initdata struct pernet_operations rt_genid_ops = {
3028 .init = rt_genid_init,
3031 static int __net_init ipv4_inetpeer_init(struct net *net)
3033 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3037 inet_peer_base_init(bp);
3038 net->ipv4.peers = bp;
3042 static void __net_exit ipv4_inetpeer_exit(struct net *net)
3044 struct inet_peer_base *bp = net->ipv4.peers;
3046 net->ipv4.peers = NULL;
3047 inetpeer_invalidate_tree(bp);
3051 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3052 .init = ipv4_inetpeer_init,
3053 .exit = ipv4_inetpeer_exit,
3056 #ifdef CONFIG_IP_ROUTE_CLASSID
3057 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3058 #endif /* CONFIG_IP_ROUTE_CLASSID */
3060 int __init ip_rt_init(void)
3064 ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
3066 panic("IP: failed to allocate ip_idents\n");
3068 prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
3070 ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
3072 panic("IP: failed to allocate ip_tstamps\n");
3074 for_each_possible_cpu(cpu) {
3075 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3077 INIT_LIST_HEAD(&ul->head);
3078 spin_lock_init(&ul->lock);
3080 #ifdef CONFIG_IP_ROUTE_CLASSID
3081 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3083 panic("IP: failed to allocate ip_rt_acct\n");
3086 ipv4_dst_ops.kmem_cachep =
3087 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3088 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3090 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3092 if (dst_entries_init(&ipv4_dst_ops) < 0)
3093 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3095 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3096 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3098 ipv4_dst_ops.gc_thresh = ~0;
3099 ip_rt_max_size = INT_MAX;
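/* With the old hash-based routing cache gone, garbage collection is
 * effectively disabled here: gc_thresh is unreachable and max_size is
 * unlimited. The sysctl knobs remain for compatibility.
 */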
3104 if (ip_rt_proc_init())
3105 pr_err("Unable to create route proc files\n");
3110 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3111 RTNL_FLAG_DOIT_UNLOCKED);
3113 #ifdef CONFIG_SYSCTL
3114 register_pernet_subsys(&sysctl_route_ops);
3116 register_pernet_subsys(&rt_genid_ops);
3117 register_pernet_subsys(&ipv4_inetpeer_ops);
3121 #ifdef CONFIG_SYSCTL
3123 * We really need to sanitize the damn ipv4 init order; then all
3124 * this nonsense will go away.
3126 void __init ip_static_sysctl_init(void)
3128 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);