2 * ip_vs_xmit.c: various packet transmitters for IPVS
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
14 * Description of forwarding methods:
15 * - all transmitters are called from LOCAL_IN (remote clients) and
16 * LOCAL_OUT (local clients) but for ICMP can be called from FORWARD
17 * - not all connections have destination server, for example,
18 * connections in backup server when fwmark is used
19 * - bypass connections use daddr from packet
20 * - we can use dst without ref while sending in RCU section, we use
21 * ref when returning NF_ACCEPT for NAT-ed packet via loopback
23 * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
24 * - skb->pkt_type is not set yet
25 * - the only place where we can see skb->sk != NULL
28 #define KMSG_COMPONENT "IPVS"
29 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/tcp.h> /* for tcphdr */
36 #include <net/tcp.h> /* for csum_tcpudp_magic */
38 #include <net/icmp.h> /* for icmp_send */
39 #include <net/route.h> /* for ip_route_output */
41 #include <net/ip6_route.h>
42 #include <net/ip_tunnels.h>
43 #include <net/addrconf.h>
44 #include <linux/icmpv6.h>
45 #include <linux/netfilter.h>
46 #include <linux/netfilter_ipv4.h>
48 #include <net/ip_vs.h>
/* rt_mode flags for the route-lookup helpers below: they select which
 * destination kinds (local vs. non-local) are acceptable and how the
 * route is obtained/bound.  Values are a bitmask; callers OR them.
 */
51 IP_VS_RT_MODE_LOCAL = 1, /* Allow local dest */
52 IP_VS_RT_MODE_NON_LOCAL = 2, /* Allow non-local dest */
53 IP_VS_RT_MODE_RDR = 4, /* Allow redirect from remote daddr to
56 IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */
57 IP_VS_RT_MODE_KNOWN_NH = 16,/* Route via remote addr */
58 IP_VS_RT_MODE_TUNNEL = 32,/* Tunnel mode */
/* Allocate one ip_vs_dest_dst cache entry.  GFP_ATOMIC because the
 * visible caller (__ip_vs_get_out_rt) proceeds to take a spinlock and
 * may run in packet-processing context; returns NULL on failure.
 */
61 static inline struct ip_vs_dest_dst *ip_vs_dest_dst_alloc(void)
63 return kmalloc(sizeof(struct ip_vs_dest_dst), GFP_ATOMIC);
/* Release an ip_vs_dest_dst entry (counterpart of ip_vs_dest_dst_alloc).
 * NOTE(review): the body is elided in this excerpt; presumably kfree() —
 * confirm against the full source.
 */
66 static inline void ip_vs_dest_dst_free(struct ip_vs_dest_dst *dest_dst)
72 * Destination cache to speed up outgoing route lookup
/* Install @dest_dst (holding @dst and @dst_cookie) as the cached route
 * for @dest, publishing it with rcu_assign_pointer().  Must be called
 * with dest->dst_lock held (asserted via lockdep_is_held below).  The
 * previous entry, if any, is freed after a grace period via call_rcu so
 * concurrent RCU readers from __ip_vs_dst_check() stay safe.
 * NOTE(review): the NULL checks around dest_dst/old are elided in this
 * excerpt.
 */
75 __ip_vs_dst_set(struct ip_vs_dest *dest, struct ip_vs_dest_dst *dest_dst,
76 struct dst_entry *dst, u32 dst_cookie)
78 struct ip_vs_dest_dst *old;
80 old = rcu_dereference_protected(dest->dest_dst,
81 lockdep_is_held(&dest->dst_lock));
84 dest_dst->dst_cache = dst;
85 dest_dst->dst_cookie = dst_cookie;
87 rcu_assign_pointer(dest->dest_dst, dest_dst);
90 call_rcu(&old->rcu_head, ip_vs_dest_dst_rcu_free);
/* Return dest's cached ip_vs_dest_dst if its dst_entry is still valid
 * according to dst->ops->check() with the stored cookie; otherwise the
 * cache is stale.  Runs under the RCU read side (rcu_dereference).
 * NOTE(review): the NULL-return paths are elided in this excerpt.
 */
93 static inline struct ip_vs_dest_dst *
94 __ip_vs_dst_check(struct ip_vs_dest *dest)
96 struct ip_vs_dest_dst *dest_dst = rcu_dereference(dest->dest_dst);
97 struct dst_entry *dst;
101 dst = dest_dst->dst_cache;
103 dst->ops->check(dst, dest_dst->dst_cookie) == NULL)
/* Return true when the IPv6 skb cannot fit in @mtu: either conntrack
 * defragmentation recorded a largest-fragment size (frag_max_size)
 * above mtu, or the packet itself exceeds mtu and is not GSO (GSO
 * packets are segmented later and therefore exempt).
 */
109 __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
111 if (IP6CB(skb)->frag_max_size) {
112 /* frag_max_size tells us that this packet has been
113 * defragmented by the netfilter IPv6 conntrack module.
115 if (IP6CB(skb)->frag_max_size > mtu)
116 return true; /* largest fragment violates MTU */
118 else if (skb->len > mtu && !skb_is_gso(skb)) {
119 return true; /* Packet size violates MTU */
124 /* Get route to daddr, update *saddr, optionally bind route to saddr */
/* IPv4 output-route helper.  Builds a flowi4 for @daddr, honouring
 * IP_VS_RT_MODE_KNOWN_NH via FLOWI_FLAG_KNOWN_NH, and looks the route
 * up with ip_route_output_key().  On -EINVAL with a stale *saddr in
 * CONNECT mode it retries once with the saddr cleared; in CONNECT mode
 * with no saddr yet it re-resolves to bind the route to the source
 * address chosen by the first lookup.  Returns the rtable or an
 * ERR_PTR-style failure (error paths partly elided in this excerpt).
 */
125 static struct rtable *do_output_route4(struct net *net, __be32 daddr,
126 int rt_mode, __be32 *saddr)
132 memset(&fl4, 0, sizeof(fl4));
134 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
135 FLOWI_FLAG_KNOWN_NH : 0;
138 rt = ip_route_output_key(net, &fl4);
140 /* Invalid saddr ? */
141 if (PTR_ERR(rt) == -EINVAL && *saddr &&
142 rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
/* Retry once without the (possibly stale) source address. */
144 flowi4_update_output(&fl4, 0, 0, daddr, 0);
147 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
149 } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
/* Bind the route to the saddr picked by the first lookup. */
152 flowi4_update_output(&fl4, 0, 0, daddr, fl4.saddr);
160 #ifdef CONFIG_IP_VS_IPV6
/* True when the IPv6 route delivers to the local stack, detected by the
 * route's output device carrying IFF_LOOPBACK.
 */
161 static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
163 return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
/* Decide whether switching the skb to the newly looked-up route would
 * illegally cross the local/non-local boundary given @rt_mode:
 *  - a new local route requires IP_VS_RT_MODE_LOCAL, and redirecting a
 *    previously non-local skb to local additionally requires RDR;
 *  - a new non-local route requires IP_VS_RT_MODE_NON_LOCAL and must
 *    not originate from a loopback source address.
 * Handles both address families by inspecting the current packet
 * headers and old route.  (The true/false returns are elided in this
 * excerpt.)
 */
167 static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
169 bool new_rt_is_local)
171 bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL);
172 bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_NON_LOCAL);
173 bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR);
174 bool source_is_loopback;
175 bool old_rt_is_local;
177 #ifdef CONFIG_IP_VS_IPV6
178 if (skb_af == AF_INET6) {
179 int addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
/* IPv6: loopback source means loopback addr type on a loopback
 * (or absent) input device. */
182 (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
183 (addr_type & IPV6_ADDR_LOOPBACK);
184 old_rt_is_local = __ip_vs_is_local_route6(
185 (struct rt6_info *)skb_dst(skb));
/* IPv4 equivalents of the checks above. */
189 source_is_loopback = ipv4_is_loopback(ip_hdr(skb)->saddr);
190 old_rt_is_local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
193 if (unlikely(new_rt_is_local)) {
194 if (!rt_mode_allow_local)
196 if (!rt_mode_allow_redirect && !old_rt_is_local)
199 if (!rt_mode_allow_non_local)
201 if (source_is_loopback)
/* For locally generated traffic (no input device, full socket attached)
 * propagate the discovered @mtu to the skb's current route via
 * update_pmtu so the sending socket learns the path MTU.
 */
207 static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
209 struct sock *sk = skb->sk;
210 struct rtable *ort = skb_rtable(skb);
212 if (!skb->dev && sk && sk_fullsock(sk))
213 ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
/* Verify the packet fits in @mtu; when it does not, notify the sender
 * (ICMPV6_PKT_TOOBIG for IPv6, ICMP_FRAG_NEEDED for DF-set IPv4) and
 * fail.  ICMP-embedded packets and non-first fragments are not
 * notified; tunnel mode with PMTU discovery disabled skips the IPv4
 * check and lets the packet be fragmented.  (The bool returns are
 * elided in this excerpt.)
 */
216 static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
218 struct ip_vs_iphdr *ipvsh,
219 struct sk_buff *skb, int mtu)
221 #ifdef CONFIG_IP_VS_IPV6
222 if (skb_af == AF_INET6) {
223 struct net *net = ipvs->net;
225 if (unlikely(__mtu_check_toobig_v6(skb, mtu))) {
/* icmpv6_send needs a device for the source address. */
227 skb->dev = net->loopback_dev;
228 /* only send ICMP too big on first fragment */
229 if (!ipvsh->fragoffs && !ip_vs_iph_icmp(ipvsh))
230 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
231 IP_VS_DBG(1, "frag needed for %pI6c\n",
232 &ipv6_hdr(skb)->saddr);
238 /* If we're going to tunnel the packet and pmtu discovery
239 * is disabled, we'll just fragment it anyway
241 if ((rt_mode & IP_VS_RT_MODE_TUNNEL) && !sysctl_pmtu_disc(ipvs))
244 if (unlikely(ip_hdr(skb)->frag_off & htons(IP_DF) &&
245 skb->len > mtu && !skb_is_gso(skb) &&
246 !ip_vs_iph_icmp(ipvsh))) {
247 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
249 IP_VS_DBG(1, "frag needed for %pI4\n",
250 &ip_hdr(skb)->saddr);
/* Decrement the IP TTL / IPv6 hop limit before forwarding.  If it has
 * already reached 1 the packet dies here: the INHDRERRORS MIB counter
 * is bumped and an ICMP(v6) time-exceeded is sent back.  The header is
 * made writable first so cloned skbs are not modified in place.
 * (The true/false returns are elided in this excerpt.)
 */
258 static inline bool decrement_ttl(struct netns_ipvs *ipvs,
262 struct net *net = ipvs->net;
264 #ifdef CONFIG_IP_VS_IPV6
265 if (skb_af == AF_INET6) {
266 struct dst_entry *dst = skb_dst(skb);
268 /* check and decrement ttl */
269 if (ipv6_hdr(skb)->hop_limit <= 1) {
270 struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
272 /* Force OUTPUT device used as source address */
274 icmpv6_send(skb, ICMPV6_TIME_EXCEED,
275 ICMPV6_EXC_HOPLIMIT, 0);
276 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
281 /* don't propagate ttl change to cloned packets */
282 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
285 ipv6_hdr(skb)->hop_limit--;
289 if (ip_hdr(skb)->ttl <= 1) {
290 /* Tell the sender its packet died... */
291 __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
292 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
296 /* don't propagate ttl change to cloned packets */
297 if (!skb_make_writable(skb, sizeof(struct iphdr)))
/* ip_decrease_ttl also fixes the header checksum incrementally. */
301 ip_decrease_ttl(ip_hdr(skb));
307 /* Get route to destination or remote server */
/* Core IPv4 routing step for all transmitters.  When @dest is set, the
 * per-destination RCU route cache is consulted and (re)filled under
 * dest->dst_lock; otherwise a one-off lookup for @daddr is done without
 * CONNECT binding.  The result is validated against @rt_mode (local
 * boundary crossing), TTL is decremented, MTU is computed (minus outer
 * headers in tunnel mode, minus UDP+GUE for GUE tunnels) and checked,
 * and finally the skb's dst is replaced — by reference inside the RCU
 * section (noref), cloned when returning to local stack, or owned
 * otherwise.  Returns local(1)/non-local(0) or negative error; many
 * branch targets/labels are elided in this excerpt.
 */
309 __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
310 struct ip_vs_dest *dest,
311 __be32 daddr, int rt_mode, __be32 *ret_saddr,
312 struct ip_vs_iphdr *ipvsh)
314 struct net *net = ipvs->net;
315 struct ip_vs_dest_dst *dest_dst;
316 struct rtable *rt; /* Route to the other host */
318 int local, noref = 1;
321 dest_dst = __ip_vs_dst_check(dest);
322 if (likely(dest_dst))
323 rt = (struct rtable *) dest_dst->dst_cache;
/* Cache miss: allocate a new entry and fill it under the lock. */
325 dest_dst = ip_vs_dest_dst_alloc();
326 spin_lock_bh(&dest->dst_lock);
328 __ip_vs_dst_set(dest, NULL, NULL, 0);
329 spin_unlock_bh(&dest->dst_lock);
332 rt = do_output_route4(net, dest->addr.ip, rt_mode,
333 &dest_dst->dst_saddr.ip);
/* Route lookup failed: drop the half-built cache entry. */
335 __ip_vs_dst_set(dest, NULL, NULL, 0);
336 spin_unlock_bh(&dest->dst_lock);
337 ip_vs_dest_dst_free(dest_dst);
340 __ip_vs_dst_set(dest, dest_dst, &rt->dst, 0);
341 spin_unlock_bh(&dest->dst_lock);
342 IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d\n",
343 &dest->addr.ip, &dest_dst->dst_saddr.ip,
344 atomic_read(&rt->dst.__refcnt));
347 *ret_saddr = dest_dst->dst_saddr.ip;
/* No dest (e.g. bypass): uncached one-off lookup. */
349 __be32 saddr = htonl(INADDR_ANY);
353 /* For such unconfigured boxes avoid many route lookups
354 * for performance reasons because we do not remember saddr
356 rt_mode &= ~IP_VS_RT_MODE_CONNECT;
357 rt = do_output_route4(net, daddr, rt_mode, &saddr);
364 local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0;
365 if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
367 IP_VS_DBG_RL("We are crossing local and non-local addresses"
368 " daddr=%pI4\n", &daddr);
372 if (unlikely(local)) {
373 /* skb to local stack, preserve old route */
379 if (!decrement_ttl(ipvs, skb_af, skb))
382 if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL))) {
383 mtu = dst_mtu(&rt->dst);
/* Tunnel mode: leave room for the outer IPv4 header. */
385 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
388 if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
389 mtu -= sizeof(struct udphdr) + sizeof(struct guehdr);
391 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
394 maybe_update_pmtu(skb_af, skb, mtu);
397 if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
/* Attach the new route: noref within RCU, clone for local. */
403 skb_dst_set_noref(skb, &rt->dst);
405 skb_dst_set(skb, dst_clone(&rt->dst));
407 skb_dst_set(skb, &rt->dst);
417 dst_link_failure(skb);
421 #ifdef CONFIG_IP_VS_IPV6
/* IPv6 output-route lookup for @daddr.  Honours IP_VS_RT_MODE_KNOWN_NH,
 * fills in a source address via ipv6_dev_get_saddr() when the flow has
 * none, optionally passes the dst through xfrm_lookup() (@do_xfrm),
 * and reports the chosen source in *ret_saddr.  Returns the dst_entry
 * or NULL-ish failure (error paths elided in this excerpt).
 */
422 static struct dst_entry *
423 __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
424 struct in6_addr *ret_saddr, int do_xfrm, int rt_mode)
426 struct dst_entry *dst;
427 struct flowi6 fl6 = {
431 if (rt_mode & IP_VS_RT_MODE_KNOWN_NH)
432 fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
434 dst = ip6_route_output(net, NULL, &fl6);
439 if (ipv6_addr_any(&fl6.saddr) &&
440 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
441 &fl6.daddr, 0, &fl6.saddr) < 0)
444 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
450 *ret_saddr = fl6.saddr;
455 IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
460 * Get route to destination or remote server
/* IPv6 twin of __ip_vs_get_out_rt(): consult/refill the per-dest RCU
 * route cache (storing the rt6 cookie for later validation), or do an
 * uncached lookup when @dest is NULL; then enforce @rt_mode locality,
 * decrement the hop limit, compute and verify the MTU (reserving outer
 * IPv6 + optional UDP/GUE headers in tunnel mode, floor IPV6_MIN_MTU)
 * and attach the route to the skb (noref / clone-for-local / owned).
 * On failure skb->dev is set from the dst so ip6_link_failure() can
 * find the netns.  Several labels and returns are elided in this
 * excerpt.
 */
463 __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
464 struct ip_vs_dest *dest,
465 struct in6_addr *daddr, struct in6_addr *ret_saddr,
466 struct ip_vs_iphdr *ipvsh, int do_xfrm, int rt_mode)
468 struct net *net = ipvs->net;
469 struct ip_vs_dest_dst *dest_dst;
470 struct rt6_info *rt; /* Route to the other host */
471 struct dst_entry *dst;
473 int local, noref = 1;
476 dest_dst = __ip_vs_dst_check(dest);
477 if (likely(dest_dst))
478 rt = (struct rt6_info *) dest_dst->dst_cache;
/* Cache miss: build a fresh entry under dest->dst_lock. */
482 dest_dst = ip_vs_dest_dst_alloc();
483 spin_lock_bh(&dest->dst_lock);
485 __ip_vs_dst_set(dest, NULL, NULL, 0);
486 spin_unlock_bh(&dest->dst_lock);
489 dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
490 &dest_dst->dst_saddr.in6,
493 __ip_vs_dst_set(dest, NULL, NULL, 0);
494 spin_unlock_bh(&dest->dst_lock);
495 ip_vs_dest_dst_free(dest_dst);
498 rt = (struct rt6_info *) dst;
/* Save the cookie so __ip_vs_dst_check can spot stale routes. */
499 cookie = rt6_get_cookie(rt);
500 __ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
501 spin_unlock_bh(&dest->dst_lock);
502 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
503 &dest->addr.in6, &dest_dst->dst_saddr.in6,
504 atomic_read(&rt->dst.__refcnt));
507 *ret_saddr = dest_dst->dst_saddr.in6;
510 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm,
514 rt = (struct rt6_info *) dst;
517 local = __ip_vs_is_local_route6(rt);
519 if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
521 IP_VS_DBG_RL("We are crossing local and non-local addresses"
522 " daddr=%pI6\n", daddr);
526 if (unlikely(local)) {
527 /* skb to local stack, preserve old route */
529 dst_release(&rt->dst);
533 if (!decrement_ttl(ipvs, skb_af, skb))
537 if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL)))
538 mtu = dst_mtu(&rt->dst);
/* Tunnel mode: leave room for the outer IPv6 header. */
540 mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
543 if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
544 mtu -= sizeof(struct udphdr) + sizeof(struct guehdr);
545 if (mtu < IPV6_MIN_MTU) {
546 IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
550 maybe_update_pmtu(skb_af, skb, mtu);
553 if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
/* Attach the new route: noref within RCU, clone for local. */
559 skb_dst_set_noref(skb, &rt->dst);
561 skb_dst_set(skb, dst_clone(&rt->dst));
563 skb_dst_set(skb, &rt->dst);
569 dst_release(&rt->dst);
573 /* The ip6_link_failure function requires the dev field to be set
574 * in order to get the net (further for the sake of fwmark
578 skb->dev = skb_dst(skb)->dev;
580 dst_link_failure(skb);
586 /* return NF_ACCEPT to allow forwarding or other NF_xxx on error */
/* Pre-send step for tunneled skbs: mark the skb as IPVS-owned, confirm
 * the conntrack entry when the connection uses netfilter conntrack, and
 * on NF_ACCEPT prepare checksum state for forwarding.  The caller
 * inspects the returned verdict before actually transmitting.
 */
587 static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
588 struct ip_vs_conn *cp)
592 skb->ipvs_property = 1;
593 if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
594 ret = ip_vs_confirm_conntrack(skb);
595 if (ret == NF_ACCEPT) {
597 skb_forward_csum(skb);
602 /* In the event of a remote destination, it's possible that we would have
603 * matches against an old socket (particularly a TIME-WAIT socket). This
604 * causes havoc down the line (ip_local_out et. al. expect regular sockets
605 * and invalid memory accesses will happen) so simply drop the association
/* Drop a stale early-demux socket association from the skb.  Body is
 * elided in this excerpt; per the comment below it only acts on packets
 * that arrived via LOCAL_IN (skb->dev set), not on local TCP sockets.
 */
608 static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
610 /* If dev is set, the packet came from the LOCAL_IN callback and
611 * not from a local TCP socket.
617 /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
/* Send a NAT-mangled packet, or hand it back to the stack when the
 * destination is local.  Updates conntrack for NAT when the connection
 * is conntrack-backed, drops any stale early-demux socket unless the
 * packet is bound for the exact same local port/address after
 * translation, and finally re-injects through NF_INET_LOCAL_OUT ->
 * dst_output.  (Return statements are elided in this excerpt.)
 */
618 static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
619 struct ip_vs_conn *cp, int local)
623 skb->ipvs_property = 1;
624 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
627 ip_vs_update_conntrack(skb, cp, 1);
629 /* Remove the early_demux association unless it's bound for the
630 * exact same port and address on this host after translation.
632 if (!local || cp->vport != cp->dport ||
633 !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
634 ip_vs_drop_early_demux_sk(skb);
637 skb_forward_csum(skb);
638 NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
639 NULL, skb_dst(skb)->dev, dst_output);
646 /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
/* Like ip_vs_nat_send_or_cont() but for un-mangled packets: no
 * conntrack update, the early-demux socket is always dropped, then the
 * packet is re-injected through NF_INET_LOCAL_OUT -> dst_output.
 * (Return statements are elided in this excerpt.)
 */
647 static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
648 struct ip_vs_conn *cp, int local)
652 skb->ipvs_property = 1;
653 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
656 ip_vs_drop_early_demux_sk(skb);
657 skb_forward_csum(skb);
658 NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
659 NULL, skb_dst(skb)->dev, dst_output);
667 * NULL transmitter (do nothing except return NF_ACCEPT)
/* LOCALNODE transmitter: the destination is this host, so just let the
 * packet continue up the stack (local=1 path of ip_vs_send_or_cont).
 */
670 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
671 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
673 /* we do not touch skb and do not need pskb ptr */
674 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
680 * Let packets bypass the destination when the destination is not
681 * available, it may be only used in transparent cache cluster.
/* Bypass transmitter (IPv4): route by the packet's own daddr (no dest,
 * non-local only) and send it out unchanged.
 */
684 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
685 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
687 struct iphdr *iph = ip_hdr(skb);
691 if (__ip_vs_get_out_rt(cp->ipvs, cp->af, skb, NULL, iph->daddr,
692 IP_VS_RT_MODE_NON_LOCAL, NULL, ipvsh) < 0)
697 /* Another hack: avoid icmp_send in ip_fragment */
700 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
711 #ifdef CONFIG_IP_VS_IPV6
/* Bypass transmitter (IPv6): route by the packet's own daddr (no dest,
 * non-local only, no xfrm) and send it out unchanged.
 */
713 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
714 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
716 struct ipv6hdr *iph = ipv6_hdr(skb);
720 if (__ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, NULL,
722 ipvsh, 0, IP_VS_RT_MODE_NON_LOCAL) < 0)
725 /* Another hack: avoid icmp_send in ip_fragment */
728 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
741 * NAT transmitter (only for outside-to-inside nat forwarding)
742 * Not used for related ICMP
/* VS/NAT transmitter (IPv4).  Fills in a late-learned client port for
 * no-cport connections, routes to the real server (local, non-local or
 * redirect allowed), refuses DNAT to a local address for sync-ed
 * conntrack'd connections and DNAT-to-loopback from the outside world,
 * then rewrites daddr via the protocol dnat_handler and re-sends.
 * Error labels and returns are elided in this excerpt.
 */
745 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
746 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
748 struct rtable *rt; /* Route to the other host */
749 int local, rc, was_input;
753 /* check if it is a connection of no-client-port */
754 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
757 p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
760 ip_vs_conn_fill_cport(cp, *p);
761 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
764 was_input = rt_is_input_route(skb_rtable(skb));
765 local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
766 IP_VS_RT_MODE_LOCAL |
767 IP_VS_RT_MODE_NON_LOCAL |
768 IP_VS_RT_MODE_RDR, NULL, ipvsh);
771 rt = skb_rtable(skb);
773 * Avoid duplicate tuple in reply direction for NAT traffic
774 * to local address when connection is sync-ed
776 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
777 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
778 enum ip_conntrack_info ctinfo;
779 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
782 IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, ipvsh->off,
784 "stopping DNAT to local address");
790 /* From world but DNAT to loopback address? */
791 if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) {
792 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, ipvsh->off,
793 "ip_vs_nat_xmit(): stopping DNAT to loopback "
798 /* copy-on-write the packet before mangling it */
799 if (!skb_make_writable(skb, sizeof(struct iphdr)))
802 if (skb_cow(skb, rt->dst.dev->hard_header_len))
805 /* mangle the packet */
806 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
808 ip_hdr(skb)->daddr = cp->daddr.ip;
/* daddr changed, so recompute the IPv4 header checksum. */
809 ip_send_check(ip_hdr(skb));
811 IP_VS_DBG_PKT(10, AF_INET, pp, skb, ipvsh->off, "After DNAT");
813 /* FIXME: when application helper enlarges the packet and the length
814 is larger than the MTU of outgoing device, there will be still
817 /* Another hack: avoid icmp_send in ip_fragment */
820 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
831 #ifdef CONFIG_IP_VS_IPV6
/* VS/NAT transmitter (IPv6); mirrors ip_vs_nat_xmit().  Skips the
 * no-cport fixup on non-first fragments, detects loopback DNAT via the
 * address type rather than route flags, and the v6 header needs no
 * checksum update after rewriting daddr.  Error labels and returns are
 * elided in this excerpt.
 */
833 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
834 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
836 struct rt6_info *rt; /* Route to the other host */
841 /* check if it is a connection of no-client-port */
842 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !ipvsh->fragoffs)) {
844 p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
847 ip_vs_conn_fill_cport(cp, *p);
848 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
851 local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
854 IP_VS_RT_MODE_LOCAL |
855 IP_VS_RT_MODE_NON_LOCAL |
859 rt = (struct rt6_info *) skb_dst(skb);
861 * Avoid duplicate tuple in reply direction for NAT traffic
862 * to local address when connection is sync-ed
864 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
865 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
866 enum ip_conntrack_info ctinfo;
867 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
870 IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, ipvsh->off,
871 "ip_vs_nat_xmit_v6(): "
872 "stopping DNAT to local address");
878 /* From world but DNAT to loopback address? */
879 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
880 ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
881 IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, ipvsh->off,
882 "ip_vs_nat_xmit_v6(): "
883 "stopping DNAT to loopback address");
887 /* copy-on-write the packet before mangling it */
888 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
891 if (skb_cow(skb, rt->dst.dev->hard_header_len))
894 /* mangle the packet */
895 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
897 ipv6_hdr(skb)->daddr = cp->daddr.in6;
899 IP_VS_DBG_PKT(10, AF_INET6, pp, skb, ipvsh->off, "After DNAT");
901 /* FIXME: when application helper enlarges the packet and the length
902 is larger than the MTU of outgoing device, there will be still
905 /* Another hack: avoid icmp_send in ip_fragment */
908 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
920 /* When forwarding a packet, we must ensure that we've got enough headroom
921 * for the encapsulation packet in the skb. This also gives us an
922 * opportunity to figure out what the payload_len, dsfield, ttl, and df
923 * values should be, so that we won't need to look at the old ip header
/* Reallocate headroom for encapsulation when needed (preserving skb
 * ownership) and extract the inner header fields the caller must copy
 * into the outer header: next protocol (IPPROTO_IPV6/IPPROTO_IPIP),
 * payload length, DS field (ECN-encapsulated), TTL/hop limit and the
 * IPv4 DF bit.  Returns the (possibly new) skb, or ERR_PTR(-ENOMEM).
 * Some assignments/frees are elided in this excerpt.
 */
926 static struct sk_buff *
927 ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
928 unsigned int max_headroom, __u8 *next_protocol,
929 __u32 *payload_len, __u8 *dsfield, __u8 *ttl,
932 struct sk_buff *new_skb = NULL;
933 struct iphdr *old_iph = NULL;
935 #ifdef CONFIG_IP_VS_IPV6
936 struct ipv6hdr *old_ipv6h = NULL;
939 ip_vs_drop_early_demux_sk(skb);
941 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
942 new_skb = skb_realloc_headroom(skb, max_headroom);
/* Keep socket accounting correct on the reallocated copy. */
946 skb_set_owner_w(new_skb, skb->sk);
951 #ifdef CONFIG_IP_VS_IPV6
952 if (skb_af == AF_INET6) {
953 old_ipv6h = ipv6_hdr(skb);
954 *next_protocol = IPPROTO_IPV6;
957 ntohs(old_ipv6h->payload_len) +
959 old_dsfield = ipv6_get_dsfield(old_ipv6h);
960 *ttl = old_ipv6h->hop_limit;
966 old_iph = ip_hdr(skb);
967 /* Copy DF, reset fragment offset and MF */
969 *df = (old_iph->frag_off & htons(IP_DF));
970 *next_protocol = IPPROTO_IPIP;
972 /* fix old IP header checksum */
973 ip_send_check(old_iph);
974 old_dsfield = ipv4_get_dsfield(old_iph);
977 *payload_len = ntohs(old_iph->tot_len);
980 /* Implement full-functionality option for ECN encapsulation */
981 *dsfield = INET_ECN_encapsulate(old_dsfield, old_dsfield);
986 return ERR_PTR(-ENOMEM);
/* Pick the GSO tunnel-type bit for the encapsulation: SKB_GSO_IPXIP4
 * or SKB_GSO_IPXIP6 depending on outer/inner address families.
 * (The selecting conditions are elided in this excerpt.)
 */
989 static inline int __tun_gso_type_mask(int encaps_af, int orig_af)
993 return SKB_GSO_IPXIP4;
995 return SKB_GSO_IPXIP6;
/* Push a GUE header followed by a UDP header in front of the payload.
 * The UDP source port is flow-hashed (udp_flow_src_port), destination
 * is the configured tun_port; *next_protocol is recorded in the GUE
 * proto_ctype field and then changed to IPPROTO_UDP for the outer
 * header the caller builds.  Some field assignments are elided in this
 * excerpt.
 */
1002 ipvs_gue_encap(struct net *net, struct sk_buff *skb,
1003 struct ip_vs_conn *cp, __u8 *next_protocol)
1006 __be16 sport = udp_flow_src_port(net, skb, 0, 0, false);
1007 struct udphdr *udph; /* Our new UDP header */
1008 struct guehdr *gueh; /* Our new GUE header */
1010 skb_push(skb, sizeof(struct guehdr));
1012 gueh = (struct guehdr *)skb->data;
1018 gueh->proto_ctype = *next_protocol;
1020 skb_push(skb, sizeof(struct udphdr));
1021 skb_reset_transport_header(skb);
1023 udph = udp_hdr(skb);
1025 dport = cp->dest->tun_port;
1027 udph->source = sport;
1028 udph->len = htons(skb->len);
1031 *next_protocol = IPPROTO_UDP;
1037 * IP Tunneling transmitter
1039 * This function encapsulates the packet in a new IP packet, its
1040 * destination will be set to cp->daddr. Most code of this function
1041 * is taken from ipip.c.
1043 * It is used in VS/TUN cluster. The load balancer selects a real
1044 * server from a cluster based on a scheduling algorithm,
1045 * encapsulates the request packet and forwards it to the selected
1046 * server. For example, all real servers are configured with
1047 * "ifconfig tunl0 <Virtual IP Address> up". When the server receives
1048 * the encapsulated packet, it will decapsulate the packet, process
1049 * the request and return the response packets directly to the client
1050 * without passing the load balancer. This can greatly increase the
1051 * scalability of virtual server.
1053 * Used for ANY protocol
/* VS/TUN transmitter, IPv4 outer header.  Route to the real server in
 * tunnel+connect mode; a local destination short-circuits through
 * ip_vs_send_or_cont().  Otherwise reserve headroom (plus UDP+GUE for
 * GUE tunnels), set GSO tunnel offload bits, optionally GUE-encap,
 * build the outer IPv4 header and emit via ip_local_out().  Error
 * labels and several header-field stores are elided in this excerpt.
 */
1056 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1057 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
1059 struct netns_ipvs *ipvs = cp->ipvs;
1060 struct net *net = ipvs->net;
1061 struct rtable *rt; /* Route to the other host */
1062 __be32 saddr; /* Source for tunnel */
1063 struct net_device *tdev; /* Device to other host */
1064 __u8 next_protocol = 0;
1069 struct iphdr *iph; /* Our new IP header */
1070 unsigned int max_headroom; /* The extra header space needed */
1072 int tun_type, gso_type;
1076 local = __ip_vs_get_out_rt(ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
1077 IP_VS_RT_MODE_LOCAL |
1078 IP_VS_RT_MODE_NON_LOCAL |
1079 IP_VS_RT_MODE_CONNECT |
1080 IP_VS_RT_MODE_TUNNEL, &saddr, ipvsh);
/* Local destination: no encapsulation needed, go up the stack. */
1084 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
1086 rt = skb_rtable(skb);
1090 * Okay, now see if we can stuff it in the buffer as-is.
1092 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
1094 tun_type = cp->dest->tun_type;
1096 if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
1097 max_headroom += sizeof(struct udphdr) + sizeof(struct guehdr);
1099 /* We only care about the df field if sysctl_pmtu_disc(ipvs) is set */
1100 dfp = sysctl_pmtu_disc(ipvs) ? &df : NULL;
1101 skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
1102 &next_protocol, NULL, &dsfield,
1107 gso_type = __tun_gso_type_mask(AF_INET, cp->af);
1108 if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
1109 gso_type |= SKB_GSO_UDP_TUNNEL;
1111 if (iptunnel_handle_offloads(skb, gso_type))
1114 skb->transport_header = skb->network_header;
1116 skb_set_inner_ipproto(skb, next_protocol);
1118 if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
1119 ipvs_gue_encap(net, skb, cp, &next_protocol);
1121 skb_push(skb, sizeof(struct iphdr));
1122 skb_reset_network_header(skb);
1123 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1126 * Push down and install the IPIP header.
1130 iph->ihl = sizeof(struct iphdr)>>2;
1132 iph->protocol = next_protocol;
1134 iph->daddr = cp->daddr.ip;
1137 ip_select_ident(net, skb, NULL);
1139 /* Another hack: avoid icmp_send in ip_fragment */
1142 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
1143 if (ret == NF_ACCEPT)
1144 ip_local_out(net, skb->sk, skb);
1145 else if (ret == NF_DROP)
1159 #ifdef CONFIG_IP_VS_IPV6
/* VS/TUN transmitter, IPv6 outer header; mirrors ip_vs_tunnel_xmit().
 * Routes in tunnel mode, short-circuits local destinations, reserves
 * headroom (plus UDP+GUE when configured), applies GSO tunnel offload,
 * optionally GUE-encaps, then builds the outer IPv6 header and emits
 * via ip6_local_out().  Error labels and some header stores are elided
 * in this excerpt.
 */
1161 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1162 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
1164 struct netns_ipvs *ipvs = cp->ipvs;
1165 struct net *net = ipvs->net;
1166 struct rt6_info *rt; /* Route to the other host */
1167 struct in6_addr saddr; /* Source for tunnel */
1168 struct net_device *tdev; /* Device to other host */
1169 __u8 next_protocol = 0;
1170 __u32 payload_len = 0;
1173 struct ipv6hdr *iph; /* Our new IP header */
1174 unsigned int max_headroom; /* The extra header space needed */
1176 int tun_type, gso_type;
1180 local = __ip_vs_get_out_rt_v6(ipvs, cp->af, skb, cp->dest,
1183 IP_VS_RT_MODE_LOCAL |
1184 IP_VS_RT_MODE_NON_LOCAL |
1185 IP_VS_RT_MODE_TUNNEL);
/* Local destination: no encapsulation needed, go up the stack. */
1189 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
1191 rt = (struct rt6_info *) skb_dst(skb);
1195 * Okay, now see if we can stuff it in the buffer as-is.
1197 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
1199 tun_type = cp->dest->tun_type;
1201 if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
1202 max_headroom += sizeof(struct udphdr) + sizeof(struct guehdr);
1204 skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
1205 &next_protocol, &payload_len,
1206 &dsfield, &ttl, NULL);
1210 gso_type = __tun_gso_type_mask(AF_INET6, cp->af);
1211 if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
1212 gso_type |= SKB_GSO_UDP_TUNNEL;
1214 if (iptunnel_handle_offloads(skb, gso_type))
1217 skb->transport_header = skb->network_header;
1219 skb_set_inner_ipproto(skb, next_protocol);
1221 if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE)
1222 ipvs_gue_encap(net, skb, cp, &next_protocol);
1224 skb_push(skb, sizeof(struct ipv6hdr));
1225 skb_reset_network_header(skb);
/* NOTE(review): IPCB is used here on an IPv6 skb, as in the visible
 * source; confirm against upstream whether this is intentional. */
1226 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1229 * Push down and install the IPIP header.
1231 iph = ipv6_hdr(skb);
1233 iph->nexthdr = next_protocol;
1234 iph->payload_len = htons(payload_len);
1235 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
1236 ipv6_change_dsfield(iph, 0, dsfield);
1237 iph->daddr = cp->daddr.in6;
1239 iph->hop_limit = ttl;
1241 /* Another hack: avoid icmp_send in ip_fragment */
1244 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
1245 if (ret == NF_ACCEPT)
1246 ip6_local_out(net, skb->sk, skb);
1247 else if (ret == NF_DROP)
1264 * Direct Routing transmitter
1265 * Used for ANY protocol
/* VS/DR transmitter (IPv4): route via the real server's address as the
 * known next hop without rewriting the packet; refresh the IP checksum
 * (TTL was decremented during routing) and send.
 */
1268 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1269 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
1275 local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
1276 IP_VS_RT_MODE_LOCAL |
1277 IP_VS_RT_MODE_NON_LOCAL |
1278 IP_VS_RT_MODE_KNOWN_NH, NULL, ipvsh);
/* Local destination short-circuits up the stack. */
1282 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
1284 ip_send_check(ip_hdr(skb));
1286 /* Another hack: avoid icmp_send in ip_fragment */
1289 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
1300 #ifdef CONFIG_IP_VS_IPV6
/* VS/DR transmitter (IPv6): mirrors ip_vs_dr_xmit(); no checksum to
 * refresh since IPv6 has no header checksum.
 */
1302 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1303 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
1309 local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
1312 IP_VS_RT_MODE_LOCAL |
1313 IP_VS_RT_MODE_NON_LOCAL |
1314 IP_VS_RT_MODE_KNOWN_NH);
/* Local destination short-circuits up the stack. */
1318 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
1320 /* Another hack: avoid icmp_send in ip_fragment */
1323 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
1337 * ICMP packet transmitter
1338 * called by the ip_vs_in_icmp
/* Forward a related ICMP error (IPv4).  Non-NAT methods re-use the
 * connection's packet_xmit unchanged; the NAT path routes to the real
 * server (RDR allowed except from FORWARD hook), applies the same
 * sync-ed/loopback DNAT guards as ip_vs_nat_xmit(), then rewrites the
 * embedded headers with ip_vs_nat_icmp() and sends.  Error labels and
 * returns are elided in this excerpt.
 */
1341 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1342 struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
1343 struct ip_vs_iphdr *iph)
1345 struct rtable *rt; /* Route to the other host */
1348 int rt_mode, was_input;
1352 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1353 forwarded directly here, because there is no need to
1354 translate address/port back */
1355 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1356 if (cp->packet_xmit)
1357 rc = cp->packet_xmit(skb, cp, pp, iph);
1360 /* do not touch skb anymore */
1361 atomic_inc(&cp->in_pkts);
1366 * mangle and send the packet here (only for VS/NAT)
1368 was_input = rt_is_input_route(skb_rtable(skb));
1370 /* LOCALNODE from FORWARD hook is not supported */
1371 rt_mode = (hooknum != NF_INET_FORWARD) ?
1372 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1373 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1374 local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip, rt_mode,
1378 rt = skb_rtable(skb);
1381 * Avoid duplicate tuple in reply direction for NAT traffic
1382 * to local address when connection is sync-ed
1384 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1385 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1386 enum ip_conntrack_info ctinfo;
1387 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1390 IP_VS_DBG(10, "%s(): "
1391 "stopping DNAT to local address %pI4\n",
1392 __func__, &cp->daddr.ip);
1398 /* From world but DNAT to loopback address? */
1399 if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) {
1400 IP_VS_DBG(1, "%s(): "
1401 "stopping DNAT to loopback %pI4\n",
1402 __func__, &cp->daddr.ip);
1406 /* copy-on-write the packet before mangling it */
1407 if (!skb_make_writable(skb, offset))
1410 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1413 ip_vs_nat_icmp(skb, pp, cp, 0);
1415 /* Another hack: avoid icmp_send in ip_fragment */
1418 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
1429 #ifdef CONFIG_IP_VS_IPV6
/* Forward a related ICMPv6 error; mirrors ip_vs_icmp_xmit().  Non-NAT
 * methods defer to cp->packet_xmit; the NAT path routes to the real
 * server (RDR allowed except from FORWARD), applies the sync-ed and
 * loopback DNAT guards, rewrites the embedded headers with
 * ip_vs_nat_icmp_v6() and sends.  Error labels and returns are elided
 * in this excerpt.
 */
1431 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1432 struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
1433 struct ip_vs_iphdr *ipvsh)
1435 struct rt6_info *rt; /* Route to the other host */
1442 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1443 forwarded directly here, because there is no need to
1444 translate address/port back */
1445 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1446 if (cp->packet_xmit)
1447 rc = cp->packet_xmit(skb, cp, pp, ipvsh);
1450 /* do not touch skb anymore */
1451 atomic_inc(&cp->in_pkts);
1456 * mangle and send the packet here (only for VS/NAT)
1459 /* LOCALNODE from FORWARD hook is not supported */
1460 rt_mode = (hooknum != NF_INET_FORWARD) ?
1461 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1462 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1463 local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
1464 &cp->daddr.in6, NULL, ipvsh, 0, rt_mode);
1467 rt = (struct rt6_info *) skb_dst(skb);
1469 * Avoid duplicate tuple in reply direction for NAT traffic
1470 * to local address when connection is sync-ed
1472 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1473 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1474 enum ip_conntrack_info ctinfo;
1475 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1478 IP_VS_DBG(10, "%s(): "
1479 "stopping DNAT to local address %pI6\n",
1480 __func__, &cp->daddr.in6);
1486 /* From world but DNAT to loopback address? */
1487 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
1488 ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
1489 IP_VS_DBG(1, "%s(): "
1490 "stopping DNAT to loopback %pI6\n",
1491 __func__, &cp->daddr.in6);
1495 /* copy-on-write the packet before mangling it */
1496 if (!skb_make_writable(skb, offset))
1499 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1502 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1504 /* Another hack: avoid icmp_send in ip_fragment */
1507 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);