// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

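/* Flows are only offloaded while a TCP connection is established; once a
 * FIN or RST is seen the entry is torn down and the packet is pushed back
 * to the classic forwarding path so conntrack can track the shutdown.
 */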
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

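/* The NAT helpers below only adjust the TCP/UDP checksum for an address
 * rewrite; the caller updates the address itself in the IP header.
 */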
static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);

	return 0;
}

static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				  unsigned int thoff, __be32 addr,
				  __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

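/* For SNAT, the replacement source address is taken from the destination
 * of the reply tuple; in the reply direction the destination is rewritten
 * back to the original source.
 */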
static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

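/* Apply port and address rewrites for both NAT flavours. Any failure makes
 * the caller drop the packet, since it may already be partially mangled.
 */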
static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			  unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	struct iphdr *iph = ip_hdr(skb);

	if (flow->flags & FLOW_OFFLOAD_SNAT &&
	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;
	if (flow->flags & FLOW_OFFLOAD_DNAT &&
	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;

	return 0;
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

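/* Fill a lookup tuple from the packet. Fragments, packets with IP options,
 * non-TCP/UDP protocols and packets whose TTL is about to expire stay in
 * the slow path.
 */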
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -1;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return -1;

	if (iph->ttl <= 1)
		return -1;

	thoff = iph->ihl * 4;
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr	= iph->saddr;
	tuple->dst_v4.s_addr	= iph->daddr;
	tuple->src_port		= ports->source;
	tuple->dst_port		= ports->dest;
	tuple->l3proto		= AF_INET;
	tuple->l4proto		= iph->protocol;
	tuple->iifidx		= dev->ifindex;

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

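/* An xfrm bundle can be invalidated by a policy update while it sits in
 * the flow table, so revalidate it with dst_check() before use.
 */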
static int nf_flow_offload_dst_check(struct dst_entry *dst)
{
	if (unlikely(dst_xfrm(dst)))
		return dst_check(dst, 0) ? 0 : -1;

	return 0;
}

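/* Transmit through the xfrm stack: hand the packet to dst_output() so it
 * gets encapsulated, instead of sending it out via neigh_xmit().
 */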
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

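/* IPv4 fast path: look up the flow table from the forward hook, apply NAT,
 * decrement the TTL and transmit straight to the cached route's device,
 * bypassing the rest of the IP forwarding path.
 */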
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rtable *rt;
	unsigned int thoff;
	struct iphdr *iph;
	__be32 nexthop;

	if (skb->protocol != htons(ETH_P_IP))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
	outdev = rt->dst.dev;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, sizeof(*iph)))
		return NF_DROP;

	thoff = ip_hdr(skb)->ihl * 4;
	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
		return NF_ACCEPT;

	if (nf_flow_offload_dst_check(&rt->dst)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
		return NF_DROP;

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
	iph = ip_hdr(skb);
	ip_decrease_ttl(iph);

	if (unlikely(dst_xfrm(&rt->dst))) {
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	skb->dev = outdev;
	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

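/* The IPv6 variants below mirror the IPv4 helpers; only the pseudo-header
 * checksum update differs, covering 128-bit addresses, and there is no IP
 * header checksum to fix up.
 */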
static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);

	return 0;
}

static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				    unsigned int thoff, struct in6_addr *addr,
				    struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

static int nf_flow_snat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static int nf_flow_nat_ipv6(const struct flow_offload *flow,
			    struct sk_buff *skb,
			    enum flow_offload_tuple_dir dir)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	unsigned int thoff = sizeof(*ip6h);

	if (flow->flags & FLOW_OFFLOAD_SNAT &&
	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;
	if (flow->flags & FLOW_OFFLOAD_DNAT &&
	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;

	return 0;
}

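/* As for IPv4, only TCP and UDP directly after a plain IPv6 header are
 * offloaded; packets carrying extension headers never match here because
 * nexthdr is then not a transport protocol.
 */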
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -1;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return -1;

	if (ip6h->hop_limit <= 1)
		return -1;

	thoff = sizeof(*ip6h);
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6		= ip6h->saddr;
	tuple->dst_v6		= ip6h->daddr;
	tuple->src_port		= ports->source;
	tuple->dst_port		= ports->dest;
	tuple->l3proto		= AF_INET6;
	tuple->l4proto		= ip6h->nexthdr;
	tuple->iifidx		= dev->ifindex;

	return 0;
}

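/* IPv6 fast path: same structure as the IPv4 hook, with the hop limit
 * decremented in place of the TTL.
 */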
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;

	if (skb->protocol != htons(ETH_P_IPV6))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
	outdev = rt->dst.dev;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

	if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb,
				sizeof(*ip6h)))
		return NF_ACCEPT;

	if (nf_flow_offload_dst_check(&rt->dst)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (skb_try_make_writable(skb, sizeof(*ip6h)))
		return NF_DROP;

	if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
		return NF_DROP;

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
	ip6h = ipv6_hdr(skb);
	ip6h->hop_limit--;

	if (unlikely(dst_xfrm(&rt->dst))) {
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	skb->dev = outdev;
	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);