#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>

struct flow_offload_entry {
        struct flow_offload     flow;
        struct nf_conn          *ct;
        struct rcu_head         rcu_head;
};

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

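/*
 * Fill one direction of the flow tuple from the conntrack tuple and the
 * cached routes. Note that the incoming interface is taken from the
 * reverse direction's route: packets travelling in @dir arrive via the
 * device used to reach their source.
 */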
static void
flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
                      struct nf_flow_route *route,
                      enum flow_offload_tuple_dir dir)
{
        struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
        struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
        struct dst_entry *other_dst = route->tuple[!dir].dst;
        struct dst_entry *dst = route->tuple[dir].dst;

        ft->dir = dir;

        switch (ctt->src.l3num) {
        case NFPROTO_IPV4:
                ft->src_v4 = ctt->src.u3.in;
                ft->dst_v4 = ctt->dst.u3.in;
                ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
                break;
        case NFPROTO_IPV6:
                ft->src_v6 = ctt->src.u3.in6;
                ft->dst_v6 = ctt->dst.u3.in6;
                ft->mtu = ip6_dst_mtu_forward(dst);
                break;
        }

        ft->l3proto = ctt->src.l3num;
        ft->l4proto = ctt->dst.protonum;
        ft->src_port = ctt->src.u.tcp.port;
        ft->dst_port = ctt->dst.u.tcp.port;

        ft->iifidx = other_dst->dev->ifindex;
        ft->oifidx = dst->dev->ifindex;
        ft->dst_cache = dst;
}

struct flow_offload *
flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
{
        struct flow_offload_entry *entry;
        struct flow_offload *flow;

        if (unlikely(nf_ct_is_dying(ct) ||
            !atomic_inc_not_zero(&ct->ct_general.use)))
                return NULL;

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                goto err_ct_refcnt;

        flow = &entry->flow;

        if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
                goto err_dst_cache_original;

        if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
                goto err_dst_cache_reply;

        entry->ct = ct;

        flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
        flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);

        if (ct->status & IPS_SRC_NAT)
                flow->flags |= FLOW_OFFLOAD_SNAT;
        if (ct->status & IPS_DST_NAT)
                flow->flags |= FLOW_OFFLOAD_DNAT;

        return flow;

err_dst_cache_reply:
        dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
err_dst_cache_original:
        kfree(entry);
err_ct_refcnt:
        nf_ct_put(ct);

        return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

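/*
 * Once a flow stops being offloaded, conntrack takes over again. The TCP
 * window tracking data is stale by then, so zero the tracked windows to
 * let tracking re-sync from the next packet, and grant the conntrack
 * entry a fresh pickup timeout so it does not expire right after the
 * handover.
 */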
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
        tcp->state = TCP_CONNTRACK_ESTABLISHED;
        tcp->seen[0].td_maxwin = 0;
        tcp->seen[1].td_maxwin = 0;
}

#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)

static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
        const struct nf_conntrack_l4proto *l4proto;
        unsigned int timeout;
        int l4num;

        l4num = nf_ct_protonum(ct);
        if (l4num == IPPROTO_TCP)
                flow_offload_fixup_tcp(&ct->proto.tcp);

        l4proto = __nf_ct_l4proto_find(l4num);
        if (!l4proto)
                return;

        if (l4num == IPPROTO_TCP)
                timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
        else if (l4num == IPPROTO_UDP)
                timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
        else
                return;

        ct->timeout = nfct_time_stamp + timeout;
}

void flow_offload_free(struct flow_offload *flow)
{
        struct flow_offload_entry *e;

        dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
        dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
        e = container_of(flow, struct flow_offload_entry, flow);
        if (flow->flags & FLOW_OFFLOAD_DYING)
                nf_ct_delete(e->ct, 0, 0);
        nf_ct_put(e->ct);
        kfree_rcu(e, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

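/*
 * The rhashtable key is the leading part of the tuple, up to (but not
 * including) the @dir member: jhash and memcmp both use
 * offsetof(struct flow_offload_tuple, dir) as the length, so @dir and
 * the fields after it (such as the cached route) are not part of the key.
 */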
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
        const struct flow_offload_tuple *tuple = data;

        return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
        const struct flow_offload_tuple_rhash *tuplehash = data;

        return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
                                 const void *ptr)
{
        const struct flow_offload_tuple *tuple = arg->key;
        const struct flow_offload_tuple_rhash *x = ptr;

        if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
                return 1;

        return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
        .head_offset            = offsetof(struct flow_offload_tuple_rhash, node),
        .hashfn                 = flow_offload_hash,
        .obj_hashfn             = flow_offload_hash_obj,
        .obj_cmpfn              = flow_offload_hash_cmp,
        .automatic_shrinking    = true,
};

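/*
 * Each flow is hashed into the table twice, once per direction, so a
 * lookup with either tuple lands on the same flow_offload object. The
 * timeout is seeded here from jiffies; the receive fast path is expected
 * to refresh it for as long as packets keep hitting the flow.
 */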
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
        flow->timeout = (u32)jiffies;

        rhashtable_insert_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
                               nf_flow_offload_rhash_params);
        rhashtable_insert_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
                               nf_flow_offload_rhash_params);
        return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

static void flow_offload_del(struct nf_flowtable *flow_table,
                             struct flow_offload *flow)
{
        struct flow_offload_entry *e;

        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
                               nf_flow_offload_rhash_params);
        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
                               nf_flow_offload_rhash_params);

        e = container_of(flow, struct flow_offload_entry, flow);
        clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);

        flow_offload_free(flow);
}

void flow_offload_teardown(struct flow_offload *flow)
{
        struct flow_offload_entry *e;

        flow->flags |= FLOW_OFFLOAD_TEARDOWN;

        e = container_of(flow, struct flow_offload_entry, flow);
        flow_offload_fixup_ct_state(e->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
                    struct flow_offload_tuple *tuple)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct flow_offload *flow;
        int dir;

        tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
                                      nf_flow_offload_rhash_params);
        if (!tuplehash)
                return NULL;

        dir = tuplehash->tuple.dir;
        flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
        if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
                return NULL;

        return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

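/*
 * Walk every flow in the table. rhashtable_walk_next() may return
 * ERR_PTR(-EAGAIN) while the table is resizing; the walk just continues,
 * accepting that entries can be missed or seen twice. Only ORIGINAL
 * direction entries are handed to @iter, so each flow is visited once
 * per walk.
 */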
int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
                      void (*iter)(struct flow_offload *flow, void *data),
                      void *data)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct rhashtable_iter hti;
        struct flow_offload *flow;
        int err = 0;

        rhashtable_walk_enter(&flow_table->rhashtable, &hti);
        rhashtable_walk_start(&hti);

        while ((tuplehash = rhashtable_walk_next(&hti))) {
                if (IS_ERR(tuplehash)) {
                        if (PTR_ERR(tuplehash) != -EAGAIN) {
                                err = PTR_ERR(tuplehash);
                                break;
                        }
                        continue;
                }
                if (tuplehash->tuple.dir)
                        continue;

                flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

                iter(flow, data);
        }
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);

        return err;
}

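/*
 * flow->timeout is a u32 jiffies snapshot. The subtraction below is done
 * in unsigned arithmetic and then cast to signed, which keeps the
 * comparison correct across jiffies wraparound.
 */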
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
        return (__s32)(flow->timeout - (u32)jiffies) <= 0;
}

static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
        struct nf_flowtable *flow_table = data;

        if (nf_flow_has_expired(flow) ||
            (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
                flow_offload_del(flow_table, flow);
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
        struct nf_flowtable *flow_table;

        flow_table = container_of(work, struct nf_flowtable, gc_work.work);
        nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
        queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

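/*
 * Port NAT helpers. Only the L4 checksum needs per-protocol care:
 * inet_proto_csum_replace2() patches it incrementally. UDP additionally
 * treats a zero checksum as "none", so when mangling produces zero it is
 * folded to CSUM_MANGLED_0, and datagrams that carry no checksum at all
 * are left untouched.
 */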
static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
                                __be16 port, __be16 new_port)
{
        struct tcphdr *tcph;

        if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
            skb_try_make_writable(skb, thoff + sizeof(*tcph)))
                return -1;

        tcph = (void *)(skb_network_header(skb) + thoff);
        inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);

        return 0;
}

static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
                                __be16 port, __be16 new_port)
{
        struct udphdr *udph;

        if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
            skb_try_make_writable(skb, thoff + sizeof(*udph)))
                return -1;

        udph = (void *)(skb_network_header(skb) + thoff);
        if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                inet_proto_csum_replace2(&udph->check, skb, port,
                                         new_port, true);
                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }

        return 0;
}

static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
                            u8 protocol, __be16 port, __be16 new_port)
{
        switch (protocol) {
        case IPPROTO_TCP:
                if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
                        return NF_DROP;
                break;
        case IPPROTO_UDP:
                if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
                        return NF_DROP;
                break;
        }

        return 0;
}

int nf_flow_snat_port(const struct flow_offload *flow,
                      struct sk_buff *skb, unsigned int thoff,
                      u8 protocol, enum flow_offload_tuple_dir dir)
{
        struct flow_ports *hdr;
        __be16 port, new_port;

        if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
            skb_try_make_writable(skb, thoff + sizeof(*hdr)))
                return -1;

        hdr = (void *)(skb_network_header(skb) + thoff);

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = hdr->source;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
                hdr->source = new_port;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = hdr->dest;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
                hdr->dest = new_port;
                break;
        default:
                return -1;
        }

        return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

int nf_flow_dnat_port(const struct flow_offload *flow,
                      struct sk_buff *skb, unsigned int thoff,
                      u8 protocol, enum flow_offload_tuple_dir dir)
{
        struct flow_ports *hdr;
        __be16 port, new_port;

        if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
            skb_try_make_writable(skb, thoff + sizeof(*hdr)))
                return -1;

        hdr = (void *)(skb_network_header(skb) + thoff);

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = hdr->dest;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
                hdr->dest = new_port;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = hdr->source;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
                hdr->source = new_port;
                break;
        default:
                return -1;
        }

        return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

int nf_flow_table_init(struct nf_flowtable *flowtable)
{
        int err;

        INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);

        err = rhashtable_init(&flowtable->rhashtable,
                              &nf_flow_offload_rhash_params);
        if (err < 0)
                return err;

        queue_delayed_work(system_power_efficient_wq,
                           &flowtable->gc_work, HZ);

        mutex_lock(&flowtable_lock);
        list_add(&flowtable->list, &flowtables);
        mutex_unlock(&flowtable_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

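/*
 * Cleanup driven from device/netns events. A NULL device tears down
 * every flow; otherwise only flows whose conntrack entry lives in the
 * device's netns and which enter through that device are marked dying,
 * and the flushed GC run below removes them from the table.
 */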
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
        struct net_device *dev = data;
        struct flow_offload_entry *e;

        e = container_of(flow, struct flow_offload_entry, flow);

        if (!dev) {
                flow_offload_teardown(flow);
                return;
        }
        if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
            (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
             flow->tuplehash[1].tuple.iifidx == dev->ifindex))
                flow_offload_dead(flow);
}

static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
                                          struct net_device *dev)
{
        nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
        flush_delayed_work(&flowtable->gc_work);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
        struct nf_flowtable *flowtable;

        mutex_lock(&flowtable_lock);
        list_for_each_entry(flowtable, &flowtables, list)
                nf_flow_table_iterate_cleanup(flowtable, dev);
        mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

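/*
 * Final teardown: unlink the table, stop the periodic GC, then iterate
 * twice by hand. The first pass (NULL device) marks every remaining flow
 * for teardown, the second runs the GC step that unhashes and frees them,
 * leaving the rhashtable empty before it is destroyed.
 */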
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
        mutex_lock(&flowtable_lock);
        list_del(&flow_table->list);
        mutex_unlock(&flowtable_lock);
        cancel_delayed_work_sync(&flow_table->gc_work);
        nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
        nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
        rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");