net/netfilter/nf_conntrack_core.c
1 /* Connection state tracking for netfilter.  This is separated from,
2    but required by, the NAT layer; it can also be used by an iptables
3    extension. */
4
5 /* (C) 1999-2001 Paul `Rusty' Russell
6  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7  * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
8  * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/types.h>
18 #include <linux/netfilter.h>
19 #include <linux/module.h>
20 #include <linux/sched.h>
21 #include <linux/skbuff.h>
22 #include <linux/proc_fs.h>
23 #include <linux/vmalloc.h>
24 #include <linux/stddef.h>
25 #include <linux/slab.h>
26 #include <linux/random.h>
27 #include <linux/jhash.h>
28 #include <linux/err.h>
29 #include <linux/percpu.h>
30 #include <linux/moduleparam.h>
31 #include <linux/notifier.h>
32 #include <linux/kernel.h>
33 #include <linux/netdevice.h>
34 #include <linux/socket.h>
35 #include <linux/mm.h>
36 #include <linux/nsproxy.h>
37 #include <linux/rculist_nulls.h>
38
39 #include <net/netfilter/nf_conntrack.h>
40 #include <net/netfilter/nf_conntrack_l4proto.h>
41 #include <net/netfilter/nf_conntrack_expect.h>
42 #include <net/netfilter/nf_conntrack_helper.h>
43 #include <net/netfilter/nf_conntrack_seqadj.h>
44 #include <net/netfilter/nf_conntrack_core.h>
45 #include <net/netfilter/nf_conntrack_extend.h>
46 #include <net/netfilter/nf_conntrack_acct.h>
47 #include <net/netfilter/nf_conntrack_ecache.h>
48 #include <net/netfilter/nf_conntrack_zones.h>
49 #include <net/netfilter/nf_conntrack_timestamp.h>
50 #include <net/netfilter/nf_conntrack_timeout.h>
51 #include <net/netfilter/nf_conntrack_labels.h>
52 #include <net/netfilter/nf_conntrack_synproxy.h>
53 #include <net/netfilter/nf_nat.h>
54 #include <net/netfilter/nf_nat_core.h>
55 #include <net/netfilter/nf_nat_helper.h>
56 #include <net/netns/hash.h>
57 #include <net/ip.h>
58
59 #include "nf_internals.h"
60
61 __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
62 EXPORT_SYMBOL_GPL(nf_conntrack_locks);
63
64 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
65 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
66
67 struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
68 EXPORT_SYMBOL_GPL(nf_conntrack_hash);
69
70 struct conntrack_gc_work {
71         struct delayed_work     dwork;
72         u32                     last_bucket;
73         bool                    exiting;
74         bool                    early_drop;
75         long                    next_gc_run;
76 };
77
78 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
80 static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
81 static __read_mostly bool nf_conntrack_locks_all;
82
83 /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
84 #define GC_MAX_BUCKETS_DIV      128u
85 /* upper bound of full table scan */
86 #define GC_MAX_SCAN_JIFFIES     (16u * HZ)
87 /* desired ratio of entries found to be expired */
88 #define GC_EVICT_RATIO  50u
89
90 static struct conntrack_gc_work conntrack_gc_work;
91
92 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
93 {
94         /* 1) Acquire the lock */
95         spin_lock(lock);
96
97         /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
98          * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
99          */
100         if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
101                 return;
102
103         /* fast path failed, unlock */
104         spin_unlock(lock);
105
106         /* Slow path 1) get global lock */
107         spin_lock(&nf_conntrack_locks_all_lock);
108
109         /* Slow path 2) get the lock we want */
110         spin_lock(lock);
111
112         /* Slow path 3) release the global lock */
113         spin_unlock(&nf_conntrack_locks_all_lock);
114 }
115 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
116
117 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
118 {
119         h1 %= CONNTRACK_LOCKS;
120         h2 %= CONNTRACK_LOCKS;
121         spin_unlock(&nf_conntrack_locks[h1]);
122         if (h1 != h2)
123                 spin_unlock(&nf_conntrack_locks[h2]);
124 }
125
126 /* return true if we need to recompute hashes (in case hash table was resized) */
127 static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
128                                      unsigned int h2, unsigned int sequence)
129 {
130         h1 %= CONNTRACK_LOCKS;
131         h2 %= CONNTRACK_LOCKS;
132         if (h1 <= h2) {
133                 nf_conntrack_lock(&nf_conntrack_locks[h1]);
134                 if (h1 != h2)
135                         spin_lock_nested(&nf_conntrack_locks[h2],
136                                          SINGLE_DEPTH_NESTING);
137         } else {
138                 nf_conntrack_lock(&nf_conntrack_locks[h2]);
139                 spin_lock_nested(&nf_conntrack_locks[h1],
140                                  SINGLE_DEPTH_NESTING);
141         }
142         if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
143                 nf_conntrack_double_unlock(h1, h2);
144                 return true;
145         }
146         return false;
147 }
148
149 static void nf_conntrack_all_lock(void)
150 {
151         int i;
152
153         spin_lock(&nf_conntrack_locks_all_lock);
154
155         nf_conntrack_locks_all = true;
156
157         for (i = 0; i < CONNTRACK_LOCKS; i++) {
158                 spin_lock(&nf_conntrack_locks[i]);
159
160                 /* This spin_unlock provides the "release" to ensure that
161                  * nf_conntrack_locks_all==true is visible to everyone that
162                  * acquired spin_lock(&nf_conntrack_locks[]).
163                  */
164                 spin_unlock(&nf_conntrack_locks[i]);
165         }
166 }
167
168 static void nf_conntrack_all_unlock(void)
169 {
170         /* All prior stores must be complete before we clear
171          * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
172          * might observe the false value but not the entire
173          * critical section.
174          * It pairs with the smp_load_acquire() in nf_conntrack_lock()
175          */
176         smp_store_release(&nf_conntrack_locks_all, false);
177         spin_unlock(&nf_conntrack_locks_all_lock);
178 }
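/* Locking scheme summary (derived from the three helpers above): per-bucket
 * work takes one of the CONNTRACK_LOCKS spinlocks through nf_conntrack_lock(),
 * which falls back to a slow path whenever nf_conntrack_locks_all is set;
 * whole-table work (e.g. a resize) brackets itself with
 * nf_conntrack_all_lock()/nf_conntrack_all_unlock(), which sets that flag and
 * briefly takes every bucket lock so in-flight fast-path holders drain.
 *
 * A minimal usage sketch for a hypothetical whole-table walker:
 *
 *	nf_conntrack_all_lock();
 *	...walk or rehash every bucket...
 *	nf_conntrack_all_unlock();
 */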
179
180 unsigned int nf_conntrack_htable_size __read_mostly;
181 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
182
183 unsigned int nf_conntrack_max __read_mostly;
184 EXPORT_SYMBOL_GPL(nf_conntrack_max);
185 seqcount_t nf_conntrack_generation __read_mostly;
186 static unsigned int nf_conntrack_hash_rnd __read_mostly;
187
188 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
189                               const struct net *net)
190 {
191         unsigned int n;
192         u32 seed;
193
194         get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
195
196         /* The direction must be ignored, so we hash everything up to the
197          * destination ports (which is a multiple of 4) and treat the last
198          * three bytes manually.
199          */
200         seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
201         n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
202         return jhash2((u32 *)tuple, n, seed ^
203                       (((__force __u16)tuple->dst.u.all << 16) |
204                       tuple->dst.protonum));
205 }
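/* Worked size check for the jhash2() call above, assuming the usual
 * nf_conntrack_tuple layout (16-byte u3 address, 2-byte port and 2-byte l3num
 * in 'src', 16-byte u3 in 'dst'): n = (20 + 16) / 4 = 9 u32 words get hashed
 * directly, while the remaining "last three bytes" -- the 2-byte destination
 * port and the 1-byte protocol number -- are folded into the seed instead.
 */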
206
207 static u32 scale_hash(u32 hash)
208 {
209         return reciprocal_scale(hash, nf_conntrack_htable_size);
210 }
211
212 static u32 __hash_conntrack(const struct net *net,
213                             const struct nf_conntrack_tuple *tuple,
214                             unsigned int size)
215 {
216         return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
217 }
218
219 static u32 hash_conntrack(const struct net *net,
220                           const struct nf_conntrack_tuple *tuple)
221 {
222         return scale_hash(hash_conntrack_raw(tuple, net));
223 }
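/* Both helpers above rely on reciprocal_scale() to map a 32-bit hash onto
 * [0, size) without a division; in current kernels that is essentially
 * ((u64)hash * size) >> 32 (see linux/kernel.h), so any table size works,
 * not just powers of two.
 */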
224
225 static bool
226 nf_ct_get_tuple(const struct sk_buff *skb,
227                 unsigned int nhoff,
228                 unsigned int dataoff,
229                 u_int16_t l3num,
230                 u_int8_t protonum,
231                 struct net *net,
232                 struct nf_conntrack_tuple *tuple,
233                 const struct nf_conntrack_l4proto *l4proto)
234 {
235         unsigned int size;
236         const __be32 *ap;
237         __be32 _addrs[8];
238         struct {
239                 __be16 sport;
240                 __be16 dport;
241         } _inet_hdr, *inet_hdr;
242
243         memset(tuple, 0, sizeof(*tuple));
244
245         tuple->src.l3num = l3num;
246         switch (l3num) {
247         case NFPROTO_IPV4:
248                 nhoff += offsetof(struct iphdr, saddr);
249                 size = 2 * sizeof(__be32);
250                 break;
251         case NFPROTO_IPV6:
252                 nhoff += offsetof(struct ipv6hdr, saddr);
253                 size = sizeof(_addrs);
254                 break;
255         default:
256                 return true;
257         }
258
259         ap = skb_header_pointer(skb, nhoff, size, _addrs);
260         if (!ap)
261                 return false;
262
263         switch (l3num) {
264         case NFPROTO_IPV4:
265                 tuple->src.u3.ip = ap[0];
266                 tuple->dst.u3.ip = ap[1];
267                 break;
268         case NFPROTO_IPV6:
269                 memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
270                 memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
271                 break;
272         }
273
274         tuple->dst.protonum = protonum;
275         tuple->dst.dir = IP_CT_DIR_ORIGINAL;
276
277         if (unlikely(l4proto->pkt_to_tuple))
278                 return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
279
280         /* Actually only need first 4 bytes to get ports. */
281         inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
282         if (!inet_hdr)
283                 return false;
284
285         tuple->src.u.udp.port = inet_hdr->sport;
286         tuple->dst.u.udp.port = inet_hdr->dport;
287         return true;
288 }
289
290 static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
291                             u_int8_t *protonum)
292 {
293         int dataoff = -1;
294         const struct iphdr *iph;
295         struct iphdr _iph;
296
297         iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
298         if (!iph)
299                 return -1;
300
301         /* Conntrack defragments packets; we might still see fragments
302          * inside ICMP packets, though.
303          */
304         if (iph->frag_off & htons(IP_OFFSET))
305                 return -1;
306
307         dataoff = nhoff + (iph->ihl << 2);
308         *protonum = iph->protocol;
309
310         /* Check bogus IP headers */
311         if (dataoff > skb->len) {
312                 pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
313                          nhoff, iph->ihl << 2, skb->len);
314                 return -1;
315         }
316         return dataoff;
317 }
318
319 #if IS_ENABLED(CONFIG_IPV6)
320 static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
321                             u8 *protonum)
322 {
323         int protoff = -1;
324         unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
325         __be16 frag_off;
326         u8 nexthdr;
327
328         if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
329                           &nexthdr, sizeof(nexthdr)) != 0) {
330                 pr_debug("can't get nexthdr\n");
331                 return -1;
332         }
333         protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
334         /*
335          * (protoff == skb->len) means the packet has no data, just the
336          * IPv6 header and possibly extension headers, but it is tracked anyway
337          */
338         if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
339                 pr_debug("can't find proto in pkt\n");
340                 return -1;
341         }
342
343         *protonum = nexthdr;
344         return protoff;
345 }
346 #endif
347
348 static int get_l4proto(const struct sk_buff *skb,
349                        unsigned int nhoff, u8 pf, u8 *l4num)
350 {
351         switch (pf) {
352         case NFPROTO_IPV4:
353                 return ipv4_get_l4proto(skb, nhoff, l4num);
354 #if IS_ENABLED(CONFIG_IPV6)
355         case NFPROTO_IPV6:
356                 return ipv6_get_l4proto(skb, nhoff, l4num);
357 #endif
358         default:
359                 *l4num = 0;
360                 break;
361         }
362         return -1;
363 }
364
365 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
366                        u_int16_t l3num,
367                        struct net *net, struct nf_conntrack_tuple *tuple)
368 {
369         const struct nf_conntrack_l4proto *l4proto;
370         u8 protonum;
371         int protoff;
372         int ret;
373
374         rcu_read_lock();
375
376         protoff = get_l4proto(skb, nhoff, l3num, &protonum);
377         if (protoff <= 0) {
378                 rcu_read_unlock();
379                 return false;
380         }
381
382         l4proto = __nf_ct_l4proto_find(protonum);
383
384         ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
385                               l4proto);
386
387         rcu_read_unlock();
388         return ret;
389 }
390 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
391
392 bool
393 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
394                    const struct nf_conntrack_tuple *orig,
395                    const struct nf_conntrack_l4proto *l4proto)
396 {
397         memset(inverse, 0, sizeof(*inverse));
398
399         inverse->src.l3num = orig->src.l3num;
400
401         switch (orig->src.l3num) {
402         case NFPROTO_IPV4:
403                 inverse->src.u3.ip = orig->dst.u3.ip;
404                 inverse->dst.u3.ip = orig->src.u3.ip;
405                 break;
406         case NFPROTO_IPV6:
407                 inverse->src.u3.in6 = orig->dst.u3.in6;
408                 inverse->dst.u3.in6 = orig->src.u3.in6;
409                 break;
410         default:
411                 break;
412         }
413
414         inverse->dst.dir = !orig->dst.dir;
415
416         inverse->dst.protonum = orig->dst.protonum;
417
418         if (unlikely(l4proto->invert_tuple))
419                 return l4proto->invert_tuple(inverse, orig);
420
421         inverse->src.u.all = orig->dst.u.all;
422         inverse->dst.u.all = orig->src.u.all;
423         return true;
424 }
425 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
426
427 static void
428 clean_from_lists(struct nf_conn *ct)
429 {
430         pr_debug("clean_from_lists(%p)\n", ct);
431         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
432         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
433
434         /* Destroy all pending expectations */
435         nf_ct_remove_expectations(ct);
436 }
437
438 /* must be called with local_bh_disable */
439 static void nf_ct_add_to_dying_list(struct nf_conn *ct)
440 {
441         struct ct_pcpu *pcpu;
442
443         /* add this conntrack to the (per cpu) dying list */
444         ct->cpu = smp_processor_id();
445         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
446
447         spin_lock(&pcpu->lock);
448         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
449                              &pcpu->dying);
450         spin_unlock(&pcpu->lock);
451 }
452
453 /* must be called with local_bh_disable */
454 static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
455 {
456         struct ct_pcpu *pcpu;
457
458         /* add this conntrack to the (per cpu) unconfirmed list */
459         ct->cpu = smp_processor_id();
460         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
461
462         spin_lock(&pcpu->lock);
463         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
464                              &pcpu->unconfirmed);
465         spin_unlock(&pcpu->lock);
466 }
467
468 /* must be called with local_bh_disable */
469 static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
470 {
471         struct ct_pcpu *pcpu;
472
473         /* We overload the first tuple to link into the unconfirmed or dying list. */
474         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
475
476         spin_lock(&pcpu->lock);
477         BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
478         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
479         spin_unlock(&pcpu->lock);
480 }
481
482 #define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
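/* NFCT_ALIGN() rounds a size up to the next multiple of (NFCT_INFOMASK + 1).
 * Assuming NFCT_INFOMASK is 7 (the low skb->_nfct bits reserved for ctinfo),
 * e.g. NFCT_ALIGN(41) == 48, so an aligned template pointer always has its
 * low three bits clear and can be stored in skb->_nfct alongside ctinfo.
 */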
483
484 /* Released via destroy_conntrack() */
485 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
486                                  const struct nf_conntrack_zone *zone,
487                                  gfp_t flags)
488 {
489         struct nf_conn *tmpl, *p;
490
491         if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
492                 tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
493                 if (!tmpl)
494                         return NULL;
495
496                 p = tmpl;
497                 tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
498                 if (tmpl != p) {
499                         tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
500                         tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
501                 }
502         } else {
503                 tmpl = kzalloc(sizeof(*tmpl), flags);
504                 if (!tmpl)
505                         return NULL;
506         }
507
508         tmpl->status = IPS_TEMPLATE;
509         write_pnet(&tmpl->ct_net, net);
510         nf_ct_zone_add(tmpl, zone);
511         atomic_set(&tmpl->ct_general.use, 0);
512
513         return tmpl;
514 }
515 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
516
517 void nf_ct_tmpl_free(struct nf_conn *tmpl)
518 {
519         nf_ct_ext_destroy(tmpl);
520         nf_ct_ext_free(tmpl);
521
522         if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
523                 kfree((char *)tmpl - tmpl->proto.tmpl_padto);
524         else
525                 kfree(tmpl);
526 }
527 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
528
529 static void
530 destroy_conntrack(struct nf_conntrack *nfct)
531 {
532         struct nf_conn *ct = (struct nf_conn *)nfct;
533         const struct nf_conntrack_l4proto *l4proto;
534
535         pr_debug("destroy_conntrack(%p)\n", ct);
536         WARN_ON(atomic_read(&nfct->use) != 0);
537
538         if (unlikely(nf_ct_is_template(ct))) {
539                 nf_ct_tmpl_free(ct);
540                 return;
541         }
542         l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
543         if (l4proto->destroy)
544                 l4proto->destroy(ct);
545
546         local_bh_disable();
547         /* Expectations will have been removed in clean_from_lists,
548          * except TFTP can create an expectation on the first packet,
549          * before connection is in the list, so we need to clean here,
550          * too.
551          */
552         nf_ct_remove_expectations(ct);
553
554         nf_ct_del_from_dying_or_unconfirmed_list(ct);
555
556         local_bh_enable();
557
558         if (ct->master)
559                 nf_ct_put(ct->master);
560
561         pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
562         nf_conntrack_free(ct);
563 }
564
565 static void nf_ct_delete_from_lists(struct nf_conn *ct)
566 {
567         struct net *net = nf_ct_net(ct);
568         unsigned int hash, reply_hash;
569         unsigned int sequence;
570
571         nf_ct_helper_destroy(ct);
572
573         local_bh_disable();
574         do {
575                 sequence = read_seqcount_begin(&nf_conntrack_generation);
576                 hash = hash_conntrack(net,
577                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
578                 reply_hash = hash_conntrack(net,
579                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
580         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
581
582         clean_from_lists(ct);
583         nf_conntrack_double_unlock(hash, reply_hash);
584
585         nf_ct_add_to_dying_list(ct);
586
587         local_bh_enable();
588 }
589
590 bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
591 {
592         struct nf_conn_tstamp *tstamp;
593
594         if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
595                 return false;
596
597         tstamp = nf_conn_tstamp_find(ct);
598         if (tstamp && tstamp->stop == 0)
599                 tstamp->stop = ktime_get_real_ns();
600
601         if (nf_conntrack_event_report(IPCT_DESTROY, ct,
602                                     portid, report) < 0) {
603                 /* destroy event was not delivered. nf_ct_put will
604                  * be done by event cache worker on redelivery.
605                  */
606                 nf_ct_delete_from_lists(ct);
607                 nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
608                 return false;
609         }
610
611         nf_conntrack_ecache_work(nf_ct_net(ct));
612         nf_ct_delete_from_lists(ct);
613         nf_ct_put(ct);
614         return true;
615 }
616 EXPORT_SYMBOL_GPL(nf_ct_delete);
617
618 static inline bool
619 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
620                 const struct nf_conntrack_tuple *tuple,
621                 const struct nf_conntrack_zone *zone,
622                 const struct net *net)
623 {
624         struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
625
626         /* A conntrack can be recreated with the equal tuple,
627          * so we need to check that the conntrack is confirmed
628          */
629         return nf_ct_tuple_equal(tuple, &h->tuple) &&
630                nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
631                nf_ct_is_confirmed(ct) &&
632                net_eq(net, nf_ct_net(ct));
633 }
634
635 static inline bool
636 nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
637 {
638         return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
639                                  &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
640                nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
641                                  &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
642                nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
643                nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
644                net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
645 }
646
647 /* caller must hold rcu readlock and none of the nf_conntrack_locks */
648 static void nf_ct_gc_expired(struct nf_conn *ct)
649 {
650         if (!atomic_inc_not_zero(&ct->ct_general.use))
651                 return;
652
653         if (nf_ct_should_gc(ct))
654                 nf_ct_kill(ct);
655
656         nf_ct_put(ct);
657 }
658
659 /*
660  * Warning:
661  * - Caller must take a reference on returned object
662  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
663  */
664 static struct nf_conntrack_tuple_hash *
665 ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
666                       const struct nf_conntrack_tuple *tuple, u32 hash)
667 {
668         struct nf_conntrack_tuple_hash *h;
669         struct hlist_nulls_head *ct_hash;
670         struct hlist_nulls_node *n;
671         unsigned int bucket, hsize;
672
673 begin:
674         nf_conntrack_get_ht(&ct_hash, &hsize);
675         bucket = reciprocal_scale(hash, hsize);
676
677         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
678                 struct nf_conn *ct;
679
680                 ct = nf_ct_tuplehash_to_ctrack(h);
681                 if (nf_ct_is_expired(ct)) {
682                         nf_ct_gc_expired(ct);
683                         continue;
684                 }
685
686                 if (nf_ct_is_dying(ct))
687                         continue;
688
689                 if (nf_ct_key_equal(h, tuple, zone, net))
690                         return h;
691         }
692         /*
693          * if the nulls value we got at the end of this lookup is
694          * not the expected one, we must restart lookup.
695          * We probably met an item that was moved to another chain.
696          */
697         if (get_nulls_value(n) != bucket) {
698                 NF_CT_STAT_INC_ATOMIC(net, search_restart);
699                 goto begin;
700         }
701
702         return NULL;
703 }
704
705 /* Find a connection corresponding to a tuple. */
706 static struct nf_conntrack_tuple_hash *
707 __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
708                         const struct nf_conntrack_tuple *tuple, u32 hash)
709 {
710         struct nf_conntrack_tuple_hash *h;
711         struct nf_conn *ct;
712
713         rcu_read_lock();
714 begin:
715         h = ____nf_conntrack_find(net, zone, tuple, hash);
716         if (h) {
717                 ct = nf_ct_tuplehash_to_ctrack(h);
718                 if (unlikely(nf_ct_is_dying(ct) ||
719                              !atomic_inc_not_zero(&ct->ct_general.use)))
720                         h = NULL;
721                 else {
722                         if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
723                                 nf_ct_put(ct);
724                                 goto begin;
725                         }
726                 }
727         }
728         rcu_read_unlock();
729
730         return h;
731 }
732
733 struct nf_conntrack_tuple_hash *
734 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
735                       const struct nf_conntrack_tuple *tuple)
736 {
737         return __nf_conntrack_find_get(net, zone, tuple,
738                                        hash_conntrack_raw(tuple, net));
739 }
740 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
741
742 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
743                                        unsigned int hash,
744                                        unsigned int reply_hash)
745 {
746         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
747                            &nf_conntrack_hash[hash]);
748         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
749                            &nf_conntrack_hash[reply_hash]);
750 }
751
752 int
753 nf_conntrack_hash_check_insert(struct nf_conn *ct)
754 {
755         const struct nf_conntrack_zone *zone;
756         struct net *net = nf_ct_net(ct);
757         unsigned int hash, reply_hash;
758         struct nf_conntrack_tuple_hash *h;
759         struct hlist_nulls_node *n;
760         unsigned int sequence;
761
762         zone = nf_ct_zone(ct);
763
764         local_bh_disable();
765         do {
766                 sequence = read_seqcount_begin(&nf_conntrack_generation);
767                 hash = hash_conntrack(net,
768                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
769                 reply_hash = hash_conntrack(net,
770                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
771         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
772
773         /* See if there's one in the list already, including reverse */
774         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
775                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
776                                     zone, net))
777                         goto out;
778
779         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
780                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
781                                     zone, net))
782                         goto out;
783
784         smp_wmb();
785         /* The caller holds a reference to this object */
786         atomic_set(&ct->ct_general.use, 2);
787         __nf_conntrack_hash_insert(ct, hash, reply_hash);
788         nf_conntrack_double_unlock(hash, reply_hash);
789         NF_CT_STAT_INC(net, insert);
790         local_bh_enable();
791         return 0;
792
793 out:
794         nf_conntrack_double_unlock(hash, reply_hash);
795         NF_CT_STAT_INC(net, insert_failed);
796         local_bh_enable();
797         return -EEXIST;
798 }
799 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
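/* Note on the insertion above: the refcount is set to 2 because the hash
 * table now owns one reference and the caller already holds the other, and
 * the preceding smp_wmb() makes sure the conntrack's initialisation is
 * visible before the entry can be found by lockless readers.
 */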
800
801 static inline void nf_ct_acct_update(struct nf_conn *ct,
802                                      enum ip_conntrack_info ctinfo,
803                                      unsigned int len)
804 {
805         struct nf_conn_acct *acct;
806
807         acct = nf_conn_acct_find(ct);
808         if (acct) {
809                 struct nf_conn_counter *counter = acct->counter;
810
811                 atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
812                 atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
813         }
814 }
815
816 static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
817                              const struct nf_conn *loser_ct)
818 {
819         struct nf_conn_acct *acct;
820
821         acct = nf_conn_acct_find(loser_ct);
822         if (acct) {
823                 struct nf_conn_counter *counter = acct->counter;
824                 unsigned int bytes;
825
826                 /* u32 should be fine since we must have seen one packet. */
827                 bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
828                 nf_ct_acct_update(ct, ctinfo, bytes);
829         }
830 }
831
832 /* Resolve race on insertion if this protocol allows this. */
833 static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
834                                enum ip_conntrack_info ctinfo,
835                                struct nf_conntrack_tuple_hash *h)
836 {
837         /* This is the conntrack entry already in hashes that won race. */
838         struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
839         const struct nf_conntrack_l4proto *l4proto;
840         enum ip_conntrack_info oldinfo;
841         struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
842
843         l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
844         if (l4proto->allow_clash &&
845             !nf_ct_is_dying(ct) &&
846             atomic_inc_not_zero(&ct->ct_general.use)) {
847                 if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
848                     nf_ct_match(ct, loser_ct)) {
849                         nf_ct_acct_merge(ct, ctinfo, loser_ct);
850                         nf_conntrack_put(&loser_ct->ct_general);
851                         nf_ct_set(skb, ct, oldinfo);
852                         return NF_ACCEPT;
853                 }
854                 nf_ct_put(ct);
855         }
856         NF_CT_STAT_INC(net, drop);
857         return NF_DROP;
858 }
859
860 /* Confirm a connection given skb; places it in hash table */
861 int
862 __nf_conntrack_confirm(struct sk_buff *skb)
863 {
864         const struct nf_conntrack_zone *zone;
865         unsigned int hash, reply_hash;
866         struct nf_conntrack_tuple_hash *h;
867         struct nf_conn *ct;
868         struct nf_conn_help *help;
869         struct nf_conn_tstamp *tstamp;
870         struct hlist_nulls_node *n;
871         enum ip_conntrack_info ctinfo;
872         struct net *net;
873         unsigned int sequence;
874         int ret = NF_DROP;
875
876         ct = nf_ct_get(skb, &ctinfo);
877         net = nf_ct_net(ct);
878
879         /* ipt_REJECT uses nf_conntrack_attach to attach related
880            ICMP/TCP RST packets in the other direction.  The actual packet
881            which created the connection will be IP_CT_NEW or, for an
882            expected connection, IP_CT_RELATED. */
883         if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
884                 return NF_ACCEPT;
885
886         zone = nf_ct_zone(ct);
887         local_bh_disable();
888
889         do {
890                 sequence = read_seqcount_begin(&nf_conntrack_generation);
891                 /* reuse the hash saved before */
892                 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
893                 hash = scale_hash(hash);
894                 reply_hash = hash_conntrack(net,
895                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
896
897         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
898
899         /* We're not in hash table, and we refuse to set up related
900          * connections for unconfirmed conns.  But packet copies and
901          * REJECT will give spurious warnings here.
902          */
903
904         /* No external references means no one else could have
905          * confirmed us.
906          */
907         WARN_ON(nf_ct_is_confirmed(ct));
908         pr_debug("Confirming conntrack %p\n", ct);
909         /* We have to check the DYING flag after unlink to prevent
910          * a race against nf_ct_get_next_corpse() possibly called from
911          * user context, else we insert an already 'dead' hash, blocking
912          * further use of that particular connection -JM.
913          */
914         nf_ct_del_from_dying_or_unconfirmed_list(ct);
915
916         if (unlikely(nf_ct_is_dying(ct))) {
917                 nf_ct_add_to_dying_list(ct);
918                 goto dying;
919         }
920
921         /* See if there's one in the list already, including reverse:
922            NAT could have grabbed it without realizing, since we're
923            not in the hash.  If there is, we lost the race. */
924         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
925                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
926                                     zone, net))
927                         goto out;
928
929         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
930                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
931                                     zone, net))
932                         goto out;
933
934         /* Timer relative to confirmation time, not original
935            setting time, otherwise we'd get timer wrap in
936            weird delay cases. */
937         ct->timeout += nfct_time_stamp;
938         atomic_inc(&ct->ct_general.use);
939         ct->status |= IPS_CONFIRMED;
940
941         /* set conntrack timestamp, if enabled. */
942         tstamp = nf_conn_tstamp_find(ct);
943         if (tstamp) {
944                 if (skb->tstamp == 0)
945                         __net_timestamp(skb);
946
947                 tstamp->start = ktime_to_ns(skb->tstamp);
948         }
949         /* Since the lookup is lockless, hash insertion must be done after
950          * starting the timer and setting the CONFIRMED bit. The RCU barriers
951          * guarantee that no other CPU can find the conntrack before the above
952          * stores are visible.
953          */
954         __nf_conntrack_hash_insert(ct, hash, reply_hash);
955         nf_conntrack_double_unlock(hash, reply_hash);
956         local_bh_enable();
957
958         help = nfct_help(ct);
959         if (help && help->helper)
960                 nf_conntrack_event_cache(IPCT_HELPER, ct);
961
962         nf_conntrack_event_cache(master_ct(ct) ?
963                                  IPCT_RELATED : IPCT_NEW, ct);
964         return NF_ACCEPT;
965
966 out:
967         nf_ct_add_to_dying_list(ct);
968         ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
969 dying:
970         nf_conntrack_double_unlock(hash, reply_hash);
971         NF_CT_STAT_INC(net, insert_failed);
972         local_bh_enable();
973         return ret;
974 }
975 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
976
977 /* Returns true if a connection corresponds to the tuple (required
978    for NAT). */
979 int
980 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
981                          const struct nf_conn *ignored_conntrack)
982 {
983         struct net *net = nf_ct_net(ignored_conntrack);
984         const struct nf_conntrack_zone *zone;
985         struct nf_conntrack_tuple_hash *h;
986         struct hlist_nulls_head *ct_hash;
987         unsigned int hash, hsize;
988         struct hlist_nulls_node *n;
989         struct nf_conn *ct;
990
991         zone = nf_ct_zone(ignored_conntrack);
992
993         rcu_read_lock();
994  begin:
995         nf_conntrack_get_ht(&ct_hash, &hsize);
996         hash = __hash_conntrack(net, tuple, hsize);
997
998         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
999                 ct = nf_ct_tuplehash_to_ctrack(h);
1000
1001                 if (ct == ignored_conntrack)
1002                         continue;
1003
1004                 if (nf_ct_is_expired(ct)) {
1005                         nf_ct_gc_expired(ct);
1006                         continue;
1007                 }
1008
1009                 if (nf_ct_key_equal(h, tuple, zone, net)) {
1010                         NF_CT_STAT_INC_ATOMIC(net, found);
1011                         rcu_read_unlock();
1012                         return 1;
1013                 }
1014         }
1015
1016         if (get_nulls_value(n) != hash) {
1017                 NF_CT_STAT_INC_ATOMIC(net, search_restart);
1018                 goto begin;
1019         }
1020
1021         rcu_read_unlock();
1022
1023         return 0;
1024 }
1025 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
1026
1027 #define NF_CT_EVICTION_RANGE    8
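/* early_drop() below makes up to NF_CT_EVICTION_RANGE attempts, each time
 * rescaling the new conntrack's raw hash onto the current table, and tries
 * to evict a non-assured, non-dying entry from the chosen bucket once the
 * table is full.
 */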
1028
1029 /* There's a small race here where we may free a just-assured
1030    connection.  Too bad: we're in trouble anyway. */
1031 static unsigned int early_drop_list(struct net *net,
1032                                     struct hlist_nulls_head *head)
1033 {
1034         struct nf_conntrack_tuple_hash *h;
1035         struct hlist_nulls_node *n;
1036         unsigned int drops = 0;
1037         struct nf_conn *tmp;
1038
1039         hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
1040                 tmp = nf_ct_tuplehash_to_ctrack(h);
1041
1042                 if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
1043                         continue;
1044
1045                 if (nf_ct_is_expired(tmp)) {
1046                         nf_ct_gc_expired(tmp);
1047                         continue;
1048                 }
1049
1050                 if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
1051                     !net_eq(nf_ct_net(tmp), net) ||
1052                     nf_ct_is_dying(tmp))
1053                         continue;
1054
1055                 if (!atomic_inc_not_zero(&tmp->ct_general.use))
1056                         continue;
1057
1058                 /* kill only if still in same netns -- might have moved due to
1059                  * SLAB_TYPESAFE_BY_RCU rules.
1060                  *
1061                  * We steal the timer reference.  If that fails timer has
1062                  * already fired or someone else deleted it. Just drop ref
1063                  * and move to next entry.
1064                  */
1065                 if (net_eq(nf_ct_net(tmp), net) &&
1066                     nf_ct_is_confirmed(tmp) &&
1067                     nf_ct_delete(tmp, 0, 0))
1068                         drops++;
1069
1070                 nf_ct_put(tmp);
1071         }
1072
1073         return drops;
1074 }
1075
1076 static noinline int early_drop(struct net *net, unsigned int _hash)
1077 {
1078         unsigned int i;
1079
1080         for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
1081                 struct hlist_nulls_head *ct_hash;
1082                 unsigned int hash, hsize, drops;
1083
1084                 rcu_read_lock();
1085                 nf_conntrack_get_ht(&ct_hash, &hsize);
1086                 hash = reciprocal_scale(_hash++, hsize);
1087
1088                 drops = early_drop_list(net, &ct_hash[hash]);
1089                 rcu_read_unlock();
1090
1091                 if (drops) {
1092                         NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
1093                         return true;
1094                 }
1095         }
1096
1097         return false;
1098 }
1099
1100 static bool gc_worker_skip_ct(const struct nf_conn *ct)
1101 {
1102         return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
1103 }
1104
1105 static bool gc_worker_can_early_drop(const struct nf_conn *ct)
1106 {
1107         const struct nf_conntrack_l4proto *l4proto;
1108
1109         if (!test_bit(IPS_ASSURED_BIT, &ct->status))
1110                 return true;
1111
1112         l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
1113         if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
1114                 return true;
1115
1116         return false;
1117 }
1118
1119 #define DAY     (86400 * HZ)
1120
1121 /* Set an arbitrary timeout large enough not to ever expire; this saves
1122  * us a check for the IPS_OFFLOAD_BIT from the packet path via
1123  * nf_ct_is_expired().
1124  */
1125 static void nf_ct_offload_timeout(struct nf_conn *ct)
1126 {
1127         if (nf_ct_expires(ct) < DAY / 2)
1128                 ct->timeout = nfct_time_stamp + DAY;
1129 }
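/* With DAY == 86400 * HZ jiffies, the refresh above means an offloaded entry
 * the gc worker keeps visiting always has at least half a day left on its
 * timeout, so nf_ct_is_expired() never fires for it on the packet path.
 */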
1130
1131 static void gc_worker(struct work_struct *work)
1132 {
1133         unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
1134         unsigned int i, goal, buckets = 0, expired_count = 0;
1135         unsigned int nf_conntrack_max95 = 0;
1136         struct conntrack_gc_work *gc_work;
1137         unsigned int ratio, scanned = 0;
1138         unsigned long next_run;
1139
1140         gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
1141
1142         goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
1143         i = gc_work->last_bucket;
1144         if (gc_work->early_drop)
1145                 nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
1146
1147         do {
1148                 struct nf_conntrack_tuple_hash *h;
1149                 struct hlist_nulls_head *ct_hash;
1150                 struct hlist_nulls_node *n;
1151                 unsigned int hashsz;
1152                 struct nf_conn *tmp;
1153
1154                 i++;
1155                 rcu_read_lock();
1156
1157                 nf_conntrack_get_ht(&ct_hash, &hashsz);
1158                 if (i >= hashsz)
1159                         i = 0;
1160
1161                 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
1162                         struct net *net;
1163
1164                         tmp = nf_ct_tuplehash_to_ctrack(h);
1165
1166                         scanned++;
1167                         if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
1168                                 nf_ct_offload_timeout(tmp);
1169                                 continue;
1170                         }
1171
1172                         if (nf_ct_is_expired(tmp)) {
1173                                 nf_ct_gc_expired(tmp);
1174                                 expired_count++;
1175                                 continue;
1176                         }
1177
1178                         if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
1179                                 continue;
1180
1181                         net = nf_ct_net(tmp);
1182                         if (atomic_read(&net->ct.count) < nf_conntrack_max95)
1183                                 continue;
1184
1185                         /* need to take reference to avoid possible races */
1186                         if (!atomic_inc_not_zero(&tmp->ct_general.use))
1187                                 continue;
1188
1189                         if (gc_worker_skip_ct(tmp)) {
1190                                 nf_ct_put(tmp);
1191                                 continue;
1192                         }
1193
1194                         if (gc_worker_can_early_drop(tmp))
1195                                 nf_ct_kill(tmp);
1196
1197                         nf_ct_put(tmp);
1198                 }
1199
1200                 /* could check get_nulls_value() here and restart if ct
1201                  * was moved to another chain.  But given gc is best-effort
1202                  * we will just continue with next hash slot.
1203                  */
1204                 rcu_read_unlock();
1205                 cond_resched();
1206         } while (++buckets < goal);
1207
1208         if (gc_work->exiting)
1209                 return;
1210
1211         /*
1212          * Eviction will normally happen from the packet path, and not
1213          * from this gc worker.
1214          *
1215          * This worker is only here to reap expired entries when the system
1216          * has gone idle after a busy period.
1217          *
1218          * The heuristics below are supposed to balance conflicting goals:
1219          *
1220          * 1. Minimize time until we notice a stale entry
1221          * 2. Maximize scan intervals to not waste cycles
1222          *
1223          * Normally, expire ratio will be close to 0.
1224          *
1225          * As soon as a sizeable fraction of the entries have expired,
1226          * increase the scan frequency.
1227          */
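	/* Worked example, assuming HZ == 1000: min_interval is
	 * max(1000 / 128, 1) == 7 jiffies, the cap below is
	 * 16 * HZ / 128 == 125 jiffies, and a scan where more than
	 * GC_EVICT_RATIO (50) percent of the visited entries had expired
	 * snaps next_gc_run back down to min_interval.
	 */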
1228         ratio = scanned ? expired_count * 100 / scanned : 0;
1229         if (ratio > GC_EVICT_RATIO) {
1230                 gc_work->next_gc_run = min_interval;
1231         } else {
1232                 unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
1233
1234                 BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
1235
1236                 gc_work->next_gc_run += min_interval;
1237                 if (gc_work->next_gc_run > max)
1238                         gc_work->next_gc_run = max;
1239         }
1240
1241         next_run = gc_work->next_gc_run;
1242         gc_work->last_bucket = i;
1243         gc_work->early_drop = false;
1244         queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
1245 }
1246
1247 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
1248 {
1249         INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
1250         gc_work->next_gc_run = HZ;
1251         gc_work->exiting = false;
1252 }
1253
1254 static struct nf_conn *
1255 __nf_conntrack_alloc(struct net *net,
1256                      const struct nf_conntrack_zone *zone,
1257                      const struct nf_conntrack_tuple *orig,
1258                      const struct nf_conntrack_tuple *repl,
1259                      gfp_t gfp, u32 hash)
1260 {
1261         struct nf_conn *ct;
1262
1263         /* We don't want any race condition at early drop stage */
1264         atomic_inc(&net->ct.count);
1265
1266         if (nf_conntrack_max &&
1267             unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
1268                 if (!early_drop(net, hash)) {
1269                         if (!conntrack_gc_work.early_drop)
1270                                 conntrack_gc_work.early_drop = true;
1271                         atomic_dec(&net->ct.count);
1272                         net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
1273                         return ERR_PTR(-ENOMEM);
1274                 }
1275         }
1276
1277         /*
1278          * Do not use kmem_cache_zalloc(), as this cache uses
1279          * SLAB_TYPESAFE_BY_RCU.
1280          */
1281         ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
1282         if (ct == NULL)
1283                 goto out;
1284
1285         spin_lock_init(&ct->lock);
1286         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
1287         ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
1288         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
1289         /* save hash for reusing when confirming */
1290         *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
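	/* The REPLY hnnode.pprev slot is unused until this conntrack is
	 * actually hashed, so its storage is borrowed to stash the raw 32-bit
	 * hash; __nf_conntrack_confirm() reads it back and rescales it rather
	 * than recomputing the hash.
	 */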
1291         ct->status = 0;
1292         write_pnet(&ct->ct_net, net);
1293         memset(&ct->__nfct_init_offset[0], 0,
1294                offsetof(struct nf_conn, proto) -
1295                offsetof(struct nf_conn, __nfct_init_offset[0]));
1296
1297         nf_ct_zone_add(ct, zone);
1298
1299         /* Because we use RCU lookups, we set ct_general.use to zero before
1300          * this is inserted in any list.
1301          */
1302         atomic_set(&ct->ct_general.use, 0);
1303         return ct;
1304 out:
1305         atomic_dec(&net->ct.count);
1306         return ERR_PTR(-ENOMEM);
1307 }
1308
1309 struct nf_conn *nf_conntrack_alloc(struct net *net,
1310                                    const struct nf_conntrack_zone *zone,
1311                                    const struct nf_conntrack_tuple *orig,
1312                                    const struct nf_conntrack_tuple *repl,
1313                                    gfp_t gfp)
1314 {
1315         return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
1316 }
1317 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
1318
1319 void nf_conntrack_free(struct nf_conn *ct)
1320 {
1321         struct net *net = nf_ct_net(ct);
1322
1323         /* A freed object has refcnt == 0, that's
1324          * the golden rule for SLAB_TYPESAFE_BY_RCU
1325          */
1326         WARN_ON(atomic_read(&ct->ct_general.use) != 0);
1327
1328         nf_ct_ext_destroy(ct);
1329         nf_ct_ext_free(ct);
1330         kmem_cache_free(nf_conntrack_cachep, ct);
1331         smp_mb__before_atomic();
1332         atomic_dec(&net->ct.count);
1333 }
1334 EXPORT_SYMBOL_GPL(nf_conntrack_free);
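/* Because nf_conntrack_cachep uses SLAB_TYPESAFE_BY_RCU, an object freed
 * above may be recycled for a new conntrack while lockless readers still
 * hold a stale pointer.  That is why lookups take a reference with
 * atomic_inc_not_zero() and then re-check nf_ct_key_equal() before trusting
 * the entry (see __nf_conntrack_find_get()).
 */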
1335
1336
1337 /* Allocate a new conntrack: we return -ENOMEM if classification
1338    failed due to stress.  Otherwise it really is unclassifiable. */
1339 static noinline struct nf_conntrack_tuple_hash *
1340 init_conntrack(struct net *net, struct nf_conn *tmpl,
1341                const struct nf_conntrack_tuple *tuple,
1342                const struct nf_conntrack_l4proto *l4proto,
1343                struct sk_buff *skb,
1344                unsigned int dataoff, u32 hash)
1345 {
1346         struct nf_conn *ct;
1347         struct nf_conn_help *help;
1348         struct nf_conntrack_tuple repl_tuple;
1349         struct nf_conntrack_ecache *ecache;
1350         struct nf_conntrack_expect *exp = NULL;
1351         const struct nf_conntrack_zone *zone;
1352         struct nf_conn_timeout *timeout_ext;
1353         struct nf_conntrack_zone tmp;
1354
1355         if (!nf_ct_invert_tuple(&repl_tuple, tuple, l4proto)) {
1356                 pr_debug("Can't invert tuple.\n");
1357                 return NULL;
1358         }
1359
1360         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1361         ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
1362                                   hash);
1363         if (IS_ERR(ct))
1364                 return (struct nf_conntrack_tuple_hash *)ct;
1365
1366         if (!nf_ct_add_synproxy(ct, tmpl)) {
1367                 nf_conntrack_free(ct);
1368                 return ERR_PTR(-ENOMEM);
1369         }
1370
1371         timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
1372
1373         if (timeout_ext)
1374                 nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
1375                                       GFP_ATOMIC);
1376
1377         nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1378         nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1379         nf_ct_labels_ext_add(ct);
1380
1381         ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
1382         nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
1383                                  ecache ? ecache->expmask : 0,
1384                              GFP_ATOMIC);
1385
1386         local_bh_disable();
1387         if (net->ct.expect_count) {
1388                 spin_lock(&nf_conntrack_expect_lock);
1389                 exp = nf_ct_find_expectation(net, zone, tuple);
1390                 if (exp) {
1391                         pr_debug("expectation arrives ct=%p exp=%p\n",
1392                                  ct, exp);
1393                         /* Welcome, Mr. Bond.  We've been expecting you... */
1394                         __set_bit(IPS_EXPECTED_BIT, &ct->status);
1395                         /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
1396                         ct->master = exp->master;
1397                         if (exp->helper) {
1398                                 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
1399                                 if (help)
1400                                         rcu_assign_pointer(help->helper, exp->helper);
1401                         }
1402
1403 #ifdef CONFIG_NF_CONNTRACK_MARK
1404                         ct->mark = exp->master->mark;
1405 #endif
1406 #ifdef CONFIG_NF_CONNTRACK_SECMARK
1407                         ct->secmark = exp->master->secmark;
1408 #endif
1409                         NF_CT_STAT_INC(net, expect_new);
1410                 }
1411                 spin_unlock(&nf_conntrack_expect_lock);
1412         }
1413         if (!exp)
1414                 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
1415
1416         /* Now it is inserted into the unconfirmed list, bump refcount */
1417         nf_conntrack_get(&ct->ct_general);
1418         nf_ct_add_to_unconfirmed_list(ct);
1419
1420         local_bh_enable();
1421
1422         if (exp) {
1423                 if (exp->expectfn)
1424                         exp->expectfn(ct, exp);
1425                 nf_ct_expect_put(exp);
1426         }
1427
1428         return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
1429 }
1430
1431 /* On success, returns 0, sets skb->_nfct | ctinfo */
1432 static int
1433 resolve_normal_ct(struct nf_conn *tmpl,
1434                   struct sk_buff *skb,
1435                   unsigned int dataoff,
1436                   u_int8_t protonum,
1437                   const struct nf_conntrack_l4proto *l4proto,
1438                   const struct nf_hook_state *state)
1439 {
1440         const struct nf_conntrack_zone *zone;
1441         struct nf_conntrack_tuple tuple;
1442         struct nf_conntrack_tuple_hash *h;
1443         enum ip_conntrack_info ctinfo;
1444         struct nf_conntrack_zone tmp;
1445         struct nf_conn *ct;
1446         u32 hash;
1447
1448         if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
1449                              dataoff, state->pf, protonum, state->net,
1450                              &tuple, l4proto)) {
1451                 pr_debug("Can't get tuple\n");
1452                 return 0;
1453         }
1454
1455         /* look for tuple match */
1456         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1457         hash = hash_conntrack_raw(&tuple, state->net);
1458         h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
1459         if (!h) {
1460                 h = init_conntrack(state->net, tmpl, &tuple, l4proto,
1461                                    skb, dataoff, hash);
1462                 if (!h)
1463                         return 0;
1464                 if (IS_ERR(h))
1465                         return PTR_ERR(h);
1466         }
1467         ct = nf_ct_tuplehash_to_ctrack(h);
1468
1469         /* It exists; we have (non-exclusive) reference. */
1470         if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
1471                 ctinfo = IP_CT_ESTABLISHED_REPLY;
1472         } else {
1473                 /* Once we've had two-way comms, always ESTABLISHED. */
1474                 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1475                         pr_debug("normal packet for %p\n", ct);
1476                         ctinfo = IP_CT_ESTABLISHED;
1477                 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
1478                         pr_debug("related packet for %p\n", ct);
1479                         ctinfo = IP_CT_RELATED;
1480                 } else {
1481                         pr_debug("new packet for %p\n", ct);
1482                         ctinfo = IP_CT_NEW;
1483                 }
1484         }
1485         nf_ct_set(skb, ct, ctinfo);
1486         return 0;
1487 }
1488
1489 /*
1490  * icmp packets need special treatment to handle error messages that are
1491  * related to a connection.
1492  *
1493  * Callers need to check if the skb has a conntrack assigned when this
1494  * helper returns; in that case the skb belongs to an already known connection.
1495  */
1496 static unsigned int __cold
1497 nf_conntrack_handle_icmp(struct nf_conn *tmpl,
1498                          struct sk_buff *skb,
1499                          unsigned int dataoff,
1500                          u8 protonum,
1501                          const struct nf_hook_state *state)
1502 {
1503         int ret;
1504
1505         if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
1506                 ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
1507 #if IS_ENABLED(CONFIG_IPV6)
1508         else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
1509                 ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
1510 #endif
1511         else
1512                 return NF_ACCEPT;
1513
1514         if (ret <= 0) {
1515                 NF_CT_STAT_INC_ATOMIC(state->net, error);
1516                 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1517         }
1518
1519         return ret;
1520 }
1521
1522 unsigned int
1523 nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
1524 {
1525         const struct nf_conntrack_l4proto *l4proto;
1526         enum ip_conntrack_info ctinfo;
1527         struct nf_conn *ct, *tmpl;
1528         u_int8_t protonum;
1529         int dataoff, ret;
1530
1531         tmpl = nf_ct_get(skb, &ctinfo);
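             /* An attached template (e.g. set up by the CT target) only carries
              * configuration for the conntrack we look up or create below; any
              * other attached conntrack means this skb was already handled.
              */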
1532         if (tmpl || ctinfo == IP_CT_UNTRACKED) {
1533                 /* Previously seen (loopback or untracked)?  Ignore. */
1534                 if ((tmpl && !nf_ct_is_template(tmpl)) ||
1535                      ctinfo == IP_CT_UNTRACKED) {
1536                         NF_CT_STAT_INC_ATOMIC(state->net, ignore);
1537                         return NF_ACCEPT;
1538                 }
1539                 skb->_nfct = 0;
1540         }
1541
1542         /* rcu_read_lock()ed by nf_hook_thresh */
1543         dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
1544         if (dataoff <= 0) {
1545                 pr_debug("not prepared to track yet or error occurred\n");
1546                 NF_CT_STAT_INC_ATOMIC(state->net, error);
1547                 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1548                 ret = NF_ACCEPT;
1549                 goto out;
1550         }
1551
1552         l4proto = __nf_ct_l4proto_find(protonum);
1553
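             /* ICMP[v6] errors may reference an existing connection; let the
              * ICMP trackers validate the packet and possibly attach that
              * conntrack before the generic tuple lookup below.
              */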
1554         if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
1555                 ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
1556                                                protonum, state);
1557                 if (ret <= 0) {
1558                         ret = -ret;
1559                         goto out;
1560                 }
1561                 /* ICMP[v6] protocol trackers may assign one conntrack. */
1562                 if (skb->_nfct)
1563                         goto out;
1564         }
1565 repeat:
1566         ret = resolve_normal_ct(tmpl, skb, dataoff,
1567                                 protonum, l4proto, state);
1568         if (ret < 0) {
1569                 /* Too stressed to deal. */
1570                 NF_CT_STAT_INC_ATOMIC(state->net, drop);
1571                 ret = NF_DROP;
1572                 goto out;
1573         }
1574
1575         ct = nf_ct_get(skb, &ctinfo);
1576         if (!ct) {
1577                 /* Not valid part of a connection */
1578                 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1579                 ret = NF_ACCEPT;
1580                 goto out;
1581         }
1582
1583         ret = l4proto->packet(ct, skb, dataoff, ctinfo, state);
1584         if (ret <= 0) {
1585                 /* Invalid: inverse of the return code tells
1586                  * the netfilter core what to do */
1587                 pr_debug("nf_conntrack_in: Can't track with proto module\n");
1588                 nf_conntrack_put(&ct->ct_general);
1589                 skb->_nfct = 0;
1590                 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1591                 if (ret == -NF_DROP)
1592                         NF_CT_STAT_INC_ATOMIC(state->net, drop);
1593                 /* Special case: TCP tracker reports an attempt to reopen a
1594                  * closed/aborted connection. We have to go back and create a
1595                  * fresh conntrack.
1596                  */
1597                 if (ret == -NF_REPEAT)
1598                         goto repeat;
1599                 ret = -ret;
1600                 goto out;
1601         }
1602
1603         if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
1604             !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1605                 nf_conntrack_event_cache(IPCT_REPLY, ct);
1606 out:
1607         if (tmpl)
1608                 nf_ct_put(tmpl);
1609
1610         return ret;
1611 }
1612 EXPORT_SYMBOL_GPL(nf_conntrack_in);
1613
1614 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1615                           const struct nf_conntrack_tuple *orig)
1616 {
1617         bool ret;
1618
1619         rcu_read_lock();
1620         ret = nf_ct_invert_tuple(inverse, orig,
1621                                  __nf_ct_l4proto_find(orig->dst.protonum));
1622         rcu_read_unlock();
1623         return ret;
1624 }
1625 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
1626
1627 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
1628    implicitly racy: see __nf_conntrack_confirm */
1629 void nf_conntrack_alter_reply(struct nf_conn *ct,
1630                               const struct nf_conntrack_tuple *newreply)
1631 {
1632         struct nf_conn_help *help = nfct_help(ct);
1633
1634         /* Should be unconfirmed, so not in hash table yet */
1635         WARN_ON(nf_ct_is_confirmed(ct));
1636
1637         pr_debug("Altering reply tuple of %p to ", ct);
1638         nf_ct_dump_tuple(newreply);
1639
1640         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1641         if (ct->master || (help && !hlist_empty(&help->expectations)))
1642                 return;
1643
1644         rcu_read_lock();
1645         __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1646         rcu_read_unlock();
1647 }
1648 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1649
1650 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1651 void __nf_ct_refresh_acct(struct nf_conn *ct,
1652                           enum ip_conntrack_info ctinfo,
1653                           const struct sk_buff *skb,
1654                           unsigned long extra_jiffies,
1655                           int do_acct)
1656 {
1657         WARN_ON(!skb);
1658
1659         /* Only update if this is not a fixed timeout */
1660         if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1661                 goto acct;
1662
1663         /* If not in hash table, timer will not be active yet */
1664         if (nf_ct_is_confirmed(ct))
1665                 extra_jiffies += nfct_time_stamp;
1666
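             /* Confirmed conntracks store an absolute expiry time (time stamp
              * added above); unconfirmed ones keep the relative interval until
              * they get confirmed.
              */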
1667         ct->timeout = extra_jiffies;
1668 acct:
1669         if (do_acct)
1670                 nf_ct_acct_update(ct, ctinfo, skb->len);
1671 }
1672 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
1673
1674 bool nf_ct_kill_acct(struct nf_conn *ct,
1675                      enum ip_conntrack_info ctinfo,
1676                      const struct sk_buff *skb)
1677 {
1678         nf_ct_acct_update(ct, ctinfo, skb->len);
1679
1680         return nf_ct_delete(ct, 0, 0);
1681 }
1682 EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
1683
1684 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1685
1686 #include <linux/netfilter/nfnetlink.h>
1687 #include <linux/netfilter/nfnetlink_conntrack.h>
1688 #include <linux/mutex.h>
1689
1690 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
1691  * in nf_conntrack_core, since we don't want the protocols to autoload
1692  * or depend on ctnetlink */
1693 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1694                                const struct nf_conntrack_tuple *tuple)
1695 {
1696         if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1697             nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1698                 goto nla_put_failure;
1699         return 0;
1700
1701 nla_put_failure:
1702         return -1;
1703 }
1704 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1705
1706 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1707         [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1708         [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1709 };
1710 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1711
1712 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1713                                struct nf_conntrack_tuple *t)
1714 {
1715         if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1716                 return -EINVAL;
1717
1718         t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1719         t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1720
1721         return 0;
1722 }
1723 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1724
1725 unsigned int nf_ct_port_nlattr_tuple_size(void)
1726 {
1727         static unsigned int size __read_mostly;
1728
1729         if (!size)
1730                 size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1731
1732         return size;
1733 }
1734 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
1735 #endif
1736
1737 /* Used by ipt_REJECT and ip6t_REJECT. */
1738 static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1739 {
1740         struct nf_conn *ct;
1741         enum ip_conntrack_info ctinfo;
1742
1743         /* This ICMP is in the reverse direction to the packet that caused it */
1744         ct = nf_ct_get(skb, &ctinfo);
1745         if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1746                 ctinfo = IP_CT_RELATED_REPLY;
1747         else
1748                 ctinfo = IP_CT_RELATED;
1749
1750         /* Attach to new skbuff, and increment count */
1751         nf_ct_set(nskb, ct, ctinfo);
1752         nf_conntrack_get(skb_nfct(nskb));
1753 }
1754
1755 static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
1756 {
1757         const struct nf_conntrack_l4proto *l4proto;
1758         struct nf_conntrack_tuple_hash *h;
1759         struct nf_conntrack_tuple tuple;
1760         enum ip_conntrack_info ctinfo;
1761         struct nf_nat_hook *nat_hook;
1762         unsigned int status;
1763         struct nf_conn *ct;
1764         int dataoff;
1765         u16 l3num;
1766         u8 l4num;
1767
1768         ct = nf_ct_get(skb, &ctinfo);
1769         if (!ct || nf_ct_is_confirmed(ct))
1770                 return 0;
1771
1772         l3num = nf_ct_l3num(ct);
1773
1774         dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
1775         if (dataoff <= 0)
1776                 return -1;
1777
1778         l4proto = nf_ct_l4proto_find_get(l4num);
1779
1780         if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
1781                              l4num, net, &tuple, l4proto))
1782                 return -1;
1783
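             /* The packet may already have been NAT-mangled; rewrite the tuple
              * with the original-direction addresses/ports so the lookup below
              * uses the pre-NAT tuple stored in the conntrack table.
              */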
1784         if (ct->status & IPS_SRC_NAT) {
1785                 memcpy(tuple.src.u3.all,
1786                        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
1787                        sizeof(tuple.src.u3.all));
1788                 tuple.src.u.all =
1789                         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
1790         }
1791
1792         if (ct->status & IPS_DST_NAT) {
1793                 memcpy(tuple.dst.u3.all,
1794                        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
1795                        sizeof(tuple.dst.u3.all));
1796                 tuple.dst.u.all =
1797                         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
1798         }
1799
1800         h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
1801         if (!h)
1802                 return 0;
1803
1804         /* Store the status bits of the clashing conntrack to re-do NAT
1805          * mangling according to what has already been done to this packet.
1806          */
1807         status = ct->status;
1808
1809         nf_ct_put(ct);
1810         ct = nf_ct_tuplehash_to_ctrack(h);
1811         nf_ct_set(skb, ct, ctinfo);
1812
1813         nat_hook = rcu_dereference(nf_nat_hook);
1814         if (!nat_hook)
1815                 return 0;
1816
1817         if (status & IPS_SRC_NAT &&
1818             nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
1819                                 IP_CT_DIR_ORIGINAL) == NF_DROP)
1820                 return -1;
1821
1822         if (status & IPS_DST_NAT &&
1823             nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
1824                                 IP_CT_DIR_ORIGINAL) == NF_DROP)
1825                 return -1;
1826
1827         return 0;
1828 }
1829
1830 static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
1831                                        const struct sk_buff *skb)
1832 {
1833         const struct nf_conntrack_tuple *src_tuple;
1834         const struct nf_conntrack_tuple_hash *hash;
1835         struct nf_conntrack_tuple srctuple;
1836         enum ip_conntrack_info ctinfo;
1837         struct nf_conn *ct;
1838
1839         ct = nf_ct_get(skb, &ctinfo);
1840         if (ct) {
1841                 src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
1842                 memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
1843                 return true;
1844         }
1845
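             /* No conntrack attached: fall back to extracting an IPv4 tuple
              * from the packet and looking it up in the default zone.
              */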
1846         if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
1847                                NFPROTO_IPV4, dev_net(skb->dev),
1848                                &srctuple))
1849                 return false;
1850
1851         hash = nf_conntrack_find_get(dev_net(skb->dev),
1852                                      &nf_ct_zone_dflt,
1853                                      &srctuple);
1854         if (!hash)
1855                 return false;
1856
1857         ct = nf_ct_tuplehash_to_ctrack(hash);
1858         src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
1859         memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
1860         nf_ct_put(ct);
1861
1862         return true;
1863 }
1864
1865 /* Bring out ya dead! */
1866 static struct nf_conn *
1867 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
1868                 void *data, unsigned int *bucket)
1869 {
1870         struct nf_conntrack_tuple_hash *h;
1871         struct nf_conn *ct;
1872         struct hlist_nulls_node *n;
1873         spinlock_t *lockp;
1874
1875         for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
1876                 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
1877                 local_bh_disable();
1878                 nf_conntrack_lock(lockp);
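                     /* Re-check under the lock: a concurrent resize may have
                      * shrunk the table since the loop condition was evaluated.
                      */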
1879                 if (*bucket < nf_conntrack_htable_size) {
1880                         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
1881                                 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1882                                         continue;
1883                                 ct = nf_ct_tuplehash_to_ctrack(h);
1884                                 if (iter(ct, data))
1885                                         goto found;
1886                         }
1887                 }
1888                 spin_unlock(lockp);
1889                 local_bh_enable();
1890                 cond_resched();
1891         }
1892
1893         return NULL;
1894 found:
1895         atomic_inc(&ct->ct_general.use);
1896         spin_unlock(lockp);
1897         local_bh_enable();
1898         return ct;
1899 }
1900
1901 static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
1902                                   void *data, u32 portid, int report)
1903 {
1904         unsigned int bucket = 0, sequence;
1905         struct nf_conn *ct;
1906
1907         might_sleep();
1908
1909         for (;;) {
1910                 sequence = read_seqcount_begin(&nf_conntrack_generation);
1911
1912                 while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
1913                         /* Time to push up daisies... */
1914
1915                         nf_ct_delete(ct, portid, report);
1916                         nf_ct_put(ct);
1917                         cond_resched();
1918                 }
1919
1920                 if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
1921                         break;
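                     /* A resize ran concurrently and the walk may have missed
                      * entries; restart from the first bucket.
                      */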
1922                 bucket = 0;
1923         }
1924 }
1925
1926 struct iter_data {
1927         int (*iter)(struct nf_conn *i, void *data);
1928         void *data;
1929         struct net *net;
1930 };
1931
1932 static int iter_net_only(struct nf_conn *i, void *data)
1933 {
1934         struct iter_data *d = data;
1935
1936         if (!net_eq(d->net, nf_ct_net(i)))
1937                 return 0;
1938
1939         return d->iter(i, d->data);
1940 }
1941
1942 static void
1943 __nf_ct_unconfirmed_destroy(struct net *net)
1944 {
1945         int cpu;
1946
1947         for_each_possible_cpu(cpu) {
1948                 struct nf_conntrack_tuple_hash *h;
1949                 struct hlist_nulls_node *n;
1950                 struct ct_pcpu *pcpu;
1951
1952                 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1953
1954                 spin_lock_bh(&pcpu->lock);
1955                 hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
1956                         struct nf_conn *ct;
1957
1958                         ct = nf_ct_tuplehash_to_ctrack(h);
1959
1960                         /* we cannot call iter() on the unconfirmed list, since the
1961                          * owning cpu can reallocate ct->ext at any time.
1962                          */
1963                         set_bit(IPS_DYING_BIT, &ct->status);
1964                 }
1965                 spin_unlock_bh(&pcpu->lock);
1966                 cond_resched();
1967         }
1968 }
1969
1970 void nf_ct_unconfirmed_destroy(struct net *net)
1971 {
1972         might_sleep();
1973
1974         if (atomic_read(&net->ct.count) > 0) {
1975                 __nf_ct_unconfirmed_destroy(net);
1976                 nf_queue_nf_hook_drop(net);
1977                 synchronize_net();
1978         }
1979 }
1980 EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
1981
1982 void nf_ct_iterate_cleanup_net(struct net *net,
1983                                int (*iter)(struct nf_conn *i, void *data),
1984                                void *data, u32 portid, int report)
1985 {
1986         struct iter_data d;
1987
1988         might_sleep();
1989
1990         if (atomic_read(&net->ct.count) == 0)
1991                 return;
1992
1993         d.iter = iter;
1994         d.data = data;
1995         d.net = net;
1996
1997         nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
1998 }
1999 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
2000
2001 /**
2002  * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
2003  * @iter: callback to invoke for each conntrack
2004  * @data: data to pass to @iter
2005  *
2006  * Like nf_ct_iterate_cleanup, but first marks conntracks on the
2007  * unconfirmed list as dying (so they will not be inserted into
2008  * the main table).
2009  *
2010  * Can only be called from the module exit path.
2011  */
2012 void
2013 nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
2014 {
2015         struct net *net;
2016
2017         down_read(&net_rwsem);
2018         for_each_net(net) {
2019                 if (atomic_read(&net->ct.count) == 0)
2020                         continue;
2021                 __nf_ct_unconfirmed_destroy(net);
2022                 nf_queue_nf_hook_drop(net);
2023         }
2024         up_read(&net_rwsem);
2025
2026         /* Need to wait for the netns cleanup worker to finish, if it's
2027          * running -- it might have deleted a net namespace from
2028          * the global list, so our __nf_ct_unconfirmed_destroy() might
2029          * not have affected all namespaces.
2030          */
2031         net_ns_barrier();
2032
2033         /* a conntrack could have been unlinked from the unconfirmed list
2034          * before we grabbed the pcpu lock in __nf_ct_unconfirmed_destroy().
2035          * This makes sure it's inserted into the conntrack table.
2036          */
2037         synchronize_net();
2038
2039         nf_ct_iterate_cleanup(iter, data, 0, 0);
2040 }
2041 EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
2042
2043 static int kill_all(struct nf_conn *i, void *data)
2044 {
2045         return net_eq(nf_ct_net(i), data);
2046 }
2047
2048 void nf_conntrack_cleanup_start(void)
2049 {
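             /* Setting ->exiting stops the gc worker from re-arming itself;
              * the pending work is cancelled in nf_conntrack_cleanup_end().
              */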
2050         conntrack_gc_work.exiting = true;
2051         RCU_INIT_POINTER(ip_ct_attach, NULL);
2052 }
2053
2054 void nf_conntrack_cleanup_end(void)
2055 {
2056         RCU_INIT_POINTER(nf_ct_hook, NULL);
2057         cancel_delayed_work_sync(&conntrack_gc_work.dwork);
2058         kvfree(nf_conntrack_hash);
2059
2060         nf_conntrack_proto_fini();
2061         nf_conntrack_seqadj_fini();
2062         nf_conntrack_labels_fini();
2063         nf_conntrack_helper_fini();
2064         nf_conntrack_timeout_fini();
2065         nf_conntrack_ecache_fini();
2066         nf_conntrack_tstamp_fini();
2067         nf_conntrack_acct_fini();
2068         nf_conntrack_expect_fini();
2069
2070         kmem_cache_destroy(nf_conntrack_cachep);
2071 }
2072
2073 /*
2074  * Mishearing the voices in his head, our hero wonders how he's
2075  * supposed to kill the mall.
2076  */
2077 void nf_conntrack_cleanup_net(struct net *net)
2078 {
2079         LIST_HEAD(single);
2080
2081         list_add(&net->exit_list, &single);
2082         nf_conntrack_cleanup_net_list(&single);
2083 }
2084
2085 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
2086 {
2087         int busy;
2088         struct net *net;
2089
2090         /*
2091          * This makes sure all current packets have passed through
2092          * the netfilter framework.  Roll on, two-stage module
2093          * delete...
2094          */
2095         synchronize_net();
2096 i_see_dead_people:
2097         busy = 0;
2098         list_for_each_entry(net, net_exit_list, exit_list) {
2099                 nf_ct_iterate_cleanup(kill_all, net, 0, 0);
2100                 if (atomic_read(&net->ct.count) != 0)
2101                         busy = 1;
2102         }
2103         if (busy) {
2104                 schedule();
2105                 goto i_see_dead_people;
2106         }
2107
2108         list_for_each_entry(net, net_exit_list, exit_list) {
2109                 nf_conntrack_proto_pernet_fini(net);
2110                 nf_conntrack_helper_pernet_fini(net);
2111                 nf_conntrack_ecache_pernet_fini(net);
2112                 nf_conntrack_tstamp_pernet_fini(net);
2113                 nf_conntrack_acct_pernet_fini(net);
2114                 nf_conntrack_expect_pernet_fini(net);
2115                 free_percpu(net->ct.stat);
2116                 free_percpu(net->ct.pcpu_lists);
2117         }
2118 }
2119
2120 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
2121 {
2122         struct hlist_nulls_head *hash;
2123         unsigned int nr_slots, i;
2124
2125         if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
2126                 return NULL;
2127
2128         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
2129         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
2130
2131         hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
2132                               GFP_KERNEL | __GFP_ZERO);
2133
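             /* Each bucket's "nulls" end marker encodes its index so that RCU
              * lookups can detect when an entry moved to another chain.
              */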
2134         if (hash && nulls)
2135                 for (i = 0; i < nr_slots; i++)
2136                         INIT_HLIST_NULLS_HEAD(&hash[i], i);
2137
2138         return hash;
2139 }
2140 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
2141
2142 int nf_conntrack_hash_resize(unsigned int hashsize)
2143 {
2144         int i, bucket;
2145         unsigned int old_size;
2146         struct hlist_nulls_head *hash, *old_hash;
2147         struct nf_conntrack_tuple_hash *h;
2148         struct nf_conn *ct;
2149
2150         if (!hashsize)
2151                 return -EINVAL;
2152
2153         hash = nf_ct_alloc_hashtable(&hashsize, 1);
2154         if (!hash)
2155                 return -ENOMEM;
2156
2157         old_size = nf_conntrack_htable_size;
2158         if (old_size == hashsize) {
2159                 kvfree(hash);
2160                 return 0;
2161         }
2162
2163         local_bh_disable();
2164         nf_conntrack_all_lock();
2165         write_seqcount_begin(&nf_conntrack_generation);
2166
2167         /* Lookups in the old hash might happen in parallel, which means we
2168          * might get false negatives during connection lookup. New connections
2169          * created because of a false negative won't make it into the hash
2170          * though, since that requires taking the locks.
2171          */
2172
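             /* Unlink every entry from the old buckets and insert it at the head
              * of its new bucket, recomputing the hash with the new table size.
              */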
2173         for (i = 0; i < nf_conntrack_htable_size; i++) {
2174                 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
2175                         h = hlist_nulls_entry(nf_conntrack_hash[i].first,
2176                                               struct nf_conntrack_tuple_hash, hnnode);
2177                         ct = nf_ct_tuplehash_to_ctrack(h);
2178                         hlist_nulls_del_rcu(&h->hnnode);
2179                         bucket = __hash_conntrack(nf_ct_net(ct),
2180                                                   &h->tuple, hashsize);
2181                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
2182                 }
2183         }
2184         old_size = nf_conntrack_htable_size;
2185         old_hash = nf_conntrack_hash;
2186
2187         nf_conntrack_hash = hash;
2188         nf_conntrack_htable_size = hashsize;
2189
2190         write_seqcount_end(&nf_conntrack_generation);
2191         nf_conntrack_all_unlock();
2192         local_bh_enable();
2193
2194         synchronize_net();
2195         kvfree(old_hash);
2196         return 0;
2197 }
2198
2199 int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
2200 {
2201         unsigned int hashsize;
2202         int rc;
2203
2204         if (current->nsproxy->net_ns != &init_net)
2205                 return -EOPNOTSUPP;
2206
2207         /* On boot, we can set this without any fancy locking. */
2208         if (!nf_conntrack_hash)
2209                 return param_set_uint(val, kp);
2210
2211         rc = kstrtouint(val, 0, &hashsize);
2212         if (rc)
2213                 return rc;
2214
2215         return nf_conntrack_hash_resize(hashsize);
2216 }
2217 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
2218
2219 static __always_inline unsigned int total_extension_size(void)
2220 {
2221         /* remember to add new extensions below */
2222         BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
2223
2224         return sizeof(struct nf_ct_ext) +
2225                sizeof(struct nf_conn_help)
2226 #if IS_ENABLED(CONFIG_NF_NAT)
2227                 + sizeof(struct nf_conn_nat)
2228 #endif
2229                 + sizeof(struct nf_conn_seqadj)
2230                 + sizeof(struct nf_conn_acct)
2231 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2232                 + sizeof(struct nf_conntrack_ecache)
2233 #endif
2234 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
2235                 + sizeof(struct nf_conn_tstamp)
2236 #endif
2237 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
2238                 + sizeof(struct nf_conn_timeout)
2239 #endif
2240 #ifdef CONFIG_NF_CONNTRACK_LABELS
2241                 + sizeof(struct nf_conn_labels)
2242 #endif
2243 #if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
2244                 + sizeof(struct nf_conn_synproxy)
2245 #endif
2246         ;
2247 }
2248
2249 int nf_conntrack_init_start(void)
2250 {
2251         int max_factor = 8;
2252         int ret = -ENOMEM;
2253         int i;
2254
2255         /* struct nf_ct_ext uses u8 to store offsets/size */
2256         BUILD_BUG_ON(total_extension_size() > 255u);
2257
2258         seqcount_init(&nf_conntrack_generation);
2259
2260         for (i = 0; i < CONNTRACK_LOCKS; i++)
2261                 spin_lock_init(&nf_conntrack_locks[i]);
2262
2263         if (!nf_conntrack_htable_size) {
2264                 /* Idea from tcp.c: use 1/16384 of memory.
2265                  * On i386: 32MB machine has 512 buckets.
2266                  * >= 1GB machines have 16384 buckets.
2267                  * >= 4GB machines have 65536 buckets.
2268                  */
2269                 nf_conntrack_htable_size
2270                         = (((totalram_pages << PAGE_SHIFT) / 16384)
2271                            / sizeof(struct hlist_head));
2272                 if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
2273                         nf_conntrack_htable_size = 65536;
2274                 else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
2275                         nf_conntrack_htable_size = 16384;
2276                 if (nf_conntrack_htable_size < 32)
2277                         nf_conntrack_htable_size = 32;
2278
2279                 /* Use a max. factor of four by default to get the same max as
2280                  * with the old struct list_heads. When a table size is given
2281                  * we use the old value of 8 to avoid reducing the max.
2282                  * entries. */
2283                 max_factor = 4;
2284         }
2285
2286         nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
2287         if (!nf_conntrack_hash)
2288                 return -ENOMEM;
2289
2290         nf_conntrack_max = max_factor * nf_conntrack_htable_size;
2291
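             /* Objects are aligned to NFCT_INFOMASK + 1 so the low bits of the
              * conntrack pointer remain free to carry ctinfo in skb->_nfct
              * (see nf_ct_set()).
              */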
2292         nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
2293                                                 sizeof(struct nf_conn),
2294                                                 NFCT_INFOMASK + 1,
2295                                                 SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
2296         if (!nf_conntrack_cachep)
2297                 goto err_cachep;
2298
2299         ret = nf_conntrack_expect_init();
2300         if (ret < 0)
2301                 goto err_expect;
2302
2303         ret = nf_conntrack_acct_init();
2304         if (ret < 0)
2305                 goto err_acct;
2306
2307         ret = nf_conntrack_tstamp_init();
2308         if (ret < 0)
2309                 goto err_tstamp;
2310
2311         ret = nf_conntrack_ecache_init();
2312         if (ret < 0)
2313                 goto err_ecache;
2314
2315         ret = nf_conntrack_timeout_init();
2316         if (ret < 0)
2317                 goto err_timeout;
2318
2319         ret = nf_conntrack_helper_init();
2320         if (ret < 0)
2321                 goto err_helper;
2322
2323         ret = nf_conntrack_labels_init();
2324         if (ret < 0)
2325                 goto err_labels;
2326
2327         ret = nf_conntrack_seqadj_init();
2328         if (ret < 0)
2329                 goto err_seqadj;
2330
2331         ret = nf_conntrack_proto_init();
2332         if (ret < 0)
2333                 goto err_proto;
2334
2335         conntrack_gc_work_init(&conntrack_gc_work);
2336         queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
2337
2338         return 0;
2339
2340 err_proto:
2341         nf_conntrack_seqadj_fini();
2342 err_seqadj:
2343         nf_conntrack_labels_fini();
2344 err_labels:
2345         nf_conntrack_helper_fini();
2346 err_helper:
2347         nf_conntrack_timeout_fini();
2348 err_timeout:
2349         nf_conntrack_ecache_fini();
2350 err_ecache:
2351         nf_conntrack_tstamp_fini();
2352 err_tstamp:
2353         nf_conntrack_acct_fini();
2354 err_acct:
2355         nf_conntrack_expect_fini();
2356 err_expect:
2357         kmem_cache_destroy(nf_conntrack_cachep);
2358 err_cachep:
2359         kvfree(nf_conntrack_hash);
2360         return ret;
2361 }
2362
2363 static struct nf_ct_hook nf_conntrack_hook = {
2364         .update         = nf_conntrack_update,
2365         .destroy        = destroy_conntrack,
2366         .get_tuple_skb  = nf_conntrack_get_tuple_skb,
2367 };
2368
2369 void nf_conntrack_init_end(void)
2370 {
2371         /* For use by REJECT target */
2372         RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
2373         RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
2374 }
2375
2376 /*
2377  * We need to use special "null" values, not used in the hash table
2378  */
2379 #define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
2380 #define DYING_NULLS_VAL         ((1<<30)+1)
2381 #define TEMPLATE_NULLS_VAL      ((1<<30)+2)
2382
2383 int nf_conntrack_init_net(struct net *net)
2384 {
2385         int ret = -ENOMEM;
2386         int cpu;
2387
2388         BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
2389         atomic_set(&net->ct.count, 0);
2390
2391         net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
2392         if (!net->ct.pcpu_lists)
2393                 goto err_stat;
2394
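             /* Each CPU keeps its own unconfirmed and dying lists; the distinct
              * nulls values defined above let a traversal tell which list it
              * ended on.
              */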
2395         for_each_possible_cpu(cpu) {
2396                 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2397
2398                 spin_lock_init(&pcpu->lock);
2399                 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
2400                 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
2401         }
2402
2403         net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
2404         if (!net->ct.stat)
2405                 goto err_pcpu_lists;
2406
2407         ret = nf_conntrack_expect_pernet_init(net);
2408         if (ret < 0)
2409                 goto err_expect;
2410         ret = nf_conntrack_acct_pernet_init(net);
2411         if (ret < 0)
2412                 goto err_acct;
2413         ret = nf_conntrack_tstamp_pernet_init(net);
2414         if (ret < 0)
2415                 goto err_tstamp;
2416         ret = nf_conntrack_ecache_pernet_init(net);
2417         if (ret < 0)
2418                 goto err_ecache;
2419         ret = nf_conntrack_helper_pernet_init(net);
2420         if (ret < 0)
2421                 goto err_helper;
2422         ret = nf_conntrack_proto_pernet_init(net);
2423         if (ret < 0)
2424                 goto err_proto;
2425         return 0;
2426
2427 err_proto:
2428         nf_conntrack_helper_pernet_fini(net);
2429 err_helper:
2430         nf_conntrack_ecache_pernet_fini(net);
2431 err_ecache:
2432         nf_conntrack_tstamp_pernet_fini(net);
2433 err_tstamp:
2434         nf_conntrack_acct_pernet_fini(net);
2435 err_acct:
2436         nf_conntrack_expect_pernet_fini(net);
2437 err_expect:
2438         free_percpu(net->ct.stat);
2439 err_pcpu_lists:
2440         free_percpu(net->ct.pcpu_lists);
2441 err_stat:
2442         return ret;
2443 }