netfilter: conntrack: move helper destruction to nf_ct_helper_destroy()
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8020db6274b86471149533b40eecd89c496c24d5..14235b144cb5cd861025c669c82017801bed415b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -39,6 +39,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 
@@ -223,17 +224,8 @@ static void death_by_timeout(unsigned long ul_conntrack)
 {
        struct nf_conn *ct = (void *)ul_conntrack;
        struct net *net = nf_ct_net(ct);
-       struct nf_conn_help *help = nfct_help(ct);
-       struct nf_conntrack_helper *helper;
-
-       if (help) {
-               rcu_read_lock();
-               helper = rcu_dereference(help->helper);
-               if (helper && helper->destroy)
-                       helper->destroy(ct);
-               rcu_read_unlock();
-       }
 
+       nf_ct_helper_destroy(ct);
        spin_lock_bh(&nf_conntrack_lock);
        /* Inside lock so preempt is disabled on module removal path.
         * Otherwise we can get spurious warnings. */
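The removed block is not lost: per the subject line it becomes the body of the
new nf_ct_helper_destroy(), defined elsewhere in this patch, presumably in
nf_conntrack_helper.c. A sketch, reconstructed from the lines deleted above:

    void nf_ct_helper_destroy(struct nf_conn *ct)
    {
            struct nf_conn_help *help = nfct_help(ct);
            struct nf_conntrack_helper *helper;

            if (help) {
                    rcu_read_lock();
                    helper = rcu_dereference(help->helper);
                    if (helper && helper->destroy)
                            helper->destroy(ct);
                    rcu_read_unlock();
            }
    }

Factoring it out lets other destruction paths invoke the helper's destroy hook
without duplicating the RCU dance.
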
@@ -398,11 +390,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, ct);
-#ifdef CONFIG_NF_NAT_NEEDED
-       if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
-           test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
-               nf_conntrack_event_cache(IPCT_NATINFO, ct);
-#endif
+
        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, ct);
        return NF_ACCEPT;
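The dedicated IPCT_NATINFO event goes away because it carried no information
of its own: confirmation is exactly when IPCT_NEW/IPCT_RELATED fires, and the
NAT state remains recoverable from the status bits whenever the entry is
dumped. A sketch of the equivalent check a dumper can perform, reusing the
condition deleted above (illustrative only; include_nat_attributes() is a
hypothetical name, not part of this patch):

    /* NAT attributes can be derived from ct->status at dump time */
    if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
        test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
            include_nat_attributes(skb, ct);    /* hypothetical dump helper */
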
@@ -523,6 +511,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
                return ERR_PTR(-ENOMEM);
        }
 
+       spin_lock_init(&ct->lock);
        atomic_set(&ct->ct_general.use, 1);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
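The new per-conntrack spinlock is initialized here, before the initial
reference is set and the object can become visible to other CPUs. Later hunks
take it around fields private to one entry (the accounting counters),
replacing the global nf_conntrack_lock for those updates. The pattern, as a
minimal sketch (assuming a spinlock_t lock member added to struct nf_conn by
this series):

    spin_lock_bh(&ct->lock);        /* _bh: conntrack runs in softirq context */
    /* ... update state owned by this single conntrack entry ... */
    spin_unlock_bh(&ct->lock);
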
@@ -580,6 +569,7 @@ init_conntrack(struct net *net,
        }
 
        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+       nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
 
        spin_lock_bh(&nf_conntrack_lock);
        exp = nf_ct_find_expectation(net, tuple);
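Event caching becomes a per-conntrack extension, so it must be allocated here,
while the entry is still unconfirmed: extensions cannot be added once the
conntrack sits in the hash table. nf_ct_ecache_ext_add() is presumably a thin
wrapper over the generic extension allocator, mirroring nf_ct_acct_ext_add()
on the line above. A hypothetical sketch (NF_CT_EXT_ECACHE assumed to be the
extension id introduced by this series):

    static inline struct nf_conntrack_ecache *
    nf_ct_ecache_ext_add(struct nf_conn *ct, gfp_t gfp)
    {
            return nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
    }
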
@@ -807,13 +797,9 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
                          unsigned long extra_jiffies,
                          int do_acct)
 {
-       int event = 0;
-
        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
        NF_CT_ASSERT(skb);
 
-       spin_lock_bh(&nf_conntrack_lock);
-
        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
                goto acct;
@@ -821,19 +807,14 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
-               event = IPCT_REFRESH;
        } else {
                unsigned long newtime = jiffies + extra_jiffies;
 
                /* Only update the timeout if the new timeout is at least
                   HZ jiffies from the old timeout. Need del_timer for race
                   avoidance (may already be dying). */
-               if (newtime - ct->timeout.expires >= HZ
-                   && del_timer(&ct->timeout)) {
-                       ct->timeout.expires = newtime;
-                       add_timer(&ct->timeout);
-                       event = IPCT_REFRESH;
-               }
+               if (newtime - ct->timeout.expires >= HZ)
+                       mod_timer_pending(&ct->timeout, newtime);
        }
 
 acct:
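mod_timer_pending() folds the old del_timer()/add_timer() pair into one call
that is atomic with respect to the timer base: it rearms the timeout only if
the timer is still pending. If death_by_timeout() has already fired or is
running, the refresh is dropped instead of resurrecting the timer, preserving
the race avoidance noted in the comment. Semantically, roughly:

    /* mod_timer_pending(&ct->timeout, newtime) behaves like the old code: */
    if (del_timer(&ct->timeout)) {          /* succeeds only while pending */
            ct->timeout.expires = newtime;
            add_timer(&ct->timeout);
    }
    /* ...but without the window in which another CPU could observe the
     * timer deactivated; an already-dying conntrack stays dying. */

This is also why the IPCT_REFRESH event bookkeeping above could be dropped
along with the global lock.
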
@@ -842,17 +823,13 @@ acct:
 
                acct = nf_conn_acct_find(ct);
                if (acct) {
+                       spin_lock_bh(&ct->lock);
                        acct[CTINFO2DIR(ctinfo)].packets++;
                        acct[CTINFO2DIR(ctinfo)].bytes +=
                                skb->len - skb_network_offset(skb);
+                       spin_unlock_bh(&ct->lock);
                }
        }
-
-       spin_unlock_bh(&nf_conntrack_lock);
-
-       /* must be unlocked when calling event cache */
-       if (event)
-               nf_conntrack_event_cache(event, ct);
 }
 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
 
@@ -864,14 +841,14 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
        if (do_acct) {
                struct nf_conn_counter *acct;
 
-               spin_lock_bh(&nf_conntrack_lock);
                acct = nf_conn_acct_find(ct);
                if (acct) {
+                       spin_lock_bh(&ct->lock);
                        acct[CTINFO2DIR(ctinfo)].packets++;
                        acct[CTINFO2DIR(ctinfo)].bytes +=
                                skb->len - skb_network_offset(skb);
+                       spin_unlock_bh(&ct->lock);
                }
-               spin_unlock_bh(&nf_conntrack_lock);
        }
 
        if (del_timer(&ct->timeout)) {
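Both accounting sites now serialize only the counter update, and only on the
entry's own lock. The duplicated block is a natural candidate for a helper; a
hypothetical consolidation (ct_acct_update() is not part of this patch):

    static void ct_acct_update(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
                               const struct sk_buff *skb)
    {
            struct nf_conn_counter *acct = nf_conn_acct_find(ct);

            if (acct) {
                    spin_lock_bh(&ct->lock);
                    acct[CTINFO2DIR(ctinfo)].packets++;
                    acct[CTINFO2DIR(ctinfo)].bytes +=
                            skb->len - skb_network_offset(skb);
                    spin_unlock_bh(&ct->lock);
            }
    }
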
@@ -1001,7 +978,7 @@ struct __nf_ct_flush_report {
        int report;
 };
 
-static int kill_all(struct nf_conn *i, void *data)
+static int kill_report(struct nf_conn *i, void *data)
 {
        struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
 
@@ -1013,6 +990,11 @@ static int kill_all(struct nf_conn *i, void *data)
        return 1;
 }
 
+static int kill_all(struct nf_conn *i, void *data)
+{
+       return 1;
+}
+
 void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
 {
        if (vmalloced)
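nf_ct_iterate_cleanup() walks the whole table and kills every entry for which
the callback returns nonzero. kill_report presumably keeps the old flush
semantics, delivering a destroy event tagged with the requester's pid (its
body is only partly visible in this diff), while the new kill_all matches
unconditionally and stays silent. Any predicate fits the contract; a
hypothetical example:

    /* Hypothetical filter: kill only IPv4 entries. Not part of this patch. */
    static int kill_l3proto(struct nf_conn *i, void *data)
    {
            return nf_ct_l3num(i) == (unsigned long)data;
    }

    /* usage:
     * nf_ct_iterate_cleanup(net, kill_l3proto, (void *)(unsigned long)AF_INET);
     */
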
@@ -1023,15 +1005,15 @@ void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
 }
 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
 
-void nf_conntrack_flush(struct net *net, u32 pid, int report)
+void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
 {
        struct __nf_ct_flush_report fr = {
                .pid    = pid,
                .report = report,
        };
-       nf_ct_iterate_cleanup(net, kill_all, &fr);
+       nf_ct_iterate_cleanup(net, kill_report, &fr);
 }
-EXPORT_SYMBOL_GPL(nf_conntrack_flush);
+EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
 
 static void nf_conntrack_cleanup_init_net(void)
 {
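The rename makes the event-reporting variant explicit now that a silent
kill_all exists alongside it. The pid/report pair matches ctnetlink's flush
request, presumably the only external caller; a hedged sketch of such a call
site (the actual context is not shown in this diff):

    /* hypothetical ctnetlink flush path */
    nf_conntrack_flush_report(net, NETLINK_CB(skb).pid, nlmsg_report(nlh));
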
@@ -1042,10 +1024,8 @@ static void nf_conntrack_cleanup_init_net(void)
 
 static void nf_conntrack_cleanup_net(struct net *net)
 {
-       nf_ct_event_cache_flush(net);
-       nf_conntrack_ecache_fini(net);
  i_see_dead_people:
-       nf_conntrack_flush(net, 0, 0);
+       nf_ct_iterate_cleanup(net, kill_all, NULL);
        if (atomic_read(&net->ct.count) != 0) {
                schedule();
                goto i_see_dead_people;
@@ -1056,6 +1036,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
 
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
                             nf_conntrack_htable_size);
+       nf_conntrack_ecache_fini(net);
        nf_conntrack_acct_fini(net);
        nf_conntrack_expect_fini(net);
        free_percpu(net->ct.stat);
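Two ordering changes in teardown: the per-net event cache flush disappears
(event state now lives in each conntrack's extension and dies with it), and
nf_conntrack_ecache_fini() moves after the table is emptied and freed, so the
event machinery outlives the entries whose destruction it may still have to
report. The resulting order, as a sketch:

    /* cleanup order after this change:
     *   kill_all loop -> wait for ct.count == 0 -> free hash table
     *     -> ecache fini -> acct fini -> expect fini -> free stats
     */
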
@@ -1231,9 +1212,6 @@ static int nf_conntrack_init_net(struct net *net)
                ret = -ENOMEM;
                goto err_stat;
        }
-       ret = nf_conntrack_ecache_init(net);
-       if (ret < 0)
-               goto err_ecache;
        net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
                                             &net->ct.hash_vmalloc, 1);
        if (!net->ct.hash) {
@@ -1247,6 +1225,9 @@ static int nf_conntrack_init_net(struct net *net)
        ret = nf_conntrack_acct_init(net);
        if (ret < 0)
                goto err_acct;
+       ret = nf_conntrack_ecache_init(net);
+       if (ret < 0)
+               goto err_ecache;
 
        /* Set up fake conntrack:
            - to never be deleted, not in any hashes */
@@ -1259,14 +1240,14 @@ static int nf_conntrack_init_net(struct net *net)
 
        return 0;
 
+err_ecache:
+       nf_conntrack_acct_fini(net);
 err_acct:
        nf_conntrack_expect_fini(net);
 err_expect:
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
                             nf_conntrack_htable_size);
 err_hash:
-       nf_conntrack_ecache_fini(net);
-err_ecache:
        free_percpu(net->ct.stat);
 err_stat:
        return ret;
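
Since nf_conntrack_ecache_init() now runs last in setup, its error label moves
to the top of the unwind so teardown stays the exact mirror of init. The
invariant, sketched as the resulting pairing:

    /* init:    stat -> hash -> expect -> acct -> ecache
     * unwind:  err_ecache undoes acct, err_acct undoes expect,
     *          err_expect frees the hash, err_hash frees the stats;
     *          each label rolls back everything that succeeded before
     *          the failing step. */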