net: Convert atomic_t net::count to refcount_t
Author:     Kirill Tkhai <ktkhai@virtuozzo.com>
AuthorDate: Fri, 12 Jan 2018 15:28:31 +0000 (18:28 +0300)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Mon, 15 Jan 2018 19:23:42 +0000 (14:23 -0500)

Since a struct net can be obtained from RCU lists, and there is a
race with net destruction, the patch converts net::count to
refcount_t.

This provides sanity checks for the cases where the counter of an
already dead net is incremented, i.e. where maybe_get_net() has to
be used instead of get_net().
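
As a minimal sketch of the race being hardened (find_live_net() is a
hypothetical helper, not part of the patch; for_each_net_rcu() and
maybe_get_net() are the existing kernel APIs):

	/* Under rcu_read_lock() a struct net found on the RCU list may
	 * already be on its way to destruction, with count at zero.
	 * maybe_get_net() takes a reference only while count is still
	 * non-zero, so the caller must handle failure.
	 */
	static struct net *find_live_net(void)
	{
		struct net *net, *found = NULL;

		rcu_read_lock();
		for_each_net_rcu(net) {
			found = maybe_get_net(net);
			if (found)
				break;
		}
		rcu_read_unlock();

		return found;	/* caller drops it with put_net() */
	}

Calling plain get_net() in such a path was a silent use-after-free
window with atomic_t; with refcount_t it now triggers a warning.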

Drivers: allyesconfig and allmodconfig builds are OK.

Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/net_namespace.h
net/core/net-sysfs.c
net/core/net_namespace.c
net/ipv4/inet_timewait_sock.c
net/ipv4/tcp_metrics.c

diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 10f99dafd5acb16f2c477baeab912a730c410344..f8a84a2c2341ee288488d390c912c811a14d4939 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -51,7 +51,7 @@ struct net {
        refcount_t              passive;        /* To decided when the network
                                                 * namespace should be freed.
                                                 */
-       atomic_t                count;          /* To decided when the network
+       refcount_t              count;          /* To decided when the network
                                                 *  namespace should be shut down.
                                                 */
        spinlock_t              rules_mod_lock;
@@ -195,7 +195,7 @@ void __put_net(struct net *net);
 
 static inline struct net *get_net(struct net *net)
 {
-       atomic_inc(&net->count);
+       refcount_inc(&net->count);
        return net;
 }
 
@@ -206,14 +206,14 @@ static inline struct net *maybe_get_net(struct net *net)
         * exists.  If the reference count is zero this
         * function fails and returns NULL.
         */
-       if (!atomic_inc_not_zero(&net->count))
+       if (!refcount_inc_not_zero(&net->count))
                net = NULL;
        return net;
 }
 
 static inline void put_net(struct net *net)
 {
-       if (atomic_dec_and_test(&net->count))
+       if (refcount_dec_and_test(&net->count))
                __put_net(net);
 }
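
The checks these helpers gain can be sketched as follows (not kernel
code as-is; this is the documented behavior of the refcount_t API with
its checks enabled, e.g. CONFIG_REFCOUNT_FULL at the time of this
patch):

	refcount_t r = REFCOUNT_INIT(1);

	refcount_dec_and_test(&r);	/* returns true: count hit zero */

	refcount_inc_not_zero(&r);	/* returns false: object is dead,
					 * so maybe_get_net() backs off
					 * cleanly */

	refcount_inc(&r);		/* WARNs and saturates the counter,
					 * where atomic_inc() would have
					 * silently resurrected the object */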
 
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 799b75268291a2564b3ed83dabdee20c35cb1e16..7bf8b85ade16e910c6d2884b5a10254d28aaaaf8 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -961,7 +961,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
        while (--i >= new_num) {
                struct kobject *kobj = &dev->_rx[i].kobj;
 
-               if (!atomic_read(&dev_net(dev)->count))
+               if (!refcount_read(&dev_net(dev)->count))
                        kobj->uevent_suppress = 1;
                if (dev->sysfs_rx_queue_group)
                        sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -1367,7 +1367,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
        while (--i >= new_num) {
                struct netdev_queue *queue = dev->_tx + i;
 
-               if (!atomic_read(&dev_net(dev)->count))
+               if (!refcount_read(&dev_net(dev)->count))
                        queue->kobj.uevent_suppress = 1;
 #ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
 {
        struct device *dev = &ndev->dev;
 
-       if (!atomic_read(&dev_net(ndev)->count))
+       if (!refcount_read(&dev_net(ndev)->count))
                dev_set_uevent_suppress(dev, 1);
 
        kobject_get(&dev->kobj);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 60a71be75aea063b418a48ade2a1e1c7804ab35c..2213d45fcafd9b45c78edf988af8e8c41a01b425 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -35,7 +35,7 @@ LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
 
 struct net init_net = {
-       .count          = ATOMIC_INIT(1),
+       .count          = REFCOUNT_INIT(1),
        .dev_base_head  = LIST_HEAD_INIT(init_net.dev_base_head),
 };
 EXPORT_SYMBOL(init_net);
@@ -224,10 +224,10 @@ int peernet2id_alloc(struct net *net, struct net *peer)
        bool alloc;
        int id;
 
-       if (atomic_read(&net->count) == 0)
+       if (refcount_read(&net->count) == 0)
                return NETNSA_NSID_NOT_ASSIGNED;
        spin_lock_bh(&net->nsid_lock);
-       alloc = atomic_read(&peer->count) == 0 ? false : true;
+       alloc = refcount_read(&peer->count) == 0 ? false : true;
        id = __peernet2id_alloc(net, peer, &alloc);
        spin_unlock_bh(&net->nsid_lock);
        if (alloc && id >= 0)
@@ -284,7 +284,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
        int error = 0;
        LIST_HEAD(net_exit_list);
 
-       atomic_set(&net->count, 1);
+       refcount_set(&net->count, 1);
        refcount_set(&net->passive, 1);
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 277ff69a312dca1d0bc04be4b0b36db133aaf63b..c3ea4906d237e17dd03553834dc9b6a9bb87a889 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -270,14 +270,14 @@ restart:
                                continue;
                        tw = inet_twsk(sk);
                        if ((tw->tw_family != family) ||
-                               atomic_read(&twsk_net(tw)->count))
+                               refcount_read(&twsk_net(tw)->count))
                                continue;
 
                        if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
                                continue;
 
                        if (unlikely((tw->tw_family != family) ||
-                                    atomic_read(&twsk_net(tw)->count))) {
+                                    refcount_read(&twsk_net(tw)->count))) {
                                inet_twsk_put(tw);
                                goto restart;
                        }
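
The two reads of the netns count around refcount_inc_not_zero() above
are the usual lockless double-check; schematically (a paraphrase of
the hunk, with the tw_family checks omitted):

	/* 1. Cheap unlocked filter: skip still-live namespaces. */
	if (refcount_read(&twsk_net(tw)->count))
		continue;

	/* 2. Pin the timewait sock itself. */
	if (!refcount_inc_not_zero(&tw->tw_refcnt))
		continue;

	/* 3. Re-check under the reference: the state may have changed
	 *    between the unlocked read and the successful increment.
	 */
	if (refcount_read(&twsk_net(tw)->count)) {
		inet_twsk_put(tw);
		goto restart;
	}

refcount_read() serves here purely as a "namespace is dead" predicate
(count == 0), so it maps one-to-one onto the old atomic_read().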
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 759e6bc8327b1e6122fd6485cdbbe756d81ed608..03b51cdcc731f5d85cbe51337f864bc8abc40135 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -892,7 +892,7 @@ static void tcp_metrics_flush_all(struct net *net)
                pp = &hb->chain;
                for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
                        match = net ? net_eq(tm_net(tm), net) :
-                               !atomic_read(&tm_net(tm)->count);
+                               !refcount_read(&tm_net(tm)->count);
                        if (match) {
                                *pp = tm->tcpm_next;
                                kfree_rcu(tm, rcu_head);