net: Make cleanup_list and net::cleanup_list of llist type
author    Kirill Tkhai <ktkhai@virtuozzo.com>
          Mon, 19 Feb 2018 09:58:45 +0000 (12:58 +0300)
committer David S. Miller <davem@davemloft.net>
          Tue, 20 Feb 2018 18:23:27 +0000 (13:23 -0500)
This simplifies cleanup queueing and makes the cleanup lists
use llist primitives. Since llist provides its own cmpxchg()
ordering, cleanup_list_lock is no longer needed.

Also, struct llist_node is smaller than struct list_head,
so this patch saves a few bytes in struct net.
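
For readers unfamiliar with llist: the conversion works because a
single cmpxchg() loop replaces the lock on the producer side, and a
single xchg() replaces the locked list_replace_init() on the consumer
side. Below is a minimal userspace model of the two primitives the
patch relies on, written with C11 atomics for illustration only; the
real API lives in <linux/llist.h>, and the kernel's llist_add()
additionally reports whether the list was empty.

#include <stdatomic.h>
#include <stddef.h>

/* Userspace model of the kernel's llist types (illustration only). */
struct llist_node {
        struct llist_node *next;
};

struct llist_head {
        struct llist_node *_Atomic first;
};

/* Push one node with a compare-and-swap loop: no lock is taken and
 * no IRQ state is saved, which is why __put_net() gets simpler. */
static void model_llist_add(struct llist_node *node, struct llist_head *head)
{
        struct llist_node *first = atomic_load(&head->first);

        do {
                node->next = first;
        } while (!atomic_compare_exchange_weak(&head->first, &first, node));
}

/* Detach the whole list in one atomic exchange; this is the
 * "snapshot" that cleanup_net() takes below instead of the locked
 * list_replace_init(). */
static struct llist_node *model_llist_del_all(struct llist_head *head)
{
        return atomic_exchange(&head->first, NULL);
}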

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/net_namespace.h
net/core/net_namespace.c

index 115b01b92f4d29cb4fb873326c0de81f86826be1..d4417495773a7e7220da21f4310e4d85bbe3e7da 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -59,12 +59,13 @@ struct net {
        atomic64_t              cookie_gen;
 
        struct list_head        list;           /* list of network namespaces */
-       struct list_head        cleanup_list;   /* namespaces on death row */
        struct list_head        exit_list;      /* To linked to call pernet exit
                                                 * methods on dead net (net_sem
                                                 * read locked), or to unregister
                                                 * pernet ops (net_sem wr locked).
                                                 */
+       struct llist_node       cleanup_list;   /* namespaces on death row */
+
        struct user_namespace   *user_ns;       /* Owning user namespace */
        struct ucounts          *ucounts;
        spinlock_t              nsid_lock;
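
A struct llist_node is a single pointer where struct list_head holds
two, hence the size saving mentioned in the changelog. Iteration
still recovers the enclosing struct net the way list_for_each_entry()
does, via container_of(); roughly like this sketch, where struct
net_model is a hypothetical stand-in for struct net:

#include <stddef.h>

struct llist_node { struct llist_node *next; };

/* Hypothetical stand-in for struct net, illustration only. */
struct net_model {
        struct llist_node cleanup_list;         /* the field added above */
        /* ... remaining members elided ... */
};

/* llist_entry() is container_of() underneath: subtract the member
 * offset from the node pointer to get back the containing struct. */
#define model_llist_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct net_model *node_to_net(struct llist_node *node)
{
        return model_llist_entry(node, struct net_model, cleanup_list);
}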
index e89a516620ddce749c3a1c08356536991b0a1c5c..abf8a46e94e2fa679ad7476b73d080b180ecf8c2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -481,21 +481,18 @@ static void unhash_nsid(struct net *net, struct net *last)
        spin_unlock_bh(&net->nsid_lock);
 }
 
-static DEFINE_SPINLOCK(cleanup_list_lock);
-static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
+static LLIST_HEAD(cleanup_list);
 
 static void cleanup_net(struct work_struct *work)
 {
        const struct pernet_operations *ops;
        struct net *net, *tmp, *last;
-       struct list_head net_kill_list;
+       struct llist_node *net_kill_list;
        LIST_HEAD(net_exit_list);
        unsigned write;
 
        /* Atomically snapshot the list of namespaces to cleanup */
-       spin_lock_irq(&cleanup_list_lock);
-       list_replace_init(&cleanup_list, &net_kill_list);
-       spin_unlock_irq(&cleanup_list_lock);
+       net_kill_list = llist_del_all(&cleanup_list);
 again:
        write = READ_ONCE(nr_sync_pernet_ops);
        if (write)
@@ -510,7 +507,7 @@ again:
 
        /* Don't let anyone else find us. */
        rtnl_lock();
-       list_for_each_entry(net, &net_kill_list, cleanup_list)
+       llist_for_each_entry(net, net_kill_list, cleanup_list)
                list_del_rcu(&net->list);
        /* Cache last net. After we unlock rtnl, no one new net
         * added to net_namespace_list can assign nsid pointer
@@ -525,7 +522,7 @@ again:
        last = list_last_entry(&net_namespace_list, struct net, list);
        rtnl_unlock();
 
-       list_for_each_entry(net, &net_kill_list, cleanup_list) {
+       llist_for_each_entry(net, net_kill_list, cleanup_list) {
                unhash_nsid(net, last);
                list_add_tail(&net->exit_list, &net_exit_list);
        }
@@ -585,12 +582,7 @@ static DECLARE_WORK(net_cleanup_work, cleanup_net);
 void __put_net(struct net *net)
 {
        /* Cleanup the network namespace in process context */
-       unsigned long flags;
-
-       spin_lock_irqsave(&cleanup_list_lock, flags);
-       list_add(&net->cleanup_list, &cleanup_list);
-       spin_unlock_irqrestore(&cleanup_list_lock, flags);
-
+       llist_add(&net->cleanup_list, &cleanup_list);
        queue_work(netns_wq, &net_cleanup_work);
 }
 EXPORT_SYMBOL_GPL(__put_net);
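
Taken together, the two sides form a classic lock-free push/del-all
pair. A sketch of the resulting flow, reusing the model helpers from
the snippets above (queue_work() and the real teardown are elided):

/* Producer, mirroring __put_net(): push the dead namespace and let
 * the workqueue drain it; no spin_lock_irqsave() is needed. */
static void model_put_net(struct net_model *net, struct llist_head *cleanup)
{
        model_llist_add(&net->cleanup_list, cleanup);
        /* queue_work(netns_wq, &net_cleanup_work); */
}

/* Consumer, mirroring cleanup_net(): one exchange snapshots every
 * queued namespace, then the detached chain is walked privately. */
static void model_cleanup_net(struct llist_head *cleanup)
{
        struct llist_node *kill = model_llist_del_all(cleanup);

        while (kill) {
                struct net_model *net = node_to_net(kill);

                kill = kill->next;
                (void)net;      /* placeholder for the real teardown */
        }
}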