nfsd: move blocked lock handling under a dedicated spinlock
author	Jeff Layton <jlayton@redhat.com>
	Thu, 20 Oct 2016 13:34:31 +0000 (09:34 -0400)
committer	J. Bruce Fields <bfields@redhat.com>
	Mon, 24 Oct 2016 20:51:21 +0000 (16:51 -0400)
Bruce was hitting some lockdep warnings in testing, showing that we
could hit a deadlock with the new CB_NOTIFY_LOCK handling, in a rather
complex situation involving four different spinlocks.

The crux of the matter is that we end up taking the nn->client_lock in
the lm_notify handler. The simplest fix is to just declare a new
per-nfsd_net spinlock to protect the new CB_NOTIFY_LOCK structures.
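
For reference, a minimal sketch of the resulting locking pattern (names
are taken from the patch below; the snippet is illustrative, not the
full queueing path):

	/* blocked_locks_lock protects nn->blocked_locks_lru and the
	 * per-lockowner nbl_list/nbl_lru linkage of each blocked lock */
	spin_lock(&nn->blocked_locks_lock);
	list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
	list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
	spin_unlock(&nn->blocked_locks_lock);

Since lm_notify (and everything else that only touches the blocked-lock
lists) now takes blocked_locks_lock rather than nn->client_lock, the
lock chain reported by lockdep can no longer form.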

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
fs/nfsd/netns.h
fs/nfsd/nfs4state.c

index b10d557f9c9ef0033a6dcbcfcf30141745132e8f..ee36efd5aece83e08712bb0d839423bb30b8674a 100644 (file)
@@ -84,6 +84,8 @@ struct nfsd_net {
        struct list_head client_lru;
        struct list_head close_lru;
        struct list_head del_recall_lru;
+
+       /* protected by blocked_locks_lock */
        struct list_head blocked_locks_lru;
 
        struct delayed_work laundromat_work;
@@ -91,6 +93,9 @@ struct nfsd_net {
        /* client_lock protects the client lru list and session hash table */
        spinlock_t client_lock;
 
+       /* protects blocked_locks_lru */
+       spinlock_t blocked_locks_lock;
+
        struct file *rec_file;
        bool in_grace;
        const struct nfsd4_client_tracking_ops *client_tracking_ops;
index 9752beb78659dd1f02a4411a397fb27b9a8cf4ef..95474196a17ee187b53260d9d46162c72ea1cc85 100644 (file)
@@ -217,7 +217,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
 {
        struct nfsd4_blocked_lock *cur, *found = NULL;
 
-       spin_lock(&nn->client_lock);
+       spin_lock(&nn->blocked_locks_lock);
        list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
                if (fh_match(fh, &cur->nbl_fh)) {
                        list_del_init(&cur->nbl_list);
@@ -226,7 +226,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
                        break;
                }
        }
-       spin_unlock(&nn->client_lock);
+       spin_unlock(&nn->blocked_locks_lock);
        if (found)
                posix_unblock_lock(&found->nbl_lock);
        return found;
@@ -4665,7 +4665,7 @@ nfs4_laundromat(struct nfsd_net *nn)
         * indefinitely once the lock does become free.
         */
        BUG_ON(!list_empty(&reaplist));
-       spin_lock(&nn->client_lock);
+       spin_lock(&nn->blocked_locks_lock);
        while (!list_empty(&nn->blocked_locks_lru)) {
                nbl = list_first_entry(&nn->blocked_locks_lru,
                                        struct nfsd4_blocked_lock, nbl_lru);
@@ -4678,7 +4678,7 @@ nfs4_laundromat(struct nfsd_net *nn)
                list_move(&nbl->nbl_lru, &reaplist);
                list_del_init(&nbl->nbl_list);
        }
-       spin_unlock(&nn->client_lock);
+       spin_unlock(&nn->blocked_locks_lock);
 
        while (!list_empty(&reaplist)) {
                nbl = list_first_entry(&nn->blocked_locks_lru,
@@ -5439,13 +5439,13 @@ nfsd4_lm_notify(struct file_lock *fl)
        bool queue = false;
 
        /* An empty list means that something else is going to be using it */
-       spin_lock(&nn->client_lock);
+       spin_lock(&nn->blocked_locks_lock);
        if (!list_empty(&nbl->nbl_list)) {
                list_del_init(&nbl->nbl_list);
                list_del_init(&nbl->nbl_lru);
                queue = true;
        }
-       spin_unlock(&nn->client_lock);
+       spin_unlock(&nn->blocked_locks_lock);
 
        if (queue)
                nfsd4_run_cb(&nbl->nbl_cb);
@@ -5868,10 +5868,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 
        if (fl_flags & FL_SLEEP) {
                nbl->nbl_time = jiffies;
-               spin_lock(&nn->client_lock);
+               spin_lock(&nn->blocked_locks_lock);
                list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
                list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
-               spin_unlock(&nn->client_lock);
+               spin_unlock(&nn->blocked_locks_lock);
        }
 
        err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
@@ -5900,10 +5900,10 @@ out:
        if (nbl) {
                /* dequeue it if we queued it before */
                if (fl_flags & FL_SLEEP) {
-                       spin_lock(&nn->client_lock);
+                       spin_lock(&nn->blocked_locks_lock);
                        list_del_init(&nbl->nbl_list);
                        list_del_init(&nbl->nbl_lru);
-                       spin_unlock(&nn->client_lock);
+                       spin_unlock(&nn->blocked_locks_lock);
                }
                free_blocked_lock(nbl);
        }
@@ -6943,9 +6943,11 @@ static int nfs4_state_create_net(struct net *net)
        INIT_LIST_HEAD(&nn->client_lru);
        INIT_LIST_HEAD(&nn->close_lru);
        INIT_LIST_HEAD(&nn->del_recall_lru);
-       INIT_LIST_HEAD(&nn->blocked_locks_lru);
        spin_lock_init(&nn->client_lock);
 
+       spin_lock_init(&nn->blocked_locks_lock);
+       INIT_LIST_HEAD(&nn->blocked_locks_lru);
+
        INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
        get_net(net);
 
@@ -7063,14 +7065,14 @@ nfs4_state_shutdown_net(struct net *net)
        }
 
        BUG_ON(!list_empty(&reaplist));
-       spin_lock(&nn->client_lock);
+       spin_lock(&nn->blocked_locks_lock);
        while (!list_empty(&nn->blocked_locks_lru)) {
                nbl = list_first_entry(&nn->blocked_locks_lru,
                                        struct nfsd4_blocked_lock, nbl_lru);
                list_move(&nbl->nbl_lru, &reaplist);
                list_del_init(&nbl->nbl_list);
        }
-       spin_unlock(&nn->client_lock);
+       spin_unlock(&nn->blocked_locks_lock);
 
        while (!list_empty(&reaplist)) {
                nbl = list_first_entry(&nn->blocked_locks_lru,