Merge tag 'gfs2-4.11.addendum' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2...
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 94f50cac91c617b03025d4d596d8d9d65d2c185b..ec0848fcca02d8960ba671b9b0be5a56ed3fd7ed 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -658,9 +658,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
        struct kmem_cache *cachep;
        int ret, tries = 0;
 
+       rcu_read_lock();
        gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
        if (gl && !lockref_get_not_dead(&gl->gl_lockref))
                gl = NULL;
+       rcu_read_unlock();
 
        *glp = gl;
        if (gl)
@@ -728,15 +730,18 @@ again:
 
        if (ret == -EEXIST) {
                ret = 0;
+               rcu_read_lock();
                tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
                if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
                        if (++tries < 100) {
+                               rcu_read_unlock();
                                cond_resched();
                                goto again;
                        }
                        tmp = NULL;
                        ret = -ENOMEM;
                }
+               rcu_read_unlock();
        } else {
                WARN_ON_ONCE(ret);
        }
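
The two hunks above apply the same pattern: the RCU read lock is held across both the rhashtable lookup and the lockref_get_not_dead() call, so the looked-up glock cannot be freed between being found and being pinned, and the same scope covers the re-lookup on the -EEXIST retry path. Below is a minimal, hypothetical sketch of that lookup-then-pin pattern against the same rhashtable/lockref APIs; struct foo, foo_table, foo_params, foo_find() and the f_* field names are illustrative only and are not part of the gfs2 code.

#include <linux/lockref.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>

/* Hypothetical example object: freed via RCU, refcounted through a lockref. */
struct foo {
	struct rhash_head	f_node;		/* linkage in foo_table */
	struct lockref		f_lockref;	/* refcount + spinlock */
	u64			f_key;
};

static const struct rhashtable_params foo_params = {
	.key_len	= sizeof(u64),
	.key_offset	= offsetof(struct foo, f_key),
	.head_offset	= offsetof(struct foo, f_node),
};

/* Assumed to be set up elsewhere with rhashtable_init(&foo_table, &foo_params). */
static struct rhashtable foo_table;

/*
 * Look up an object and pin it.  The RCU read lock keeps the object
 * from being freed between the lookup and lockref_get_not_dead(); an
 * object whose refcount has already dropped to zero ("dead") is
 * treated as not found, just like a dying glock in the hunks above.
 */
static struct foo *foo_find(u64 key)
{
	struct foo *fp;

	rcu_read_lock();
	fp = rhashtable_lookup_fast(&foo_table, &key, foo_params);
	if (fp && !lockref_get_not_dead(&fp->f_lockref))
		fp = NULL;
	rcu_read_unlock();
	return fp;
}
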
@@ -1420,26 +1425,32 @@ static struct shrinker glock_shrinker = {
  * @examiner: the examiner function to call on each glock
  * @sdp: the filesystem
  *
+ * Note that the examiner function may be called multiple times on the
+ * same glock, so it must be able to cope with that.
  */
 
 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 {
        struct gfs2_glock *gl;
-       struct rhash_head *pos;
-       const struct bucket_table *tbl;
-       int i;
+       struct rhashtable_iter iter;
 
-       rcu_read_lock();
-       tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
-       for (i = 0; i < tbl->size; i++) {
-               rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
+       rhashtable_walk_enter(&gl_hash_table, &iter);
+
+       do {
+               gl = ERR_PTR(rhashtable_walk_start(&iter));
+               if (gl)
+                       continue;
+
+               while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
                        if ((gl->gl_name.ln_sbd == sdp) &&
                            lockref_get_not_dead(&gl->gl_lockref))
                                examiner(gl);
-               }
-       }
-       rcu_read_unlock();
-       cond_resched();
+
+               rhashtable_walk_stop(&iter);
+       } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
+
+       rhashtable_walk_exit(&iter);
 }
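
For reference, here is a sketch of the walker pattern glock_hash_walk() now uses, written against the rhashtable walker API as used in this diff (rhashtable_walk_start() returns 0 or -EAGAIN). It reuses the hypothetical struct foo and foo_table from the sketch after the first two hunks; foo_walk() and cb are likewise made-up names. -EAGAIN means a table resize interrupted the walk, so the walk restarts and the callback has to tolerate seeing the same object more than once, which is what the comment added above glock_hash_walk() warns about. As a design choice, this sketch leaves the RCU read-side section via rhashtable_walk_stop() on every pass and only calls cond_resched() outside of it.

#include <linux/sched.h>	/* cond_resched() */

/*
 * Hypothetical walker: visit every live entry in foo_table and hand it,
 * with a reference held, to cb().  cb() is responsible for dropping the
 * reference.  Entries may be visited more than once if the table is
 * resized during the walk.
 */
static void foo_walk(void (*cb)(struct foo *fp))
{
	struct rhashtable_iter iter;
	struct foo *fp = NULL;
	int ret;

	rhashtable_walk_enter(&foo_table, &iter);

	do {
		ret = rhashtable_walk_start(&iter);
		if (ret == 0) {
			while ((fp = rhashtable_walk_next(&iter)) != NULL &&
			       !IS_ERR(fp)) {
				if (lockref_get_not_dead(&fp->f_lockref))
					cb(fp);
			}
		}
		rhashtable_walk_stop(&iter);	/* ends the RCU read-side section */
		cond_resched();
	} while (ret == -EAGAIN || fp == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}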
 
 /**
@@ -1802,16 +1813,18 @@ void gfs2_glock_exit(void)
 
 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
 {
-       do {
-               gi->gl = rhashtable_walk_next(&gi->hti);
+       while ((gi->gl = rhashtable_walk_next(&gi->hti))) {
                if (IS_ERR(gi->gl)) {
                        if (PTR_ERR(gi->gl) == -EAGAIN)
                                continue;
                        gi->gl = NULL;
+                       return;
                }
-       /* Skip entries for other sb and dead entries */
-       } while ((gi->gl) && ((gi->sdp != gi->gl->gl_name.ln_sbd) ||
-                             __lockref_is_dead(&gi->gl->gl_lockref)));
+               /* Skip entries for other sb and dead entries */
+               if (gi->sdp == gi->gl->gl_name.ln_sbd &&
+                   !__lockref_is_dead(&gi->gl->gl_lockref))
+                       return;
+       }
 }
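
The rewritten gfs2_glock_iter_next() above is the seq_file side of the same walk: ERR_PTR(-EAGAIN) from rhashtable_walk_next() is skipped rather than treated as the end of the table, any other error ends the iteration, and entries that belong to a different superblock or are already dead are filtered out. A hypothetical, generic version of that filtering step might look like the helper below; foo_iter_next() and the match callback are illustrative names built on the earlier struct foo sketch, not gfs2 code.

/*
 * Hypothetical helper: return the next live entry the caller cares
 * about, or NULL when the iteration is finished.  ERR_PTR(-EAGAIN)
 * only means a resize interrupted the walk (entries may repeat), so
 * it is skipped instead of ending the iteration.
 */
static struct foo *foo_iter_next(struct rhashtable_iter *iter,
				 bool (*match)(const struct foo *fp))
{
	struct foo *fp;

	while ((fp = rhashtable_walk_next(iter))) {
		if (IS_ERR(fp)) {
			if (PTR_ERR(fp) == -EAGAIN)
				continue;
			return NULL;
		}
		/* Skip dead entries and entries the caller does not want
		 * (gfs2 compares gl_name.ln_sbd against the mount here). */
		if (match(fp) && !__lockref_is_dead(&fp->f_lockref))
			return fp;
	}
	return NULL;
}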
 
 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)