Merge tag 'for-4.15/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Dec 2017 20:53:37 +0000 (12:53 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Dec 2017 20:53:37 +0000 (12:53 -0800)
Pull device mapper fixes from Mike Snitzer:

 - fix a particularly nasty DM core bug in a 4.15 refcount_t conversion.

 - fix various targets to dm_register_target after module __init
   resources created; otherwise racing lvm2 commands could result in a
   NULL pointer during initialization of associated DM kernel module.

 - fix regression in bio-based DM multipath queue_if_no_path handling.

 - fix DM bufio's shrinker to reclaim more than one buffer per scan.

* tag 'for-4.15/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm bufio: fix shrinker scans when (nr_to_scan < retain_target)
  dm mpath: fix bio-based multipath queue_if_no_path handling
  dm: fix various targets to dm_register_target after module __init resources created
  dm table: fix regression from improper dm_dev_internal.count refcount_t conversion

drivers/md/dm-bufio.c
drivers/md/dm-cache-target.c
drivers/md/dm-mpath.c
drivers/md/dm-snap.c
drivers/md/dm-table.c
drivers/md/dm-thin.c

drivers/md/dm-bufio.c
index b8ac591aaaa7070bfbd6d32c20993fb9130961f8..c546b567f3b50a3f43b0c074e9319ca908ec5971 100644
@@ -1611,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
        int l;
        struct dm_buffer *b, *tmp;
        unsigned long freed = 0;
-       unsigned long count = nr_to_scan;
+       unsigned long count = c->n_buffers[LIST_CLEAN] +
+                             c->n_buffers[LIST_DIRTY];
        unsigned long retain_target = get_retain_buffers(c);
 
        for (l = 0; l < LIST_SIZE; l++) {
@@ -1647,8 +1648,11 @@ static unsigned long
 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
        struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
+       unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
+                             READ_ONCE(c->n_buffers[LIST_DIRTY]);
+       unsigned long retain_target = get_retain_buffers(c);
 
-       return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
+       return (count < retain_target) ? 0 : (count - retain_target);
 }
 
 /*
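
The net effect: __scan() now sizes its work from the actual buffer counts,
and dm_bufio_shrink_count() reports only what exceeds the retain target, so
a scan request smaller than retain_target can no longer free a single
buffer and bail out. A minimal userspace sketch of the fixed arithmetic
follows; shrink_count() is a stand-in for illustration, not the kernel
function.

#include <stdio.h>

/*
 * Model of the fixed dm_bufio_shrink_count() calculation: report only the
 * buffers in excess of retain_target, and 0 when fewer are held (instead
 * of letting unsigned subtraction wrap to a huge value).
 */
static unsigned long shrink_count(unsigned long clean, unsigned long dirty,
                                  unsigned long retain_target)
{
        unsigned long count = clean + dirty;

        return (count < retain_target) ? 0 : (count - retain_target);
}

int main(void)
{
        /* At or below the retain target: nothing is offered for reclaim. */
        printf("%lu\n", shrink_count(10, 5, 64));   /* prints 0 */
        /* Above it: only the excess is reported to the shrinker core. */
        printf("%lu\n", shrink_count(100, 28, 64)); /* prints 64 */
        return 0;
}
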
drivers/md/dm-cache-target.c
index cf23a14f9c6a6572955746f040802dcfd34d801d..47407e43b96a168eed8660a7284a60132284d189 100644
@@ -3472,17 +3472,16 @@ static int __init dm_cache_init(void)
 {
        int r;
 
-       r = dm_register_target(&cache_target);
-       if (r) {
-               DMERR("cache target registration failed: %d", r);
-               return r;
-       }
-
        migration_cache = KMEM_CACHE(dm_cache_migration, 0);
-       if (!migration_cache) {
-               dm_unregister_target(&cache_target);
+       if (!migration_cache)
                return -ENOMEM;
+
+       r = dm_register_target(&cache_target);
+       if (r) {
+               DMERR("cache target registration failed: %d", r);
+               kmem_cache_destroy(migration_cache);
+               return r;
        }
 
        return 0;
 }
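
This reordering matters because dm_register_target() makes the target
visible immediately: a racing lvm2 command can invoke the target's ctr,
which dereferences migration_cache, before __init has created it. A hedged
userspace model of the "allocate first, register last" shape; the stub
names create_caches/register_target are invented for illustration.

#include <stdlib.h>

/* Invented stand-ins for the module's resources and registration. */
static int create_caches(void)   { return 0; }  /* e.g. KMEM_CACHE(...) */
static void destroy_caches(void) { }
static int register_target(void) { return 0; }  /* dm_register_target() */

/*
 * Everything the target's ctr may touch is set up before registration;
 * a registration failure tears the resources back down.
 */
static int init_model(void)
{
        int r;

        r = create_caches();
        if (r)
                return r;

        /* From here on, userspace may start creating target instances. */
        r = register_target();
        if (r)
                destroy_caches();

        return r;
}

int main(void)
{
        return init_model() ? EXIT_FAILURE : EXIT_SUCCESS;
}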
 
drivers/md/dm-mpath.c
index c8faa2b8584268f75a8177f39677b94edba55289..f7810cc869ac883e11b60e0ad3f29253444d411a 100644
@@ -457,6 +457,38 @@ do {                                                                       \
                 dm_noflush_suspending((m)->ti));                       \
 } while (0)
 
+/*
+ * Check whether bios must be queued in the device-mapper core rather
+ * than here in the target.
+ *
+ * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
+ * the same value then we are not between multipath_presuspend()
+ * and multipath_resume() calls and we have no need to check
+ * for the DMF_NOFLUSH_SUSPENDING flag.
+ */
+static bool __must_push_back(struct multipath *m, unsigned long flags)
+{
+       return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
+                test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
+               dm_noflush_suspending(m->ti));
+}
+
+/*
+ * Following functions use READ_ONCE to get atomic access to
+ * all m->flags to avoid taking spinlock
+ */
+static bool must_push_back_rq(struct multipath *m)
+{
+       unsigned long flags = READ_ONCE(m->flags);
+       return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
+}
+
+static bool must_push_back_bio(struct multipath *m)
+{
+       unsigned long flags = READ_ONCE(m->flags);
+       return __must_push_back(m, flags);
+}
+
 /*
  * Map cloned requests (request-based multipath)
  */
@@ -478,7 +510,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
                pgpath = choose_pgpath(m, nr_bytes);
 
        if (!pgpath) {
-               if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+               if (must_push_back_rq(m))
                        return DM_MAPIO_DELAY_REQUEUE;
                dm_report_EIO(m);       /* Failed */
                return DM_MAPIO_KILL;
@@ -553,7 +585,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
        }
 
        if (!pgpath) {
-               if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+               if (must_push_back_bio(m))
                        return DM_MAPIO_REQUEUE;
                dm_report_EIO(m);
                return DM_MAPIO_KILL;
@@ -651,8 +683,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
        assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
                   (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
                   (!save_old_value && queue_if_no_path));
-       assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
-                  queue_if_no_path || dm_noflush_suspending(m->ti));
+       assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
        spin_unlock_irqrestore(&m->lock, flags);
 
        if (!queue_if_no_path) {
@@ -1486,7 +1517,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
                        fail_path(pgpath);
 
                if (atomic_read(&m->nr_valid_paths) == 0 &&
-                   !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+                   !must_push_back_rq(m)) {
                        if (error == BLK_STS_IOERR)
                                dm_report_EIO(m);
                        /* complete with the original error */
@@ -1521,8 +1552,12 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
 
        if (atomic_read(&m->nr_valid_paths) == 0 &&
            !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-               dm_report_EIO(m);
-               *error = BLK_STS_IOERR;
+               if (must_push_back_bio(m)) {
+                       r = DM_ENDIO_REQUEUE;
+               } else {
+                       dm_report_EIO(m);
+                       *error = BLK_STS_IOERR;
+               }
                goto done;
        }
 
@@ -1957,13 +1992,6 @@ static int __init dm_multipath_init(void)
 {
        int r;
 
-       r = dm_register_target(&multipath_target);
-       if (r < 0) {
-               DMERR("request-based register failed %d", r);
-               r = -EINVAL;
-               goto bad_register_target;
-       }
-
        kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
        if (!kmultipathd) {
                DMERR("failed to create workqueue kmpathd");
@@ -1985,13 +2013,20 @@ static int __init dm_multipath_init(void)
                goto bad_alloc_kmpath_handlerd;
        }
 
+       r = dm_register_target(&multipath_target);
+       if (r < 0) {
+               DMERR("request-based register failed %d", r);
+               r = -EINVAL;
+               goto bad_register_target;
+       }
+
        return 0;
 
+bad_register_target:
+       destroy_workqueue(kmpath_handlerd);
 bad_alloc_kmpath_handlerd:
        destroy_workqueue(kmultipathd);
 bad_alloc_kmultipathd:
-       dm_unregister_target(&multipath_target);
-bad_register_target:
        return r;
 }
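
The helpers work because m->flags is sampled once with READ_ONCE(), so both
bits are tested against the same snapshot rather than two reads that a
concurrent queue_if_no_path() could interleave, and no spinlock is needed.
A small userspace model of the decision logic; the bit masks and the
noflush flag are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit masks; the kernel's MPATHF_* bit numbers may differ. */
#define QUEUE_IF_NO_PATH        (1UL << 0)
#define SAVED_QUEUE_IF_NO_PATH  (1UL << 1)

static bool noflush_suspending = true; /* dm_noflush_suspending() stand-in */

/*
 * The two bits differ only between presuspend and resume; only in that
 * window does the no-flush-suspend state decide whether I/O is pushed
 * back to the DM core instead of being failed.
 */
static bool __must_push_back(unsigned long flags)
{
        return (!!(flags & QUEUE_IF_NO_PATH) !=
                !!(flags & SAVED_QUEUE_IF_NO_PATH)) &&
               noflush_suspending;
}

static bool must_push_back_rq(unsigned long flags)
{
        return (flags & QUEUE_IF_NO_PATH) || __must_push_back(flags);
}

static bool must_push_back_bio(unsigned long flags)
{
        return __must_push_back(flags);
}

int main(void)
{
        /* queue_if_no_path was saved then cleared for a noflush suspend:
         * both the request-based and bio-based paths push back. */
        unsigned long flags = SAVED_QUEUE_IF_NO_PATH;

        printf("rq=%d bio=%d\n", must_push_back_rq(flags),
               must_push_back_bio(flags));
        return 0;
}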
 
drivers/md/dm-snap.c
index 1113b42e1edae4029f550b71c635ea80c76a46b9..a0613bd8ed00efc17d545a3335c39cb6cfb83919 100644
@@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void)
                return r;
        }
 
-       r = dm_register_target(&snapshot_target);
-       if (r < 0) {
-               DMERR("snapshot target register failed %d", r);
-               goto bad_register_snapshot_target;
-       }
-
-       r = dm_register_target(&origin_target);
-       if (r < 0) {
-               DMERR("Origin target register failed %d", r);
-               goto bad_register_origin_target;
-       }
-
-       r = dm_register_target(&merge_target);
-       if (r < 0) {
-               DMERR("Merge target register failed %d", r);
-               goto bad_register_merge_target;
-       }
-
        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
@@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void)
                goto bad_pending_cache;
        }
 
+       r = dm_register_target(&snapshot_target);
+       if (r < 0) {
+               DMERR("snapshot target register failed %d", r);
+               goto bad_register_snapshot_target;
+       }
+
+       r = dm_register_target(&origin_target);
+       if (r < 0) {
+               DMERR("Origin target register failed %d", r);
+               goto bad_register_origin_target;
+       }
+
+       r = dm_register_target(&merge_target);
+       if (r < 0) {
+               DMERR("Merge target register failed %d", r);
+               goto bad_register_merge_target;
+       }
+
        return 0;
 
-bad_pending_cache:
-       kmem_cache_destroy(exception_cache);
-bad_exception_cache:
-       exit_origin_hash();
-bad_origin_hash:
-       dm_unregister_target(&merge_target);
 bad_register_merge_target:
        dm_unregister_target(&origin_target);
 bad_register_origin_target:
        dm_unregister_target(&snapshot_target);
 bad_register_snapshot_target:
+       kmem_cache_destroy(pending_cache);
+bad_pending_cache:
+       kmem_cache_destroy(exception_cache);
+bad_exception_cache:
+       exit_origin_hash();
+bad_origin_hash:
        dm_exception_store_exit();
 
        return r;
drivers/md/dm-table.c
index 88130b5d95f909ead8441dec7f3fb5d80a7914c7..aaffd0c0ee9a76c71f23f9bb1074ec8057b8c6f7 100644
@@ -453,14 +453,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 
                refcount_set(&dd->count, 1);
                list_add(&dd->list, &t->devices);
+               goto out;
 
        } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
-               refcount_inc(&dd->count);
        }
-
+       refcount_inc(&dd->count);
+out:
        *result = dd->dm_dev;
        return 0;
 }
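
The regression: when a table referenced an already-tracked device with an
unchanged mode, neither branch ran, so the refcount_inc() that used to live
in the shared fall-through was skipped and a later dm_put_device() could
drop the count to zero while references remained. A minimal model of the
fixed flow, with a plain counter standing in for refcount_t and an invented
get_device_model() helper:

#include <stdio.h>

static unsigned int count;      /* stands in for refcount_t dd->count */
static int tracked;             /* device already on the table's list? */

/*
 * Mirrors the fixed dm_get_device(): a newly tracked device starts at 1
 * and jumps past the increment; every later lookup increments, whether or
 * not the mode needed upgrading.
 */
static void get_device_model(int need_mode_upgrade)
{
        if (!tracked) {
                tracked = 1;
                count = 1;              /* refcount_set(&dd->count, 1) */
                return;                 /* goto out */
        }
        if (need_mode_upgrade) {
                /* upgrade_mode(dd, mode, t->md) would run here */
        }
        count++;                        /* refcount_inc(&dd->count) */
}

int main(void)
{
        get_device_model(0);    /* first open: count = 1 */
        get_device_model(0);    /* same-mode reuse: 2 (stayed 1 pre-fix) */
        get_device_model(1);    /* mode upgrade: 3 */
        printf("count = %u\n", count);
        return 0;
}
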
drivers/md/dm-thin.c
index 89e5dff9b4cfc1b87049529238c5c01978345b81..f91d771fff4b6e9d9a488a7a67916326a1e85897 100644
@@ -4355,30 +4355,28 @@ static struct target_type thin_target = {
 
 static int __init dm_thin_init(void)
 {
-       int r;
+       int r = -ENOMEM;
 
        pool_table_init();
 
+       _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+       if (!_new_mapping_cache)
+               return r;
+
        r = dm_register_target(&thin_target);
        if (r)
-               return r;
+               goto bad_new_mapping_cache;
 
        r = dm_register_target(&pool_target);
        if (r)
-               goto bad_pool_target;
-
-       r = -ENOMEM;
-
-       _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
-       if (!_new_mapping_cache)
-               goto bad_new_mapping_cache;
+               goto bad_thin_target;
 
        return 0;
 
-bad_new_mapping_cache:
-       dm_unregister_target(&pool_target);
-bad_pool_target:
+bad_thin_target:
        dm_unregister_target(&thin_target);
+bad_new_mapping_cache:
+       kmem_cache_destroy(_new_mapping_cache);
 
        return r;
 }
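
dm_thin_init() adopts the same ordering and shows the unwind idiom in full:
the error labels are arranged so a failure at step N falls through the
cleanups for steps N-1 down to 1, releasing resources in exact reverse
order of acquisition. A sketch of that goto ladder with invented stub
names:

#include <stdlib.h>

static int make_mapping_cache(void)     { return 0; }  /* KMEM_CACHE(...) */
static void destroy_mapping_cache(void) { }
static int register_thin(void)          { return 0; }
static void unregister_thin(void)       { }
static int register_pool(void)          { return 0; }

static int thin_init_model(void)
{
        int r = -1;     /* stands in for -ENOMEM */

        if (make_mapping_cache())
                return r;

        r = register_thin();
        if (r)
                goto bad_new_mapping_cache;

        r = register_pool();
        if (r)
                goto bad_thin_target;

        return 0;

bad_thin_target:
        unregister_thin();              /* undo step 2 ... */
bad_new_mapping_cache:
        destroy_mapping_cache();        /* ... then fall through to step 1 */
        return r;
}

int main(void)
{
        return thin_init_model() ? EXIT_FAILURE : EXIT_SUCCESS;
}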