Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md...
author Jens Axboe <axboe@kernel.dk>
Thu, 3 Jan 2019 15:21:02 +0000 (08:21 -0700)
committer Jens Axboe <axboe@kernel.dk>
Thu, 3 Jan 2019 15:21:02 +0000 (08:21 -0700)
Pull the pending 4.21 changes for md from Shaohua.

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  md: fix raid10 hang issue caused by barrier
  raid10: refactor common wait code from regular read/write request
  md: remove redundant condition check
  lib/raid6: add option to skip algo benchmarking
  lib/raid6: sort algos in rough performance order
  lib/raid6: check for assembler SSSE3 support
  lib/raid6: avoid __attribute_const__ redefinition
  lib/raid6: add missing include for raid6test
  md: remove set but not used variable 'bi_rdev'

drivers/md/md.c
drivers/md/raid10.c
include/linux/raid/pq.h
lib/Kconfig
lib/raid6/algos.c
lib/raid6/test/Makefile

index 9a0a1e0934d5da3334e56c6b1ed9973d9daa00f7..fd4af4de03b40117c98a4584bf0c4b7eedd3dba9 100644 (file)
@@ -2147,14 +2147,12 @@ EXPORT_SYMBOL(md_integrity_register);
  */
 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
-       struct blk_integrity *bi_rdev;
        struct blk_integrity *bi_mddev;
        char name[BDEVNAME_SIZE];
 
        if (!mddev->gendisk)
                return 0;
 
-       bi_rdev = bdev_get_integrity(rdev->bdev);
        bi_mddev = blk_get_integrity(mddev->gendisk);
 
        if (!bi_mddev) /* nothing to do */
@@ -5693,14 +5691,10 @@ int md_run(struct mddev *mddev)
        return 0;
 
 abort:
-       if (mddev->flush_bio_pool) {
-               mempool_destroy(mddev->flush_bio_pool);
-               mddev->flush_bio_pool = NULL;
-       }
-       if (mddev->flush_pool){
-               mempool_destroy(mddev->flush_pool);
-               mddev->flush_pool = NULL;
-       }
+       mempool_destroy(mddev->flush_bio_pool);
+       mddev->flush_bio_pool = NULL;
+       mempool_destroy(mddev->flush_pool);
+       mddev->flush_pool = NULL;
 
        return err;
 }
index b98e746e7fc4fd05fb8c0eaf2118f9f6a4f778d3..abb5d382f64d1db9fd53f71d89d96064a2a2c437 100644 (file)
@@ -1124,6 +1124,29 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
        kfree(plug);
 }
 
+/*
+ * 1. Register the new request and wait if the reconstruction thread has put
+ * up a bar for new requests. Continue immediately if no resync is active
+ * currently.
+ * 2. If IO spans the reshape position.  Need to wait for reshape to pass.
+ */
+static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
+                                struct bio *bio, sector_t sectors)
+{
+       wait_barrier(conf);
+       while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+           bio->bi_iter.bi_sector < conf->reshape_progress &&
+           bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+               raid10_log(conf->mddev, "wait reshape");
+               allow_barrier(conf);
+               wait_event(conf->wait_barrier,
+                          conf->reshape_progress <= bio->bi_iter.bi_sector ||
+                          conf->reshape_progress >= bio->bi_iter.bi_sector +
+                          sectors);
+               wait_barrier(conf);
+       }
+}
+
 static void raid10_read_request(struct mddev *mddev, struct bio *bio,
                                struct r10bio *r10_bio)
 {
@@ -1132,7 +1155,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
        const int op = bio_op(bio);
        const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
        int max_sectors;
-       sector_t sectors;
        struct md_rdev *rdev;
        char b[BDEVNAME_SIZE];
        int slot = r10_bio->read_slot;
@@ -1166,30 +1188,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
                }
                rcu_read_unlock();
        }
-       /*
-        * Register the new request and wait if the reconstruction
-        * thread has put up a bar for new requests.
-        * Continue immediately if no resync is active currently.
-        */
-       wait_barrier(conf);
-
-       sectors = r10_bio->sectors;
-       while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-           bio->bi_iter.bi_sector < conf->reshape_progress &&
-           bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
-               /*
-                * IO spans the reshape position.  Need to wait for reshape to
-                * pass
-                */
-               raid10_log(conf->mddev, "wait reshape");
-               allow_barrier(conf);
-               wait_event(conf->wait_barrier,
-                          conf->reshape_progress <= bio->bi_iter.bi_sector ||
-                          conf->reshape_progress >= bio->bi_iter.bi_sector +
-                          sectors);
-               wait_barrier(conf);
-       }
 
+       regular_request_wait(mddev, conf, bio, r10_bio->sectors);
        rdev = read_balance(conf, r10_bio, &max_sectors);
        if (!rdev) {
                if (err_rdev) {
@@ -1209,7 +1209,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
                struct bio *split = bio_split(bio, max_sectors,
                                              gfp, &conf->bio_split);
                bio_chain(split, bio);
+               allow_barrier(conf);
                generic_make_request(bio);
+               wait_barrier(conf);
                bio = split;
                r10_bio->master_bio = bio;
                r10_bio->sectors = max_sectors;
@@ -1332,30 +1334,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
                finish_wait(&conf->wait_barrier, &w);
        }
 
-       /*
-        * Register the new request and wait if the reconstruction
-        * thread has put up a bar for new requests.
-        * Continue immediately if no resync is active currently.
-        */
-       wait_barrier(conf);
-
        sectors = r10_bio->sectors;
-       while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-           bio->bi_iter.bi_sector < conf->reshape_progress &&
-           bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
-               /*
-                * IO spans the reshape position.  Need to wait for reshape to
-                * pass
-                */
-               raid10_log(conf->mddev, "wait reshape");
-               allow_barrier(conf);
-               wait_event(conf->wait_barrier,
-                          conf->reshape_progress <= bio->bi_iter.bi_sector ||
-                          conf->reshape_progress >= bio->bi_iter.bi_sector +
-                          sectors);
-               wait_barrier(conf);
-       }
-
+       regular_request_wait(mddev, conf, bio, sectors);
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            (mddev->reshape_backwards
             ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
@@ -1514,7 +1494,9 @@ retry_write:
                struct bio *split = bio_split(bio, r10_bio->sectors,
                                              GFP_NOIO, &conf->bio_split);
                bio_chain(split, bio);
+               allow_barrier(conf);
                generic_make_request(bio);
+               wait_barrier(conf);
                bio = split;
                r10_bio->master_bio = bio;
        }
index ea8505204fdfc6dd17cab030f9ead1af419455ef..605cf46c17bd315919e1c4f3425eb9ebc5e89028 100644 (file)
@@ -35,6 +35,7 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
 #include <limits.h>
 #include <stddef.h>
 #include <sys/mman.h>
+#include <sys/time.h>
 #include <sys/types.h>
 
 /* Not standard, but glibc defines it */
@@ -52,7 +53,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
 
 #define __init
 #define __exit
-#define __attribute_const__ __attribute__((const))
+#ifndef __attribute_const__
+# define __attribute_const__ __attribute__((const))
+#endif
 #define noinline __attribute__((noinline))
 
 #define preempt_enable()
@@ -67,6 +70,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
 #define MODULE_DESCRIPTION(desc)
 #define subsys_initcall(x)
 #define module_exit(x)
+
+#define IS_ENABLED(x) (x)
+#define CONFIG_RAID6_PQ_BENCHMARK 1
 #endif /* __KERNEL__ */
 
 /* Routine choices */
index 79bc2eef9c14c067cd8616c0d10f7c96b2e5f6ab..a9e56539bd11687881ec3e43c1e11cd85d8a4157 100644 (file)
@@ -10,6 +10,14 @@ menu "Library routines"
 config RAID6_PQ
        tristate
 
+config RAID6_PQ_BENCHMARK
+       bool "Automatically choose fastest RAID6 PQ functions"
+       depends on RAID6_PQ
+       default y
+       help
+         Benchmark all available RAID6 PQ functions on init and choose the
+         fastest one.
+
 config BITREVERSE
        tristate
 
index 5065b1e7e32759535942fed1e149079e61c1dcda..7e4f7a8ffa8e2adcf6def548c9ddfc4233728547 100644 (file)
@@ -34,64 +34,64 @@ struct raid6_calls raid6_call;
 EXPORT_SYMBOL_GPL(raid6_call);
 
 const struct raid6_calls * const raid6_algos[] = {
-#if defined(__ia64__)
-       &raid6_intx16,
-       &raid6_intx32,
-#endif
 #if defined(__i386__) && !defined(__arch_um__)
-       &raid6_mmxx1,
-       &raid6_mmxx2,
-       &raid6_sse1x1,
-       &raid6_sse1x2,
-       &raid6_sse2x1,
-       &raid6_sse2x2,
-#ifdef CONFIG_AS_AVX2
-       &raid6_avx2x1,
-       &raid6_avx2x2,
-#endif
 #ifdef CONFIG_AS_AVX512
-       &raid6_avx512x1,
        &raid6_avx512x2,
+       &raid6_avx512x1,
 #endif
-#endif
-#if defined(__x86_64__) && !defined(__arch_um__)
-       &raid6_sse2x1,
-       &raid6_sse2x2,
-       &raid6_sse2x4,
 #ifdef CONFIG_AS_AVX2
-       &raid6_avx2x1,
        &raid6_avx2x2,
-       &raid6_avx2x4,
+       &raid6_avx2x1,
+#endif
+       &raid6_sse2x2,
+       &raid6_sse2x1,
+       &raid6_sse1x2,
+       &raid6_sse1x1,
+       &raid6_mmxx2,
+       &raid6_mmxx1,
 #endif
+#if defined(__x86_64__) && !defined(__arch_um__)
 #ifdef CONFIG_AS_AVX512
-       &raid6_avx512x1,
-       &raid6_avx512x2,
        &raid6_avx512x4,
+       &raid6_avx512x2,
+       &raid6_avx512x1,
 #endif
+#ifdef CONFIG_AS_AVX2
+       &raid6_avx2x4,
+       &raid6_avx2x2,
+       &raid6_avx2x1,
+#endif
+       &raid6_sse2x4,
+       &raid6_sse2x2,
+       &raid6_sse2x1,
 #endif
 #ifdef CONFIG_ALTIVEC
-       &raid6_altivec1,
-       &raid6_altivec2,
-       &raid6_altivec4,
-       &raid6_altivec8,
-       &raid6_vpermxor1,
-       &raid6_vpermxor2,
-       &raid6_vpermxor4,
        &raid6_vpermxor8,
+       &raid6_vpermxor4,
+       &raid6_vpermxor2,
+       &raid6_vpermxor1,
+       &raid6_altivec8,
+       &raid6_altivec4,
+       &raid6_altivec2,
+       &raid6_altivec1,
 #endif
 #if defined(CONFIG_S390)
        &raid6_s390vx8,
 #endif
-       &raid6_intx1,
-       &raid6_intx2,
-       &raid6_intx4,
-       &raid6_intx8,
 #ifdef CONFIG_KERNEL_MODE_NEON
-       &raid6_neonx1,
-       &raid6_neonx2,
-       &raid6_neonx4,
        &raid6_neonx8,
+       &raid6_neonx4,
+       &raid6_neonx2,
+       &raid6_neonx1,
 #endif
+#if defined(__ia64__)
+       &raid6_intx32,
+       &raid6_intx16,
+#endif
+       &raid6_intx8,
+       &raid6_intx4,
+       &raid6_intx2,
+       &raid6_intx1,
        NULL
 };
 
@@ -163,6 +163,11 @@ static inline const struct raid6_calls *raid6_choose_gen(
                        if ((*algo)->valid && !(*algo)->valid())
                                continue;
 
+                       if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+                               best = *algo;
+                               break;
+                       }
+
                        perf = 0;
 
                        preempt_disable();
index 79777645cac9c1243518f4f4bf403cdc567aa9ea..3ab8720aa2f843cad399f30300b0bbfa60f314e6 100644 (file)
@@ -34,6 +34,9 @@ endif
 
 ifeq ($(IS_X86),yes)
         OBJS   += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
+        CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" |         \
+                    gcc -c -x assembler - >&/dev/null &&       \
+                    rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
         CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" |   \
                     gcc -c -x assembler - >&/dev/null &&       \
                     rm ./-.o && echo -DCONFIG_AS_AVX2=1)