md: remove 'go_faster' option from ->sync_request()
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d34e238afa54c24ccaefbc7c6d58974dc2104be6..9157a29c8dbf133c713d67bdaaea45aaf650a255 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -539,7 +539,13 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
        has_nonrot_disk = 0;
        choose_next_idle = 0;
 
-       choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
+       if ((conf->mddev->recovery_cp < this_sector + sectors) ||
+           (mddev_is_clustered(conf->mddev) &&
+           md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+                   this_sector + sectors)))
+               choose_first = 1;
+       else
+               choose_first = 0;
 
        for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
                sector_t dist;
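
The new test amounts to the predicate sketched below: steer reads to the first in-sync disk whenever the range still lies beyond the local resync checkpoint or, on a clustered array, while another node reports that it is resyncing that range. A minimal sketch of the equivalent logic (the helper name read_first_needed() is illustrative, not part of the patch):

static inline bool read_first_needed(struct r1conf *conf,
                                     sector_t this_sector, int sectors)
{
        /* Range not yet covered by the local resync checkpoint? */
        if (conf->mddev->recovery_cp < this_sector + sectors)
                return true;
        /* On a clustered array: is another node resyncing this range? */
        return mddev_is_clustered(conf->mddev) &&
               md_cluster_ops->area_resyncing(conf->mddev, this_sector,
                                              this_sector + sectors);
}
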
@@ -1102,8 +1108,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        md_write_start(mddev, bio); /* wait on superblock update early */
 
        if (bio_data_dir(bio) == WRITE &&
-           bio_end_sector(bio) > mddev->suspend_lo &&
-           bio->bi_iter.bi_sector < mddev->suspend_hi) {
+           ((bio_end_sector(bio) > mddev->suspend_lo &&
+           bio->bi_iter.bi_sector < mddev->suspend_hi) ||
+           (mddev_is_clustered(mddev) &&
+            md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
                /* As the suspend_* range is controlled by
                 * userspace, we want an interruptible
                 * wait.
@@ -1114,7 +1122,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                        prepare_to_wait(&conf->wait_barrier,
                                        &w, TASK_INTERRUPTIBLE);
                        if (bio_end_sector(bio) <= mddev->suspend_lo ||
-                           bio->bi_iter.bi_sector >= mddev->suspend_hi)
+                           bio->bi_iter.bi_sector >= mddev->suspend_hi ||
+                           (mddev_is_clustered(mddev) &&
+                            !md_cluster_ops->area_resyncing(mddev,
+                                    bio->bi_iter.bi_sector, bio_end_sector(bio))))
                                break;
                        schedule();
                }
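
Both hunks above test the same overlap condition: a write must wait while it intersects either the userspace-controlled suspend_lo/suspend_hi window or a range that another cluster node is resyncing, and the loop re-tests the condition after prepare_to_wait() so a wakeup arriving between the test and schedule() cannot be lost. A hedged sketch of the combined check, factored into a hypothetical helper (bio_must_wait() is not in the patch):

static bool bio_must_wait(struct mddev *mddev, struct bio *bio)
{
        /* Overlaps the userspace suspend window? */
        if (bio_end_sector(bio) > mddev->suspend_lo &&
            bio->bi_iter.bi_sector < mddev->suspend_hi)
                return true;
        /* Overlaps a range another cluster node is resyncing? */
        return mddev_is_clustered(mddev) &&
               md_cluster_ops->area_resyncing(mddev,
                                              bio->bi_iter.bi_sector,
                                              bio_end_sector(bio));
}
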
@@ -1561,6 +1572,7 @@ static int raid1_spare_active(struct mddev *mddev)
                struct md_rdev *rdev = conf->mirrors[i].rdev;
                struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
                if (repl
+                   && !test_bit(Candidate, &repl->flags)
                    && repl->recovery_offset == MaxSector
                    && !test_bit(Faulty, &repl->flags)
                    && !test_and_set_bit(In_sync, &repl->flags)) {
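
On a clustered array a newly added device keeps the Candidate rdev flag until the other nodes have acknowledged it, so the extra test stops such a device from being promoted prematurely. A sketch of the activation test as it reads after the patch (the helper name is hypothetical, and the real code's final test_and_set_bit(In_sync, ...), which performs the actual promotion, is left out):

static bool repl_can_activate(struct md_rdev *repl)
{
        return repl &&
               !test_bit(Candidate, &repl->flags) && /* acked cluster-wide */
               repl->recovery_offset == MaxSector && /* fully recovered */
               !test_bit(Faulty, &repl->flags);
}
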
@@ -2468,7 +2480,7 @@ static int init_resync(struct r1conf *conf)
  * that can be installed to exclude normal IO requests.
  */
 
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
 {
        struct r1conf *conf = mddev->private;
        struct r1bio *r1_bio;
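
The matching personality hook in drivers/md/md.h loses the argument as well; after this change the prototype reads approximately as below (a sketch, with the surrounding md_personality members omitted):

        sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr,
                                 int *skipped);
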
@@ -2521,13 +2533,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
                *skipped = 1;
                return sync_blocks;
        }
-       /*
-        * If there is non-resync activity waiting for a turn,
-        * and resync is going fast enough,
-        * then let it through before starting on this new sync request.
-        */
-       if (!go_faster && conf->nr_waiting)
-               msleep_interruptible(1000);
 
        bitmap_cond_end_sync(mddev->bitmap, sector_nr);
        r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
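
Dropping the throttle is safe because raise_barrier(), called immediately after this allocation, already lets any waiting normal I/O drain before resync proceeds, so regular requests do not get starved without the sleep. Abridged from the same file (a sketch; the rest of the body is truncated):

static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
        spin_lock_irq(&conf->resync_lock);

        /* Wait until no block IO is waiting */
        wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
                            conf->resync_lock);
        /* ... then raise conf->barrier and return ... */
}
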