// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/md_p.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "md-bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *    use_far_sets (stored in bit 17 of layout)
 *    use_far_sets_bugfixed (stored in bit 18 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.  Each device
 * is divided into far_copies sections.  In each section, chunks are laid out
 * in a style similar to raid0, but near_copies copies of each chunk are stored
 * (each on a different drive).  The starting device for each section is offset
 * near_copies from the starting device of the previous section.  Thus there
 * are (near_copies * far_copies) copies of each chunk, and each is on a
 * different drive.  near_copies and far_copies must be at least one, and
 * their product is at most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are in adjacent stripes.
 *
 * The far and offset algorithms are handled slightly differently if
 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
 * sets that are (near_copies * far_copies) in size.  The far copied stripes
 * are still shifted by 'near_copies' devices, but this shifting stays confined
 * to the set rather than the entire array.  This is done to improve the number
 * of device combinations that can fail without causing the array to fail.
 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
 * on a device):
 *    A B C D    A B C D E
 *      ...         ...
 *    D A B C    E A B C D
 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
 *    [A B] [C D]    [A B] [C D E]
 *    |...| |...|    |...| | ... |
 *    [B A] [D C]    [B A] [E C D]
 */
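
/*
 * For reference, the bit assignments above decode from the layout word
 * roughly as follows (an illustrative sketch only; setup_geo() later in
 * this file holds the authoritative decoding):
 *
 *	nc = layout & 255;		// near_copies
 *	fc = (layout >> 8) & 255;	// far_copies
 *	fo = layout & (1 << 16);	// far_offset
 *	use_far_sets = layout & (1 << 17);
 *	use_far_sets_bugfixed = layout & (1 << 18);
 */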

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);

#define raid10_log(md, fmt, args...)				\
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)

#include "raid1-10.c"

/*
 * for resync bio, r10bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r10bio *get_resync_r10bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void *r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);

	/* allocate an r10bio with room for raid_disks entries in the
	 * bios array */
	return kzalloc(size, gfp_flags);
}

#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
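
/*
 * Worked numbers (assuming the 64KiB RESYNC_BLOCK_SIZE defined in
 * raid1-10.c): RESYNC_SECTORS is 128, RESYNC_DEPTH allows
 * 32MiB/64KiB = 512 in-flight resync requests, and
 * CLUSTER_RESYNC_WINDOW_SECTORS is 65536 sectors (32MiB).
 */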

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 */
static void *r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	struct r10bio *r10_bio;
	struct bio *bio;
	int j;
	int nalloc, nalloc_rp;
	struct resync_pages *rps;

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
	if (!r10_bio)
		return NULL;

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	/* allocate once for all bios */
	if (!conf->have_replacement)
		nalloc_rp = nalloc;
	else
		nalloc_rp = nalloc * 2;
	rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
	if (!rps)
		goto out_free_r10bio;

	/*
	 * Allocate bios.
	 */
	for (j = nalloc ; j-- ; ) {
		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
		if (!bio)
			goto out_free_bio;
		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
		r10_bio->devs[j].bio = bio;
		if (!conf->have_replacement)
			continue;
		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
		if (!bio)
			goto out_free_bio;
		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
		r10_bio->devs[j].repl_bio = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 * where needed.
	 */
	for (j = 0; j < nalloc; j++) {
		struct bio *rbio = r10_bio->devs[j].repl_bio;
		struct resync_pages *rp, *rp_repl;

		rp = &rps[j];
		if (rbio)
			rp_repl = &rps[nalloc + j];

		bio = r10_bio->devs[j].bio;

		if (!j || test_bit(MD_RECOVERY_SYNC,
				   &conf->mddev->recovery)) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
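			/* recovery: the write bios carry the same data the
			 * read bio fetched, so share bio[0]'s pages instead
			 * of allocating new ones; resync_get_all_pages()
			 * takes an extra reference on each page.
			 */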
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r10_bio;
		bio->bi_private = rp;
		if (rbio) {
			memcpy(rp_repl, rp, sizeof(*rp));
			rbio->bi_private = rp_repl;
		}
	}

	return r10_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

	j = 0;
out_free_bio:
	for ( ; j < nalloc; j++) {
		if (r10_bio->devs[j].bio)
			bio_uninit(r10_bio->devs[j].bio);
		kfree(r10_bio->devs[j].bio);
		if (r10_bio->devs[j].repl_bio)
			bio_uninit(r10_bio->devs[j].repl_bio);
		kfree(r10_bio->devs[j].repl_bio);
	}
	kfree(rps);
out_free_r10bio:
	rbio_pool_free(r10_bio, conf);
	return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
	struct r10conf *conf = data;
	struct r10bio *r10bio = __r10_bio;
	int j;
	struct resync_pages *rp = NULL;

	for (j = conf->copies; j--; ) {
		struct bio *bio = r10bio->devs[j].bio;

		if (bio) {
			rp = get_resync_pages(bio);
			resync_free_pages(rp);
			bio_uninit(bio);
			kfree(bio);
		}

		bio = r10bio->devs[j].repl_bio;
		if (bio) {
			bio_uninit(bio);
			kfree(bio);
		}
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	rbio_pool_free(r10bio, conf);
}

static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
	int i;

	for (i = 0; i < conf->geo.raid_disks; i++) {
		struct bio **bio = &r10_bio->devs[i].bio;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
		bio = &r10_bio->devs[i].repl_bio;
		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r10bio(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, &conf->r10bio_pool);
}

static void put_buf(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, &conf->r10buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r10bio *r10_bio)
{
	unsigned long flags;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(struct r10bio *r10_bio)
{
	struct bio *bio = r10_bio->master_bio;
	struct r10conf *conf = r10_bio->mddev->private;

	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
		bio_end_io_acct(bio, r10_bio->start_time);
	bio_endio(bio);
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered the given bio
 */
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
			 struct bio *bio, int *slotp, int *replp)
{
	int slot;
	int repl = 0;

	for (slot = 0; slot < conf->geo.raid_disks; slot++) {
		if (r10_bio->devs[slot].bio == bio)
			break;
		if (r10_bio->devs[slot].repl_bio == bio) {
			repl = 1;
			break;
		}
	}

	update_head_pos(slot, r10_bio);

	if (slotp)
		*slotp = slot;
	if (replp)
		*replp = repl;
	return r10_bio->devs[slot].devnum;
}

static void raid10_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r10bio *r10_bio = bio->bi_private;
	int slot;
	struct md_rdev *rdev;
	struct r10conf *conf = r10_bio->mddev->private;

	slot = r10_bio->read_slot;
	rdev = r10_bio->devs[slot].rdev;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

	if (uptodate) {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	} else {
		/* If all other devices that store this block have
		 * failed, we want to return the error upwards rather
		 * than fail the last device.  Here we redefine
		 * "uptodate" to mean "Don't want to retry"
		 */
		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
			     rdev->raid_disk))
			uptodate = 1;
	}
	if (uptodate) {
		raid_end_bio_io(r10_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error - keep the refcount on the rdev
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r10_bio->sector);
		set_bit(R10BIO_ReadError, &r10_bio->state);
		reschedule_retry(r10_bio);
	}
}

static void close_write(struct r10bio *r10_bio)
{
	/* clear the bitmap if all writes complete successfully */
	md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
			   r10_bio->sectors,
			   !test_bit(R10BIO_Degraded, &r10_bio->state),
			   0);
	md_write_end(r10_bio->mddev);
}

static void one_write_done(struct r10bio *r10_bio)
{
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		if (test_bit(R10BIO_WriteError, &r10_bio->state))
			reschedule_retry(r10_bio);
		else {
			close_write(r10_bio);
			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				raid_end_bio_io(r10_bio);
		}
	}
}

static void raid10_end_write_request(struct bio *bio)
{
	struct r10bio *r10_bio = bio->bi_private;
	int dev;
	int dec_rdev = 1;
	struct r10conf *conf = r10_bio->mddev->private;
	int slot, repl;
	struct md_rdev *rdev = NULL;
	struct bio *to_put = NULL;
	bool discard_error;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);

	if (repl)
		rdev = conf->mirrors[dev].replacement;
	if (!rdev) {
		smp_rmb();
		repl = 0;
		rdev = conf->mirrors[dev].rdev;
	}
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		if (repl)
			/* Never record new bad blocks to replacement,
			 * just fail it.
			 */
			md_error(rdev->mddev, rdev);
		else {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);

			dec_rdev = 0;
			if (test_bit(FailFast, &rdev->flags) &&
			    (bio->bi_opf & MD_FAILFAST)) {
				md_error(rdev->mddev, rdev);
			}

			/*
			 * When the device is faulty, it is not necessary to
			 * handle the write error.
			 */
			if (!test_bit(Faulty, &rdev->flags))
				set_bit(R10BIO_WriteError, &r10_bio->state);
			else {
				/* Fail the request */
				set_bit(R10BIO_Degraded, &r10_bio->state);
				r10_bio->devs[slot].bio = NULL;
				to_put = bio;
				dec_rdev = 1;
			}
		}
	} else {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		/*
		 * Do not set R10BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such a device for properly reading the data back (we could
		 * potentially use it, if the current write had fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here).
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R10BIO_Uptodate, &r10_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev,
				r10_bio->devs[slot].addr,
				r10_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			bio_put(bio);
			if (repl)
				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
			else
				r10_bio->devs[slot].bio = IO_MADE_GOOD;
			dec_rdev = 0;
			set_bit(R10BIO_MadeGood, &r10_bio->state);
		}
	}

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	one_write_done(r10_bio);
	if (dec_rdev)
		rdev_dec_pending(rdev, conf->mddev);
	if (to_put)
		bio_put(to_put);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */

static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
	int n, f;
	sector_t sector;
	sector_t chunk;
	sector_t stripe;
	int dev;
	int slot = 0;
	int last_far_set_start, last_far_set_size;

	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
	last_far_set_start *= geo->far_set_size;

	last_far_set_size = geo->far_set_size;
	last_far_set_size += (geo->raid_disks % geo->far_set_size);

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> geo->chunk_shift;
	sector = r10bio->sector & geo->chunk_mask;

	chunk *= geo->near_copies;
	stripe = chunk;
	dev = sector_div(stripe, geo->raid_disks);
	if (geo->far_offset)
		stripe *= geo->far_copies;

	sector += stripe << geo->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < geo->near_copies; n++) {
		int d = dev;
		int set;
		sector_t s = sector;
		r10bio->devs[slot].devnum = d;
		r10bio->devs[slot].addr = s;
		slot++;

		for (f = 1; f < geo->far_copies; f++) {
			set = d / geo->far_set_size;
			d += geo->near_copies;

			if ((geo->raid_disks % geo->far_set_size) &&
			    (d > last_far_set_start)) {
				d -= last_far_set_start;
				d %= last_far_set_size;
				d += last_far_set_start;
			} else {
				d %= geo->far_set_size;
				d += geo->far_set_size * set;
			}
			s += geo->stride;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;
			slot++;
		}
		dev++;
		if (dev >= geo->raid_disks) {
			dev = 0;
			sector += (geo->chunk_mask + 1);
		}
	}
}

static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
{
	struct geom *geo = &conf->geo;

	if (conf->reshape_progress != MaxSector &&
	    ((r10bio->sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards)) {
		set_bit(R10BIO_Previous, &r10bio->state);
		geo = &conf->prev;
	} else
		clear_bit(R10BIO_Previous, &r10bio->state);

	__raid10_find_phys(geo, r10bio);
}

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
	sector_t offset, chunk, vchunk;
	/* Never use conf->prev as this is only called during resync
	 * or recovery, so reshape isn't happening
	 */
	struct geom *geo = &conf->geo;
	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
	int far_set_size = geo->far_set_size;
	int last_far_set_start;

	if (geo->raid_disks % geo->far_set_size) {
		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
		last_far_set_start *= geo->far_set_size;

		if (dev >= last_far_set_start) {
			far_set_size = geo->far_set_size;
			far_set_size += (geo->raid_disks % geo->far_set_size);
			far_set_start = last_far_set_start;
		}
	}

	offset = sector & geo->chunk_mask;
	if (geo->far_offset) {
		int fc;
		chunk = sector >> geo->chunk_shift;
		fc = sector_div(chunk, geo->far_copies);
		dev -= fc * geo->near_copies;
		if (dev < far_set_start)
			dev += far_set_size;
	} else {
		while (sector >= geo->stride) {
			sector -= geo->stride;
			if (dev < (geo->near_copies + far_set_start))
				dev += far_set_size - geo->near_copies;
			else
				dev -= geo->near_copies;
		}
		chunk = sector >> geo->chunk_shift;
	}
	vchunk = chunk * geo->raid_disks + dev;
	sector_div(vchunk, geo->near_copies);
	return (vchunk << geo->chunk_shift) + offset;
}
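
/*
 * Worked example of the two mappings above (illustrative numbers, not
 * taken from the code): with raid_disks=4, near_copies=2, far_copies=1
 * and 64KiB chunks (chunk_shift=7, chunk_mask=127 in sectors), virtual
 * sector 1000 gives chunk 7, offset 104.  chunk*near_copies = 14, so
 * dev = 14 % 4 = 2 and stripe = 14 / 4 = 3, placing the data at device
 * sector 3*128 + 104 = 488 on devices 2 and 3.  raid10_find_virt
 * reverses this: on device 2, sector 488 is chunk 3, offset 104;
 * vchunk = (3*4 + 2) / 2 = 7, giving back 7*128 + 104 = 1000.
 */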

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
static struct md_rdev *read_balance(struct r10conf *conf,
				    struct r10bio *r10_bio,
				    int *max_sectors)
{
	const sector_t this_sector = r10_bio->sector;
	int disk, slot;
	int sectors = r10_bio->sectors;
	int best_good_sectors;
	sector_t new_distance, best_dist;
	struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
	int do_balance;
	int best_dist_slot, best_pending_slot;
	bool has_nonrot_disk = false;
	unsigned int min_pending;
	struct geom *geo = &conf->geo;

	raid10_find_phys(conf, r10_bio);
	rcu_read_lock();
	best_dist_slot = -1;
	min_pending = UINT_MAX;
	best_dist_rdev = NULL;
	best_pending_rdev = NULL;
	best_dist = MaxSector;
	best_good_sectors = 0;
	do_balance = 1;
	clear_bit(R10BIO_FailFast, &r10_bio->state);
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window.
	 */
	if ((conf->mddev->recovery_cp < MaxSector
	     && (this_sector + sectors >= conf->next_resync)) ||
	    (mddev_is_clustered(conf->mddev) &&
	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
					    this_sector + sectors)))
		do_balance = 0;

	for (slot = 0; slot < conf->copies; slot++) {
		sector_t first_bad;
		int bad_sectors;
		sector_t dev_sector;
		unsigned int pending;
		bool nonrot;

		if (r10_bio->devs[slot].bio == IO_BLOCKED)
			continue;
		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].replacement);
		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			continue;

		dev_sector = r10_bio->devs[slot].addr;
		if (is_badblock(rdev, dev_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* Already have a better slot */
				continue;
			if (first_bad <= dev_sector) {
				/* Cannot read here.  If this is the
				 * 'primary' device, then we must not read
				 * beyond 'bad_sectors' from another device.
				 */
				bad_sectors -= (dev_sector - first_bad);
				if (!do_balance && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
			} else {
				sector_t good_sectors =
					first_bad - dev_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_dist_slot = slot;
					best_dist_rdev = rdev;
				}
				if (!do_balance)
					/* Must read from here */
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		if (!do_balance)
			break;

		nonrot = bdev_nonrot(rdev->bdev);
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		if (min_pending > pending && nonrot) {
			min_pending = pending;
			best_pending_slot = slot;
			best_pending_rdev = rdev;
		}

		if (best_dist_slot >= 0)
			/* At least 2 disks to choose from so failfast is OK */
			set_bit(R10BIO_FailFast, &r10_bio->state);
		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays.  So only
		 * keep it for 'near' arrays, and review those later.
		 */
		if (geo->near_copies > 1 && !pending)
			new_distance = 0;

		/* for far > 1 always use the lowest address */
		else if (geo->far_copies > 1)
			new_distance = r10_bio->devs[slot].addr;
		else
			new_distance = abs(r10_bio->devs[slot].addr -
					   conf->mirrors[disk].head_position);

		if (new_distance < best_dist) {
			best_dist = new_distance;
			best_dist_slot = slot;
			best_dist_rdev = rdev;
		}
	}
	if (slot >= conf->copies) {
		if (has_nonrot_disk) {
			slot = best_pending_slot;
			rdev = best_pending_rdev;
		} else {
			slot = best_dist_slot;
			rdev = best_dist_rdev;
		}
	}

	if (slot >= 0) {
		atomic_inc(&rdev->nr_pending);
		r10_bio->read_slot = slot;
	} else
		rdev = NULL;
	rcu_read_unlock();
	*max_sectors = best_good_sectors;

	return rdev;
}

static void flush_pending_writes(struct r10conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct blk_plug plug;
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		spin_unlock_irq(&conf->device_lock);

		/*
		 * As this is called in a wait_event() loop (see freeze_array),
		 * current->state might be TASK_UNINTERRUPTIBLE which will
		 * cause a warning when we prepare to wait again.  As it is
		 * rare that this path is taken, it is perfectly safe to force
		 * us to go around the wait_event() loop again, so the warning
		 * is a false-positive. Silence the warning by resetting
		 * thread state
		 */
		__set_current_state(TASK_RUNNING);

		blk_start_plug(&plug);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		md_bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			struct md_rdev *rdev = (void *)bio->bi_bdev;
			bio->bi_next = NULL;
			bio_set_dev(bio, rdev->bdev);
			if (test_bit(Faulty, &rdev->flags)) {
				bio_io_error(bio);
			} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
					    !bdev_max_discard_sectors(bio->bi_bdev)))
				/* Just ignore it */
				bio_endio(bio);
			else
				submit_bio_noacct(bio);
			bio = next;
		}
		blk_finish_plug(&plug);
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
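
/*
 * Illustrative pairing (a sketch, not a real call site):
 *
 *	raise_barrier(conf, 0);		// resync/recovery chunk starts
 *	...do background IO...
 *	lower_barrier(conf);		// chunk done, normal IO may resume
 *
 *	wait_barrier(conf, false);	// regular IO entry
 *	...submit regular IO...
 *	allow_barrier(conf);		// regular IO completion
 */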

static void raise_barrier(struct r10conf *conf, int force)
{
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock);

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static bool wait_barrier(struct r10conf *conf, bool nowait)
{
	bool ret = true;

	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		struct bio_list *bio_list = current->bio_list;
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		/* Return false when nowait flag is set */
		if (nowait) {
			ret = false;
		} else {
			raid10_log(conf->mddev, "wait barrier");
			wait_event_lock_irq(conf->wait_barrier,
					    !conf->barrier ||
					    (atomic_read(&conf->nr_pending) &&
					     bio_list &&
					     (!bio_list_empty(&bio_list[0]) ||
					      !bio_list_empty(&bio_list[1]))) ||
					     /* move on if recovery thread is
					      * blocked by us
					      */
					     (conf->mddev->thread->tsk == current &&
					      test_bit(MD_RECOVERY_RUNNING,
						       &conf->mddev->recovery) &&
					      conf->nr_queued > 0),
					    conf->resync_lock);
		}
		conf->nr_waiting--;
		if (!conf->nr_waiting)
			wake_up(&conf->wait_barrier);
	}
	/* Only increment nr_pending when we wait */
	if (ret)
		atomic_inc(&conf->nr_pending);
	spin_unlock_irq(&conf->resync_lock);
	return ret;
}

static void allow_barrier(struct r10conf *conf)
{
	if ((atomic_dec_and_test(&conf->nr_pending)) ||
			(conf->array_freeze_pending))
		wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r10conf *conf, int extra)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+extra.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (extra)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_freeze_pending++;
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq_cmd(conf->wait_barrier,
				atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
				conf->resync_lock,
				flush_pending_writes(conf));

	conf->array_freeze_pending--;
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r10conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}
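
/*
 * Typical pairing (an illustrative sketch): an error handler calls
 * freeze_array(conf, 1) -- the failed request itself being the one
 * "extra" pending IO -- fixes up state, then calls unfreeze_array(conf).
 */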

static sector_t choose_data_offset(struct r10bio *r10_bio,
				   struct md_rdev *rdev)
{
	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
	    test_bit(R10BIO_Previous, &r10_bio->state))
		return rdev->data_offset;
	else
		return rdev->new_data_offset;
}

static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
	struct mddev *mddev = plug->cb.data;
	struct r10conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	md_bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		struct md_rdev *rdev = (void *)bio->bi_bdev;
		bio->bi_next = NULL;
		bio_set_dev(bio, rdev->bdev);
		if (test_bit(Faulty, &rdev->flags)) {
			bio_io_error(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !bdev_max_discard_sectors(bio->bi_bdev)))
			/* Just ignore it */
			bio_endio(bio);
		else
			submit_bio_noacct(bio);
		bio = next;
	}
	kfree(plug);
}

/*
 * 1. Register the new request and wait if the reconstruction thread has put
 * up a bar for new requests. Continue immediately if no resync is active
 * currently.
 * 2. If the IO spans the reshape position, wait for the reshape to pass.
 */
static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
				 struct bio *bio, sector_t sectors)
{
	/* Bail out if REQ_NOWAIT is set for the bio */
	if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
		bio_wouldblock_error(bio);
		return false;
	}
	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    bio->bi_iter.bi_sector < conf->reshape_progress &&
	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
		allow_barrier(conf);
		if (bio->bi_opf & REQ_NOWAIT) {
			bio_wouldblock_error(bio);
			return false;
		}
		raid10_log(conf->mddev, "wait reshape");
		wait_event(conf->wait_barrier,
			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
			   conf->reshape_progress >= bio->bi_iter.bi_sector +
			   sectors);
		wait_barrier(conf, false);
	}
	return true;
}

static void raid10_read_request(struct mddev *mddev, struct bio *bio,
				struct r10bio *r10_bio)
{
	struct r10conf *conf = mddev->private;
	struct bio *read_bio;
	const int op = bio_op(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	int max_sectors;
	struct md_rdev *rdev;
	char b[BDEVNAME_SIZE];
	int slot = r10_bio->read_slot;
	struct md_rdev *err_rdev = NULL;
	gfp_t gfp = GFP_NOIO;

	if (slot >= 0 && r10_bio->devs[slot].rdev) {
		/*
		 * This is an error retry, but we cannot
		 * safely dereference the rdev in the r10_bio;
		 * we must use the one in conf.
		 * If it has already been disconnected (unlikely)
		 * we lose the device name in error messages.
		 */
		int disk;
		/*
		 * As we are blocking raid10, it is a little safer to
		 * use __GFP_HIGH.
		 */
		gfp = GFP_NOIO | __GFP_HIGH;

		rcu_read_lock();
		disk = r10_bio->devs[slot].devnum;
		err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (err_rdev)
			bdevname(err_rdev->bdev, b);
		else {
			strcpy(b, "???");
			/* This never gets dereferenced */
			err_rdev = r10_bio->devs[slot].rdev;
		}
		rcu_read_unlock();
	}

	if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
		return;
	rdev = read_balance(conf, r10_bio, &max_sectors);
	if (!rdev) {
		if (err_rdev) {
			pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
					    mdname(mddev), b,
					    (unsigned long long)r10_bio->sector);
		}
		raid_end_bio_io(r10_bio);
		return;
	}
	if (err_rdev)
		pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
				   mdname(mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r10_bio->sector);
	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      gfp, &conf->bio_split);
		bio_chain(split, bio);
		allow_barrier(conf);
		submit_bio_noacct(bio);
		wait_barrier(conf, false);
		bio = split;
		r10_bio->master_bio = bio;
		r10_bio->sectors = max_sectors;
	}
	slot = r10_bio->read_slot;

	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
		r10_bio->start_time = bio_start_io_acct(bio);
	read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);

	r10_bio->devs[slot].bio = read_bio;
	r10_bio->devs[slot].rdev = rdev;

	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
		choose_data_offset(r10_bio, rdev);
	read_bio->bi_end_io = raid10_end_read_request;
	bio_set_op_attrs(read_bio, op, do_sync);
	if (test_bit(FailFast, &rdev->flags) &&
	    test_bit(R10BIO_FailFast, &r10_bio->state))
		read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r10_bio;

	if (mddev->gendisk)
		trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
				      r10_bio->sector);
	submit_bio_noacct(read_bio);
	return;
}

static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
				  struct bio *bio, bool replacement,
				  int n_copy)
{
	const int op = bio_op(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
	unsigned long flags;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	struct r10conf *conf = mddev->private;
	struct md_rdev *rdev;
	int devnum = r10_bio->devs[n_copy].devnum;
	struct bio *mbio;

	if (replacement) {
		rdev = conf->mirrors[devnum].replacement;
		if (rdev == NULL) {
			/* Replacement just got moved to main 'rdev' */
			smp_mb();
			rdev = conf->mirrors[devnum].rdev;
		}
	} else
		rdev = conf->mirrors[devnum].rdev;

	mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
	if (replacement)
		r10_bio->devs[n_copy].repl_bio = mbio;
	else
		r10_bio->devs[n_copy].bio = mbio;

	mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
				   choose_data_offset(r10_bio, rdev));
	mbio->bi_end_io = raid10_end_write_request;
	bio_set_op_attrs(mbio, op, do_sync | do_fua);
	if (!replacement && test_bit(FailFast,
				     &conf->mirrors[devnum].rdev->flags)
			 && enough(conf, devnum))
		mbio->bi_opf |= MD_FAILFAST;
	mbio->bi_private = r10_bio;

	if (conf->mddev->gendisk)
		trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
				      r10_bio->sector);
	/* flush_pending_writes() needs access to the rdev so...*/
	mbio->bi_bdev = (void *)rdev;

	atomic_inc(&r10_bio->remaining);

	cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
	if (cb)
		plug = container_of(cb, struct raid1_plug_cb, cb);
	else
		plug = NULL;
	if (plug) {
		bio_list_add(&plug->pending, mbio);
	} else {
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		md_wakeup_thread(mddev->thread);
	}
}

static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
{
	int i;
	struct r10conf *conf = mddev->private;
	struct md_rdev *blocked_rdev;

retry_wait:
	blocked_rdev = NULL;
	rcu_read_lock();
	for (i = 0; i < conf->copies; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		struct md_rdev *rrdev = rcu_dereference(
			conf->mirrors[i].replacement);
		if (rdev == rrdev)
			rrdev = NULL;
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
			atomic_inc(&rrdev->nr_pending);
			blocked_rdev = rrdev;
			break;
		}

		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			sector_t dev_sector = r10_bio->devs[i].addr;
			int bad_sectors;
			int is_bad;

			/*
			 * A discard request doesn't care about the write
			 * result, so it doesn't need to wait for a blocked
			 * disk here.
			 */
			if (!r10_bio->sectors)
				continue;

			is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/*
				 * Mustn't write here until the bad block
				 * is acknowledged
				 */
				atomic_inc(&rdev->nr_pending);
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Have to wait for this device to get unblocked, then retry */
		allow_barrier(conf);
		raid10_log(conf->mddev, "%s wait rdev %d blocked",
				__func__, blocked_rdev->raid_disk);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf, false);
		goto retry_wait;
	}
}
1359
1360 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1361                                  struct r10bio *r10_bio)
1362 {
1363         struct r10conf *conf = mddev->private;
1364         int i;
1365         sector_t sectors;
1366         int max_sectors;
1367
1368         if ((mddev_is_clustered(mddev) &&
1369              md_cluster_ops->area_resyncing(mddev, WRITE,
1370                                             bio->bi_iter.bi_sector,
1371                                             bio_end_sector(bio)))) {
1372                 DEFINE_WAIT(w);
1373                 /* Bail out if REQ_NOWAIT is set for the bio */
1374                 if (bio->bi_opf & REQ_NOWAIT) {
1375                         bio_wouldblock_error(bio);
1376                         return;
1377                 }
1378                 for (;;) {
1379                         prepare_to_wait(&conf->wait_barrier,
1380                                         &w, TASK_IDLE);
1381                         if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1382                                  bio->bi_iter.bi_sector, bio_end_sector(bio)))
1383                                 break;
1384                         schedule();
1385                 }
1386                 finish_wait(&conf->wait_barrier, &w);
1387         }
1388
1389         sectors = r10_bio->sectors;
1390         if (!regular_request_wait(mddev, conf, bio, sectors))
1391                 return;
1392         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1393             (mddev->reshape_backwards
1394              ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1395                 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1396              : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1397                 bio->bi_iter.bi_sector < conf->reshape_progress))) {
1398                 /* Need to update reshape_position in metadata */
1399                 mddev->reshape_position = conf->reshape_progress;
1400                 set_mask_bits(&mddev->sb_flags, 0,
1401                               BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1402                 md_wakeup_thread(mddev->thread);
1403                 if (bio->bi_opf & REQ_NOWAIT) {
1404                         allow_barrier(conf);
1405                         bio_wouldblock_error(bio);
1406                         return;
1407                 }
1408                 raid10_log(conf->mddev, "wait reshape metadata");
1409                 wait_event(mddev->sb_wait,
1410                            !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
1411
1412                 conf->reshape_safe = mddev->reshape_position;
1413         }
1414
1415         /* First select target devices under rcu_read_lock and
1416          * inc the refcount on their rdev.  Record them by setting
1417          * bios[x] to bio.
1418          * If there are known/acknowledged bad blocks on any device
1419          * on which we have seen a write error, we want to avoid
1420          * writing to those blocks.  This potentially requires several
1421          * writes to write around the bad blocks.  Each set of writes
1422          * gets its own r10_bio with a set of bios attached.
1423          */
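        /*
         * Worked example (hypothetical numbers): if this r10_bio covers
         * 256 sectors and one copy has an acknowledged bad range starting
         * 128 sectors into it, max_sectors is clipped to 128 below; the
         * first 128 sectors are written to every copy and the remainder
         * is split off, resubmitted, and served by its own r10_bio.
         */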
1424
1425         r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1426         raid10_find_phys(conf, r10_bio);
1427
1428         wait_blocked_dev(mddev, r10_bio);
1429
1430         rcu_read_lock();
1431         max_sectors = r10_bio->sectors;
1432
1433         for (i = 0;  i < conf->copies; i++) {
1434                 int d = r10_bio->devs[i].devnum;
1435                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1436                 struct md_rdev *rrdev = rcu_dereference(
1437                         conf->mirrors[d].replacement);
1438                 if (rdev == rrdev)
1439                         rrdev = NULL;
1440                 if (rdev && (test_bit(Faulty, &rdev->flags)))
1441                         rdev = NULL;
1442                 if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1443                         rrdev = NULL;
1444
1445                 r10_bio->devs[i].bio = NULL;
1446                 r10_bio->devs[i].repl_bio = NULL;
1447
1448                 if (!rdev && !rrdev) {
1449                         set_bit(R10BIO_Degraded, &r10_bio->state);
1450                         continue;
1451                 }
1452                 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1453                         sector_t first_bad;
1454                         sector_t dev_sector = r10_bio->devs[i].addr;
1455                         int bad_sectors;
1456                         int is_bad;
1457
1458                         is_bad = is_badblock(rdev, dev_sector, max_sectors,
1459                                              &first_bad, &bad_sectors);
1460                         if (is_bad && first_bad <= dev_sector) {
1461                                 /* Cannot write here at all */
1462                                 bad_sectors -= (dev_sector - first_bad);
1463                                 if (bad_sectors < max_sectors)
1464                                         /* Mustn't write more than bad_sectors
1465                                          * to other devices yet
1466                                          */
1467                                         max_sectors = bad_sectors;
1468                                 /* We don't set R10BIO_Degraded as that
1469                                  * only applies if the disk is missing,
1470                                  * so it might be re-added, and we want to
1471                                  * know to recover this chunk.
1472                                  * In this case the device is here, and the
1473                                  * fact that this chunk is not in-sync is
1474                                  * recorded in the bad block log.
1475                                  */
1476                                 continue;
1477                         }
1478                         if (is_bad) {
1479                                 int good_sectors = first_bad - dev_sector;
1480                                 if (good_sectors < max_sectors)
1481                                         max_sectors = good_sectors;
1482                         }
1483                 }
1484                 if (rdev) {
1485                         r10_bio->devs[i].bio = bio;
1486                         atomic_inc(&rdev->nr_pending);
1487                 }
1488                 if (rrdev) {
1489                         r10_bio->devs[i].repl_bio = bio;
1490                         atomic_inc(&rrdev->nr_pending);
1491                 }
1492         }
1493         rcu_read_unlock();
1494
1495         if (max_sectors < r10_bio->sectors)
1496                 r10_bio->sectors = max_sectors;
1497
1498         if (r10_bio->sectors < bio_sectors(bio)) {
1499                 struct bio *split = bio_split(bio, r10_bio->sectors,
1500                                               GFP_NOIO, &conf->bio_split);
1501                 bio_chain(split, bio);
1502                 allow_barrier(conf);
1503                 submit_bio_noacct(bio);
1504                 wait_barrier(conf, false);
1505                 bio = split;
1506                 r10_bio->master_bio = bio;
1507         }
1508
1509         if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
1510                 r10_bio->start_time = bio_start_io_acct(bio);
1511         atomic_set(&r10_bio->remaining, 1);
1512         md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1513
1514         for (i = 0; i < conf->copies; i++) {
1515                 if (r10_bio->devs[i].bio)
1516                         raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1517                 if (r10_bio->devs[i].repl_bio)
1518                         raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1519         }
1520         one_write_done(r10_bio);
1521 }
1522
1523 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1524 {
1525         struct r10conf *conf = mddev->private;
1526         struct r10bio *r10_bio;
1527
1528         r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1529
1530         r10_bio->master_bio = bio;
1531         r10_bio->sectors = sectors;
1532
1533         r10_bio->mddev = mddev;
1534         r10_bio->sector = bio->bi_iter.bi_sector;
1535         r10_bio->state = 0;
1536         r10_bio->read_slot = -1;
1537         memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
1538                         conf->geo.raid_disks);
1539
1540         if (bio_data_dir(bio) == READ)
1541                 raid10_read_request(mddev, bio, r10_bio);
1542         else
1543                 raid10_write_request(mddev, bio, r10_bio);
1544 }
1545
1546 static void raid_end_discard_bio(struct r10bio *r10bio)
1547 {
1548         struct r10conf *conf = r10bio->mddev->private;
1549         struct r10bio *first_r10bio;
1550
1551         while (atomic_dec_and_test(&r10bio->remaining)) {
1552
1553                 allow_barrier(conf);
1554
1555                 if (!test_bit(R10BIO_Discard, &r10bio->state)) {
1556                         first_r10bio = (struct r10bio *)r10bio->master_bio;
1557                         free_r10bio(r10bio);
1558                         r10bio = first_r10bio;
1559                 } else {
1560                         md_write_end(r10bio->mddev);
1561                         bio_endio(r10bio->master_bio);
1562                         free_r10bio(r10bio);
1563                         break;
1564                 }
1565         }
1566 }
1567
1568 static void raid10_end_discard_request(struct bio *bio)
1569 {
1570         struct r10bio *r10_bio = bio->bi_private;
1571         struct r10conf *conf = r10_bio->mddev->private;
1572         struct md_rdev *rdev = NULL;
1573         int dev;
1574         int slot, repl;
1575
1576         /*
1577          * We don't care about the completion status of a discard bio
1578          */
1579         if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
1580                 set_bit(R10BIO_Uptodate, &r10_bio->state);
1581
1582         dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1583         if (repl)
1584                 rdev = conf->mirrors[dev].replacement;
1585         if (!rdev) {
1586                 /*
1587                  * raid10_remove_disk uses smp_mb to make sure rdev is set to
1588                  * the replacement before replacement is set to NULL, so rdev
1589                  * can be read without barrier protection even when replacement is NULL.
1590                  */
1591                 smp_rmb();
1592                 rdev = conf->mirrors[dev].rdev;
1593         }
1594
1595         raid_end_discard_bio(r10_bio);
1596         rdev_dec_pending(rdev, conf->mddev);
1597 }
1598
1599 /*
1600  * There are some limitations on handling a discard bio:
1601  * 1st, the discard region must be bigger than stripe_size*2.
1602  * 2nd, if the discard bio spans reshape progress, we fall back to the
1603  * regular request path to handle it.
1604  */
1605 static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
1606 {
1607         struct r10conf *conf = mddev->private;
1608         struct geom *geo = &conf->geo;
1609         int far_copies = geo->far_copies;
1610         bool first_copy = true;
1611         struct r10bio *r10_bio, *first_r10bio;
1612         struct bio *split;
1613         int disk;
1614         sector_t chunk;
1615         unsigned int stripe_size;
1616         unsigned int stripe_data_disks;
1617         sector_t split_size;
1618         sector_t bio_start, bio_end;
1619         sector_t first_stripe_index, last_stripe_index;
1620         sector_t start_disk_offset;
1621         unsigned int start_disk_index;
1622         sector_t end_disk_offset;
1623         unsigned int end_disk_index;
1624         unsigned int remainder;
1625
1626         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1627                 return -EAGAIN;
1628
1629         if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
1630                 bio_wouldblock_error(bio);
1631                 return 0;
1632         }
1633         wait_barrier(conf, false);
1634
1635         /*
1636          * Check reshape again in case a reshape starts after checking
1637          * MD_RECOVERY_RESHAPE and before wait_barrier
1638          */
1639         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1640                 goto out;
1641
1642         if (geo->near_copies)
1643                 stripe_data_disks = geo->raid_disks / geo->near_copies +
1644                                         geo->raid_disks % geo->near_copies;
1645         else
1646                 stripe_data_disks = geo->raid_disks;
1647
1648         stripe_size = stripe_data_disks << geo->chunk_shift;
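        /*
         * Illustration (hypothetical geometry): with raid_disks=4,
         * near_copies=2 and 1024-sector chunks (chunk_shift=10),
         * stripe_data_disks = 4/2 + 0 = 2 and stripe_size = 2 << 10 =
         * 2048 sectors, i.e. one full rotation of data chunks.
         */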
1649
1650         bio_start = bio->bi_iter.bi_sector;
1651         bio_end = bio_end_sector(bio);
1652
1653         /*
1654          * A discard bio may be smaller than the stripe size, or it may cross
1655          * a stripe boundary while its region is larger than one stripe. For
1656          * the far-offset layout, if the discard region is not aligned with the
1657          * stripe size, there are holes when we submit the discard bios to the
1658          * member disks. For simplicity, we only handle discard bios whose
1659          * discard region is bigger than stripe_size * 2.
1660          */
1661         if (bio_sectors(bio) < stripe_size*2)
1662                 goto out;
1663
1664         /*
1665          * Keep the bio aligned with the stripe size.
1666          */
1667         div_u64_rem(bio_start, stripe_size, &remainder);
1668         if (remainder) {
1669                 split_size = stripe_size - remainder;
1670                 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1671                 bio_chain(split, bio);
1672                 allow_barrier(conf);
1673                 /* Resend the first split part */
1674                 submit_bio_noacct(split);
1675                 wait_barrier(conf, false);
1676         }
1677         div_u64_rem(bio_end, stripe_size, &remainder);
1678         if (remainder) {
1679                 split_size = bio_sectors(bio) - remainder;
1680                 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1681                 bio_chain(split, bio);
1682                 allow_barrier(conf);
1683                 /* Resend the second split part */
1684                 submit_bio_noacct(bio);
1685                 bio = split;
1686                 wait_barrier(conf, false);
1687         }
1688
1689         bio_start = bio->bi_iter.bi_sector;
1690         bio_end = bio_end_sector(bio);
1691
1692         /*
1693  * Raid10 uses the chunk as its unit of data storage, similar to raid0.
1694  * One stripe contains one chunk from each member disk (every chunk at
1695  * the same per-disk address). For layout details, see 'man 4 md'.
1696          */
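        /*
         * Worked example of the mapping below (hypothetical, near layout):
         * raid_disks=4, near_copies=2, 1024-sector chunks, bio_start=8192.
         * chunk = 8192 >> 10 = 8, then chunk *= 2 -> 16, so
         * first_stripe_index = 16 / 4 = 4 with start_disk_index = 16 % 4 = 0,
         * and start_disk_offset = (8192 & 1023) + (4 << 10) = 4096 sectors.
         */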
1697         chunk = bio_start >> geo->chunk_shift;
1698         chunk *= geo->near_copies;
1699         first_stripe_index = chunk;
1700         start_disk_index = sector_div(first_stripe_index, geo->raid_disks);
1701         if (geo->far_offset)
1702                 first_stripe_index *= geo->far_copies;
1703         start_disk_offset = (bio_start & geo->chunk_mask) +
1704                                 (first_stripe_index << geo->chunk_shift);
1705
1706         chunk = bio_end >> geo->chunk_shift;
1707         chunk *= geo->near_copies;
1708         last_stripe_index = chunk;
1709         end_disk_index = sector_div(last_stripe_index, geo->raid_disks);
1710         if (geo->far_offset)
1711                 last_stripe_index *= geo->far_copies;
1712         end_disk_offset = (bio_end & geo->chunk_mask) +
1713                                 (last_stripe_index << geo->chunk_shift);
1714
1715 retry_discard:
1716         r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1717         r10_bio->mddev = mddev;
1718         r10_bio->state = 0;
1719         r10_bio->sectors = 0;
1720         memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
1721         wait_blocked_dev(mddev, r10_bio);
1722
1723         /*
1724          * For the far layout, more than one r10bio is needed to cover all regions.
1725          * Inspired by raid10_sync_request, we use the first r10bio->master_bio
1726          * to record the discard bio; every other r10bio->master_bio records the
1727          * first r10bio instead. The first r10bio is released only after all other
1728          * r10bios finish, and the discard bio completes only when the first one does.
1729          */
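        /*
         * Sketch of the chaining (illustrative, far_copies == 2):
         *
         *   discard bio <- first_r10bio <- r10bio #2
         *
         * first_r10bio->master_bio points at the discard bio and carries
         * R10BIO_Discard; r10bio #2's master_bio points at first_r10bio.
         */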
1730         if (first_copy) {
1731                 r10_bio->master_bio = bio;
1732                 set_bit(R10BIO_Discard, &r10_bio->state);
1733                 first_copy = false;
1734                 first_r10bio = r10_bio;
1735         } else
1736                 r10_bio->master_bio = (struct bio *)first_r10bio;
1737
1738         /*
1739          * First select target devices under rcu_read_lock and
1740          * inc the refcount on their rdev.  Record them by setting
1741          * bios[x] to bio.
1742          */
1743         rcu_read_lock();
1744         for (disk = 0; disk < geo->raid_disks; disk++) {
1745                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
1746                 struct md_rdev *rrdev = rcu_dereference(
1747                         conf->mirrors[disk].replacement);
1748
1749                 r10_bio->devs[disk].bio = NULL;
1750                 r10_bio->devs[disk].repl_bio = NULL;
1751
1752                 if (rdev && (test_bit(Faulty, &rdev->flags)))
1753                         rdev = NULL;
1754                 if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1755                         rrdev = NULL;
1756                 if (!rdev && !rrdev)
1757                         continue;
1758
1759                 if (rdev) {
1760                         r10_bio->devs[disk].bio = bio;
1761                         atomic_inc(&rdev->nr_pending);
1762                 }
1763                 if (rrdev) {
1764                         r10_bio->devs[disk].repl_bio = bio;
1765                         atomic_inc(&rrdev->nr_pending);
1766                 }
1767         }
1768         rcu_read_unlock();
1769
1770         atomic_set(&r10_bio->remaining, 1);
1771         for (disk = 0; disk < geo->raid_disks; disk++) {
1772                 sector_t dev_start, dev_end;
1773                 struct bio *mbio, *rbio = NULL;
1774
1775                 /*
1776                  * Now calculate the start and end address for each disk.
1777                  * The space between dev_start and dev_end is the discard region.
1778                  *
1779                  * For dev_start, three cases need to be considered:
1780                  * 1st, the disk is before start_disk_index: think of the disk as
1781                  * being in the next stripe, so dev_start is the start address of
1782                  * the next stripe.
1783                  * 2nd, the disk is after start_disk_index: the disk is in the
1784                  * same stripe as the first disk.
1785                  * 3rd, the first disk itself: use start_disk_offset directly.
1786                  */
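                /*
                 * Continuing the hypothetical example above (1024-sector
                 * chunks, first_stripe_index = 4, start_disk_index = 0):
                 * disk 0 uses dev_start = start_disk_offset = 4096, disks
                 * 1..3 use 4 * 1024 = 4096, and any disk below
                 * start_disk_index would use (4 + 1) * 1024 = 5120.
                 */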
1787                 if (disk < start_disk_index)
1788                         dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
1789                 else if (disk > start_disk_index)
1790                         dev_start = first_stripe_index * mddev->chunk_sectors;
1791                 else
1792                         dev_start = start_disk_offset;
1793
1794                 if (disk < end_disk_index)
1795                         dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
1796                 else if (disk > end_disk_index)
1797                         dev_end = last_stripe_index * mddev->chunk_sectors;
1798                 else
1799                         dev_end = end_disk_offset;
1800
1801                 /*
1802                  * Only discard bios whose size is >= the stripe size are handled,
1803                  * so dev_end > dev_start all the time.
1804                  * There is no need to take the rcu lock to get rdev here; we
1805                  * already incremented rdev->nr_pending in the first loop.
1806                  */
1807                 if (r10_bio->devs[disk].bio) {
1808                         struct md_rdev *rdev = conf->mirrors[disk].rdev;
1809                         mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1810                                                &mddev->bio_set);
1811                         mbio->bi_end_io = raid10_end_discard_request;
1812                         mbio->bi_private = r10_bio;
1813                         r10_bio->devs[disk].bio = mbio;
1814                         r10_bio->devs[disk].devnum = disk;
1815                         atomic_inc(&r10_bio->remaining);
1816                         md_submit_discard_bio(mddev, rdev, mbio,
1817                                         dev_start + choose_data_offset(r10_bio, rdev),
1818                                         dev_end - dev_start);
1819                         bio_endio(mbio);
1820                 }
1821                 if (r10_bio->devs[disk].repl_bio) {
1822                         struct md_rdev *rrdev = conf->mirrors[disk].replacement;
1823                         rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1824                                                &mddev->bio_set);
1825                         rbio->bi_end_io = raid10_end_discard_request;
1826                         rbio->bi_private = r10_bio;
1827                         r10_bio->devs[disk].repl_bio = rbio;
1828                         r10_bio->devs[disk].devnum = disk;
1829                         atomic_inc(&r10_bio->remaining);
1830                         md_submit_discard_bio(mddev, rrdev, rbio,
1831                                         dev_start + choose_data_offset(r10_bio, rrdev),
1832                                         dev_end - dev_start);
1833                         bio_endio(rbio);
1834                 }
1835         }
1836
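        /*
         * For a plain 'far' layout (far_offset == 0) the same logical
         * region repeats 'stride' sectors further into every device, once
         * per far copy.  Advance the per-disk offsets by the stride and
         * loop to issue the discards for the next copy; with far_copies=2,
         * for example, the loop body above runs exactly twice.
         */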
1837         if (!geo->far_offset && --far_copies) {
1838                 first_stripe_index += geo->stride >> geo->chunk_shift;
1839                 start_disk_offset += geo->stride;
1840                 last_stripe_index += geo->stride >> geo->chunk_shift;
1841                 end_disk_offset += geo->stride;
1842                 atomic_inc(&first_r10bio->remaining);
1843                 raid_end_discard_bio(r10_bio);
1844                 wait_barrier(conf, false);
1845                 goto retry_discard;
1846         }
1847
1848         raid_end_discard_bio(r10_bio);
1849
1850         return 0;
1851 out:
1852         allow_barrier(conf);
1853         return -EAGAIN;
1854 }
1855
1856 static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1857 {
1858         struct r10conf *conf = mddev->private;
1859         sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1860         int chunk_sects = chunk_mask + 1;
1861         int sectors = bio_sectors(bio);
1862
1863         if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1864             && md_flush_request(mddev, bio))
1865                 return true;
1866
1867         if (!md_write_start(mddev, bio))
1868                 return false;
1869
1870         if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1871                 if (!raid10_handle_discard(mddev, bio))
1872                         return true;
1873
1874         /*
1875          * If this request crosses a chunk boundary, we need to split
1876          * it.
1877          */
1878         if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1879                      sectors > chunk_sects
1880                      && (conf->geo.near_copies < conf->geo.raid_disks
1881                          || conf->prev.near_copies <
1882                          conf->prev.raid_disks)))
1883                 sectors = chunk_sects -
1884                         (bio->bi_iter.bi_sector &
1885                          (chunk_sects - 1));
1886         __make_request(mddev, bio, sectors);
1887
1888         /* In case raid10d snuck in to freeze_array */
1889         wake_up(&conf->wait_barrier);
1890         return true;
1891 }
1892
1893 static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1894 {
1895         struct r10conf *conf = mddev->private;
1896         int i;
1897
1898         if (conf->geo.near_copies < conf->geo.raid_disks)
1899                 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1900         if (conf->geo.near_copies > 1)
1901                 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1902         if (conf->geo.far_copies > 1) {
1903                 if (conf->geo.far_offset)
1904                         seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1905                 else
1906                         seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1907                 if (conf->geo.far_set_size != conf->geo.raid_disks)
1908                         seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1909         }
1910         seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1911                                         conf->geo.raid_disks - mddev->degraded);
1912         rcu_read_lock();
1913         for (i = 0; i < conf->geo.raid_disks; i++) {
1914                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1915                 seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1916         }
1917         rcu_read_unlock();
1918         seq_printf(seq, "]");
1919 }
1920
1921 /* check if there are enough drives for
1922  * every block to appear on at least one.
1923  * Don't consider the device numbered 'ignore'
1924  * as we might be about to remove it.
1925  */
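/*
 * Illustration (hypothetical geometry): with 4 disks and 2 copies, the
 * loop below examines the disk sets {0,1} and {2,3}; the array survives
 * only if every set keeps at least one In_sync member other than the
 * device numbered 'ignore'.
 */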
1926 static int _enough(struct r10conf *conf, int previous, int ignore)
1927 {
1928         int first = 0;
1929         int has_enough = 0;
1930         int disks, ncopies;
1931         if (previous) {
1932                 disks = conf->prev.raid_disks;
1933                 ncopies = conf->prev.near_copies;
1934         } else {
1935                 disks = conf->geo.raid_disks;
1936                 ncopies = conf->geo.near_copies;
1937         }
1938
1939         rcu_read_lock();
1940         do {
1941                 int n = conf->copies;
1942                 int cnt = 0;
1943                 int this = first;
1944                 while (n--) {
1945                         struct md_rdev *rdev;
1946                         if (this != ignore &&
1947                             (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1948                             test_bit(In_sync, &rdev->flags))
1949                                 cnt++;
1950                         this = (this+1) % disks;
1951                 }
1952                 if (cnt == 0)
1953                         goto out;
1954                 first = (first + ncopies) % disks;
1955         } while (first != 0);
1956         has_enough = 1;
1957 out:
1958         rcu_read_unlock();
1959         return has_enough;
1960 }
1961
1962 static int enough(struct r10conf *conf, int ignore)
1963 {
1964         /* when calling 'enough', both 'prev' and 'geo' must
1965          * be stable.
1966          * This is ensured if ->reconfig_mutex or ->device_lock
1967          * is held.
1968          */
1969         return _enough(conf, 0, ignore) &&
1970                 _enough(conf, 1, ignore);
1971 }
1972
1973 /**
1974  * raid10_error() - RAID10 error handler.
1975  * @mddev: affected md device.
1976  * @rdev: member device to fail.
1977  *
1978  * The routine acknowledges &rdev failure and determines new @mddev state.
1979  * If it failed, then:
1980  *      - &MD_BROKEN flag is set in &mddev->flags.
1981  * Otherwise, it must be degraded:
1982  *      - recovery is interrupted.
1983  *      - &mddev->degraded is bumped.
1984  *
1985  * @rdev is marked as &Faulty excluding case when array is failed and
1986  * &mddev->fail_last_dev is off.
1987  */
1988 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1989 {
1990         char b[BDEVNAME_SIZE];
1991         struct r10conf *conf = mddev->private;
1992         unsigned long flags;
1993
1994         spin_lock_irqsave(&conf->device_lock, flags);
1995
1996         if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) {
1997                 set_bit(MD_BROKEN, &mddev->flags);
1998
1999                 if (!mddev->fail_last_dev) {
2000                         spin_unlock_irqrestore(&conf->device_lock, flags);
2001                         return;
2002                 }
2003         }
2004         if (test_and_clear_bit(In_sync, &rdev->flags))
2005                 mddev->degraded++;
2006
2007         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2008         set_bit(Blocked, &rdev->flags);
2009         set_bit(Faulty, &rdev->flags);
2010         set_mask_bits(&mddev->sb_flags, 0,
2011                       BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
2012         spin_unlock_irqrestore(&conf->device_lock, flags);
2013         pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
2014                 "md/raid10:%s: Operation continuing on %d devices.\n",
2015                 mdname(mddev), bdevname(rdev->bdev, b),
2016                 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
2017 }
2018
2019 static void print_conf(struct r10conf *conf)
2020 {
2021         int i;
2022         struct md_rdev *rdev;
2023
2024         pr_debug("RAID10 conf printout:\n");
2025         if (!conf) {
2026                 pr_debug("(!conf)\n");
2027                 return;
2028         }
2029         pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
2030                  conf->geo.raid_disks);
2031
2032         /* This is only called with ->reconfig_mutex held, so
2033          * rcu protection of rdev is not needed */
2034         for (i = 0; i < conf->geo.raid_disks; i++) {
2035                 char b[BDEVNAME_SIZE];
2036                 rdev = conf->mirrors[i].rdev;
2037                 if (rdev)
2038                         pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
2039                                  i, !test_bit(In_sync, &rdev->flags),
2040                                  !test_bit(Faulty, &rdev->flags),
2041                                  bdevname(rdev->bdev,b));
2042         }
2043 }
2044
2045 static void close_sync(struct r10conf *conf)
2046 {
2047         wait_barrier(conf, false);
2048         allow_barrier(conf);
2049
2050         mempool_exit(&conf->r10buf_pool);
2051 }
2052
2053 static int raid10_spare_active(struct mddev *mddev)
2054 {
2055         int i;
2056         struct r10conf *conf = mddev->private;
2057         struct raid10_info *tmp;
2058         int count = 0;
2059         unsigned long flags;
2060
2061         /*
2062          * Find all non-in_sync disks within the RAID10 configuration
2063          * and mark them in_sync
2064          */
2065         for (i = 0; i < conf->geo.raid_disks; i++) {
2066                 tmp = conf->mirrors + i;
2067                 if (tmp->replacement
2068                     && tmp->replacement->recovery_offset == MaxSector
2069                     && !test_bit(Faulty, &tmp->replacement->flags)
2070                     && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
2071                         /* Replacement has just become active */
2072                         if (!tmp->rdev
2073                             || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
2074                                 count++;
2075                         if (tmp->rdev) {
2076                                 /* Replaced device not technically faulty,
2077                                  * but we need to be sure it gets removed
2078                                  * and never re-added.
2079                                  */
2080                                 set_bit(Faulty, &tmp->rdev->flags);
2081                                 sysfs_notify_dirent_safe(
2082                                         tmp->rdev->sysfs_state);
2083                         }
2084                         sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
2085                 } else if (tmp->rdev
2086                            && tmp->rdev->recovery_offset == MaxSector
2087                            && !test_bit(Faulty, &tmp->rdev->flags)
2088                            && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
2089                         count++;
2090                         sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
2091                 }
2092         }
2093         spin_lock_irqsave(&conf->device_lock, flags);
2094         mddev->degraded -= count;
2095         spin_unlock_irqrestore(&conf->device_lock, flags);
2096
2097         print_conf(conf);
2098         return count;
2099 }
2100
2101 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2102 {
2103         struct r10conf *conf = mddev->private;
2104         int err = -EEXIST;
2105         int mirror;
2106         int first = 0;
2107         int last = conf->geo.raid_disks - 1;
2108
2109         if (mddev->recovery_cp < MaxSector)
2110                 /* only hot-add to in-sync arrays, as recovery is
2111                  * very different from resync
2112                  */
2113                 return -EBUSY;
2114         if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
2115                 return -EINVAL;
2116
2117         if (md_integrity_add_rdev(rdev, mddev))
2118                 return -ENXIO;
2119
2120         if (rdev->raid_disk >= 0)
2121                 first = last = rdev->raid_disk;
2122
2123         if (rdev->saved_raid_disk >= first &&
2124             rdev->saved_raid_disk < conf->geo.raid_disks &&
2125             conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2126                 mirror = rdev->saved_raid_disk;
2127         else
2128                 mirror = first;
2129         for ( ; mirror <= last ; mirror++) {
2130                 struct raid10_info *p = &conf->mirrors[mirror];
2131                 if (p->recovery_disabled == mddev->recovery_disabled)
2132                         continue;
2133                 if (p->rdev) {
2134                         if (!test_bit(WantReplacement, &p->rdev->flags) ||
2135                             p->replacement != NULL)
2136                                 continue;
2137                         clear_bit(In_sync, &rdev->flags);
2138                         set_bit(Replacement, &rdev->flags);
2139                         rdev->raid_disk = mirror;
2140                         err = 0;
2141                         if (mddev->gendisk)
2142                                 disk_stack_limits(mddev->gendisk, rdev->bdev,
2143                                                   rdev->data_offset << 9);
2144                         conf->fullsync = 1;
2145                         rcu_assign_pointer(p->replacement, rdev);
2146                         break;
2147                 }
2148
2149                 if (mddev->gendisk)
2150                         disk_stack_limits(mddev->gendisk, rdev->bdev,
2151                                           rdev->data_offset << 9);
2152
2153                 p->head_position = 0;
2154                 p->recovery_disabled = mddev->recovery_disabled - 1;
2155                 rdev->raid_disk = mirror;
2156                 err = 0;
2157                 if (rdev->saved_raid_disk != mirror)
2158                         conf->fullsync = 1;
2159                 rcu_assign_pointer(p->rdev, rdev);
2160                 break;
2161         }
2162
2163         print_conf(conf);
2164         return err;
2165 }
2166
2167 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
2168 {
2169         struct r10conf *conf = mddev->private;
2170         int err = 0;
2171         int number = rdev->raid_disk;
2172         struct md_rdev **rdevp;
2173         struct raid10_info *p = conf->mirrors + number;
2174
2175         print_conf(conf);
2176         if (rdev == p->rdev)
2177                 rdevp = &p->rdev;
2178         else if (rdev == p->replacement)
2179                 rdevp = &p->replacement;
2180         else
2181                 return 0;
2182
2183         if (test_bit(In_sync, &rdev->flags) ||
2184             atomic_read(&rdev->nr_pending)) {
2185                 err = -EBUSY;
2186                 goto abort;
2187         }
2188         /* Only remove non-faulty devices if recovery
2189          * is not possible.
2190          */
2191         if (!test_bit(Faulty, &rdev->flags) &&
2192             mddev->recovery_disabled != p->recovery_disabled &&
2193             (!p->replacement || p->replacement == rdev) &&
2194             number < conf->geo.raid_disks &&
2195             enough(conf, -1)) {
2196                 err = -EBUSY;
2197                 goto abort;
2198         }
2199         *rdevp = NULL;
2200         if (!test_bit(RemoveSynchronized, &rdev->flags)) {
2201                 synchronize_rcu();
2202                 if (atomic_read(&rdev->nr_pending)) {
2203                         /* lost the race, try later */
2204                         err = -EBUSY;
2205                         *rdevp = rdev;
2206                         goto abort;
2207                 }
2208         }
2209         if (p->replacement) {
2210                 /* We must have just cleared 'rdev' */
2211                 p->rdev = p->replacement;
2212                 clear_bit(Replacement, &p->replacement->flags);
2213                 smp_mb(); /* Make sure other CPUs may see both as identical
2214                            * but will never see neither -- if they are careful.
2215                            */
2216                 p->replacement = NULL;
2217         }
2218
2219         clear_bit(WantReplacement, &rdev->flags);
2220         err = md_integrity_register(mddev);
2221
2222 abort:
2223
2224         print_conf(conf);
2225         return err;
2226 }
2227
2228 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
2229 {
2230         struct r10conf *conf = r10_bio->mddev->private;
2231
2232         if (!bio->bi_status)
2233                 set_bit(R10BIO_Uptodate, &r10_bio->state);
2234         else
2235                 /* The write handler will notice the lack of
2236                  * R10BIO_Uptodate and record any errors etc
2237                  */
2238                 atomic_add(r10_bio->sectors,
2239                            &conf->mirrors[d].rdev->corrected_errors);
2240
2241         /* for reconstruct, we always reschedule after a read.
2242          * for resync, only after all reads
2243          */
2244         rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
2245         if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
2246             atomic_dec_and_test(&r10_bio->remaining)) {
2247                 /* we have read all the blocks,
2248                  * do the comparison in process context in raid10d
2249                  */
2250                 reschedule_retry(r10_bio);
2251         }
2252 }
2253
2254 static void end_sync_read(struct bio *bio)
2255 {
2256         struct r10bio *r10_bio = get_resync_r10bio(bio);
2257         struct r10conf *conf = r10_bio->mddev->private;
2258         int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2259
2260         __end_sync_read(r10_bio, bio, d);
2261 }
2262
2263 static void end_reshape_read(struct bio *bio)
2264 {
2265         /* reshape read bio isn't allocated from r10buf_pool */
2266         struct r10bio *r10_bio = bio->bi_private;
2267
2268         __end_sync_read(r10_bio, bio, r10_bio->read_slot);
2269 }
2270
2271 static void end_sync_request(struct r10bio *r10_bio)
2272 {
2273         struct mddev *mddev = r10_bio->mddev;
2274
2275         while (atomic_dec_and_test(&r10_bio->remaining)) {
2276                 if (r10_bio->master_bio == NULL) {
2277                         /* the primary of several recovery bios */
2278                         sector_t s = r10_bio->sectors;
2279                         if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2280                             test_bit(R10BIO_WriteError, &r10_bio->state))
2281                                 reschedule_retry(r10_bio);
2282                         else
2283                                 put_buf(r10_bio);
2284                         md_done_sync(mddev, s, 1);
2285                         break;
2286                 } else {
2287                         struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
2288                         if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2289                             test_bit(R10BIO_WriteError, &r10_bio->state))
2290                                 reschedule_retry(r10_bio);
2291                         else
2292                                 put_buf(r10_bio);
2293                         r10_bio = r10_bio2;
2294                 }
2295         }
2296 }
2297
2298 static void end_sync_write(struct bio *bio)
2299 {
2300         struct r10bio *r10_bio = get_resync_r10bio(bio);
2301         struct mddev *mddev = r10_bio->mddev;
2302         struct r10conf *conf = mddev->private;
2303         int d;
2304         sector_t first_bad;
2305         int bad_sectors;
2306         int slot;
2307         int repl;
2308         struct md_rdev *rdev = NULL;
2309
2310         d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2311         if (repl)
2312                 rdev = conf->mirrors[d].replacement;
2313         else
2314                 rdev = conf->mirrors[d].rdev;
2315
2316         if (bio->bi_status) {
2317                 if (repl)
2318                         md_error(mddev, rdev);
2319                 else {
2320                         set_bit(WriteErrorSeen, &rdev->flags);
2321                         if (!test_and_set_bit(WantReplacement, &rdev->flags))
2322                                 set_bit(MD_RECOVERY_NEEDED,
2323                                         &rdev->mddev->recovery);
2324                         set_bit(R10BIO_WriteError, &r10_bio->state);
2325                 }
2326         } else if (is_badblock(rdev,
2327                              r10_bio->devs[slot].addr,
2328                              r10_bio->sectors,
2329                              &first_bad, &bad_sectors))
2330                 set_bit(R10BIO_MadeGood, &r10_bio->state);
2331
2332         rdev_dec_pending(rdev, mddev);
2333
2334         end_sync_request(r10_bio);
2335 }
2336
2337 /*
2338  * Note: sync and recovery are handled very differently for raid10.
2339  * This code is for resync.
2340  * For resync, we read through virtual addresses and read all blocks.
2341  * If there is any error, we schedule a write.  The lowest numbered
2342  * drive is authoritative.
2343  * However, requests come in for physical addresses, so we need to map.
2344  * For every physical address there are raid_disks/copies virtual addresses,
2345  * which is always at least one, but is not necessarily an integer.
2346  * This means that a physical address can span multiple chunks, so we may
2347  * have to submit multiple io requests for a single sync request.
2348  */
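/*
 * Numeric illustration (hypothetical geometry): with raid_disks=5 and
 * copies=2, each physical address corresponds to 5/2 = 2.5 virtual
 * addresses on average, so a single sync request may have to be split
 * across a chunk boundary into multiple reads.
 */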
2349 /*
2350  * We check if all blocks are in-sync and only write to blocks that
2351  * aren't in sync
2352  */
2353 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2354 {
2355         struct r10conf *conf = mddev->private;
2356         int i, first;
2357         struct bio *tbio, *fbio;
2358         int vcnt;
2359         struct page **tpages, **fpages;
2360
2361         atomic_set(&r10_bio->remaining, 1);
2362
2363         /* find the first device with a block */
2364         for (i=0; i<conf->copies; i++)
2365                 if (!r10_bio->devs[i].bio->bi_status)
2366                         break;
2367
2368         if (i == conf->copies)
2369                 goto done;
2370
2371         first = i;
2372         fbio = r10_bio->devs[i].bio;
2373         fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2374         fbio->bi_iter.bi_idx = 0;
2375         fpages = get_resync_pages(fbio)->pages;
2376
2377         vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
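        /*
         * Illustration, assuming 4KiB pages: PAGE_SIZE >> 9 == 8 and
         * PAGE_SHIFT - 9 == 3, so a 1000-sector r10_bio rounds up to
         * (1000 + 7) >> 3 == 125 resync pages.
         */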
2378         /* now find blocks with errors */
2379         for (i=0 ; i < conf->copies ; i++) {
2380                 int  j, d;
2381                 struct md_rdev *rdev;
2382                 struct resync_pages *rp;
2383
2384                 tbio = r10_bio->devs[i].bio;
2385
2386                 if (tbio->bi_end_io != end_sync_read)
2387                         continue;
2388                 if (i == first)
2389                         continue;
2390
2391                 tpages = get_resync_pages(tbio)->pages;
2392                 d = r10_bio->devs[i].devnum;
2393                 rdev = conf->mirrors[d].rdev;
2394                 if (!r10_bio->devs[i].bio->bi_status) {
2395                         /* We know that the bi_io_vec layout is the same for
2396                          * both 'first' and 'i', so we just compare them.
2397                          * All vec entries are PAGE_SIZE.
2398                          */
2399                         int sectors = r10_bio->sectors;
2400                         for (j = 0; j < vcnt; j++) {
2401                                 int len = PAGE_SIZE;
2402                                 if (sectors < (len / 512))
2403                                         len = sectors * 512;
2404                                 if (memcmp(page_address(fpages[j]),
2405                                            page_address(tpages[j]),
2406                                            len))
2407                                         break;
2408                                 sectors -= len/512;
2409                         }
2410                         if (j == vcnt)
2411                                 continue;
2412                         atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2413                         if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2414                                 /* Don't fix anything. */
2415                                 continue;
2416                 } else if (test_bit(FailFast, &rdev->flags)) {
2417                         /* Just give up on this device */
2418                         md_error(rdev->mddev, rdev);
2419                         continue;
2420                 }
2421                 /* Ok, we need to write this bio, either to correct an
2422                  * inconsistency or to correct an unreadable block.
2423                  * First we need to fixup bv_offset, bv_len and
2424                  * bi_vecs, as the read request might have corrupted these
2425                  */
2426                 rp = get_resync_pages(tbio);
2427                 bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
2428
2429                 md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2430
2431                 rp->raid_bio = r10_bio;
2432                 tbio->bi_private = rp;
2433                 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2434                 tbio->bi_end_io = end_sync_write;
2435
2436                 bio_copy_data(tbio, fbio);
2437
2438                 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2439                 atomic_inc(&r10_bio->remaining);
2440                 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2441
2442                 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2443                         tbio->bi_opf |= MD_FAILFAST;
2444                 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2445                 submit_bio_noacct(tbio);
2446         }
2447
2448         /* Now write out to any replacement devices
2449          * that are active
2450          */
2451         for (i = 0; i < conf->copies; i++) {
2452                 int d;
2453
2454                 tbio = r10_bio->devs[i].repl_bio;
2455                 if (!tbio || !tbio->bi_end_io)
2456                         continue;
2457                 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2458                     && r10_bio->devs[i].bio != fbio)
2459                         bio_copy_data(tbio, fbio);
2460                 d = r10_bio->devs[i].devnum;
2461                 atomic_inc(&r10_bio->remaining);
2462                 md_sync_acct(conf->mirrors[d].replacement->bdev,
2463                              bio_sectors(tbio));
2464                 submit_bio_noacct(tbio);
2465         }
2466
2467 done:
2468         if (atomic_dec_and_test(&r10_bio->remaining)) {
2469                 md_done_sync(mddev, r10_bio->sectors, 1);
2470                 put_buf(r10_bio);
2471         }
2472 }
2473
2474 /*
2475  * Now for the recovery code.
2476  * Recovery happens across physical sectors.
2477  * We recover all non-in_sync drives by finding the virtual address of
2478  * each, and then choosing a working drive that also has that virtual address.
2479  * There is a separate r10_bio for each non-in_sync drive.
2480  * Only the first two slots are in use: the first for reading,
2481  * the second for writing.
2482  *
2483  */
2484 static void fix_recovery_read_error(struct r10bio *r10_bio)
2485 {
2486         /* We got a read error during recovery.
2487          * We repeat the read in smaller page-sized sections.
2488          * If a read succeeds, write it to the new device or record
2489          * a bad block if we cannot.
2490          * If a read fails, record a bad block on both old and
2491          * new devices.
2492          */
2493         struct mddev *mddev = r10_bio->mddev;
2494         struct r10conf *conf = mddev->private;
2495         struct bio *bio = r10_bio->devs[0].bio;
2496         sector_t sect = 0;
2497         int sectors = r10_bio->sectors;
2498         int idx = 0;
2499         int dr = r10_bio->devs[0].devnum;
2500         int dw = r10_bio->devs[1].devnum;
2501         struct page **pages = get_resync_pages(bio)->pages;
2502
2503         while (sectors) {
2504                 int s = sectors;
2505                 struct md_rdev *rdev;
2506                 sector_t addr;
2507                 int ok;
2508
2509                 if (s > (PAGE_SIZE>>9))
2510                         s = PAGE_SIZE >> 9;
2511
2512                 rdev = conf->mirrors[dr].rdev;
2513                 addr = r10_bio->devs[0].addr + sect;
2514                 ok = sync_page_io(rdev,
2515                                   addr,
2516                                   s << 9,
2517                                   pages[idx],
2518                                   REQ_OP_READ, 0, false);
2519                 if (ok) {
2520                         rdev = conf->mirrors[dw].rdev;
2521                         addr = r10_bio->devs[1].addr + sect;
2522                         ok = sync_page_io(rdev,
2523                                           addr,
2524                                           s << 9,
2525                                           pages[idx],
2526                                           REQ_OP_WRITE, 0, false);
2527                         if (!ok) {
2528                                 set_bit(WriteErrorSeen, &rdev->flags);
2529                                 if (!test_and_set_bit(WantReplacement,
2530                                                       &rdev->flags))
2531                                         set_bit(MD_RECOVERY_NEEDED,
2532                                                 &rdev->mddev->recovery);
2533                         }
2534                 }
2535                 if (!ok) {
2536                         /* We don't worry if we cannot set a bad block -
2537                          * it really is bad so there is no loss in not
2538                          * recording it yet
2539                          */
2540                         rdev_set_badblocks(rdev, addr, s, 0);
2541
2542                         if (rdev != conf->mirrors[dw].rdev) {
2543                                 /* need bad block on destination too */
2544                                 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2545                                 addr = r10_bio->devs[1].addr + sect;
2546                                 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2547                                 if (!ok) {
2548                                         /* just abort the recovery */
2549                                         pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2550                                                   mdname(mddev));
2551
2552                                         conf->mirrors[dw].recovery_disabled
2553                                                 = mddev->recovery_disabled;
2554                                         set_bit(MD_RECOVERY_INTR,
2555                                                 &mddev->recovery);
2556                                         break;
2557                                 }
2558                         }
2559                 }
2560
2561                 sectors -= s;
2562                 sect += s;
2563                 idx++;
2564         }
2565 }
2566
2567 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2568 {
2569         struct r10conf *conf = mddev->private;
2570         int d;
2571         struct bio *wbio, *wbio2;
2572
2573         if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2574                 fix_recovery_read_error(r10_bio);
2575                 end_sync_request(r10_bio);
2576                 return;
2577         }
2578
2579         /*
2580          * share the pages with the first bio
2581          * and submit the write request
2582          */
2583         d = r10_bio->devs[1].devnum;
2584         wbio = r10_bio->devs[1].bio;
2585         wbio2 = r10_bio->devs[1].repl_bio;
2586         /* Need to test wbio2->bi_end_io before we call
2587          * submit_bio_noacct on wbio: once wbio is submitted, its
2588          * completion path is free to free wbio2.
2589          */
2590         if (wbio2 && !wbio2->bi_end_io)
2591                 wbio2 = NULL;
2592         if (wbio->bi_end_io) {
2593                 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2594                 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2595                 submit_bio_noacct(wbio);
2596         }
2597         if (wbio2) {
2598                 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2599                 md_sync_acct(conf->mirrors[d].replacement->bdev,
2600                              bio_sectors(wbio2));
2601                 submit_bio_noacct(wbio2);
2602         }
2603 }
2604
2605 /*
2606  * Used by fix_read_error() to decay the per-rdev read_errors.
2607  * We halve the read error count for every hour that has elapsed
2608  * since the last recorded read error.
2609  *
2610  */
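/*
 * Worked example (hypothetical counts): with read_errors == 40 and three
 * hours elapsed since the last error, the counter decays to 40 >> 3 == 5.
 * If more hours have passed than read_errors has bits, the counter is
 * simply reset to 0 to avoid an oversized shift.
 */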
2611 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2612 {
2613         long cur_time_mon;
2614         unsigned long hours_since_last;
2615         unsigned int read_errors = atomic_read(&rdev->read_errors);
2616
2617         cur_time_mon = ktime_get_seconds();
2618
2619         if (rdev->last_read_error == 0) {
2620                 /* first time we've seen a read error */
2621                 rdev->last_read_error = cur_time_mon;
2622                 return;
2623         }
2624
2625         hours_since_last = (long)(cur_time_mon -
2626                             rdev->last_read_error) / 3600;
2627
2628         rdev->last_read_error = cur_time_mon;
2629
2630         /*
2631          * if hours_since_last is > the number of bits in read_errors
2632          * just set read errors to 0. We do this to avoid
2633          * overflowing the shift of read_errors by hours_since_last.
2634          */
2635         if (hours_since_last >= 8 * sizeof(read_errors))
2636                 atomic_set(&rdev->read_errors, 0);
2637         else
2638                 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2639 }
2640
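/*
 * Synchronously read or write 'sectors' at 'sector' using 'page'.
 * Returns 1 on success and 0 on I/O failure (after recording a bad
 * block, or failing the device if that fails).  Returns -1 without
 * attempting the I/O if the range overlaps a known bad block (for
 * writes, only when WriteErrorSeen is set).
 */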
2641 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2642                             int sectors, struct page *page, int rw)
2643 {
2644         sector_t first_bad;
2645         int bad_sectors;
2646
2647         if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2648             && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2649                 return -1;
2650         if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
2651                 /* success */
2652                 return 1;
2653         if (rw == WRITE) {
2654                 set_bit(WriteErrorSeen, &rdev->flags);
2655                 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2656                         set_bit(MD_RECOVERY_NEEDED,
2657                                 &rdev->mddev->recovery);
2658         }
2659         /* need to record an error - either for the block or the device */
2660         if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2661                 md_error(rdev->mddev, rdev);
2662         return 0;
2663 }
2664
2665 /*
2666  * This is a kernel thread which:
2667  *
2668  *      1.      Retries failed read operations on working mirrors.
2669  *      2.      Updates the raid superblock when problems are encountered.
2670  *      3.      Performs writes following reads for array synchronising.
2671  */
2672
2673 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2674 {
2675         int sect = 0; /* Offset from r10_bio->sector */
2676         int sectors = r10_bio->sectors;
2677         struct md_rdev *rdev;
2678         int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2679         int d = r10_bio->devs[r10_bio->read_slot].devnum;
2680
2681         /* still own a reference to this rdev, so it cannot
2682          * have been cleared recently.
2683          */
2684         rdev = conf->mirrors[d].rdev;
2685
2686         if (test_bit(Faulty, &rdev->flags))
2687                 /* drive has already been failed, just ignore any
2688                    more fix_read_error() attempts */
2689                 return;
2690
2691         check_decay_read_errors(mddev, rdev);
2692         atomic_inc(&rdev->read_errors);
2693         if (atomic_read(&rdev->read_errors) > max_read_errors) {
2694                 char b[BDEVNAME_SIZE];
2695                 bdevname(rdev->bdev, b);
2696
2697                 pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
2698                           mdname(mddev), b,
2699                           atomic_read(&rdev->read_errors), max_read_errors);
2700                 pr_notice("md/raid10:%s: %s: Failing raid device\n",
2701                           mdname(mddev), b);
2702                 md_error(mddev, rdev);
2703                 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2704                 return;
2705         }
2706
2707         while (sectors) {
2708                 int s = sectors;
2709                 int sl = r10_bio->read_slot;
2710                 int success = 0;
2711                 int start;
2712
2713                 if (s > (PAGE_SIZE >> 9))
2714                         s = PAGE_SIZE >> 9;
2715
2716                 rcu_read_lock();
2717                 do {
2718                         sector_t first_bad;
2719                         int bad_sectors;
2720
2721                         d = r10_bio->devs[sl].devnum;
2722                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2723                         if (rdev &&
2724                             test_bit(In_sync, &rdev->flags) &&
2725                             !test_bit(Faulty, &rdev->flags) &&
2726                             is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2727                                         &first_bad, &bad_sectors) == 0) {
2728                                 atomic_inc(&rdev->nr_pending);
2729                                 rcu_read_unlock();
2730                                 success = sync_page_io(rdev,
2731                                                        r10_bio->devs[sl].addr +
2732                                                        sect,
2733                                                        s<<9,
2734                                                        conf->tmppage,
2735                                                        REQ_OP_READ, 0, false);
2736                                 rdev_dec_pending(rdev, mddev);
2737                                 rcu_read_lock();
2738                                 if (success)
2739                                         break;
2740                         }
2741                         sl++;
2742                         if (sl == conf->copies)
2743                                 sl = 0;
2744                 } while (!success && sl != r10_bio->read_slot);
2745                 rcu_read_unlock();
2746
2747                 if (!success) {
2748                         /* Cannot read from anywhere, just mark the block
2749                          * as bad on the first device to discourage future
2750                          * reads.
2751                          */
2752                         int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2753                         rdev = conf->mirrors[dn].rdev;
2754
2755                         if (!rdev_set_badblocks(
2756                                     rdev,
2757                                     r10_bio->devs[r10_bio->read_slot].addr
2758                                     + sect,
2759                                     s, 0)) {
2760                                 md_error(mddev, rdev);
2761                                 r10_bio->devs[r10_bio->read_slot].bio
2762                                         = IO_BLOCKED;
2763                         }
2764                         break;
2765                 }
2766
2767                 start = sl;
2768                 /* write it back and re-read */
2769                 rcu_read_lock();
2770                 while (sl != r10_bio->read_slot) {
2771                         char b[BDEVNAME_SIZE];
2772
2773                         if (sl == 0)
2774                                 sl = conf->copies;
2775                         sl--;
2776                         d = r10_bio->devs[sl].devnum;
2777                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2778                         if (!rdev ||
2779                             test_bit(Faulty, &rdev->flags) ||
2780                             !test_bit(In_sync, &rdev->flags))
2781                                 continue;
2782
2783                         atomic_inc(&rdev->nr_pending);
2784                         rcu_read_unlock();
2785                         if (r10_sync_page_io(rdev,
2786                                              r10_bio->devs[sl].addr +
2787                                              sect,
2788                                              s, conf->tmppage, WRITE)
2789                             == 0) {
2790                                 /* Well, this device is dead */
2791                                 pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
2792                                           mdname(mddev), s,
2793                                           (unsigned long long)(
2794                                                   sect +
2795                                                   choose_data_offset(r10_bio,
2796                                                                      rdev)),
2797                                           bdevname(rdev->bdev, b));
2798                                 pr_notice("md/raid10:%s: %s: failing drive\n",
2799                                           mdname(mddev),
2800                                           bdevname(rdev->bdev, b));
2801                         }
2802                         rdev_dec_pending(rdev, mddev);
2803                         rcu_read_lock();
2804                 }
2805                 sl = start;
2806                 while (sl != r10_bio->read_slot) {
2807                         char b[BDEVNAME_SIZE];
2808
2809                         if (sl == 0)
2810                                 sl = conf->copies;
2811                         sl--;
2812                         d = r10_bio->devs[sl].devnum;
2813                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2814                         if (!rdev ||
2815                             test_bit(Faulty, &rdev->flags) ||
2816                             !test_bit(In_sync, &rdev->flags))
2817                                 continue;
2818
2819                         atomic_inc(&rdev->nr_pending);
2820                         rcu_read_unlock();
2821                         switch (r10_sync_page_io(rdev,
2822                                              r10_bio->devs[sl].addr +
2823                                              sect,
2824                                              s, conf->tmppage,
2825                                                  READ)) {
2826                         case 0:
2827                                 /* Well, this device is dead */
2828                                 pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
2829                                        mdname(mddev), s,
2830                                        (unsigned long long)(
2831                                                sect +
2832                                                choose_data_offset(r10_bio, rdev)),
2833                                        bdevname(rdev->bdev, b));
2834                                 pr_notice("md/raid10:%s: %s: failing drive\n",
2835                                        mdname(mddev),
2836                                        bdevname(rdev->bdev, b));
2837                                 break;
2838                         case 1:
2839                                 pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
2840                                        mdname(mddev), s,
2841                                        (unsigned long long)(
2842                                                sect +
2843                                                choose_data_offset(r10_bio, rdev)),
2844                                        bdevname(rdev->bdev, b));
2845                                 atomic_add(s, &rdev->corrected_errors);
2846                         }
2847
2848                         rdev_dec_pending(rdev, mddev);
2849                         rcu_read_lock();
2850                 }
2851                 rcu_read_unlock();
2852
2853                 sectors -= s;
2854                 sect += s;
2855         }
2856 }
2857
2858 static int narrow_write_error(struct r10bio *r10_bio, int i)
2859 {
2860         struct bio *bio = r10_bio->master_bio;
2861         struct mddev *mddev = r10_bio->mddev;
2862         struct r10conf *conf = mddev->private;
2863         struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2864         /* bio has the data to be written to slot 'i' where
2865          * we just recently had a write error.
2866          * We repeatedly clone the bio and trim down to one block,
2867          * then try the write.  Where the write fails we record
2868          * a bad block.
2869          * It is conceivable that the bio doesn't exactly align with
2870          * blocks.  We must handle this.
2871          *
2872          * We currently own a reference to the rdev.
2873          */
2874
2875         int block_sectors;
2876         sector_t sector;
2877         int sectors;
2878         int sect_to_write = r10_bio->sectors;
2879         int ok = 1;
2880
2881         if (rdev->badblocks.shift < 0)
2882                 return 0;
2883
2884         block_sectors = roundup(1 << rdev->badblocks.shift,
2885                                 bdev_logical_block_size(rdev->bdev) >> 9);
2886         sector = r10_bio->sector;
2887         sectors = ((r10_bio->sector + block_sectors)
2888                    & ~(sector_t)(block_sectors - 1))
2889                 - sector;
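        /*
         * e.g. with badblocks.shift == 3 (8-sector blocks) and
         * r10_bio->sector == 21, the first pass writes 3 sectors
         * (21-23) so that later passes are 8-sector aligned.
         */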
2890
2891         while (sect_to_write) {
2892                 struct bio *wbio;
2893                 sector_t wsector;
2894                 if (sectors > sect_to_write)
2895                         sectors = sect_to_write;
2896                 /* Write at 'sector' for 'sectors' */
2897                 wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
2898                                        &mddev->bio_set);
2899                 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2900                 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2901                 wbio->bi_iter.bi_sector = wsector +
2902                                    choose_data_offset(r10_bio, rdev);
2903                 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2904
2905                 if (submit_bio_wait(wbio) < 0)
2906                         /* Failure! */
2907                         ok = rdev_set_badblocks(rdev, wsector,
2908                                                 sectors, 0)
2909                                 && ok;
2910
2911                 bio_put(wbio);
2912                 sect_to_write -= sectors;
2913                 sector += sectors;
2914                 sectors = block_sectors;
2915         }
2916         return ok;
2917 }
2918
2919 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2920 {
2921         int slot = r10_bio->read_slot;
2922         struct bio *bio;
2923         struct r10conf *conf = mddev->private;
2924         struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2925
2926         /* we got a read error. Maybe the drive is bad.  Maybe just
2927          * the block and we can fix it.
2928          * We freeze all other IO, and try reading the block from
2929          * other devices.  When we find one, we re-write the block
2930          * and re-read to check that the read error is fixed.
2931          * This is all done synchronously while the array is
2932          * frozen.
2933          */
2934         bio = r10_bio->devs[slot].bio;
2935         bio_put(bio);
2936         r10_bio->devs[slot].bio = NULL;
2937
2938         if (mddev->ro)
2939                 r10_bio->devs[slot].bio = IO_BLOCKED;
2940         else if (!test_bit(FailFast, &rdev->flags)) {
2941                 freeze_array(conf, 1);
2942                 fix_read_error(conf, mddev, r10_bio);
2943                 unfreeze_array(conf);
2944         } else
2945                 md_error(mddev, rdev);
2946
2947         rdev_dec_pending(rdev, mddev);
2948         allow_barrier(conf);
2949         r10_bio->state = 0;
2950         raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
2951 }
2952
2953 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2954 {
2955         /* Some sort of write request has finished and it
2956          * succeeded in writing where we thought there was a
2957          * bad block.  So forget the bad block.
2958          * Or possibly it failed, and we need to record
2959          * a bad block.
2960          */
2961         int m;
2962         struct md_rdev *rdev;
2963
2964         if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2965             test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2966                 for (m = 0; m < conf->copies; m++) {
2967                         int dev = r10_bio->devs[m].devnum;
2968                         rdev = conf->mirrors[dev].rdev;
2969                         if (r10_bio->devs[m].bio == NULL ||
2970                                 r10_bio->devs[m].bio->bi_end_io == NULL)
2971                                 continue;
2972                         if (!r10_bio->devs[m].bio->bi_status) {
2973                                 rdev_clear_badblocks(
2974                                         rdev,
2975                                         r10_bio->devs[m].addr,
2976                                         r10_bio->sectors, 0);
2977                         } else {
2978                                 if (!rdev_set_badblocks(
2979                                             rdev,
2980                                             r10_bio->devs[m].addr,
2981                                             r10_bio->sectors, 0))
2982                                         md_error(conf->mddev, rdev);
2983                         }
2984                         rdev = conf->mirrors[dev].replacement;
2985                         if (r10_bio->devs[m].repl_bio == NULL ||
2986                                 r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2987                                 continue;
2988
2989                         if (!r10_bio->devs[m].repl_bio->bi_status) {
2990                                 rdev_clear_badblocks(
2991                                         rdev,
2992                                         r10_bio->devs[m].addr,
2993                                         r10_bio->sectors, 0);
2994                         } else {
2995                                 if (!rdev_set_badblocks(
2996                                             rdev,
2997                                             r10_bio->devs[m].addr,
2998                                             r10_bio->sectors, 0))
2999                                         md_error(conf->mddev, rdev);
3000                         }
3001                 }
3002                 put_buf(r10_bio);
3003         } else {
3004                 bool fail = false;
3005                 for (m = 0; m < conf->copies; m++) {
3006                         int dev = r10_bio->devs[m].devnum;
3007                         struct bio *bio = r10_bio->devs[m].bio;
3008                         rdev = conf->mirrors[dev].rdev;
3009                         if (bio == IO_MADE_GOOD) {
3010                                 rdev_clear_badblocks(
3011                                         rdev,
3012                                         r10_bio->devs[m].addr,
3013                                         r10_bio->sectors, 0);
3014                                 rdev_dec_pending(rdev, conf->mddev);
3015                         } else if (bio != NULL && bio->bi_status) {
3016                                 fail = true;
3017                                 if (!narrow_write_error(r10_bio, m)) {
3018                                         md_error(conf->mddev, rdev);
3019                                         set_bit(R10BIO_Degraded,
3020                                                 &r10_bio->state);
3021                                 }
3022                                 rdev_dec_pending(rdev, conf->mddev);
3023                         }
3024                         bio = r10_bio->devs[m].repl_bio;
3025                         rdev = conf->mirrors[dev].replacement;
3026                         if (rdev && bio == IO_MADE_GOOD) {
3027                                 rdev_clear_badblocks(
3028                                         rdev,
3029                                         r10_bio->devs[m].addr,
3030                                         r10_bio->sectors, 0);
3031                                 rdev_dec_pending(rdev, conf->mddev);
3032                         }
3033                 }
3034                 if (fail) {
3035                         spin_lock_irq(&conf->device_lock);
3036                         list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
3037                         conf->nr_queued++;
3038                         spin_unlock_irq(&conf->device_lock);
3039                         /*
3040                          * In case freeze_array() is waiting for condition
3041                          * nr_pending == nr_queued + extra to be true.
3042                          */
3043                         wake_up(&conf->wait_barrier);
3044                         md_wakeup_thread(conf->mddev->thread);
3045                 } else {
3046                         if (test_bit(R10BIO_WriteError,
3047                                      &r10_bio->state))
3048                                 close_write(r10_bio);
3049                         raid_end_bio_io(r10_bio);
3050                 }
3051         }
3052 }
3053
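/*
 * raid10d is the array's management thread: it first completes any
 * bios parked on bio_end_io_list, then services retry_list, routing
 * each r10_bio to the matching sync/recovery/read-error handler.
 */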
3054 static void raid10d(struct md_thread *thread)
3055 {
3056         struct mddev *mddev = thread->mddev;
3057         struct r10bio *r10_bio;
3058         unsigned long flags;
3059         struct r10conf *conf = mddev->private;
3060         struct list_head *head = &conf->retry_list;
3061         struct blk_plug plug;
3062
3063         md_check_recovery(mddev);
3064
3065         if (!list_empty_careful(&conf->bio_end_io_list) &&
3066             !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
3067                 LIST_HEAD(tmp);
3068                 spin_lock_irqsave(&conf->device_lock, flags);
3069                 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
3070                         while (!list_empty(&conf->bio_end_io_list)) {
3071                                 list_move(conf->bio_end_io_list.prev, &tmp);
3072                                 conf->nr_queued--;
3073                         }
3074                 }
3075                 spin_unlock_irqrestore(&conf->device_lock, flags);
3076                 while (!list_empty(&tmp)) {
3077                         r10_bio = list_first_entry(&tmp, struct r10bio,
3078                                                    retry_list);
3079                         list_del(&r10_bio->retry_list);
3080                         if (mddev->degraded)
3081                                 set_bit(R10BIO_Degraded, &r10_bio->state);
3082
3083                         if (test_bit(R10BIO_WriteError,
3084                                      &r10_bio->state))
3085                                 close_write(r10_bio);
3086                         raid_end_bio_io(r10_bio);
3087                 }
3088         }
3089
3090         blk_start_plug(&plug);
3091         for (;;) {
3092
3093                 flush_pending_writes(conf);
3094
3095                 spin_lock_irqsave(&conf->device_lock, flags);
3096                 if (list_empty(head)) {
3097                         spin_unlock_irqrestore(&conf->device_lock, flags);
3098                         break;
3099                 }
3100                 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
3101                 list_del(head->prev);
3102                 conf->nr_queued--;
3103                 spin_unlock_irqrestore(&conf->device_lock, flags);
3104
3105                 mddev = r10_bio->mddev;
3106                 conf = mddev->private;
3107                 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
3108                     test_bit(R10BIO_WriteError, &r10_bio->state))
3109                         handle_write_completed(conf, r10_bio);
3110                 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
3111                         reshape_request_write(mddev, r10_bio);
3112                 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
3113                         sync_request_write(mddev, r10_bio);
3114                 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
3115                         recovery_request_write(mddev, r10_bio);
3116                 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
3117                         handle_read_error(mddev, r10_bio);
3118                 else
3119                         WARN_ON_ONCE(1);
3120
3121                 cond_resched();
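                /*
                 * If any superblock flag other than CHANGE_PENDING was
                 * set while we were working, give md_check_recovery()
                 * another chance to run.
                 */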
3122                 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
3123                         md_check_recovery(mddev);
3124         }
3125         blk_finish_plug(&plug);
3126 }
3127
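/* Initialise the mempool of resync/recovery buffers (r10buf_pool). */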
3128 static int init_resync(struct r10conf *conf)
3129 {
3130         int ret, buffs, i;
3131
3132         buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
3133         BUG_ON(mempool_initialized(&conf->r10buf_pool));
3134         conf->have_replacement = 0;
3135         for (i = 0; i < conf->geo.raid_disks; i++)
3136                 if (conf->mirrors[i].replacement)
3137                         conf->have_replacement = 1;
3138         ret = mempool_init(&conf->r10buf_pool, buffs,
3139                            r10buf_pool_alloc, r10buf_pool_free, conf);
3140         if (ret)
3141                 return ret;
3142         conf->next_resync = 0;
3143         return 0;
3144 }
3145
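/*
 * Take a buffer from r10buf_pool and reset its bios for reuse,
 * preserving the resync_pages that bi_private points at.
 */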
3146 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
3147 {
3148         struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
3149         struct resync_pages *rp;
3150         struct bio *bio;
3151         int nalloc;
3152         int i;
3153
3154         if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
3155             test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
3156                 nalloc = conf->copies; /* resync */
3157         else
3158                 nalloc = 2; /* recovery */
3159
3160         for (i = 0; i < nalloc; i++) {
3161                 bio = r10bio->devs[i].bio;
3162                 rp = bio->bi_private;
3163                 bio_reset(bio, NULL, 0);
3164                 bio->bi_private = rp;
3165                 bio = r10bio->devs[i].repl_bio;
3166                 if (bio) {
3167                         rp = bio->bi_private;
3168                         bio_reset(bio, NULL, 0);
3169                         bio->bi_private = rp;
3170                 }
3171         }
3172         return r10bio;
3173 }
3174
3175 /*
3176  * Set cluster_sync_high since we need other nodes to add the
3177  * range [cluster_sync_low, cluster_sync_high] to suspend list.
3178  */
3179 static void raid10_set_cluster_sync_high(struct r10conf *conf)
3180 {
3181         sector_t window_size;
3182         int extra_chunk, chunks;
3183
3184         /*
3185          * First, we define a "stripe" as a unit that spans all member
3186          * devices exactly once, so we get the number of chunks from
3187          * raid_disks / near_copies.  Otherwise, if near_copies is
3188          * close to raid_disks, the resync window could increase
3189          * linearly with the number of raid_disks, meaning we would
3190          * suspend a really large IO window when it is not necessary.
3191          * If raid_disks is not divisible by near_copies, an extra
3192          * chunk is needed to ensure the whole "stripe" is covered.
3194          */
3195
3196         chunks = conf->geo.raid_disks / conf->geo.near_copies;
3197         if (conf->geo.raid_disks % conf->geo.near_copies == 0)
3198                 extra_chunk = 0;
3199         else
3200                 extra_chunk = 1;
3201         window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
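        /*
         * e.g. raid_disks == 5, near_copies == 2: chunks == 2 plus one
         * extra chunk, so window_size here spans 3 chunks (before
         * applying the 32M minimum below).
         */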
3202
3203         /*
3204          * At least use a 32M window to align with raid1's resync window
3205          */
3206         window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
3207                         CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
3208
3209         conf->cluster_sync_high = conf->cluster_sync_low + window_size;
3210 }
3211
3212 /*
3213  * perform a "sync" on one "block"
3214  *
3215  * We need to make sure that no normal I/O request - particularly write
3216  * requests - conflict with active sync requests.
3217  *
3218  * This is achieved by tracking pending requests and a 'barrier' concept
3219  * that can be installed to exclude normal IO requests.
3220  *
3221  * Resync and recovery are handled very differently.
3222  * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
3223  *
3224  * For resync, we iterate over virtual addresses, read all copies,
3225  * and update if there are differences.  If only one copy is live,
3226  * skip it.
3227  * For recovery, we iterate over physical addresses, read a good
3228  * value for each non-in_sync drive, and over-write.
3229  *
3230  * So, for recovery we may have several outstanding complex requests for a
3231  * given address, one for each out-of-sync device.  We model this by allocating
3232  * a number of r10_bio structures, one for each out-of-sync device.
3233  * As we setup these structures, we collect all bio's together into a list
3234  * which we then process collectively to add pages, and then process again
3235  * to pass to submit_bio_noacct.
3236  *
3237  * The r10_bio structures are linked using a borrowed master_bio pointer.
3238  * This link is counted in ->remaining.  When the r10_bio that points to NULL
3239  * has its remaining count decremented to 0, the whole complex operation
3240  * is complete.
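 *
 * For example, when two devices are recovered at the same virtual
 * address, the chain looks like:
 *   r10_bio(B)->master_bio --> r10_bio(A)->master_bio --> NULL
 * where r10_bio(A)->remaining includes a reference for r10_bio(B).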
3241  *
3242  */
3243
3244 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3245                              int *skipped)
3246 {
3247         struct r10conf *conf = mddev->private;
3248         struct r10bio *r10_bio;
3249         struct bio *biolist = NULL, *bio;
3250         sector_t max_sector, nr_sectors;
3251         int i;
3252         int max_sync;
3253         sector_t sync_blocks;
3254         sector_t sectors_skipped = 0;
3255         int chunks_skipped = 0;
3256         sector_t chunk_mask = conf->geo.chunk_mask;
3257         int page_idx = 0;
3258
3259         if (!mempool_initialized(&conf->r10buf_pool))
3260                 if (init_resync(conf))
3261                         return 0;
3262
3263         /*
3264          * Allow skipping a full rebuild for incremental assembly
3265          * of a clean array, like RAID1 does.
3266          */
3267         if (mddev->bitmap == NULL &&
3268             mddev->recovery_cp == MaxSector &&
3269             mddev->reshape_position == MaxSector &&
3270             !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
3271             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3272             !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
3273             conf->fullsync == 0) {
3274                 *skipped = 1;
3275                 return mddev->dev_sectors - sector_nr;
3276         }
3277
3278  skipped:
3279         max_sector = mddev->dev_sectors;
3280         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
3281             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3282                 max_sector = mddev->resync_max_sectors;
3283         if (sector_nr >= max_sector) {
3284                 conf->cluster_sync_low = 0;
3285                 conf->cluster_sync_high = 0;
3286
3287                 /* If we aborted, we need to abort the
3288                  * sync on the 'current' bitmap chunks (there can
3289                  * be several when recovering multiple devices),
3290                  * as we may have started syncing them but not finished.
3291                  * We can find the current address in
3292                  * mddev->curr_resync, but for recovery,
3293                  * we need to convert that to several
3294                  * virtual addresses.
3295                  */
3296                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3297                         end_reshape(conf);
3298                         close_sync(conf);
3299                         return 0;
3300                 }
3301
3302                 if (mddev->curr_resync < max_sector) { /* aborted */
3303                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3304                                 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
3305                                                    &sync_blocks, 1);
3306                         else for (i = 0; i < conf->geo.raid_disks; i++) {
3307                                 sector_t sect =
3308                                         raid10_find_virt(conf, mddev->curr_resync, i);
3309                                 md_bitmap_end_sync(mddev->bitmap, sect,
3310                                                    &sync_blocks, 1);
3311                         }
3312                 } else {
3313                         /* completed sync */
3314                         if ((!mddev->bitmap || conf->fullsync)
3315                             && conf->have_replacement
3316                             && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3317                                 /* Completed a full sync so the replacements
3318                                  * are now fully recovered.
3319                                  */
3320                                 rcu_read_lock();
3321                                 for (i = 0; i < conf->geo.raid_disks; i++) {
3322                                         struct md_rdev *rdev =
3323                                                 rcu_dereference(conf->mirrors[i].replacement);
3324                                         if (rdev)
3325                                                 rdev->recovery_offset = MaxSector;
3326                                 }
3327                                 rcu_read_unlock();
3328                         }
3329                         conf->fullsync = 0;
3330                 }
3331                 md_bitmap_close_sync(mddev->bitmap);
3332                 close_sync(conf);
3333                 *skipped = 1;
3334                 return sectors_skipped;
3335         }
3336
3337         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3338                 return reshape_request(mddev, sector_nr, skipped);
3339
3340         if (chunks_skipped >= conf->geo.raid_disks) {
3341                 /* if there has been nothing to do on any drive,
3342                  * then there is nothing to do at all.
3343                  */
3344                 *skipped = 1;
3345                 return (max_sector - sector_nr) + sectors_skipped;
3346         }
3347
3348         if (max_sector > mddev->resync_max)
3349                 max_sector = mddev->resync_max; /* Don't do IO beyond here */
3350
3351         /* make sure whole request will fit in a chunk - if chunks
3352          * are meaningful
3353          */
3354         if (conf->geo.near_copies < conf->geo.raid_disks &&
3355             max_sector > (sector_nr | chunk_mask))
3356                 max_sector = (sector_nr | chunk_mask) + 1;
3357
3358         /*
3359          * If there is non-resync activity waiting for a turn, then let it
3360          * through before starting on this new sync request.
3361          */
3362         if (conf->nr_waiting)
3363                 schedule_timeout_uninterruptible(1);
3364
3365         /* Again, very different code for resync and recovery.
3366          * Both must result in an r10bio with a list of bios that
3367          * have bi_end_io, bi_sector, bi_bdev set,
3368          * and bi_private set to the r10bio.
3369          * For recovery, we may actually create several r10bios
3370          * with 2 bios in each, that correspond to the bios in the main one.
3371          * In this case, the subordinate r10bios link back through a
3372          * borrowed master_bio pointer, and the counter in the master
3373          * includes a ref from each subordinate.
3374          */
3375         /* First, we decide what to do and set ->bi_end_io
3376          * to end_sync_read if we want to read, and
3377          * end_sync_write if we will want to write.
3378          */
3379
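        /*
         * One resync buffer's worth of sectors; with 4K pages and the
         * 64K RESYNC_BLOCK_SIZE shared with raid1 this is 128 sectors.
         */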
3380         max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3381         if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3382                 /* recovery... the complicated one */
3383                 int j;
3384                 r10_bio = NULL;
3385
3386                 for (i = 0 ; i < conf->geo.raid_disks; i++) {
3387                         int still_degraded;
3388                         struct r10bio *rb2;
3389                         sector_t sect;
3390                         int must_sync;
3391                         int any_working;
3392                         int need_recover = 0;
3393                         int need_replace = 0;
3394                         struct raid10_info *mirror = &conf->mirrors[i];
3395                         struct md_rdev *mrdev, *mreplace;
3396
3397                         rcu_read_lock();
3398                         mrdev = rcu_dereference(mirror->rdev);
3399                         mreplace = rcu_dereference(mirror->replacement);
3400
3401                         if (mrdev != NULL &&
3402                             !test_bit(Faulty, &mrdev->flags) &&
3403                             !test_bit(In_sync, &mrdev->flags))
3404                                 need_recover = 1;
3405                         if (mreplace != NULL &&
3406                             !test_bit(Faulty, &mreplace->flags))
3407                                 need_replace = 1;
3408
3409                         if (!need_recover && !need_replace) {
3410                                 rcu_read_unlock();
3411                                 continue;
3412                         }
3413
3414                         still_degraded = 0;
3415                         /* want to reconstruct this device */
3416                         rb2 = r10_bio;
3417                         sect = raid10_find_virt(conf, sector_nr, i);
3418                         if (sect >= mddev->resync_max_sectors) {
3419                                 /* last stripe is not complete - don't
3420                                  * try to recover this sector.
3421                                  */
3422                                 rcu_read_unlock();
3423                                 continue;
3424                         }
3425                         if (mreplace && test_bit(Faulty, &mreplace->flags))
3426                                 mreplace = NULL;
3427                         /* Unless we are doing a full sync or a replacement,
3428                          * we only need to recover the block if it is set in
3429                          * the bitmap
3430                          */
3431                         must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3432                                                          &sync_blocks, 1);
3433                         if (sync_blocks < max_sync)
3434                                 max_sync = sync_blocks;
3435                         if (!must_sync &&
3436                             mreplace == NULL &&
3437                             !conf->fullsync) {
3438                                 /* yep, skip the sync_blocks here, but don't assume
3439                                  * that there will never be anything to do here
3440                                  */
3441                                 chunks_skipped = -1;
3442                                 rcu_read_unlock();
3443                                 continue;
3444                         }
3445                         atomic_inc(&mrdev->nr_pending);
3446                         if (mreplace)
3447                                 atomic_inc(&mreplace->nr_pending);
3448                         rcu_read_unlock();
3449
3450                         r10_bio = raid10_alloc_init_r10buf(conf);
3451                         r10_bio->state = 0;
3452                         raise_barrier(conf, rb2 != NULL);
3453                         atomic_set(&r10_bio->remaining, 0);
3454
3455                         r10_bio->master_bio = (struct bio*)rb2;
3456                         if (rb2)
3457                                 atomic_inc(&rb2->remaining);
3458                         r10_bio->mddev = mddev;
3459                         set_bit(R10BIO_IsRecover, &r10_bio->state);
3460                         r10_bio->sector = sect;
3461
3462                         raid10_find_phys(conf, r10_bio);
3463
3464                         /* Need to check if the array will still be
3465                          * degraded
3466                          */
3467                         rcu_read_lock();
3468                         for (j = 0; j < conf->geo.raid_disks; j++) {
3469                                 struct md_rdev *rdev = rcu_dereference(
3470                                         conf->mirrors[j].rdev);
3471                                 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3472                                         still_degraded = 1;
3473                                         break;
3474                                 }
3475                         }
3476
3477                         must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3478                                                          &sync_blocks, still_degraded);
3479
3480                         any_working = 0;
3481                         for (j = 0; j < conf->copies; j++) {
3482                                 int k;
3483                                 int d = r10_bio->devs[j].devnum;
3484                                 sector_t from_addr, to_addr;
3485                                 struct md_rdev *rdev =
3486                                         rcu_dereference(conf->mirrors[d].rdev);
3487                                 sector_t sector, first_bad;
3488                                 int bad_sectors;
3489                                 if (!rdev ||
3490                                     !test_bit(In_sync, &rdev->flags))
3491                                         continue;
3492                                 /* This is where we read from */
3493                                 any_working = 1;
3494                                 sector = r10_bio->devs[j].addr;
3495
3496                                 if (is_badblock(rdev, sector, max_sync,
3497                                                 &first_bad, &bad_sectors)) {
3498                                         if (first_bad > sector)
3499                                                 max_sync = first_bad - sector;
3500                                         else {
3501                                                 bad_sectors -= (sector
3502                                                                 - first_bad);
3503                                                 if (max_sync > bad_sectors)
3504                                                         max_sync = bad_sectors;
3505                                                 continue;
3506                                         }
3507                                 }
3508                                 bio = r10_bio->devs[0].bio;
3509                                 bio->bi_next = biolist;
3510                                 biolist = bio;
3511                                 bio->bi_end_io = end_sync_read;
3512                                 bio_set_op_attrs(bio, REQ_OP_READ, 0);
3513                                 if (test_bit(FailFast, &rdev->flags))
3514                                         bio->bi_opf |= MD_FAILFAST;
3515                                 from_addr = r10_bio->devs[j].addr;
3516                                 bio->bi_iter.bi_sector = from_addr +
3517                                         rdev->data_offset;
3518                                 bio_set_dev(bio, rdev->bdev);
3519                                 atomic_inc(&rdev->nr_pending);
3520                                 /* and we write to 'i' (if not in_sync) */
3521
3522                                 for (k = 0; k < conf->copies; k++)
3523                                         if (r10_bio->devs[k].devnum == i)
3524                                                 break;
3525                                 BUG_ON(k == conf->copies);
3526                                 to_addr = r10_bio->devs[k].addr;
3527                                 r10_bio->devs[0].devnum = d;
3528                                 r10_bio->devs[0].addr = from_addr;
3529                                 r10_bio->devs[1].devnum = i;
3530                                 r10_bio->devs[1].addr = to_addr;
3531
3532                                 if (need_recover) {
3533                                         bio = r10_bio->devs[1].bio;
3534                                         bio->bi_next = biolist;
3535                                         biolist = bio;
3536                                         bio->bi_end_io = end_sync_write;
3537                                         bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3538                                         bio->bi_iter.bi_sector = to_addr
3539                                                 + mrdev->data_offset;
3540                                         bio_set_dev(bio, mrdev->bdev);
3541                                         atomic_inc(&r10_bio->remaining);
3542                                 } else
3543                                         r10_bio->devs[1].bio->bi_end_io = NULL;
3544
3545                                 /* and maybe write to replacement */
3546                                 bio = r10_bio->devs[1].repl_bio;
3547                                 if (bio)
3548                                         bio->bi_end_io = NULL;
3549                                 /* Note: if need_replace, then bio
3550                                  * cannot be NULL as r10buf_pool_alloc will
3551                                  * have allocated it.
3552                                  */
3553                                 if (!need_replace)
3554                                         break;
3555                                 bio->bi_next = biolist;
3556                                 biolist = bio;
3557                                 bio->bi_end_io = end_sync_write;
3558                                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3559                                 bio->bi_iter.bi_sector = to_addr +
3560                                         mreplace->data_offset;
3561                                 bio_set_dev(bio, mreplace->bdev);
3562                                 atomic_inc(&r10_bio->remaining);
3563                                 break;
3564                         }
3565                         rcu_read_unlock();
3566                         if (j == conf->copies) {
3567                                 /* Cannot recover, so abort the recovery or
3568                                  * record a bad block */
3569                                 if (any_working) {
3570                                         /* problem is that there are bad blocks
3571                                          * on other device(s)
3572                                          */
3573                                         int k;
3574                                         for (k = 0; k < conf->copies; k++)
3575                                                 if (r10_bio->devs[k].devnum == i)
3576                                                         break;
3577                                         if (!test_bit(In_sync,
3578                                                       &mrdev->flags)
3579                                             && !rdev_set_badblocks(
3580                                                     mrdev,
3581                                                     r10_bio->devs[k].addr,
3582                                                     max_sync, 0))
3583                                                 any_working = 0;
3584                                         if (mreplace &&
3585                                             !rdev_set_badblocks(
3586                                                     mreplace,
3587                                                     r10_bio->devs[k].addr,
3588                                                     max_sync, 0))
3589                                                 any_working = 0;
3590                                 }
3591                                 if (!any_working)  {
3592                                         if (!test_and_set_bit(MD_RECOVERY_INTR,
3593                                                               &mddev->recovery))
3594                                                 pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
3595                                                        mdname(mddev));
3596                                         mirror->recovery_disabled
3597                                                 = mddev->recovery_disabled;
3598                                 }
3599                                 put_buf(r10_bio);
3600                                 if (rb2)
3601                                         atomic_dec(&rb2->remaining);
3602                                 r10_bio = rb2;
3603                                 rdev_dec_pending(mrdev, mddev);
3604                                 if (mreplace)
3605                                         rdev_dec_pending(mreplace, mddev);
3606                                 break;
3607                         }
3608                         rdev_dec_pending(mrdev, mddev);
3609                         if (mreplace)
3610                                 rdev_dec_pending(mreplace, mddev);
3611                         if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3612                                 /* Only want this if there is elsewhere to
3613                                  * read from. 'j' is currently the first
3614                                  * readable copy.
3615                                  */
3616                                 int targets = 1;
3617                                 for (; j < conf->copies; j++) {
3618                                         int d = r10_bio->devs[j].devnum;
3619                                         if (conf->mirrors[d].rdev &&
3620                                             test_bit(In_sync,
3621                                                       &conf->mirrors[d].rdev->flags))
3622                                                 targets++;
3623                                 }
3624                                 if (targets == 1)
3625                                         r10_bio->devs[0].bio->bi_opf
3626                                                 &= ~MD_FAILFAST;
3627                         }
3628                 }
3629                 if (biolist == NULL) {
3630                         while (r10_bio) {
3631                                 struct r10bio *rb2 = r10_bio;
3632                                 r10_bio = (struct r10bio*) rb2->master_bio;
3633                                 rb2->master_bio = NULL;
3634                                 put_buf(rb2);
3635                         }
3636                         goto giveup;
3637                 }
3638         } else {
3639                 /* resync. Schedule a read for every block at this virt offset */
3640                 int count = 0;
3641
3642                 /*
3643                  * curr_resync_completed may not be updated in time, and we
3644                  * set cluster_sync_low based on it.  Check against
3645                  * "sector_nr + 2 * RESYNC_SECTORS" as a safety margin,
3646                  * which ensures curr_resync_completed is updated in
3647                  * md_bitmap_cond_end_sync.
3648                  */
3649                 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
3650                                         mddev_is_clustered(mddev) &&
3651                                         (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3652
3653                 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
3654                                           &sync_blocks, mddev->degraded) &&
3655                     !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3656                                                  &mddev->recovery)) {
3657                         /* We can skip this block */
3658                         *skipped = 1;
3659                         return sync_blocks + sectors_skipped;
3660                 }
3661                 if (sync_blocks < max_sync)
3662                         max_sync = sync_blocks;
3663                 r10_bio = raid10_alloc_init_r10buf(conf);
3664                 r10_bio->state = 0;
3665
3666                 r10_bio->mddev = mddev;
3667                 atomic_set(&r10_bio->remaining, 0);
3668                 raise_barrier(conf, 0);
3669                 conf->next_resync = sector_nr;
3670
3671                 r10_bio->master_bio = NULL;
3672                 r10_bio->sector = sector_nr;
3673                 set_bit(R10BIO_IsSync, &r10_bio->state);
3674                 raid10_find_phys(conf, r10_bio);
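                /* sync up to the end of the chunk containing sector_nr */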
3675                 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3676
3677                 for (i = 0; i < conf->copies; i++) {
3678                         int d = r10_bio->devs[i].devnum;
3679                         sector_t first_bad, sector;
3680                         int bad_sectors;
3681                         struct md_rdev *rdev;
3682
3683                         if (r10_bio->devs[i].repl_bio)
3684                                 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3685
3686                         bio = r10_bio->devs[i].bio;
3687                         bio->bi_status = BLK_STS_IOERR;
3688                         rcu_read_lock();
3689                         rdev = rcu_dereference(conf->mirrors[d].rdev);
3690                         if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3691                                 rcu_read_unlock();
3692                                 continue;
3693                         }
3694                         sector = r10_bio->devs[i].addr;
3695                         if (is_badblock(rdev, sector, max_sync,
3696                                         &first_bad, &bad_sectors)) {
3697                                 if (first_bad > sector)
3698                                         max_sync = first_bad - sector;
3699                                 else {
3700                                         bad_sectors -= (sector - first_bad);
3701                                         if (max_sync > bad_sectors)
3702                                                 max_sync = bad_sectors;
3703                                         rcu_read_unlock();
3704                                         continue;
3705                                 }
3706                         }
3707                         atomic_inc(&rdev->nr_pending);
3708                         atomic_inc(&r10_bio->remaining);
3709                         bio->bi_next = biolist;
3710                         biolist = bio;
3711                         bio->bi_end_io = end_sync_read;
3712                         bio_set_op_attrs(bio, REQ_OP_READ, 0);
3713                         if (test_bit(FailFast, &rdev->flags))
3714                                 bio->bi_opf |= MD_FAILFAST;
3715                         bio->bi_iter.bi_sector = sector + rdev->data_offset;
3716                         bio_set_dev(bio, rdev->bdev);
3717                         count++;
3718
3719                         rdev = rcu_dereference(conf->mirrors[d].replacement);
3720                         if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3721                                 rcu_read_unlock();
3722                                 continue;
3723                         }
3724                         atomic_inc(&rdev->nr_pending);
3725
3726                         /* Need to set up for writing to the replacement */
3727                         bio = r10_bio->devs[i].repl_bio;
3728                         bio->bi_status = BLK_STS_IOERR;
3729
3730                         sector = r10_bio->devs[i].addr;
3731                         bio->bi_next = biolist;
3732                         biolist = bio;
3733                         bio->bi_end_io = end_sync_write;
3734                         bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3735                         if (test_bit(FailFast, &rdev->flags))
3736                                 bio->bi_opf |= MD_FAILFAST;
3737                         bio->bi_iter.bi_sector = sector + rdev->data_offset;
3738                         bio_set_dev(bio, rdev->bdev);
3739                         count++;
3740                         rcu_read_unlock();
3741                 }
3742
3743                 if (count < 2) {
3744                         for (i = 0; i < conf->copies; i++) {
3745                                 int d = r10_bio->devs[i].devnum;
3746                                 if (r10_bio->devs[i].bio->bi_end_io)
3747                                         rdev_dec_pending(conf->mirrors[d].rdev,
3748                                                          mddev);
3749                                 if (r10_bio->devs[i].repl_bio &&
3750                                     r10_bio->devs[i].repl_bio->bi_end_io)
3751                                         rdev_dec_pending(
3752                                                 conf->mirrors[d].replacement,
3753                                                 mddev);
3754                         }
3755                         put_buf(r10_bio);
3756                         biolist = NULL;
3757                         goto giveup;
3758                 }
3759         }
3760
3761         nr_sectors = 0;
3762         if (sector_nr + max_sync < max_sector)
3763                 max_sector = sector_nr + max_sync;
3764         do {
3765                 struct page *page;
3766                 int len = PAGE_SIZE;
3767                 if (sector_nr + (len>>9) > max_sector)
3768                         len = (max_sector - sector_nr) << 9;
3769                 if (len == 0)
3770                         break;
3771                 for (bio = biolist; bio; bio = bio->bi_next) {
3772                         struct resync_pages *rp = get_resync_pages(bio);
3773                         page = resync_fetch_page(rp, page_idx);
3774                         /*
3775                          * won't fail because the vec table is big enough
3776                          * to hold all these pages
3777                          */
3778                         bio_add_page(bio, page, len, 0);
3779                 }
3780                 nr_sectors += len>>9;
3781                 sector_nr += len>>9;
3782         } while (++page_idx < RESYNC_PAGES);
3783         r10_bio->sectors = nr_sectors;
3784
3785         if (mddev_is_clustered(mddev) &&
3786             test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3787                 /* This is a resync, not recovery */
3788                 if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3789                         conf->cluster_sync_low = mddev->curr_resync_completed;
3790                         raid10_set_cluster_sync_high(conf);
3791                         /* Send resync message */
3792                         md_cluster_ops->resync_info_update(mddev,
3793                                                 conf->cluster_sync_low,
3794                                                 conf->cluster_sync_high);
3795                 }
3796         } else if (mddev_is_clustered(mddev)) {
3797                 /* This is recovery, not resync */
3798                 sector_t sect_va1, sect_va2;
3799                 bool broadcast_msg = false;
3800
3801                 for (i = 0; i < conf->geo.raid_disks; i++) {
3802                         /*
3803                          * sector_nr is a device address for recovery, so we
3804                          * need to translate it to an array address before
3805                          * comparing it with cluster_sync_high.
3806                          */
3807                         sect_va1 = raid10_find_virt(conf, sector_nr, i);
3808
3809                         if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3810                                 broadcast_msg = true;
3811                                 /*
3812                                  * curr_resync_completed is similar to
3813                                  * sector_nr, so translate it too.
3814                                  */
3815                                 sect_va2 = raid10_find_virt(conf,
3816                                         mddev->curr_resync_completed, i);
3817
3818                                 if (conf->cluster_sync_low == 0 ||
3819                                     conf->cluster_sync_low > sect_va2)
3820                                         conf->cluster_sync_low = sect_va2;
3821                         }
3822                 }
3823                 if (broadcast_msg) {
3824                         raid10_set_cluster_sync_high(conf);
3825                         md_cluster_ops->resync_info_update(mddev,
3826                                                 conf->cluster_sync_low,
3827                                                 conf->cluster_sync_high);
3828                 }
3829         }
3830
3831         while (biolist) {
3832                 bio = biolist;
3833                 biolist = biolist->bi_next;
3834
3835                 bio->bi_next = NULL;
3836                 r10_bio = get_resync_r10bio(bio);
3837                 r10_bio->sectors = nr_sectors;
3838
3839                 if (bio->bi_end_io == end_sync_read) {
3840                         md_sync_acct_bio(bio, nr_sectors);
3841                         bio->bi_status = 0;
3842                         submit_bio_noacct(bio);
3843                 }
3844         }
3845
3846         if (sectors_skipped)
3847                 /* pretend they weren't skipped, it makes
3848                  * no important difference in this case
3849                  */
3850                 md_done_sync(mddev, sectors_skipped, 1);
3851
3852         return sectors_skipped + nr_sectors;
3853  giveup:
3854         /* There is nowhere to write: all non-sync
3855          * drives must be failed or in resync, or all drives
3856          * have a bad block here, so try the next chunk...
3857          */
3858         if (sector_nr + max_sync < max_sector)
3859                 max_sector = sector_nr + max_sync;
3860
3861         sectors_skipped += (max_sector - sector_nr);
3862         chunks_skipped++;
3863         sector_nr = max_sector;
3864         goto skipped;
3865 }
3866
3867 static sector_t
3868 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3869 {
3870         sector_t size;
3871         struct r10conf *conf = mddev->private;
3872
3873         if (!raid_disks)
3874                 raid_disks = min(conf->geo.raid_disks,
3875                                  conf->prev.raid_disks);
3876         if (!sectors)
3877                 sectors = conf->dev_sectors;
3878
3879         size = sectors >> conf->geo.chunk_shift;
3880         sector_div(size, conf->geo.far_copies);
3881         size = size * raid_disks;
3882         sector_div(size, conf->geo.near_copies);
3883
3884         return size << conf->geo.chunk_shift;
3885 }
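
     /*
      * A worked example of the size arithmetic above (illustrative values):
      * with raid_disks = 4, near_copies = 2, far_copies = 1 and 1024-sector
      * chunks (chunk_shift = 10), sectors = 1048576 per device gives 1024
      * chunks per device, 1024 * 4 = 4096 chunk slots across the array, and
      * 4096 / 2 = 2048 data chunks, so the array size is 2048 << 10 =
      * 2097152 sectors -- half the raw capacity, as expected when every
      * chunk is stored twice.
      */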
3886
3887 static void calc_sectors(struct r10conf *conf, sector_t size)
3888 {
3889         /* Calculate the number of sectors-per-device that will
3890          * actually be used, and set conf->dev_sectors and
3891          * conf->geo.stride.
3892          */
3893
3894         size = size >> conf->geo.chunk_shift;
3895         sector_div(size, conf->geo.far_copies);
3896         size = size * conf->geo.raid_disks;
3897         sector_div(size, conf->geo.near_copies);
3898         /* 'size' is now the number of chunks in the array */
3899         /* calculate "used chunks per device" */
3900         size = size * conf->copies;
3901
3902         /* We need to round up when dividing by raid_disks to
3903          * get the stride size.
3904          */
3905         size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3906
3907         conf->dev_sectors = size << conf->geo.chunk_shift;
3908
3909         if (conf->geo.far_offset)
3910                 conf->geo.stride = 1 << conf->geo.chunk_shift;
3911         else {
3912                 sector_div(size, conf->geo.far_copies);
3913                 conf->geo.stride = size << conf->geo.chunk_shift;
3914         }
3915 }
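
     /*
      * Continuing the example above (illustrative values): calling
      * calc_sectors() with 4 disks, near_copies = 2, far_copies = 1,
      * chunk_shift = 10 and size = 1048576 sectors per device yields 1024
      * chunks per device, 4096 chunk slots, and 2048 data chunks in the
      * array; 2048 chunks * 2 copies rounded up over 4 disks is 1024 used
      * chunks per device, so dev_sectors = 1024 << 10 = 1048576 and, with
      * far_copies = 1 and no far_offset, stride = dev_sectors.
      */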
3916
3917 enum geo_type {geo_new, geo_old, geo_start};
3918 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3919 {
3920         int nc, fc, fo;
3921         int layout, chunk, disks;
3922         switch (new) {
3923         case geo_old:
3924                 layout = mddev->layout;
3925                 chunk = mddev->chunk_sectors;
3926                 disks = mddev->raid_disks - mddev->delta_disks;
3927                 break;
3928         case geo_new:
3929                 layout = mddev->new_layout;
3930                 chunk = mddev->new_chunk_sectors;
3931                 disks = mddev->raid_disks;
3932                 break;
3933         default: /* avoid 'may be unused' warnings */
3934         case geo_start: /* new when starting reshape - raid_disks not
3935                          * updated yet. */
3936                 layout = mddev->new_layout;
3937                 chunk = mddev->new_chunk_sectors;
3938                 disks = mddev->raid_disks + mddev->delta_disks;
3939                 break;
3940         }
3941         if (layout >> 19)
3942                 return -1;
3943         if (chunk < (PAGE_SIZE >> 9) ||
3944             !is_power_of_2(chunk))
3945                 return -2;
3946         nc = layout & 255;
3947         fc = (layout >> 8) & 255;
3948         fo = layout & (1<<16);
3949         geo->raid_disks = disks;
3950         geo->near_copies = nc;
3951         geo->far_copies = fc;
3952         geo->far_offset = fo;
3953         switch (layout >> 17) {
3954         case 0: /* original layout.  simple but not always optimal */
3955                 geo->far_set_size = disks;
3956                 break;
3957         case 1: /* "improved" layout which was buggy.  Hopefully no-one is
3958                  * actually using this, but leave code here just in case.*/
3959                 geo->far_set_size = disks/fc;
3960                 WARN(geo->far_set_size < fc,
3961                      "This RAID10 layout does not provide data safety - please backup and create new array\n");
3962                 break;
3963         case 2: /* "improved" layout fixed to match documentation */
3964                 geo->far_set_size = fc * nc;
3965                 break;
3966         default: /* Not a valid layout */
3967                 return -1;
3968         }
3969         geo->chunk_mask = chunk - 1;
3970         geo->chunk_shift = ffz(~chunk);
3971         return nc*fc;
3972 }
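
     /*
      * Layout decoding example (illustrative): the common "near=2" layout
      * is 0x102 -- near_copies = 0x102 & 255 = 2, far_copies =
      * (0x102 >> 8) & 255 = 1, far_offset = bit 16 = 0, and layout >> 17
      * == 0 selects the original far-set geometry, so setup_geo() returns
      * nc * fc = 2.  For a 1024-sector chunk, chunk_mask = 1023 and
      * chunk_shift = ffz(~1024) = 10.
      */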
3973
3974 static struct r10conf *setup_conf(struct mddev *mddev)
3975 {
3976         struct r10conf *conf = NULL;
3977         int err = -EINVAL;
3978         struct geom geo;
3979         int copies;
3980
3981         copies = setup_geo(&geo, mddev, geo_new);
3982
3983         if (copies == -2) {
3984                 pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
3985                         mdname(mddev), PAGE_SIZE);
3986                 goto out;
3987         }
3988
3989         if (copies < 2 || copies > mddev->raid_disks) {
3990                 pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%08x\n",
3991                         mdname(mddev), mddev->new_layout);
3992                 goto out;
3993         }
3994
3995         err = -ENOMEM;
3996         conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3997         if (!conf)
3998                 goto out;
3999
4000         /* FIXME calc properly */
4001         conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
4002                                 sizeof(struct raid10_info),
4003                                 GFP_KERNEL);
4004         if (!conf->mirrors)
4005                 goto out;
4006
4007         conf->tmppage = alloc_page(GFP_KERNEL);
4008         if (!conf->tmppage)
4009                 goto out;
4010
4011         conf->geo = geo;
4012         conf->copies = copies;
4013         err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
4014                            rbio_pool_free, conf);
4015         if (err)
4016                 goto out;
4017
4018         err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
4019         if (err)
4020                 goto out;
4021
4022         calc_sectors(conf, mddev->dev_sectors);
4023         if (mddev->reshape_position == MaxSector) {
4024                 conf->prev = conf->geo;
4025                 conf->reshape_progress = MaxSector;
4026         } else {
4027                 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
4028                         err = -EINVAL;
4029                         goto out;
4030                 }
4031                 conf->reshape_progress = mddev->reshape_position;
4032                 if (conf->prev.far_offset)
4033                         conf->prev.stride = 1 << conf->prev.chunk_shift;
4034                 else
4035                         /* far_copies must be 1 */
4036                         conf->prev.stride = conf->dev_sectors;
4037         }
4038         conf->reshape_safe = conf->reshape_progress;
4039         spin_lock_init(&conf->device_lock);
4040         INIT_LIST_HEAD(&conf->retry_list);
4041         INIT_LIST_HEAD(&conf->bio_end_io_list);
4042
4043         spin_lock_init(&conf->resync_lock);
4044         init_waitqueue_head(&conf->wait_barrier);
4045         atomic_set(&conf->nr_pending, 0);
4046
4047         err = -ENOMEM;
4048         conf->thread = md_register_thread(raid10d, mddev, "raid10");
4049         if (!conf->thread)
4050                 goto out;
4051
4052         conf->mddev = mddev;
4053         return conf;
4054
4055  out:
4056         if (conf) {
4057                 mempool_exit(&conf->r10bio_pool);
4058                 kfree(conf->mirrors);
4059                 safe_put_page(conf->tmppage);
4060                 bioset_exit(&conf->bio_split);
4061                 kfree(conf);
4062         }
4063         return ERR_PTR(err);
4064 }
4065
4066 static void raid10_set_io_opt(struct r10conf *conf)
4067 {
4068         int raid_disks = conf->geo.raid_disks;
4069
4070         if (!(conf->geo.raid_disks % conf->geo.near_copies))
4071                 raid_disks /= conf->geo.near_copies;
4072         blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
4073                          raid_disks);
4074 }
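
     /*
      * For example (illustrative values): a near=2 layout over 4 devices
      * with 512-sector chunks advertises io_opt = (512 << 9) * (4 / 2)
      * bytes, i.e. one stripe of independent data chunks.
      */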
4075
4076 static int raid10_run(struct mddev *mddev)
4077 {
4078         struct r10conf *conf;
4079         int i, disk_idx;
4080         struct raid10_info *disk;
4081         struct md_rdev *rdev;
4082         sector_t size;
4083         sector_t min_offset_diff = 0;
4084         int first = 1;
4085
4086         if (mddev_init_writes_pending(mddev) < 0)
4087                 return -ENOMEM;
4088
4089         if (mddev->private == NULL) {
4090                 conf = setup_conf(mddev);
4091                 if (IS_ERR(conf))
4092                         return PTR_ERR(conf);
4093                 mddev->private = conf;
4094         }
4095         conf = mddev->private;
4096         if (!conf)
4097                 goto out;
4098
4099         if (mddev_is_clustered(conf->mddev)) {
4100                 int fc, fo;
4101
4102                 fc = (mddev->layout >> 8) & 255;
4103                 fo = mddev->layout & (1<<16);
4104                 if (fc > 1 || fo > 0) {
4105                         pr_err("only near layout is supported by clustered raid10\n");
4107                         goto out_free_conf;
4108                 }
4109         }
4110
4111         mddev->thread = conf->thread;
4112         conf->thread = NULL;
4113
4114         if (mddev->queue) {
4115                 blk_queue_max_discard_sectors(mddev->queue,
4116                                               UINT_MAX);
4117                 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
4118                 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
4119                 raid10_set_io_opt(conf);
4120         }
4121
4122         rdev_for_each(rdev, mddev) {
4123                 long long diff;
4124
4125                 disk_idx = rdev->raid_disk;
4126                 if (disk_idx < 0)
4127                         continue;
4128                 if (disk_idx >= conf->geo.raid_disks &&
4129                     disk_idx >= conf->prev.raid_disks)
4130                         continue;
4131                 disk = conf->mirrors + disk_idx;
4132
4133                 if (test_bit(Replacement, &rdev->flags)) {
4134                         if (disk->replacement)
4135                                 goto out_free_conf;
4136                         disk->replacement = rdev;
4137                 } else {
4138                         if (disk->rdev)
4139                                 goto out_free_conf;
4140                         disk->rdev = rdev;
4141                 }
4142                 diff = (rdev->new_data_offset - rdev->data_offset);
4143                 if (!mddev->reshape_backwards)
4144                         diff = -diff;
4145                 if (diff < 0)
4146                         diff = 0;
4147                 if (first || diff < min_offset_diff)
4148                         min_offset_diff = diff;
4149
4150                 if (mddev->gendisk)
4151                         disk_stack_limits(mddev->gendisk, rdev->bdev,
4152                                           rdev->data_offset << 9);
4153
4154                 disk->head_position = 0;
4155                 first = 0;
4156         }
4157
4158         /* need to check that every block has at least one working mirror */
4159         if (!enough(conf, -1)) {
4160                 pr_err("md/raid10:%s: not enough operational mirrors.\n",
4161                        mdname(mddev));
4162                 goto out_free_conf;
4163         }
4164
4165         if (conf->reshape_progress != MaxSector) {
4166                 /* must ensure that shape change is supported */
4167                 if (conf->geo.far_copies != 1 &&
4168                     conf->geo.far_offset == 0)
4169                         goto out_free_conf;
4170                 if (conf->prev.far_copies != 1 &&
4171                     conf->prev.far_offset == 0)
4172                         goto out_free_conf;
4173         }
4174
4175         mddev->degraded = 0;
4176         for (i = 0;
4177              i < conf->geo.raid_disks
4178                      || i < conf->prev.raid_disks;
4179              i++) {
4180
4181                 disk = conf->mirrors + i;
4182
4183                 if (!disk->rdev && disk->replacement) {
4184                         /* The replacement is all we have - use it */
4185                         disk->rdev = disk->replacement;
4186                         disk->replacement = NULL;
4187                         clear_bit(Replacement, &disk->rdev->flags);
4188                 }
4189
4190                 if (!disk->rdev ||
4191                     !test_bit(In_sync, &disk->rdev->flags)) {
4192                         disk->head_position = 0;
4193                         mddev->degraded++;
4194                         if (disk->rdev &&
4195                             disk->rdev->saved_raid_disk < 0)
4196                                 conf->fullsync = 1;
4197                 }
4198
4199                 if (disk->replacement &&
4200                     !test_bit(In_sync, &disk->replacement->flags) &&
4201                     disk->replacement->saved_raid_disk < 0) {
4202                         conf->fullsync = 1;
4203                 }
4204
4205                 disk->recovery_disabled = mddev->recovery_disabled - 1;
4206         }
4207
4208         if (mddev->recovery_cp != MaxSector)
4209                 pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
4210                           mdname(mddev));
4211         pr_info("md/raid10:%s: active with %d out of %d devices\n",
4212                 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
4213                 conf->geo.raid_disks);
4214         /*
4215          * Ok, everything is just fine now
4216          */
4217         mddev->dev_sectors = conf->dev_sectors;
4218         size = raid10_size(mddev, 0, 0);
4219         md_set_array_sectors(mddev, size);
4220         mddev->resync_max_sectors = size;
4221         set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
4222
4223         if (md_integrity_register(mddev))
4224                 goto out_free_conf;
4225
4226         if (conf->reshape_progress != MaxSector) {
4227                 unsigned long before_length, after_length;
4228
4229                 before_length = ((1 << conf->prev.chunk_shift) *
4230                                  conf->prev.far_copies);
4231                 after_length = ((1 << conf->geo.chunk_shift) *
4232                                 conf->geo.far_copies);
4233
4234                 if (max(before_length, after_length) > min_offset_diff) {
4235                         /* This cannot work */
4236                         pr_warn("md/raid10: offset difference not enough to continue reshape\n");
4237                         goto out_free_conf;
4238                 }
4239                 conf->offset_diff = min_offset_diff;
4240
4241                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4242                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4243                 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4244                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4245                 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4246                                                         "reshape");
4247                 if (!mddev->sync_thread)
4248                         goto out_free_conf;
4249         }
4250
4251         return 0;
4252
4253 out_free_conf:
4254         md_unregister_thread(&mddev->thread);
4255         mempool_exit(&conf->r10bio_pool);
4256         safe_put_page(conf->tmppage);
4257         kfree(conf->mirrors);
4258         kfree(conf);
4259         mddev->private = NULL;
4260 out:
4261         return -EIO;
4262 }
4263
4264 static void raid10_free(struct mddev *mddev, void *priv)
4265 {
4266         struct r10conf *conf = priv;
4267
4268         mempool_exit(&conf->r10bio_pool);
4269         safe_put_page(conf->tmppage);
4270         kfree(conf->mirrors);
4271         kfree(conf->mirrors_old);
4272         kfree(conf->mirrors_new);
4273         bioset_exit(&conf->bio_split);
4274         kfree(conf);
4275 }
4276
4277 static void raid10_quiesce(struct mddev *mddev, int quiesce)
4278 {
4279         struct r10conf *conf = mddev->private;
4280
4281         if (quiesce)
4282                 raise_barrier(conf, 0);
4283         else
4284                 lower_barrier(conf);
4285 }
4286
4287 static int raid10_resize(struct mddev *mddev, sector_t sectors)
4288 {
4289         /* Resize of 'far' arrays is not supported.
4290          * For 'near' and 'offset' arrays we can set the
4291          * number of sectors used to be an appropriate multiple
4292          * of the chunk size.
4293          * For 'offset', this is far_copies*chunksize.
4294          * For 'near' the multiplier is the LCM of
4295          * near_copies and raid_disks.
4296          * So if far_copies > 1 && !far_offset, fail.
4297          * Else find LCM(raid_disks, near_copies)*far_copies and
4298          * multiply by chunk_size.  Then round to this number.
4299          * This is mostly done by raid10_size()
4300          */
4301         struct r10conf *conf = mddev->private;
4302         sector_t oldsize, size;
4303
4304         if (mddev->reshape_position != MaxSector)
4305                 return -EBUSY;
4306
4307         if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
4308                 return -EINVAL;
4309
4310         oldsize = raid10_size(mddev, 0, 0);
4311         size = raid10_size(mddev, sectors, 0);
4312         if (mddev->external_size &&
4313             mddev->array_sectors > size)
4314                 return -EINVAL;
4315         if (mddev->bitmap) {
4316                 int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
4317                 if (ret)
4318                         return ret;
4319         }
4320         md_set_array_sectors(mddev, size);
4321         if (sectors > mddev->dev_sectors &&
4322             mddev->recovery_cp > oldsize) {
4323                 mddev->recovery_cp = oldsize;
4324                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4325         }
4326         calc_sectors(conf, sectors);
4327         mddev->dev_sectors = conf->dev_sectors;
4328         mddev->resync_max_sectors = size;
4329         return 0;
4330 }
4331
4332 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4333 {
4334         struct md_rdev *rdev;
4335         struct r10conf *conf;
4336
4337         if (mddev->degraded > 0) {
4338                 pr_warn("md/raid10:%s: Error: degraded raid0!\n",
4339                         mdname(mddev));
4340                 return ERR_PTR(-EINVAL);
4341         }
4342         sector_div(size, devs);
4343
4344         /* Set new parameters */
4345         mddev->new_level = 10;
4346         /* new layout: far_copies = 1, near_copies = 2 */
4347         mddev->new_layout = (1<<8) + 2;
4348         mddev->new_chunk_sectors = mddev->chunk_sectors;
4349         mddev->delta_disks = mddev->raid_disks;
4350         mddev->raid_disks *= 2;
4351         /* make sure it will not be marked as dirty */
4352         mddev->recovery_cp = MaxSector;
4353         mddev->dev_sectors = size;
4354
4355         conf = setup_conf(mddev);
4356         if (!IS_ERR(conf)) {
4357                 rdev_for_each(rdev, mddev)
4358                         if (rdev->raid_disk >= 0) {
4359                                 rdev->new_raid_disk = rdev->raid_disk * 2;
4360                                 rdev->sectors = size;
4361                         }
4362                 conf->barrier = 1;
4363         }
4364
4365         return conf;
4366 }
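
     /*
      * A rough sketch of what this takeover does (illustrative): a
      * two-drive single-zone raid0 becomes a four-slot raid10 with layout
      * 0x102 (near_copies = 2, far_copies = 1).  Each original drive keeps
      * its data but moves to an even slot (raid_disk * 2); the odd slots
      * are the mirror positions, to be filled by normal recovery once
      * spares are added.
      */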
4367
4368 static void *raid10_takeover(struct mddev *mddev)
4369 {
4370         struct r0conf *raid0_conf;
4371
4372         /* raid10 can take over:
4373          *  raid0 - provided it has only one zone
4374          */
4375         if (mddev->level == 0) {
4376                 /* for raid0 takeover only one zone is supported */
4377                 raid0_conf = mddev->private;
4378                 if (raid0_conf->nr_strip_zones > 1) {
4379                         pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
4380                                 mdname(mddev));
4381                         return ERR_PTR(-EINVAL);
4382                 }
4383                 return raid10_takeover_raid0(mddev,
4384                         raid0_conf->strip_zone->zone_end,
4385                         raid0_conf->strip_zone->nb_dev);
4386         }
4387         return ERR_PTR(-EINVAL);
4388 }
4389
4390 static int raid10_check_reshape(struct mddev *mddev)
4391 {
4392         /* Called when there is a request to change
4393          * - layout (to ->new_layout)
4394          * - chunk size (to ->new_chunk_sectors)
4395          * - raid_disks (by delta_disks)
4396          * or when trying to restart a reshape that was ongoing.
4397          *
4398          * We need to validate the request and possibly allocate
4399          * space if that might be an issue later.
4400          *
4401          * Currently we reject any reshape of a 'far' mode array,
4402          * allow chunk size to change if new is generally acceptable,
4403          * allow raid_disks to increase, and allow
4404          * a switch between 'near' mode and 'offset' mode.
4405          */
4406         struct r10conf *conf = mddev->private;
4407         struct geom geo;
4408
4409         if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4410                 return -EINVAL;
4411
4412         if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4413                 /* mustn't change number of copies */
4414                 return -EINVAL;
4415         if (geo.far_copies > 1 && !geo.far_offset)
4416                 /* Cannot switch to 'far' mode */
4417                 return -EINVAL;
4418
4419         if (mddev->array_sectors & geo.chunk_mask)
4420                 /* not a factor of the array size */
4421                 return -EINVAL;
4422
4423         if (!enough(conf, -1))
4424                 return -EINVAL;
4425
4426         kfree(conf->mirrors_new);
4427         conf->mirrors_new = NULL;
4428         if (mddev->delta_disks > 0) {
4429                 /* allocate new 'mirrors' list */
4430                 conf->mirrors_new =
4431                         kcalloc(mddev->raid_disks + mddev->delta_disks,
4432                                 sizeof(struct raid10_info),
4433                                 GFP_KERNEL);
4434                 if (!conf->mirrors_new)
4435                         return -ENOMEM;
4436         }
4437         return 0;
4438 }
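
     /*
      * Example of the geometry check above (illustrative): converting
      * near=2 (layout 0x102) to offset=2 (layout 0x10201) keeps
      * nc * fc == 2, so the copy count is unchanged and only the on-disk
      * arrangement moves; requesting far=2 (layout 0x201, far_offset bit
      * clear) would be rejected as a switch to 'far' mode.
      */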
4439
4440 /*
4441  * Need to check if array has failed when deciding whether to:
4442  *  - start an array
4443  *  - remove non-faulty devices
4444  *  - add a spare
4445  *  - allow a reshape
4446  * This determination is simple when no reshape is happening.
4447  * However, if there is a reshape, we need to carefully check
4448  * both the before and after sections.
4449  * This is because some failed devices may only affect one
4450  * of the two sections, and some non-in_sync devices may
4451  * be In_sync in the section most affected by failed devices.
4452  */
4453 static int calc_degraded(struct r10conf *conf)
4454 {
4455         int degraded, degraded2;
4456         int i;
4457
4458         rcu_read_lock();
4459         degraded = 0;
4460         /* 'prev' section first */
4461         for (i = 0; i < conf->prev.raid_disks; i++) {
4462                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4463                 if (!rdev || test_bit(Faulty, &rdev->flags))
4464                         degraded++;
4465                 else if (!test_bit(In_sync, &rdev->flags))
4466                         /* When we can reduce the number of devices in
4467                          * an array, this might not contribute to
4468                          * 'degraded'.  It does now.
4469                          */
4470                         degraded++;
4471         }
4472         rcu_read_unlock();
4473         if (conf->geo.raid_disks == conf->prev.raid_disks)
4474                 return degraded;
4475         rcu_read_lock();
4476         degraded2 = 0;
4477         for (i = 0; i < conf->geo.raid_disks; i++) {
4478                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4479                 if (!rdev || test_bit(Faulty, &rdev->flags))
4480                         degraded2++;
4481                 else if (!test_bit(In_sync, &rdev->flags)) {
4482                         /* If reshape is increasing the number of devices,
4483                          * this section has already been recovered, so
4484                          * it doesn't contribute to degraded.
4485                          * else it does.
4486                          */
4487                         if (conf->geo.raid_disks <= conf->prev.raid_disks)
4488                                 degraded2++;
4489                 }
4490         }
4491         rcu_read_unlock();
4492         if (degraded2 > degraded)
4493                 return degraded2;
4494         return degraded;
4495 }
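
     /*
      * For example (illustrative): while growing from 2 to 4 devices, the
      * two new disks are still recovering and hence not In_sync, but they
      * do not count as degraded in the 'geo' pass because the grown
      * section is only used after it has been populated; on a shrinking
      * reshape the same state would count.  The worse of the two passes is
      * returned.
      */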
4496
4497 static int raid10_start_reshape(struct mddev *mddev)
4498 {
4499         /* A 'reshape' has been requested. This commits
4500          * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
4501          * This also checks if there are enough spares and adds them
4502          * to the array.
4503          * We currently require enough spares to make the final
4504          * array non-degraded.  We also require that the difference
4505          * between old and new data_offset - on each device - is
4506          * enough that we never risk over-writing.
4507          */
4508
4509         unsigned long before_length, after_length;
4510         sector_t min_offset_diff = 0;
4511         int first = 1;
4512         struct geom new;
4513         struct r10conf *conf = mddev->private;
4514         struct md_rdev *rdev;
4515         int spares = 0;
4516         int ret;
4517
4518         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4519                 return -EBUSY;
4520
4521         if (setup_geo(&new, mddev, geo_start) != conf->copies)
4522                 return -EINVAL;
4523
4524         before_length = ((1 << conf->prev.chunk_shift) *
4525                          conf->prev.far_copies);
4526         after_length = ((1 << conf->geo.chunk_shift) *
4527                         conf->geo.far_copies);
4528
4529         rdev_for_each(rdev, mddev) {
4530                 if (!test_bit(In_sync, &rdev->flags)
4531                     && !test_bit(Faulty, &rdev->flags))
4532                         spares++;
4533                 if (rdev->raid_disk >= 0) {
4534                         long long diff = (rdev->new_data_offset
4535                                           - rdev->data_offset);
4536                         if (!mddev->reshape_backwards)
4537                                 diff = -diff;
4538                         if (diff < 0)
4539                                 diff = 0;
4540                         if (first || diff < min_offset_diff)
4541                                 min_offset_diff = diff;
4542                         first = 0;
4543                 }
4544         }
4545
4546         if (max(before_length, after_length) > min_offset_diff)
4547                 return -EINVAL;
4548
4549         if (spares < mddev->delta_disks)
4550                 return -EINVAL;
4551
4552         conf->offset_diff = min_offset_diff;
4553         spin_lock_irq(&conf->device_lock);
4554         if (conf->mirrors_new) {
4555                 memcpy(conf->mirrors_new, conf->mirrors,
4556                        sizeof(struct raid10_info)*conf->prev.raid_disks);
4557                 smp_mb();
4558                 kfree(conf->mirrors_old);
4559                 conf->mirrors_old = conf->mirrors;
4560                 conf->mirrors = conf->mirrors_new;
4561                 conf->mirrors_new = NULL;
4562         }
4563         setup_geo(&conf->geo, mddev, geo_start);
4564         smp_mb();
4565         if (mddev->reshape_backwards) {
4566                 sector_t size = raid10_size(mddev, 0, 0);
4567                 if (size < mddev->array_sectors) {
4568                         spin_unlock_irq(&conf->device_lock);
4569                         pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
4570                                 mdname(mddev));
4571                         return -EINVAL;
4572                 }
4573                 mddev->resync_max_sectors = size;
4574                 conf->reshape_progress = size;
4575         } else
4576                 conf->reshape_progress = 0;
4577         conf->reshape_safe = conf->reshape_progress;
4578         spin_unlock_irq(&conf->device_lock);
4579
4580         if (mddev->delta_disks && mddev->bitmap) {
4581                 struct mdp_superblock_1 *sb = NULL;
4582                 sector_t oldsize, newsize;
4583
4584                 oldsize = raid10_size(mddev, 0, 0);
4585                 newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4586
4587                 if (!mddev_is_clustered(mddev)) {
4588                         ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4589                         if (ret)
4590                                 goto abort;
4591                         else
4592                                 goto out;
4593                 }
4594
4595                 rdev_for_each(rdev, mddev) {
4596                         if (rdev->raid_disk > -1 &&
4597                             !test_bit(Faulty, &rdev->flags))
4598                                 sb = page_address(rdev->sb_page);
4599                 }
4600
4601                 /*
4602                  * Some node is already performing a reshape, so there is
4603                  * no need to call md_bitmap_resize again, since it will be
4604                  * called when the BITMAP_RESIZE msg is received.
4605                  */
4606                 if ((sb && (le32_to_cpu(sb->feature_map) &
4607                             MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
4608                         goto out;
4609
4610                 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4611                 if (ret)
4612                         goto abort;
4613
4614                 ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4615                 if (ret) {
4616                         md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
4617                         goto abort;
4618                 }
4619         }
4620 out:
4621         if (mddev->delta_disks > 0) {
4622                 rdev_for_each(rdev, mddev)
4623                         if (rdev->raid_disk < 0 &&
4624                             !test_bit(Faulty, &rdev->flags)) {
4625                                 if (raid10_add_disk(mddev, rdev) == 0) {
4626                                         if (rdev->raid_disk >=
4627                                             conf->prev.raid_disks)
4628                                                 set_bit(In_sync, &rdev->flags);
4629                                         else
4630                                                 rdev->recovery_offset = 0;
4631
4632                                         /* Failure here is OK */
4633                                         sysfs_link_rdev(mddev, rdev);
4634                                 }
4635                         } else if (rdev->raid_disk >= conf->prev.raid_disks
4636                                    && !test_bit(Faulty, &rdev->flags)) {
4637                                 /* This is a spare that was manually added */
4638                                 set_bit(In_sync, &rdev->flags);
4639                         }
4640         }
4641         /* When a reshape changes the number of devices,
4642          * ->degraded is measured against the larger of the
4643  * pre and post numbers.
4644          */
4645         spin_lock_irq(&conf->device_lock);
4646         mddev->degraded = calc_degraded(conf);
4647         spin_unlock_irq(&conf->device_lock);
4648         mddev->raid_disks = conf->geo.raid_disks;
4649         mddev->reshape_position = conf->reshape_progress;
4650         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4651
4652         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4653         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4654         clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4655         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4656         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4657
4658         mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4659                                                 "reshape");
4660         if (!mddev->sync_thread) {
4661                 ret = -EAGAIN;
4662                 goto abort;
4663         }
4664         conf->reshape_checkpoint = jiffies;
4665         md_wakeup_thread(mddev->sync_thread);
4666         md_new_event();
4667         return 0;
4668
4669 abort:
4670         mddev->recovery = 0;
4671         spin_lock_irq(&conf->device_lock);
4672         conf->geo = conf->prev;
4673         mddev->raid_disks = conf->geo.raid_disks;
4674         rdev_for_each(rdev, mddev)
4675                 rdev->new_data_offset = rdev->data_offset;
4676         smp_wmb();
4677         conf->reshape_progress = MaxSector;
4678         conf->reshape_safe = MaxSector;
4679         mddev->reshape_position = MaxSector;
4680         spin_unlock_irq(&conf->device_lock);
4681         return ret;
4682 }
4683
4684 /* Calculate the last device-address that could contain
4685  * any block from the chunk that includes the array-address 's'
4686  * and report the next address.
4687  * i.e. the address returned will be chunk-aligned and after
4688  * any data that is in the chunk containing 's'.
4689  */
4690 static sector_t last_dev_address(sector_t s, struct geom *geo)
4691 {
4692         s = (s | geo->chunk_mask) + 1;
4693         s >>= geo->chunk_shift;
4694         s *= geo->near_copies;
4695         s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4696         s *= geo->far_copies;
4697         s <<= geo->chunk_shift;
4698         return s;
4699 }
4700
4701 /* Calculate the first device-address that could contain
4702  * any block from the chunk that includes the array-address 's'.
4703  * This too will be the start of a chunk
4704  */
4705 static sector_t first_dev_address(sector_t s, struct geom *geo)
4706 {
4707         s >>= geo->chunk_shift;
4708         s *= geo->near_copies;
4709         sector_div(s, geo->raid_disks);
4710         s *= geo->far_copies;
4711         s <<= geo->chunk_shift;
4712         return s;
4713 }
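
     /*
      * Worked example for the two helpers above (illustrative values):
      * with near_copies = 2, far_copies = 1, raid_disks = 4 and
      * 1024-sector chunks, array address s = 5000 lies in chunk 4.
      * last_dev_address() rounds up: ((5000 | 1023) + 1) >> 10 = 5 chunks,
      * * 2 copies = 10, divided over 4 disks and rounded up = 3, giving
      * 3 << 10 = 3072.  first_dev_address() rounds down:
      * (5000 >> 10) * 2 / 4 = 2 chunks, giving 2048.
      */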
4714
4715 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4716                                 int *skipped)
4717 {
4718         /* We simply copy at most one chunk (smallest of old and new)
4719          * at a time, possibly less if that exceeds RESYNC_PAGES,
4720          * or we hit a bad block or something.
4721          * This might mean we pause for normal IO in the middle of
4722          * a chunk, but that is not a problem as mddev->reshape_position
4723          * can record any location.
4724          *
4725          * If we will want to write to a location that isn't
4726          * yet recorded as 'safe' (i.e. in metadata on disk) then
4727          * we need to flush all reshape requests and update the metadata.
4728          *
4729          * When reshaping forwards (e.g. to more devices), we interpret
4730          * 'safe' as the earliest block which might not have been copied
4731          * down yet.  We divide this by previous stripe size and multiply
4732          * by previous stripe length to get lowest device offset that we
4733          * cannot write to yet.
4734          * We interpret 'sector_nr' as an address that we want to write to.
4735          * From this we use last_dev_address() to find where we might
4736          * write to, and first_dev_address() on the 'safe' position.
4737          * If this 'next' write position is after the 'safe' position,
4738          * we must update the metadata to increase the 'safe' position.
4739          *
4740          * When reshaping backwards, we round in the opposite direction
4741          * and perform the reverse test:  next write position must not be
4742          * less than current safe position.
4743          *
4744          * In all this the minimum difference in data offsets
4745          * (conf->offset_diff - always positive) allows a bit of slack,
4746          * so next can be after 'safe', but not by more than offset_diff
4747          *
4748          * We need to prepare all the bios here before we start any IO
4749          * to ensure the size we choose is acceptable to all devices.
4750          * That means one for each copy for write-out and an extra one for
4751          * read-in.
4752          * We store the read-in bio in ->master_bio and the others in
4753          * ->devs[x].bio and ->devs[x].repl_bio.
4754          */
4755         struct r10conf *conf = mddev->private;
4756         struct r10bio *r10_bio;
4757         sector_t next, safe, last;
4758         int max_sectors;
4759         int nr_sectors;
4760         int s;
4761         struct md_rdev *rdev;
4762         int need_flush = 0;
4763         struct bio *blist;
4764         struct bio *bio, *read_bio;
4765         int sectors_done = 0;
4766         struct page **pages;
4767
4768         if (sector_nr == 0) {
4769                 /* If restarting in the middle, skip the initial sectors */
4770                 if (mddev->reshape_backwards &&
4771                     conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4772                         sector_nr = (raid10_size(mddev, 0, 0)
4773                                      - conf->reshape_progress);
4774                 } else if (!mddev->reshape_backwards &&
4775                            conf->reshape_progress > 0)
4776                         sector_nr = conf->reshape_progress;
4777                 if (sector_nr) {
4778                         mddev->curr_resync_completed = sector_nr;
4779                         sysfs_notify_dirent_safe(mddev->sysfs_completed);
4780                         *skipped = 1;
4781                         return sector_nr;
4782                 }
4783         }
4784
4785         /* We don't use sector_nr to track where we are up to
4786          * as that doesn't work well for ->reshape_backwards.
4787          * So just use ->reshape_progress.
4788          */
4789         if (mddev->reshape_backwards) {
4790                 /* 'next' is the earliest device address that we might
4791                  * write to for this chunk in the new layout
4792                  */
4793                 next = first_dev_address(conf->reshape_progress - 1,
4794                                          &conf->geo);
4795
4796                 /* 'safe' is the last device address that we might read from
4797                  * in the old layout after a restart
4798                  */
4799                 safe = last_dev_address(conf->reshape_safe - 1,
4800                                         &conf->prev);
4801
4802                 if (next + conf->offset_diff < safe)
4803                         need_flush = 1;
4804
4805                 last = conf->reshape_progress - 1;
4806                 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4807                                                & conf->prev.chunk_mask);
4808                 if (sector_nr + RESYNC_SECTORS < last)
4809                         sector_nr = last + 1 - RESYNC_SECTORS;
4810         } else {
4811                 /* 'next' is after the last device address that we
4812                  * might write to for this chunk in the new layout
4813                  */
4814                 next = last_dev_address(conf->reshape_progress, &conf->geo);
4815
4816                 /* 'safe' is the earliest device address that we might
4817                  * read from in the old layout after a restart
4818                  */
4819                 safe = first_dev_address(conf->reshape_safe, &conf->prev);
4820
4821                 /* Need to update metadata if 'next' might be beyond 'safe'
4822                  * as that would possibly corrupt data
4823                  */
4824                 if (next > safe + conf->offset_diff)
4825                         need_flush = 1;
4826
4827                 sector_nr = conf->reshape_progress;
4828                 last = sector_nr | (conf->geo.chunk_mask
4829                                     & conf->prev.chunk_mask);
4830
4831                 if (sector_nr + RESYNC_SECTORS <= last)
4832                         last = sector_nr + RESYNC_SECTORS - 1;
4833         }
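             /*
              * Forwards example of the range chosen above (illustrative
              * values): with reshape_progress at sector 10240 and
              * 1024-sector chunks in both the old and new geometry, 'last'
              * is 10240 | 1023 = 11263, i.e. the end of the current chunk,
              * and is then capped so that at most RESYNC_SECTORS sectors
              * are copied in one pass.
              */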
4834
4835         if (need_flush ||
4836             time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4837                 /* Need to update reshape_position in metadata */
4838                 wait_barrier(conf, false);
4839                 mddev->reshape_position = conf->reshape_progress;
4840                 if (mddev->reshape_backwards)
4841                         mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4842                                 - conf->reshape_progress;
4843                 else
4844                         mddev->curr_resync_completed = conf->reshape_progress;
4845                 conf->reshape_checkpoint = jiffies;
4846                 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4847                 md_wakeup_thread(mddev->thread);
4848                 wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4849                            test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4850                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4851                         allow_barrier(conf);
4852                         return sectors_done;
4853                 }
4854                 conf->reshape_safe = mddev->reshape_position;
4855                 allow_barrier(conf);
4856         }
4857
4858         raise_barrier(conf, 0);
4859 read_more:
4860         /* Now schedule reads for blocks from sector_nr to last */
4861         r10_bio = raid10_alloc_init_r10buf(conf);
4862         r10_bio->state = 0;
4863         raise_barrier(conf, 1);
4864         atomic_set(&r10_bio->remaining, 0);
4865         r10_bio->mddev = mddev;
4866         r10_bio->sector = sector_nr;
4867         set_bit(R10BIO_IsReshape, &r10_bio->state);
4868         r10_bio->sectors = last - sector_nr + 1;
4869         rdev = read_balance(conf, r10_bio, &max_sectors);
4870         BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4871
4872         if (!rdev) {
4873                 /* Cannot read from here, so need to record bad blocks
4874                  * on all the target devices.
4875                  */
4876                 // FIXME
4877                 mempool_free(r10_bio, &conf->r10buf_pool);
4878                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4879                 return sectors_done;
4880         }
4881
4882         read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
4883                                     GFP_KERNEL, &mddev->bio_set);
4884         read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4885                                + rdev->data_offset);
4886         read_bio->bi_private = r10_bio;
4887         read_bio->bi_end_io = end_reshape_read;
4888         r10_bio->master_bio = read_bio;
4889         r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4890
4891         /*
4892          * Broadcast the RESYNC message to other nodes, so that they do
4893          * not write to the region, to avoid conflicts.
4894          */
4895         if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4896                 struct mdp_superblock_1 *sb = NULL;
4897                 int sb_reshape_pos = 0;
4898
4899                 conf->cluster_sync_low = sector_nr;
4900                 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4901                 sb = page_address(rdev->sb_page);
4902                 if (sb) {
4903                         sb_reshape_pos = le64_to_cpu(sb->reshape_position);
4904                         /*
4905                          * Set cluster_sync_low again if the next address for
4906                          * the array reshape is less than cluster_sync_low,
4907                          * since we can't update cluster_sync_low until the
4908                          * reshape of that region has finished.
4908                          */
4909                         if (sb_reshape_pos < conf->cluster_sync_low)
4910                                 conf->cluster_sync_low = sb_reshape_pos;
4911                 }
4912
4913                 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4914                                                           conf->cluster_sync_high);
4915         }
4916
4917         /* Now find the locations in the new layout */
4918         __raid10_find_phys(&conf->geo, r10_bio);
4919
4920         blist = read_bio;
4921         read_bio->bi_next = NULL;
4922
4923         rcu_read_lock();
4924         for (s = 0; s < conf->copies*2; s++) {
4925                 struct bio *b;
4926                 int d = r10_bio->devs[s/2].devnum;
4927                 struct md_rdev *rdev2;
4928                 if (s & 1) {
4929                         rdev2 = rcu_dereference(conf->mirrors[d].replacement);
4930                         b = r10_bio->devs[s/2].repl_bio;
4931                 } else {
4932                         rdev2 = rcu_dereference(conf->mirrors[d].rdev);
4933                         b = r10_bio->devs[s/2].bio;
4934                 }
4935                 if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4936                         continue;
4937
4938                 bio_set_dev(b, rdev2->bdev);
4939                 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4940                         rdev2->new_data_offset;
4941                 b->bi_end_io = end_reshape_write;
4942                 bio_set_op_attrs(b, REQ_OP_WRITE, 0);
4943                 b->bi_next = blist;
4944                 blist = b;
4945         }
4946
4947         /* Now add as many pages as possible to all of these bios. */
4948
4949         nr_sectors = 0;
4950         pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4951         for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4952                 struct page *page = pages[s / (PAGE_SIZE >> 9)];
4953                 int len = (max_sectors - s) << 9;
4954                 if (len > PAGE_SIZE)
4955                         len = PAGE_SIZE;
4956                 for (bio = blist; bio ; bio = bio->bi_next) {
4957                         /*
4958                          * won't fail because the vec table is big enough
4959                          * to hold all these pages
4960                          */
4961                         bio_add_page(bio, page, len, 0);
4962                 }
4963                 sector_nr += len >> 9;
4964                 nr_sectors += len >> 9;
4965         }
4966         rcu_read_unlock();
4967         r10_bio->sectors = nr_sectors;
4968
4969         /* Now submit the read */
4970         md_sync_acct_bio(read_bio, r10_bio->sectors);
4971         atomic_inc(&r10_bio->remaining);
4972         read_bio->bi_next = NULL;
4973         submit_bio_noacct(read_bio);
4974         sectors_done += nr_sectors;
4975         if (sector_nr <= last)
4976                 goto read_more;
4977
4978         lower_barrier(conf);
4979
4980         /* Now that we have done the whole section we can
4981          * update reshape_progress
4982          */
4983         if (mddev->reshape_backwards)
4984                 conf->reshape_progress -= sectors_done;
4985         else
4986                 conf->reshape_progress += sectors_done;
4987
4988         return sectors_done;
4989 }
4990
4991 static void end_reshape_request(struct r10bio *r10_bio);
4992 static int handle_reshape_read_error(struct mddev *mddev,
4993                                      struct r10bio *r10_bio);
4994 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4995 {
4996         /* Reshape read completed.  Hopefully we have a block
4997          * to write out.
4998          * If we got a read error then we do sync 1-page reads from
4999          * elsewhere until we find the data - or give up.
5000          */
5001         struct r10conf *conf = mddev->private;
5002         int s;
5003
5004         if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
5005                 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
5006                         /* Reshape has been aborted */
5007                         md_done_sync(mddev, r10_bio->sectors, 0);
5008                         return;
5009                 }
5010
5011         /* We definitely have the data in the pages, schedule the
5012          * writes.
5013          */
5014         atomic_set(&r10_bio->remaining, 1);
5015         for (s = 0; s < conf->copies*2; s++) {
5016                 struct bio *b;
5017                 int d = r10_bio->devs[s/2].devnum;
5018                 struct md_rdev *rdev;
5019                 rcu_read_lock();
5020                 if (s & 1) {
5021                         rdev = rcu_dereference(conf->mirrors[d].replacement);
5022                         b = r10_bio->devs[s/2].repl_bio;
5023                 } else {
5024                         rdev = rcu_dereference(conf->mirrors[d].rdev);
5025                         b = r10_bio->devs[s/2].bio;
5026                 }
5027                 if (!rdev || test_bit(Faulty, &rdev->flags)) {
5028                         rcu_read_unlock();
5029                         continue;
5030                 }
5031                 atomic_inc(&rdev->nr_pending);
5032                 rcu_read_unlock();
5033                 md_sync_acct_bio(b, r10_bio->sectors);
5034                 atomic_inc(&r10_bio->remaining);
5035                 b->bi_next = NULL;
5036                 submit_bio_noacct(b);
5037         }
5038         end_reshape_request(r10_bio);
5039 }
5040
5041 static void end_reshape(struct r10conf *conf)
5042 {
5043         if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
5044                 return;
5045
5046         spin_lock_irq(&conf->device_lock);
5047         conf->prev = conf->geo;
5048         md_finish_reshape(conf->mddev);
	smp_wmb();
	conf->reshape_progress = MaxSector;
	conf->reshape_safe = MaxSector;
	spin_unlock_irq(&conf->device_lock);

	if (conf->mddev->queue)
		raid10_set_io_opt(conf);
	conf->fullsync = 0;
}

static void raid10_update_reshape_pos(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;
	sector_t lo, hi;

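	/*
	 * Clustered arrays only: another node drives the reshape and
	 * publishes its resync window.  Accept the new reshape_position
	 * only if it lies inside that window (or the reshape is finished).
	 */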
	md_cluster_ops->resync_info_get(mddev, &lo, &hi);
	if ((mddev->reshape_position >= lo && mddev->reshape_position <= hi) ||
	    mddev->reshape_position == MaxSector)
		conf->reshape_progress = mddev->reshape_position;
	else
		WARN_ON_ONCE(1);
}

static int handle_reshape_read_error(struct mddev *mddev,
				     struct r10bio *r10_bio)
{
	/* Use sync reads to get the blocks from somewhere else */
	int sectors = r10_bio->sectors;
	struct r10conf *conf = mddev->private;
	struct r10bio *r10b;
	int slot = 0;
	int idx = 0;
	struct page **pages;

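	/*
	 * A throwaway r10bio with conf->copies trailing 'devs' slots is
	 * enough to map this sector onto the old geometry.  GFP_NOIO: we
	 * are on the I/O path, so reclaim must not issue further I/O.
	 */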
	r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
	if (!r10b) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return -ENOMEM;
	}

	/* reshape IOs share pages from .devs[0].bio */
	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;

	r10b->sector = r10_bio->sector;
	__raid10_find_phys(&conf->prev, r10b);

	while (sectors) {
		int s = sectors;
		int success = 0;
		int first_slot = slot;

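		/* Read one page at a time; pages[idx] receives this chunk */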
		if (s > (PAGE_SIZE >> 9))
			s = PAGE_SIZE >> 9;

		rcu_read_lock();
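		/*
		 * Try each copy in the old geometry in turn, resuming from
		 * the slot that worked for the previous page; give up once
		 * we wrap around to first_slot without a successful read.
		 */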
		while (!success) {
			int d = r10b->devs[slot].devnum;
			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
			sector_t addr;

			if (rdev == NULL ||
			    test_bit(Faulty, &rdev->flags) ||
			    !test_bit(In_sync, &rdev->flags))
				goto failed;

			/* addr is in sectors: each page covers PAGE_SIZE >> 9 of them */
			addr = r10b->devs[slot].addr + idx * (PAGE_SIZE >> 9);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			success = sync_page_io(rdev,
					       addr,
					       s << 9,
					       pages[idx],
					       REQ_OP_READ, 0, false);
			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
			if (success)
				break;
		failed:
			slot++;
			if (slot >= conf->copies)
				slot = 0;
			if (slot == first_slot)
				break;
		}
		rcu_read_unlock();
		if (!success) {
			/* couldn't read this block, must give up */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			kfree(r10b);
			return -EIO;
		}
		sectors -= s;
		idx++;
	}
	kfree(r10b);
	return 0;
}

static void end_reshape_write(struct bio *bio)
{
	struct r10bio *r10_bio = get_resync_r10bio(bio);
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;
	int d;
	int slot;
	int repl;
	struct md_rdev *rdev = NULL;

	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
	if (repl)
		rdev = conf->mirrors[d].replacement;
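	/*
	 * If this was a replacement write but the replacement is gone, it
	 * was promoted to the primary slot; the barrier pairs with that
	 * promotion (see raid10_remove_disk()) so we see a valid rdev.
	 */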
	if (!rdev) {
		smp_mb();
		rdev = conf->mirrors[d].rdev;
	}

	if (bio->bi_status) {
		/* FIXME should record badblock */
		md_error(mddev, rdev);
	}

	rdev_dec_pending(rdev, mddev);
	end_reshape_request(r10_bio);
}

static void end_reshape_request(struct r10bio *r10_bio)
{
	if (!atomic_dec_and_test(&r10_bio->remaining))
		return;
	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
	bio_put(r10_bio->master_bio);
	put_buf(r10_bio);
}

static void raid10_finish_reshape(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;

	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
		return;

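	/*
	 * Growing: the space added by the reshape has not been resynced,
	 * so pull recovery_cp back and request recovery.  Shrinking: the
	 * devices beyond the new geometry are no longer in-sync members.
	 */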
	if (mddev->delta_disks > 0) {
		if (mddev->recovery_cp > mddev->resync_max_sectors) {
			mddev->recovery_cp = mddev->resync_max_sectors;
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		}
		mddev->resync_max_sectors = mddev->array_sectors;
	} else {
		int d;

		rcu_read_lock();
		for (d = conf->geo.raid_disks;
		     d < conf->geo.raid_disks - mddev->delta_disks;
		     d++) {
			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);

			if (rdev)
				clear_bit(In_sync, &rdev->flags);
			rdev = rcu_dereference(conf->mirrors[d].replacement);
			if (rdev)
				clear_bit(In_sync, &rdev->flags);
		}
		rcu_read_unlock();
	}
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
	mddev->reshape_position = MaxSector;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
}

static struct md_personality raid10_personality =
{
	.name		= "raid10",
	.level		= 10,
	.owner		= THIS_MODULE,
	.make_request	= raid10_make_request,
	.run		= raid10_run,
	.free		= raid10_free,
	.status		= raid10_status,
	.error_handler	= raid10_error,
	.hot_add_disk	= raid10_add_disk,
	.hot_remove_disk = raid10_remove_disk,
	.spare_active	= raid10_spare_active,
	.sync_request	= raid10_sync_request,
	.quiesce	= raid10_quiesce,
	.size		= raid10_size,
	.resize		= raid10_resize,
	.takeover	= raid10_takeover,
	.check_reshape	= raid10_check_reshape,
	.start_reshape	= raid10_start_reshape,
	.finish_reshape	= raid10_finish_reshape,
	.update_reshape_pos = raid10_update_reshape_pos,
};

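/*
 * Module plumbing: register this personality with the md core on load and
 * remove it on unload.  The aliases below let the core autoload the module
 * when a level-10 array is assembled.
 */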
static int __init raid_init(void)
{
	return register_md_personality(&raid10_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid10_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");