1 /*
2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 * Copyright (C) 2002, 2003 H. Peter Anvin
7 * RAID-4/5/6 management functions.
8 * Thanks to Penguin Computing for making the RAID-6 development possible
9 * by donating a test server!
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
21 /*
24 * The sequencing for updating the bitmap reliably is a little
25 * subtle (and I got it wrong the first time) so it deserves some
26 * explanation.
28 * We group bitmap updates into batches. Each batch has a number.
29 * We may write out several batches at once, but that isn't very important.
30 * conf->bm_write is the number of the last batch successfully written.
31 * conf->bm_flush is the number of the last batch that was closed to
32 * new additions.
33 * When we discover that we will need to write to any block in a stripe
34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
35 * the number of the batch it will be in. This is bm_flush+1.
36 * When we are ready to do a write, if that batch hasn't been written yet,
37 * we plug the array and queue the stripe for later.
38 * When an unplug happens, we increment bm_flush, thus closing the current
39 * batch.
40 * When we notice that bm_flush > bm_write, we write out all pending updates
41 * to the bitmap, and advance bm_write to where bm_flush was.
42 * This may occasionally write a bit out twice, but is sure never to
43 * lose any bits.
44 */
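/*
 * Illustrative timeline of the rules above (values chosen arbitrarily;
 * the fields are called seq_write/seq_flush in the code below): suppose
 * bm_write = 4 and bm_flush = 4.  A write handled by add_stripe_bio()
 * records sh->bm_seq = bm_flush + 1 = 5.  Batch 5 has not been written
 * yet, so the stripe is queued on bitmap_list and the array is plugged.
 * An unplug advances bm_flush to 5; noticing bm_flush > bm_write, the
 * pending bitmap updates are written out and bm_write becomes 5, after
 * which the queued stripe may proceed.
 */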
46 #include <linux/module.h>
47 #include <linux/slab.h>
48 #include <linux/highmem.h>
49 #include <linux/bitops.h>
50 #include <linux/kthread.h>
51 #include <asm/atomic.h>
54 #include <linux/raid/bitmap.h>
55 #include <linux/async_tx.h>
61 #define NR_STRIPES 256
62 #define STRIPE_SIZE PAGE_SIZE
63 #define STRIPE_SHIFT (PAGE_SHIFT - 9)
64 #define STRIPE_SECTORS (STRIPE_SIZE>>9)
65 #define IO_THRESHOLD 1
66 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
67 #define HASH_MASK (NR_HASH - 1)
69 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
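/*
 * For illustration, on a 64-bit build with 4K pages: STRIPE_SIZE = 4096,
 * STRIPE_SECTORS = 8, STRIPE_SHIFT = 3, NR_HASH = 4096 / 8 = 512 buckets
 * (HASH_MASK = 511).  stripe_hash() then maps e.g. sector 16384 to stripe
 * 16384 >> 3 = 2048 and hash bucket 2048 & 511 = 0.
 */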
71 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
72 * order without overlap. There may be several bio's per stripe+device, and
73 * a bio could span several devices.
74 * When walking this list for a particular stripe+device, we must never proceed
75 * beyond a bio that extends past this device, as the next bio might no longer
76 * be valid.
77 * This macro is used to determine the 'next' bio in the list, given the sector
78 * of the current stripe+device
79 */
80 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
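/*
 * Sketch of how the macro is typically used (see e.g. ops_run_biofill()
 * below): walk every bio queued against one stripe+device, stopping at the
 * first bio that extends past this stripe:
 *
 *	struct bio *rbi = dev->toread;
 *	while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
 *		... handle rbi ...
 *		rbi = r5_next_bio(rbi, dev->sector);
 *	}
 */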
81 /*
82 * The following can be used to debug the driver
83 */
84 #define RAID5_PARANOIA 1
85 #if RAID5_PARANOIA && defined(CONFIG_SMP)
86 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
88 # define CHECK_DEVLOCK()
96 #if !RAID6_USE_EMPTY_ZERO_PAGE
97 /* In .bss so it's zeroed */
98 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
101 static inline int raid6_next_disk(int disk, int raid_disks)
104 return (disk < raid_disks) ? disk : 0;
107 static void return_io(struct bio *return_bi)
109 struct bio *bi = return_bi;
112 return_bi = bi->bi_next;
116 test_bit(BIO_UPTODATE, &bi->bi_flags)
122 static void print_raid5_conf (raid5_conf_t *conf);
124 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
126 if (atomic_dec_and_test(&sh->count)) {
127 BUG_ON(!list_empty(&sh->lru));
128 BUG_ON(atomic_read(&conf->active_stripes)==0);
129 if (test_bit(STRIPE_HANDLE, &sh->state)) {
130 if (test_bit(STRIPE_DELAYED, &sh->state)) {
131 list_add_tail(&sh->lru, &conf->delayed_list);
132 blk_plug_device(conf->mddev->queue);
133 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
134 sh->bm_seq - conf->seq_write > 0) {
135 list_add_tail(&sh->lru, &conf->bitmap_list);
136 blk_plug_device(conf->mddev->queue);
138 clear_bit(STRIPE_BIT_DELAY, &sh->state);
139 list_add_tail(&sh->lru, &conf->handle_list);
141 md_wakeup_thread(conf->mddev->thread);
143 BUG_ON(sh->ops.pending);
144 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
145 atomic_dec(&conf->preread_active_stripes);
146 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
147 md_wakeup_thread(conf->mddev->thread);
149 atomic_dec(&conf->active_stripes);
150 if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
151 list_add_tail(&sh->lru, &conf->inactive_list);
152 wake_up(&conf->wait_for_stripe);
153 if (conf->retry_read_aligned)
154 md_wakeup_thread(conf->mddev->thread);
159 static void release_stripe(struct stripe_head *sh)
161 raid5_conf_t *conf = sh->raid_conf;
164 spin_lock_irqsave(&conf->device_lock, flags);
165 __release_stripe(conf, sh);
166 spin_unlock_irqrestore(&conf->device_lock, flags);
169 static inline void remove_hash(struct stripe_head *sh)
171 pr_debug("remove_hash(), stripe %llu\n",
172 (unsigned long long)sh->sector);
174 hlist_del_init(&sh->hash);
177 static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
179 struct hlist_head *hp = stripe_hash(conf, sh->sector);
181 pr_debug("insert_hash(), stripe %llu\n",
182 (unsigned long long)sh->sector);
185 hlist_add_head(&sh->hash, hp);
189 /* find an idle stripe, make sure it is unhashed, and return it. */
190 static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
192 struct stripe_head *sh = NULL;
193 struct list_head *first;
196 if (list_empty(&conf->inactive_list))
198 first = conf->inactive_list.next;
199 sh = list_entry(first, struct stripe_head, lru);
200 list_del_init(first);
202 atomic_inc(&conf->active_stripes);
207 static void shrink_buffers(struct stripe_head *sh, int num)
212 for (i=0; i<num ; i++) {
216 sh->dev[i].page = NULL;
221 static int grow_buffers(struct stripe_head *sh, int num)
225 for (i=0; i<num; i++) {
228 if (!(page = alloc_page(GFP_KERNEL))) {
231 sh->dev[i].page = page;
236 static void raid5_build_block (struct stripe_head *sh, int i);
238 static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
240 raid5_conf_t *conf = sh->raid_conf;
243 BUG_ON(atomic_read(&sh->count) != 0);
244 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
245 BUG_ON(sh->ops.pending || sh->ops.ack || sh->ops.complete);
248 pr_debug("init_stripe called, stripe %llu\n",
249 (unsigned long long)sh->sector);
259 for (i = sh->disks; i--; ) {
260 struct r5dev *dev = &sh->dev[i];
262 if (dev->toread || dev->read || dev->towrite || dev->written ||
263 test_bit(R5_LOCKED, &dev->flags)) {
264 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
265 (unsigned long long)sh->sector, i, dev->toread,
266 dev->read, dev->towrite, dev->written,
267 test_bit(R5_LOCKED, &dev->flags));
271 raid5_build_block(sh, i);
273 insert_hash(conf, sh);
276 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
278 struct stripe_head *sh;
279 struct hlist_node *hn;
282 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
283 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
284 if (sh->sector == sector && sh->disks == disks)
286 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
290 static void unplug_slaves(mddev_t *mddev);
291 static void raid5_unplug_device(struct request_queue *q);
293 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
294 int pd_idx, int noblock)
296 struct stripe_head *sh;
298 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
300 spin_lock_irq(&conf->device_lock);
303 wait_event_lock_irq(conf->wait_for_stripe,
305 conf->device_lock, /* nothing */);
306 sh = __find_stripe(conf, sector, disks);
308 if (!conf->inactive_blocked)
309 sh = get_free_stripe(conf);
310 if (noblock && sh == NULL)
313 conf->inactive_blocked = 1;
314 wait_event_lock_irq(conf->wait_for_stripe,
315 !list_empty(&conf->inactive_list) &&
316 (atomic_read(&conf->active_stripes)
317 < (conf->max_nr_stripes *3/4)
318 || !conf->inactive_blocked),
320 raid5_unplug_device(conf->mddev->queue)
322 conf->inactive_blocked = 0;
324 init_stripe(sh, sector, pd_idx, disks);
326 if (atomic_read(&sh->count)) {
327 BUG_ON(!list_empty(&sh->lru));
329 if (!test_bit(STRIPE_HANDLE, &sh->state))
330 atomic_inc(&conf->active_stripes);
331 if (list_empty(&sh->lru) &&
332 !test_bit(STRIPE_EXPANDING, &sh->state))
334 list_del_init(&sh->lru);
337 } while (sh == NULL);
340 atomic_inc(&sh->count);
342 spin_unlock_irq(&conf->device_lock);
346 /* test_and_ack_op() ensures that we only dequeue an operation once */
347 #define test_and_ack_op(op, pend) \
348 do { \
349 if (test_bit(op, &sh->ops.pending) && \
350 !test_bit(op, &sh->ops.complete)) { \
351 if (test_and_set_bit(op, &sh->ops.ack)) \
352 clear_bit(op, &pend); \
353 else \
354 ack++; \
355 } else \
356 clear_bit(op, &pend); \
357 } while (0)
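/*
 * Rough picture, as far as can be inferred from the code here: an operation
 * moves through three bitmaps, ops.pending -> ops.ack -> ops.complete.  The
 * first time test_and_ack_op() sees an op pending and not complete it sets
 * the ack bit, leaves the op in the local 'pend' copy and counts it in
 * 'ack'; on any later pass, or if the op is not pending or already complete,
 * the op is cleared from 'pend' so it is never handed to raid5_run_ops()
 * twice.
 */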
359 /* find new work to run, do not resubmit work that is already
362 static unsigned long get_stripe_work(struct stripe_head *sh)
364 unsigned long pending;
367 pending = sh->ops.pending;
369 test_and_ack_op(STRIPE_OP_BIOFILL, pending);
370 test_and_ack_op(STRIPE_OP_COMPUTE_BLK, pending);
371 test_and_ack_op(STRIPE_OP_PREXOR, pending);
372 test_and_ack_op(STRIPE_OP_BIODRAIN, pending);
373 test_and_ack_op(STRIPE_OP_POSTXOR, pending);
374 test_and_ack_op(STRIPE_OP_CHECK, pending);
375 if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending))
378 sh->ops.count -= ack;
379 if (unlikely(sh->ops.count < 0)) {
380 printk(KERN_ERR "pending: %#lx ops.pending: %#lx ops.ack: %#lx "
381 "ops.complete: %#lx\n", pending, sh->ops.pending,
382 sh->ops.ack, sh->ops.complete);
390 raid5_end_read_request(struct bio *bi, int error);
392 raid5_end_write_request(struct bio *bi, int error);
394 static void ops_run_io(struct stripe_head *sh)
396 raid5_conf_t *conf = sh->raid_conf;
397 int i, disks = sh->disks;
401 for (i = disks; i--; ) {
405 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
407 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
412 bi = &sh->dev[i].req;
416 bi->bi_end_io = raid5_end_write_request;
418 bi->bi_end_io = raid5_end_read_request;
421 rdev = rcu_dereference(conf->disks[i].rdev);
422 if (rdev && test_bit(Faulty, &rdev->flags))
425 atomic_inc(&rdev->nr_pending);
429 if (test_bit(STRIPE_SYNCING, &sh->state) ||
430 test_bit(STRIPE_EXPAND_SOURCE, &sh->state) ||
431 test_bit(STRIPE_EXPAND_READY, &sh->state))
432 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
434 bi->bi_bdev = rdev->bdev;
435 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
436 __FUNCTION__, (unsigned long long)sh->sector,
438 atomic_inc(&sh->count);
439 bi->bi_sector = sh->sector + rdev->data_offset;
440 bi->bi_flags = 1 << BIO_UPTODATE;
444 bi->bi_io_vec = &sh->dev[i].vec;
445 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
446 bi->bi_io_vec[0].bv_offset = 0;
447 bi->bi_size = STRIPE_SIZE;
450 test_bit(R5_ReWrite, &sh->dev[i].flags))
451 atomic_add(STRIPE_SECTORS,
452 &rdev->corrected_errors);
453 generic_make_request(bi);
456 set_bit(STRIPE_DEGRADED, &sh->state);
457 pr_debug("skip op %ld on disc %d for sector %llu\n",
458 bi->bi_rw, i, (unsigned long long)sh->sector);
459 clear_bit(R5_LOCKED, &sh->dev[i].flags);
460 set_bit(STRIPE_HANDLE, &sh->state);
465 static struct dma_async_tx_descriptor *
466 async_copy_data(int frombio, struct bio *bio, struct page *page,
467 sector_t sector, struct dma_async_tx_descriptor *tx)
470 struct page *bio_page;
474 if (bio->bi_sector >= sector)
475 page_offset = (signed)(bio->bi_sector - sector) * 512;
477 page_offset = (signed)(sector - bio->bi_sector) * -512;
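/*
 * For illustration: a bio starting two sectors past this stripe page gives
 * page_offset = 2 * 512 = 1024; one starting a sector before it gives
 * page_offset = -512, and the first 512 bytes of the leading bio segment
 * are skipped via the b_offset adjustment below.
 */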
478 bio_for_each_segment(bvl, bio, i) {
479 int len = bio_iovec_idx(bio, i)->bv_len;
483 if (page_offset < 0) {
484 b_offset = -page_offset;
485 page_offset += b_offset;
489 if (len > 0 && page_offset + len > STRIPE_SIZE)
490 clen = STRIPE_SIZE - page_offset;
495 b_offset += bio_iovec_idx(bio, i)->bv_offset;
496 bio_page = bio_iovec_idx(bio, i)->bv_page;
498 tx = async_memcpy(page, bio_page, page_offset,
503 tx = async_memcpy(bio_page, page, b_offset,
508 if (clen < len) /* hit end of page */
516 static void ops_complete_biofill(void *stripe_head_ref)
518 struct stripe_head *sh = stripe_head_ref;
519 struct bio *return_bi = NULL;
520 raid5_conf_t *conf = sh->raid_conf;
523 pr_debug("%s: stripe %llu\n", __FUNCTION__,
524 (unsigned long long)sh->sector);
526 /* clear completed biofills */
527 for (i = sh->disks; i--; ) {
528 struct r5dev *dev = &sh->dev[i];
530 /* acknowledge completion of a biofill operation */
531 /* and check if we need to reply to a read request,
532 * new R5_Wantfill requests are held off until
533 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
535 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
536 struct bio *rbi, *rbi2;
538 /* The access to dev->read is outside of the
539 * spin_lock_irq(&conf->device_lock), but is protected
540 * by the STRIPE_OP_BIOFILL pending bit
545 while (rbi && rbi->bi_sector <
546 dev->sector + STRIPE_SECTORS) {
547 rbi2 = r5_next_bio(rbi, dev->sector);
548 spin_lock_irq(&conf->device_lock);
549 if (--rbi->bi_phys_segments == 0) {
550 rbi->bi_next = return_bi;
553 spin_unlock_irq(&conf->device_lock);
558 set_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
560 return_io(return_bi);
562 set_bit(STRIPE_HANDLE, &sh->state);
566 static void ops_run_biofill(struct stripe_head *sh)
568 struct dma_async_tx_descriptor *tx = NULL;
569 raid5_conf_t *conf = sh->raid_conf;
572 pr_debug("%s: stripe %llu\n", __FUNCTION__,
573 (unsigned long long)sh->sector);
575 for (i = sh->disks; i--; ) {
576 struct r5dev *dev = &sh->dev[i];
577 if (test_bit(R5_Wantfill, &dev->flags)) {
579 spin_lock_irq(&conf->device_lock);
580 dev->read = rbi = dev->toread;
582 spin_unlock_irq(&conf->device_lock);
583 while (rbi && rbi->bi_sector <
584 dev->sector + STRIPE_SECTORS) {
585 tx = async_copy_data(0, rbi, dev->page,
587 rbi = r5_next_bio(rbi, dev->sector);
592 atomic_inc(&sh->count);
593 async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
594 ops_complete_biofill, sh);
597 static void ops_complete_compute5(void *stripe_head_ref)
599 struct stripe_head *sh = stripe_head_ref;
600 int target = sh->ops.target;
601 struct r5dev *tgt = &sh->dev[target];
603 pr_debug("%s: stripe %llu\n", __FUNCTION__,
604 (unsigned long long)sh->sector);
606 set_bit(R5_UPTODATE, &tgt->flags);
607 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
608 clear_bit(R5_Wantcompute, &tgt->flags);
609 set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
610 set_bit(STRIPE_HANDLE, &sh->state);
614 static struct dma_async_tx_descriptor *
615 ops_run_compute5(struct stripe_head *sh, unsigned long pending)
617 /* kernel stack size limits the total number of disks */
618 int disks = sh->disks;
619 struct page *xor_srcs[disks];
620 int target = sh->ops.target;
621 struct r5dev *tgt = &sh->dev[target];
622 struct page *xor_dest = tgt->page;
624 struct dma_async_tx_descriptor *tx;
627 pr_debug("%s: stripe %llu block: %d\n",
628 __FUNCTION__, (unsigned long long)sh->sector, target);
629 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
631 for (i = disks; i--; )
633 xor_srcs[count++] = sh->dev[i].page;
635 atomic_inc(&sh->count);
637 if (unlikely(count == 1))
638 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
639 0, NULL, ops_complete_compute5, sh);
641 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
642 ASYNC_TX_XOR_ZERO_DST, NULL,
643 ops_complete_compute5, sh);
645 /* ack now if postxor is not set to be run */
646 if (tx && !test_bit(STRIPE_OP_POSTXOR, &pending))
652 static void ops_complete_prexor(void *stripe_head_ref)
654 struct stripe_head *sh = stripe_head_ref;
656 pr_debug("%s: stripe %llu\n", __FUNCTION__,
657 (unsigned long long)sh->sector);
659 set_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
662 static struct dma_async_tx_descriptor *
663 ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
665 /* kernel stack size limits the total number of disks */
666 int disks = sh->disks;
667 struct page *xor_srcs[disks];
668 int count = 0, pd_idx = sh->pd_idx, i;
670 /* existing parity data subtracted */
671 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
673 pr_debug("%s: stripe %llu\n", __FUNCTION__,
674 (unsigned long long)sh->sector);
676 for (i = disks; i--; ) {
677 struct r5dev *dev = &sh->dev[i];
678 /* Only process blocks that are known to be uptodate */
679 if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags))
680 xor_srcs[count++] = dev->page;
683 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
684 ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
685 ops_complete_prexor, sh);
690 static struct dma_async_tx_descriptor *
691 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
692 unsigned long pending)
694 int disks = sh->disks;
695 int pd_idx = sh->pd_idx, i;
697 /* check if prexor is active which means only process blocks
698 * that are part of a read-modify-write (Wantprexor)
700 int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
702 pr_debug("%s: stripe %llu\n", __FUNCTION__,
703 (unsigned long long)sh->sector);
705 for (i = disks; i--; ) {
706 struct r5dev *dev = &sh->dev[i];
711 if (prexor) { /* rmw */
713 test_bit(R5_Wantprexor, &dev->flags))
716 if (i != pd_idx && dev->towrite &&
717 test_bit(R5_LOCKED, &dev->flags))
724 spin_lock(&sh->lock);
725 chosen = dev->towrite;
727 BUG_ON(dev->written);
728 wbi = dev->written = chosen;
729 spin_unlock(&sh->lock);
731 while (wbi && wbi->bi_sector <
732 dev->sector + STRIPE_SECTORS) {
733 tx = async_copy_data(1, wbi, dev->page,
735 wbi = r5_next_bio(wbi, dev->sector);
743 static void ops_complete_postxor(void *stripe_head_ref)
745 struct stripe_head *sh = stripe_head_ref;
747 pr_debug("%s: stripe %llu\n", __FUNCTION__,
748 (unsigned long long)sh->sector);
750 set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
751 set_bit(STRIPE_HANDLE, &sh->state);
755 static void ops_complete_write(void *stripe_head_ref)
757 struct stripe_head *sh = stripe_head_ref;
758 int disks = sh->disks, i, pd_idx = sh->pd_idx;
760 pr_debug("%s: stripe %llu\n", __FUNCTION__,
761 (unsigned long long)sh->sector);
763 for (i = disks; i--; ) {
764 struct r5dev *dev = &sh->dev[i];
765 if (dev->written || i == pd_idx)
766 set_bit(R5_UPTODATE, &dev->flags);
769 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete);
770 set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
772 set_bit(STRIPE_HANDLE, &sh->state);
777 ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
778 unsigned long pending)
780 /* kernel stack size limits the total number of disks */
781 int disks = sh->disks;
782 struct page *xor_srcs[disks];
784 int count = 0, pd_idx = sh->pd_idx, i;
785 struct page *xor_dest;
786 int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
788 dma_async_tx_callback callback;
790 pr_debug("%s: stripe %llu\n", __FUNCTION__,
791 (unsigned long long)sh->sector);
793 /* check if prexor is active which means only process blocks
794 * that are part of a read-modify-write (written)
797 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
798 for (i = disks; i--; ) {
799 struct r5dev *dev = &sh->dev[i];
801 xor_srcs[count++] = dev->page;
804 xor_dest = sh->dev[pd_idx].page;
805 for (i = disks; i--; ) {
806 struct r5dev *dev = &sh->dev[i];
808 xor_srcs[count++] = dev->page;
812 /* check whether this postxor is part of a write */
813 callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ?
814 ops_complete_write : ops_complete_postxor;
816 /* 1/ if we prexor'd then the dest is reused as a source
817 * 2/ if we did not prexor then we are redoing the parity
818 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
819 * for the synchronous xor case
821 flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
822 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
824 atomic_inc(&sh->count);
826 if (unlikely(count == 1)) {
827 flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
828 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
829 flags, tx, callback, sh);
831 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
832 flags, tx, callback, sh);
835 static void ops_complete_check(void *stripe_head_ref)
837 struct stripe_head *sh = stripe_head_ref;
838 int pd_idx = sh->pd_idx;
840 pr_debug("%s: stripe %llu\n", __FUNCTION__,
841 (unsigned long long)sh->sector);
843 if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
844 sh->ops.zero_sum_result == 0)
845 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
847 set_bit(STRIPE_OP_CHECK, &sh->ops.complete);
848 set_bit(STRIPE_HANDLE, &sh->state);
852 static void ops_run_check(struct stripe_head *sh)
854 /* kernel stack size limits the total number of disks */
855 int disks = sh->disks;
856 struct page *xor_srcs[disks];
857 struct dma_async_tx_descriptor *tx;
859 int count = 0, pd_idx = sh->pd_idx, i;
860 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
862 pr_debug("%s: stripe %llu\n", __FUNCTION__,
863 (unsigned long long)sh->sector);
865 for (i = disks; i--; ) {
866 struct r5dev *dev = &sh->dev[i];
868 xor_srcs[count++] = dev->page;
871 tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
872 &sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
875 set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
877 clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
879 atomic_inc(&sh->count);
880 tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
881 ops_complete_check, sh);
884 static void raid5_run_ops(struct stripe_head *sh, unsigned long pending)
886 int overlap_clear = 0, i, disks = sh->disks;
887 struct dma_async_tx_descriptor *tx = NULL;
889 if (test_bit(STRIPE_OP_BIOFILL, &pending)) {
894 if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending))
895 tx = ops_run_compute5(sh, pending);
897 if (test_bit(STRIPE_OP_PREXOR, &pending))
898 tx = ops_run_prexor(sh, tx);
900 if (test_bit(STRIPE_OP_BIODRAIN, &pending)) {
901 tx = ops_run_biodrain(sh, tx, pending);
905 if (test_bit(STRIPE_OP_POSTXOR, &pending))
906 ops_run_postxor(sh, tx, pending);
908 if (test_bit(STRIPE_OP_CHECK, &pending))
911 if (test_bit(STRIPE_OP_IO, &pending))
915 for (i = disks; i--; ) {
916 struct r5dev *dev = &sh->dev[i];
917 if (test_and_clear_bit(R5_Overlap, &dev->flags))
918 wake_up(&sh->raid_conf->wait_for_overlap);
922 static int grow_one_stripe(raid5_conf_t *conf)
924 struct stripe_head *sh;
925 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
928 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
929 sh->raid_conf = conf;
930 spin_lock_init(&sh->lock);
932 if (grow_buffers(sh, conf->raid_disks)) {
933 shrink_buffers(sh, conf->raid_disks);
934 kmem_cache_free(conf->slab_cache, sh);
937 sh->disks = conf->raid_disks;
938 /* we just created an active stripe so... */
939 atomic_set(&sh->count, 1);
940 atomic_inc(&conf->active_stripes);
941 INIT_LIST_HEAD(&sh->lru);
946 static int grow_stripes(raid5_conf_t *conf, int num)
948 struct kmem_cache *sc;
949 int devs = conf->raid_disks;
951 sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
952 sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
953 conf->active_name = 0;
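/* Two cache names are kept so that resize_stripes() can create the
 * replacement cache under the alternate name while the currently active
 * cache still holds stripes.
 */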
954 sc = kmem_cache_create(conf->cache_name[conf->active_name],
955 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
959 conf->slab_cache = sc;
960 conf->pool_size = devs;
962 if (!grow_one_stripe(conf))
967 #ifdef CONFIG_MD_RAID5_RESHAPE
968 static int resize_stripes(raid5_conf_t *conf, int newsize)
970 /* Make all the stripes able to hold 'newsize' devices.
971 * New slots in each stripe get 'page' set to a new page.
973 * This happens in stages:
974 * 1/ create a new kmem_cache and allocate the required number of
975 * stripe_heads.
976 * 2/ gather all the old stripe_heads and transfer the pages across
977 * to the new stripe_heads. This will have the side effect of
978 * freezing the array as once all stripe_heads have been collected,
979 * no IO will be possible. Old stripe heads are freed once their
980 * pages have been transferred over, and the old kmem_cache is
981 * freed when all stripes are done.
982 * 3/ reallocate conf->disks to be suitably bigger. If this fails,
983 * we simply return a failure status - no need to clean anything up.
984 * 4/ allocate new pages for the new slots in the new stripe_heads.
985 * If this fails, we don't bother trying to shrink the
986 * stripe_heads down again, we just leave them as they are.
987 * As each stripe_head is processed the new one is released into
988 * service.
990 * Once step2 is started, we cannot afford to wait for a write,
991 * so we use GFP_NOIO allocations.
993 struct stripe_head *osh, *nsh;
994 LIST_HEAD(newstripes);
995 struct disk_info *ndisks;
997 struct kmem_cache *sc;
1000 if (newsize <= conf->pool_size)
1001 return 0; /* never bother to shrink */
1003 md_allow_write(conf->mddev);
1006 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
1007 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
1012 for (i = conf->max_nr_stripes; i; i--) {
1013 nsh = kmem_cache_alloc(sc, GFP_KERNEL);
1017 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
1019 nsh->raid_conf = conf;
1020 spin_lock_init(&nsh->lock);
1022 list_add(&nsh->lru, &newstripes);
1025 /* didn't get enough, give up */
1026 while (!list_empty(&newstripes)) {
1027 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1028 list_del(&nsh->lru);
1029 kmem_cache_free(sc, nsh);
1031 kmem_cache_destroy(sc);
1034 /* Step 2 - Must use GFP_NOIO now.
1035 * OK, we have enough stripes, start collecting inactive
1036 * stripes and copying them over
1038 list_for_each_entry(nsh, &newstripes, lru) {
1039 spin_lock_irq(&conf->device_lock);
1040 wait_event_lock_irq(conf->wait_for_stripe,
1041 !list_empty(&conf->inactive_list),
1043 unplug_slaves(conf->mddev)
1045 osh = get_free_stripe(conf);
1046 spin_unlock_irq(&conf->device_lock);
1047 atomic_set(&nsh->count, 1);
1048 for(i=0; i<conf->pool_size; i++)
1049 nsh->dev[i].page = osh->dev[i].page;
1050 for( ; i<newsize; i++)
1051 nsh->dev[i].page = NULL;
1052 kmem_cache_free(conf->slab_cache, osh);
1054 kmem_cache_destroy(conf->slab_cache);
1057 * At this point, we are holding all the stripes so the array
1058 * is completely stalled, so now is a good time to resize
1059 * conf->disks.
1060 */
1061 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
1063 for (i=0; i<conf->raid_disks; i++)
1064 ndisks[i] = conf->disks[i];
1066 conf->disks = ndisks;
1070 /* Step 4, return new stripes to service */
1071 while(!list_empty(&newstripes)) {
1072 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1073 list_del_init(&nsh->lru);
1074 for (i=conf->raid_disks; i < newsize; i++)
1075 if (nsh->dev[i].page == NULL) {
1076 struct page *p = alloc_page(GFP_NOIO);
1077 nsh->dev[i].page = p;
1081 release_stripe(nsh);
1083 /* critical section passed, GFP_NOIO no longer needed */
1085 conf->slab_cache = sc;
1086 conf->active_name = 1-conf->active_name;
1087 conf->pool_size = newsize;
1092 static int drop_one_stripe(raid5_conf_t *conf)
1094 struct stripe_head *sh;
1096 spin_lock_irq(&conf->device_lock);
1097 sh = get_free_stripe(conf);
1098 spin_unlock_irq(&conf->device_lock);
1101 BUG_ON(atomic_read(&sh->count));
1102 shrink_buffers(sh, conf->pool_size);
1103 kmem_cache_free(conf->slab_cache, sh);
1104 atomic_dec(&conf->active_stripes);
1108 static void shrink_stripes(raid5_conf_t *conf)
1110 while (drop_one_stripe(conf))
1113 if (conf->slab_cache)
1114 kmem_cache_destroy(conf->slab_cache);
1115 conf->slab_cache = NULL;
1118 static void raid5_end_read_request(struct bio * bi, int error)
1120 struct stripe_head *sh = bi->bi_private;
1121 raid5_conf_t *conf = sh->raid_conf;
1122 int disks = sh->disks, i;
1123 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1124 char b[BDEVNAME_SIZE];
1128 for (i=0 ; i<disks; i++)
1129 if (bi == &sh->dev[i].req)
1132 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1133 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1141 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1142 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1143 rdev = conf->disks[i].rdev;
1144 printk(KERN_INFO "raid5:%s: read error corrected (%lu sectors at %llu on %s)\n",
1145 mdname(conf->mddev), STRIPE_SECTORS,
1146 (unsigned long long)(sh->sector + rdev->data_offset),
1147 bdevname(rdev->bdev, b));
1148 clear_bit(R5_ReadError, &sh->dev[i].flags);
1149 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1151 if (atomic_read(&conf->disks[i].rdev->read_errors))
1152 atomic_set(&conf->disks[i].rdev->read_errors, 0);
1154 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
1156 rdev = conf->disks[i].rdev;
1158 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1159 atomic_inc(&rdev->read_errors);
1160 if (conf->mddev->degraded)
1161 printk(KERN_WARNING "raid5:%s: read error not correctable (sector %llu on %s).\n",
1162 mdname(conf->mddev),
1163 (unsigned long long)(sh->sector + rdev->data_offset),
1165 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
1167 printk(KERN_WARNING "raid5:%s: read error NOT corrected!! (sector %llu on %s).\n",
1168 mdname(conf->mddev),
1169 (unsigned long long)(sh->sector + rdev->data_offset),
1171 else if (atomic_read(&rdev->read_errors)
1172 > conf->max_nr_stripes)
1174 "raid5:%s: Too many read errors, failing device %s.\n",
1175 mdname(conf->mddev), bdn);
1179 set_bit(R5_ReadError, &sh->dev[i].flags);
1181 clear_bit(R5_ReadError, &sh->dev[i].flags);
1182 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1183 md_error(conf->mddev, rdev);
1186 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1187 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1188 set_bit(STRIPE_HANDLE, &sh->state);
1192 static void raid5_end_write_request (struct bio *bi, int error)
1194 struct stripe_head *sh = bi->bi_private;
1195 raid5_conf_t *conf = sh->raid_conf;
1196 int disks = sh->disks, i;
1197 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1199 for (i=0 ; i<disks; i++)
1200 if (bi == &sh->dev[i].req)
1203 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1204 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1212 md_error(conf->mddev, conf->disks[i].rdev);
1214 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1216 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1217 set_bit(STRIPE_HANDLE, &sh->state);
1222 static sector_t compute_blocknr(struct stripe_head *sh, int i);
1224 static void raid5_build_block (struct stripe_head *sh, int i)
1226 struct r5dev *dev = &sh->dev[i];
1228 bio_init(&dev->req);
1229 dev->req.bi_io_vec = &dev->vec;
1231 dev->req.bi_max_vecs++;
1232 dev->vec.bv_page = dev->page;
1233 dev->vec.bv_len = STRIPE_SIZE;
1234 dev->vec.bv_offset = 0;
1236 dev->req.bi_sector = sh->sector;
1237 dev->req.bi_private = sh;
1240 dev->sector = compute_blocknr(sh, i);
1243 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1245 char b[BDEVNAME_SIZE];
1246 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1247 pr_debug("raid5: error called\n");
1249 if (!test_bit(Faulty, &rdev->flags)) {
1250 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1251 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1252 unsigned long flags;
1253 spin_lock_irqsave(&conf->device_lock, flags);
1255 spin_unlock_irqrestore(&conf->device_lock, flags);
1257 * if recovery was running, make sure it aborts.
1259 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
1261 set_bit(Faulty, &rdev->flags);
1263 "raid5: Disk failure on %s, disabling device."
1264 " Operation continuing on %d devices\n",
1265 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
1270 * Input: a 'big' sector number,
1271 * Output: index of the data and parity disk, and the sector # in them.
1273 static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
1274 unsigned int data_disks, unsigned int * dd_idx,
1275 unsigned int * pd_idx, raid5_conf_t *conf)
1278 unsigned long chunk_number;
1279 unsigned int chunk_offset;
1280 sector_t new_sector;
1281 int sectors_per_chunk = conf->chunk_size >> 9;
1283 /* First compute the information on this sector */
1286 * Compute the chunk number and the sector offset inside the chunk
1288 chunk_offset = sector_div(r_sector, sectors_per_chunk);
1289 chunk_number = r_sector;
1290 BUG_ON(r_sector != chunk_number);
1293 * Compute the stripe number
1295 stripe = chunk_number / data_disks;
1298 * Compute the data disk and parity disk indexes inside the stripe
1300 *dd_idx = chunk_number % data_disks;
1303 * Select the parity disk based on the user selected algorithm.
1305 switch(conf->level) {
1307 *pd_idx = data_disks;
1310 switch (conf->algorithm) {
1311 case ALGORITHM_LEFT_ASYMMETRIC:
1312 *pd_idx = data_disks - stripe % raid_disks;
1313 if (*dd_idx >= *pd_idx)
1316 case ALGORITHM_RIGHT_ASYMMETRIC:
1317 *pd_idx = stripe % raid_disks;
1318 if (*dd_idx >= *pd_idx)
1321 case ALGORITHM_LEFT_SYMMETRIC:
1322 *pd_idx = data_disks - stripe % raid_disks;
1323 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
1325 case ALGORITHM_RIGHT_SYMMETRIC:
1326 *pd_idx = stripe % raid_disks;
1327 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
1330 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1336 /**** FIX THIS ****/
1337 switch (conf->algorithm) {
1338 case ALGORITHM_LEFT_ASYMMETRIC:
1339 *pd_idx = raid_disks - 1 - (stripe % raid_disks);
1340 if (*pd_idx == raid_disks-1)
1341 (*dd_idx)++; /* Q D D D P */
1342 else if (*dd_idx >= *pd_idx)
1343 (*dd_idx) += 2; /* D D P Q D */
1345 case ALGORITHM_RIGHT_ASYMMETRIC:
1346 *pd_idx = stripe % raid_disks;
1347 if (*pd_idx == raid_disks-1)
1348 (*dd_idx)++; /* Q D D D P */
1349 else if (*dd_idx >= *pd_idx)
1350 (*dd_idx) += 2; /* D D P Q D */
1352 case ALGORITHM_LEFT_SYMMETRIC:
1353 *pd_idx = raid_disks - 1 - (stripe % raid_disks);
1354 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
1356 case ALGORITHM_RIGHT_SYMMETRIC:
1357 *pd_idx = stripe % raid_disks;
1358 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
1361 printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
1368 * Finally, compute the new sector number
1370 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
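/*
 * Worked example for the RAID-5 case (numbers chosen arbitrarily): with
 * raid_disks = 4, data_disks = 3, 64KiB chunks (sectors_per_chunk = 128)
 * and ALGORITHM_LEFT_ASYMMETRIC, r_sector = 1000 gives chunk_offset = 104
 * and chunk_number = 7, so stripe = 7 / 3 = 2 and dd_idx = 7 % 3 = 1.
 * Then pd_idx = data_disks - (stripe % raid_disks) = 3 - 2 = 1; since
 * dd_idx >= pd_idx it is bumped to 2, and new_sector = 2 * 128 + 104 = 360
 * on every member device.
 */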
1375 static sector_t compute_blocknr(struct stripe_head *sh, int i)
1377 raid5_conf_t *conf = sh->raid_conf;
1378 int raid_disks = sh->disks;
1379 int data_disks = raid_disks - conf->max_degraded;
1380 sector_t new_sector = sh->sector, check;
1381 int sectors_per_chunk = conf->chunk_size >> 9;
1384 int chunk_number, dummy1, dummy2, dd_idx = i;
1388 chunk_offset = sector_div(new_sector, sectors_per_chunk);
1389 stripe = new_sector;
1390 BUG_ON(new_sector != stripe);
1392 if (i == sh->pd_idx)
1394 switch(conf->level) {
1397 switch (conf->algorithm) {
1398 case ALGORITHM_LEFT_ASYMMETRIC:
1399 case ALGORITHM_RIGHT_ASYMMETRIC:
1403 case ALGORITHM_LEFT_SYMMETRIC:
1404 case ALGORITHM_RIGHT_SYMMETRIC:
1407 i -= (sh->pd_idx + 1);
1410 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1415 if (i == raid6_next_disk(sh->pd_idx, raid_disks))
1416 return 0; /* It is the Q disk */
1417 switch (conf->algorithm) {
1418 case ALGORITHM_LEFT_ASYMMETRIC:
1419 case ALGORITHM_RIGHT_ASYMMETRIC:
1420 if (sh->pd_idx == raid_disks-1)
1421 i--; /* Q D D D P */
1422 else if (i > sh->pd_idx)
1423 i -= 2; /* D D P Q D */
1425 case ALGORITHM_LEFT_SYMMETRIC:
1426 case ALGORITHM_RIGHT_SYMMETRIC:
1427 if (sh->pd_idx == raid_disks-1)
1428 i--; /* Q D D D P */
1433 i -= (sh->pd_idx + 2);
1437 printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
1443 chunk_number = stripe * data_disks + i;
1444 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
1446 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
1447 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
1448 printk(KERN_ERR "compute_blocknr: map not correct\n");
1457 * Copy data between a page in the stripe cache, and one or more bion
1458 * The page could align with the middle of the bio, or there could be
1459 * several bion, each with several bio_vecs, which cover part of the page
1460 * Multiple bion are linked together on bi_next. There may be extras
1461 * at the end of this list. We ignore them.
1463 static void copy_data(int frombio, struct bio *bio,
1467 char *pa = page_address(page);
1468 struct bio_vec *bvl;
1472 if (bio->bi_sector >= sector)
1473 page_offset = (signed)(bio->bi_sector - sector) * 512;
1475 page_offset = (signed)(sector - bio->bi_sector) * -512;
1476 bio_for_each_segment(bvl, bio, i) {
1477 int len = bio_iovec_idx(bio,i)->bv_len;
1481 if (page_offset < 0) {
1482 b_offset = -page_offset;
1483 page_offset += b_offset;
1487 if (len > 0 && page_offset + len > STRIPE_SIZE)
1488 clen = STRIPE_SIZE - page_offset;
1492 char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
1494 memcpy(pa+page_offset, ba+b_offset, clen);
1496 memcpy(ba+b_offset, pa+page_offset, clen);
1497 __bio_kunmap_atomic(ba, KM_USER0);
1499 if (clen < len) /* hit end of page */
1505 #define check_xor() do { \
1506 if (count == MAX_XOR_BLOCKS) { \
1507 xor_blocks(count, STRIPE_SIZE, dest, ptr);\
1512 static void compute_parity6(struct stripe_head *sh, int method)
1514 raid6_conf_t *conf = sh->raid_conf;
1515 int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
1517 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
1520 qd_idx = raid6_next_disk(pd_idx, disks);
1521 d0_idx = raid6_next_disk(qd_idx, disks);
1523 pr_debug("compute_parity, stripe %llu, method %d\n",
1524 (unsigned long long)sh->sector, method);
1527 case READ_MODIFY_WRITE:
1528 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */
1529 case RECONSTRUCT_WRITE:
1530 for (i= disks; i-- ;)
1531 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
1532 chosen = sh->dev[i].towrite;
1533 sh->dev[i].towrite = NULL;
1535 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1536 wake_up(&conf->wait_for_overlap);
1538 BUG_ON(sh->dev[i].written);
1539 sh->dev[i].written = chosen;
1543 BUG(); /* Not implemented yet */
1546 for (i = disks; i--;)
1547 if (sh->dev[i].written) {
1548 sector_t sector = sh->dev[i].sector;
1549 struct bio *wbi = sh->dev[i].written;
1550 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
1551 copy_data(1, wbi, sh->dev[i].page, sector);
1552 wbi = r5_next_bio(wbi, sector);
1555 set_bit(R5_LOCKED, &sh->dev[i].flags);
1556 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1560 // case RECONSTRUCT_WRITE:
1561 // case CHECK_PARITY:
1562 // case UPDATE_PARITY:
1563 /* Note that unlike RAID-5, the ordering of the disks matters greatly. */
1564 /* FIX: Is this ordering of drives even remotely optimal? */
1568 ptrs[count++] = page_address(sh->dev[i].page);
1569 if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
1570 printk("block %d/%d not uptodate on parity calc\n", i,count);
1571 i = raid6_next_disk(i, disks);
1572 } while ( i != d0_idx );
1576 raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);
1579 case RECONSTRUCT_WRITE:
1580 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
1581 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
1582 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
1583 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags);
1586 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
1587 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
1593 /* Compute one missing block */
1594 static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
1596 int i, count, disks = sh->disks;
1597 void *ptr[MAX_XOR_BLOCKS], *dest, *p;
1598 int pd_idx = sh->pd_idx;
1599 int qd_idx = raid6_next_disk(pd_idx, disks);
1601 pr_debug("compute_block_1, stripe %llu, idx %d\n",
1602 (unsigned long long)sh->sector, dd_idx);
1604 if ( dd_idx == qd_idx ) {
1605 /* We're actually computing the Q drive */
1606 compute_parity6(sh, UPDATE_PARITY);
1608 dest = page_address(sh->dev[dd_idx].page);
1609 if (!nozero) memset(dest, 0, STRIPE_SIZE);
1611 for (i = disks ; i--; ) {
1612 if (i == dd_idx || i == qd_idx)
1614 p = page_address(sh->dev[i].page);
1615 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
1618 printk("compute_block() %d, stripe %llu, %d"
1619 " not present\n", dd_idx,
1620 (unsigned long long)sh->sector, i);
1625 xor_blocks(count, STRIPE_SIZE, dest, ptr);
1626 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
1627 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
1631 /* Compute two missing blocks */
1632 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
1634 int i, count, disks = sh->disks;
1635 int pd_idx = sh->pd_idx;
1636 int qd_idx = raid6_next_disk(pd_idx, disks);
1637 int d0_idx = raid6_next_disk(qd_idx, disks);
1640 /* faila and failb are disk numbers relative to d0_idx */
1641 /* pd_idx become disks-2 and qd_idx become disks-1 */
1642 faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
1643 failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;
1645 BUG_ON(faila == failb);
1646 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
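/*
 * For illustration (numbers chosen arbitrarily): with disks = 6, pd_idx = 2,
 * qd_idx = 3 and d0_idx = 4, failures at dd_idx1 = 5 and dd_idx2 = 0 map to
 * faila = 5 - 4 = 1 and failb = 0 + (6 - 4) = 2 in d0-relative numbering,
 * where P sits at disks-2 = 4 and Q at disks-1 = 5; failb is neither, so
 * this is the D+D (two data disks) case handled below.
 */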
1648 pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
1649 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
1651 if ( failb == disks-1 ) {
1652 /* Q disk is one of the missing disks */
1653 if ( faila == disks-2 ) {
1654 /* Missing P+Q, just recompute */
1655 compute_parity6(sh, UPDATE_PARITY);
1658 /* We're missing D+Q; recompute D from P */
1659 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
1660 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
1665 /* We're missing D+P or D+D; build pointer table */
1667 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
1673 ptrs[count++] = page_address(sh->dev[i].page);
1674 i = raid6_next_disk(i, disks);
1675 if (i != dd_idx1 && i != dd_idx2 &&
1676 !test_bit(R5_UPTODATE, &sh->dev[i].flags))
1677 printk("compute_2 with missing block %d/%d\n", count, i);
1678 } while ( i != d0_idx );
1680 if ( failb == disks-2 ) {
1681 /* We're missing D+P. */
1682 raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
1684 /* We're missing D+D. */
1685 raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
1688 /* Both the above update both missing blocks */
1689 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
1690 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
1695 handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
1697 int i, pd_idx = sh->pd_idx, disks = sh->disks;
1701 /* if we are not expanding this is a proper write request, and
1702 * there will be bios with new data to be drained into the
1703 * stripe cache
1704 */
1706 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
1710 set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
1713 for (i = disks; i--; ) {
1714 struct r5dev *dev = &sh->dev[i];
1717 set_bit(R5_LOCKED, &dev->flags);
1719 clear_bit(R5_UPTODATE, &dev->flags);
1724 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
1725 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
1727 set_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
1728 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
1729 set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
1733 for (i = disks; i--; ) {
1734 struct r5dev *dev = &sh->dev[i];
1738 /* For a read-modify write there may be blocks that are
1739 * locked for reading while others are ready to be
1740 * written so we distinguish these blocks by the
1741 * R5_Wantprexor bit
1742 */
1744 (test_bit(R5_UPTODATE, &dev->flags) ||
1745 test_bit(R5_Wantcompute, &dev->flags))) {
1746 set_bit(R5_Wantprexor, &dev->flags);
1747 set_bit(R5_LOCKED, &dev->flags);
1748 clear_bit(R5_UPTODATE, &dev->flags);
1754 /* keep the parity disk locked while asynchronous operations
1755 * are in flight
1756 */
1757 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
1758 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
1761 pr_debug("%s: stripe %llu locked: %d pending: %lx\n",
1762 __FUNCTION__, (unsigned long long)sh->sector,
1763 locked, sh->ops.pending);
1769 * Each stripe/dev can have one or more bion attached.
1770 * toread/towrite point to the first in a chain.
1771 * The bi_next chain must be in order.
1773 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
1776 raid5_conf_t *conf = sh->raid_conf;
1779 pr_debug("adding bh b#%llu to stripe s#%llu\n",
1780 (unsigned long long)bi->bi_sector,
1781 (unsigned long long)sh->sector);
1784 spin_lock(&sh->lock);
1785 spin_lock_irq(&conf->device_lock);
1787 bip = &sh->dev[dd_idx].towrite;
1788 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
1791 bip = &sh->dev[dd_idx].toread;
1792 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
1793 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
1795 bip = & (*bip)->bi_next;
1797 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
1800 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
1804 bi->bi_phys_segments ++;
1805 spin_unlock_irq(&conf->device_lock);
1806 spin_unlock(&sh->lock);
1808 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
1809 (unsigned long long)bi->bi_sector,
1810 (unsigned long long)sh->sector, dd_idx);
1812 if (conf->mddev->bitmap && firstwrite) {
1813 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
1815 sh->bm_seq = conf->seq_flush+1;
1816 set_bit(STRIPE_BIT_DELAY, &sh->state);
1820 /* check if page is covered */
1821 sector_t sector = sh->dev[dd_idx].sector;
1822 for (bi=sh->dev[dd_idx].towrite;
1823 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
1824 bi && bi->bi_sector <= sector;
1825 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
1826 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
1827 sector = bi->bi_sector + (bi->bi_size>>9);
1829 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
1830 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
1835 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
1836 spin_unlock_irq(&conf->device_lock);
1837 spin_unlock(&sh->lock);
1841 static void end_reshape(raid5_conf_t *conf);
1843 static int page_is_zero(struct page *p)
1845 char *a = page_address(p);
1846 return ((*(u32*)a) == 0 &&
1847 memcmp(a, a+4, STRIPE_SIZE-4)==0);
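/*
 * The idiom above: if the first 32-bit word is zero and memcmp(a, a+4,
 * STRIPE_SIZE-4) reports that every byte equals the byte four positions
 * before it, then by induction the whole page is zero.
 */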
1850 static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
1852 int sectors_per_chunk = conf->chunk_size >> 9;
1854 int chunk_offset = sector_div(stripe, sectors_per_chunk);
1856 raid5_compute_sector(stripe * (disks - conf->max_degraded)
1857 *sectors_per_chunk + chunk_offset,
1858 disks, disks - conf->max_degraded,
1859 &dd_idx, &pd_idx, conf);
1864 handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
1865 struct stripe_head_state *s, int disks,
1866 struct bio **return_bi)
1869 for (i = disks; i--; ) {
1873 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1876 rdev = rcu_dereference(conf->disks[i].rdev);
1877 if (rdev && test_bit(In_sync, &rdev->flags))
1878 /* multiple read failures in one stripe */
1879 md_error(conf->mddev, rdev);
1882 spin_lock_irq(&conf->device_lock);
1883 /* fail all writes first */
1884 bi = sh->dev[i].towrite;
1885 sh->dev[i].towrite = NULL;
1891 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1892 wake_up(&conf->wait_for_overlap);
1894 while (bi && bi->bi_sector <
1895 sh->dev[i].sector + STRIPE_SECTORS) {
1896 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1897 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1898 if (--bi->bi_phys_segments == 0) {
1899 md_write_end(conf->mddev);
1900 bi->bi_next = *return_bi;
1905 /* and fail all 'written' */
1906 bi = sh->dev[i].written;
1907 sh->dev[i].written = NULL;
1908 if (bi) bitmap_end = 1;
1909 while (bi && bi->bi_sector <
1910 sh->dev[i].sector + STRIPE_SECTORS) {
1911 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1912 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1913 if (--bi->bi_phys_segments == 0) {
1914 md_write_end(conf->mddev);
1915 bi->bi_next = *return_bi;
1921 /* fail any reads if this device is non-operational and
1922 * the data has not reached the cache yet.
1924 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
1925 (!test_bit(R5_Insync, &sh->dev[i].flags) ||
1926 test_bit(R5_ReadError, &sh->dev[i].flags))) {
1927 bi = sh->dev[i].toread;
1928 sh->dev[i].toread = NULL;
1929 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1930 wake_up(&conf->wait_for_overlap);
1931 if (bi) s->to_read--;
1932 while (bi && bi->bi_sector <
1933 sh->dev[i].sector + STRIPE_SECTORS) {
1934 struct bio *nextbi =
1935 r5_next_bio(bi, sh->dev[i].sector);
1936 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1937 if (--bi->bi_phys_segments == 0) {
1938 bi->bi_next = *return_bi;
1944 spin_unlock_irq(&conf->device_lock);
1946 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1947 STRIPE_SECTORS, 0, 0);
1952 /* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks
1953 * to process
1954 */
1955 static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
1956 struct stripe_head_state *s, int disk_idx, int disks)
1958 struct r5dev *dev = &sh->dev[disk_idx];
1959 struct r5dev *failed_dev = &sh->dev[s->failed_num];
1961 /* don't schedule compute operations or reads on the parity block while
1962 * a check is in flight
1964 if ((disk_idx == sh->pd_idx) &&
1965 test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
1968 /* is the data in this block needed, and can we get it? */
1969 if (!test_bit(R5_LOCKED, &dev->flags) &&
1970 !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread ||
1971 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
1972 s->syncing || s->expanding || (s->failed &&
1973 (failed_dev->toread || (failed_dev->towrite &&
1974 !test_bit(R5_OVERWRITE, &failed_dev->flags)
1976 /* 1/ We would like to get this block, possibly by computing it,
1977 * but we might not be able to.
1979 * 2/ Since parity check operations potentially make the parity
1980 * block !uptodate it will need to be refreshed before any
1981 * compute operations on data disks are scheduled.
1983 * 3/ We hold off parity block re-reads until check operations
1984 * uptodate the parity block.
1985 */
1986 if ((s->uptodate == disks - 1) &&
1987 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
1988 set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
1989 set_bit(R5_Wantcompute, &dev->flags);
1990 sh->ops.target = disk_idx;
1993 /* Careful: from this point on 'uptodate' is in the eye
1994 * of raid5_run_ops which services 'compute' operations
1995 * before writes. R5_Wantcompute flags a block that will
1996 * be R5_UPTODATE by the time it is needed for a
1997 * subsequent operation.
2000 return 0; /* uptodate + compute == disks */
2001 } else if ((s->uptodate < disks - 1) &&
2002 test_bit(R5_Insync, &dev->flags)) {
2003 /* Note: we hold off compute operations while checks are
2004 * in flight, but we still prefer 'compute' over 'read'
2005 * hence we only read if (uptodate < disks-1)
2007 set_bit(R5_LOCKED, &dev->flags);
2008 set_bit(R5_Wantread, &dev->flags);
2009 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2012 pr_debug("Reading block %d (sync=%d)\n", disk_idx,
2020 static void handle_issuing_new_read_requests5(struct stripe_head *sh,
2021 struct stripe_head_state *s, int disks)
2025 /* Clear completed compute operations. Parity recovery
2026 * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled
2027 * later on in this routine
2029 if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
2030 !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
2031 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
2032 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
2033 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
2036 /* look for blocks to read/compute, skip this if a compute
2037 * is already in flight, or if the stripe contents are in the
2038 * midst of changing due to a write
2040 if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
2041 !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) &&
2042 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
2043 for (i = disks; i--; )
2044 if (__handle_issuing_new_read_requests5(
2045 sh, s, i, disks) == 0)
2048 set_bit(STRIPE_HANDLE, &sh->state);
2051 static void handle_issuing_new_read_requests6(struct stripe_head *sh,
2052 struct stripe_head_state *s, struct r6_state *r6s,
2056 for (i = disks; i--; ) {
2057 struct r5dev *dev = &sh->dev[i];
2058 if (!test_bit(R5_LOCKED, &dev->flags) &&
2059 !test_bit(R5_UPTODATE, &dev->flags) &&
2060 (dev->toread || (dev->towrite &&
2061 !test_bit(R5_OVERWRITE, &dev->flags)) ||
2062 s->syncing || s->expanding ||
2064 (sh->dev[r6s->failed_num[0]].toread ||
2067 (sh->dev[r6s->failed_num[1]].toread ||
2069 /* we would like to get this block, possibly
2070 * by computing it, but we might not be able to
2072 if (s->uptodate == disks-1) {
2073 pr_debug("Computing stripe %llu block %d\n",
2074 (unsigned long long)sh->sector, i);
2075 compute_block_1(sh, i, 0);
2077 } else if ( s->uptodate == disks-2 && s->failed >= 2 ) {
2078 /* Computing 2-failure is *very* expensive; only
2079 * do it if failed >= 2
2082 for (other = disks; other--; ) {
2085 if (!test_bit(R5_UPTODATE,
2086 &sh->dev[other].flags))
2090 pr_debug("Computing stripe %llu blocks %d,%d\n",
2091 (unsigned long long)sh->sector,
2093 compute_block_2(sh, i, other);
2095 } else if (test_bit(R5_Insync, &dev->flags)) {
2096 set_bit(R5_LOCKED, &dev->flags);
2097 set_bit(R5_Wantread, &dev->flags);
2099 pr_debug("Reading block %d (sync=%d)\n",
2104 set_bit(STRIPE_HANDLE, &sh->state);
2108 /* handle_completed_write_requests
2109 * any written block on an uptodate or failed drive can be returned.
2110 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2111 * never LOCKED, so we don't need to test 'failed' directly.
2113 static void handle_completed_write_requests(raid5_conf_t *conf,
2114 struct stripe_head *sh, int disks, struct bio **return_bi)
2119 for (i = disks; i--; )
2120 if (sh->dev[i].written) {
2122 if (!test_bit(R5_LOCKED, &dev->flags) &&
2123 test_bit(R5_UPTODATE, &dev->flags)) {
2124 /* We can return any write requests */
2125 struct bio *wbi, *wbi2;
2127 pr_debug("Return write for disc %d\n", i);
2128 spin_lock_irq(&conf->device_lock);
2130 dev->written = NULL;
2131 while (wbi && wbi->bi_sector <
2132 dev->sector + STRIPE_SECTORS) {
2133 wbi2 = r5_next_bio(wbi, dev->sector);
2134 if (--wbi->bi_phys_segments == 0) {
2135 md_write_end(conf->mddev);
2136 wbi->bi_next = *return_bi;
2141 if (dev->towrite == NULL)
2143 spin_unlock_irq(&conf->device_lock);
2145 bitmap_endwrite(conf->mddev->bitmap,
2148 !test_bit(STRIPE_DEGRADED, &sh->state),
2154 static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
2155 struct stripe_head *sh, struct stripe_head_state *s, int disks)
2157 int rmw = 0, rcw = 0, i;
2158 for (i = disks; i--; ) {
2159 /* would I have to read this buffer for read_modify_write */
2160 struct r5dev *dev = &sh->dev[i];
2161 if ((dev->towrite || i == sh->pd_idx) &&
2162 !test_bit(R5_LOCKED, &dev->flags) &&
2163 !(test_bit(R5_UPTODATE, &dev->flags) ||
2164 test_bit(R5_Wantcompute, &dev->flags))) {
2165 if (test_bit(R5_Insync, &dev->flags))
2168 rmw += 2*disks; /* cannot read it */
2170 /* Would I have to read this buffer for reconstruct_write */
2171 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2172 !test_bit(R5_LOCKED, &dev->flags) &&
2173 !(test_bit(R5_UPTODATE, &dev->flags) ||
2174 test_bit(R5_Wantcompute, &dev->flags))) {
2175 if (test_bit(R5_Insync, &dev->flags)) rcw++;
2180 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2181 (unsigned long long)sh->sector, rmw, rcw);
2182 set_bit(STRIPE_HANDLE, &sh->state);
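/*
 * Illustration of the choice below: on a 5-disk array where one data block
 * is completely rewritten and nothing is cached, read-modify-write needs the
 * old data block plus the old parity (rmw = 2) while reconstruct-write needs
 * the three untouched data blocks (rcw = 3), so the rmw path is preferred;
 * a full-stripe write leaves rcw = 0, so no pre-reads are issued and
 * reconstruct-write starts immediately.
 */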
2183 if (rmw < rcw && rmw > 0)
2184 /* prefer read-modify-write, but need to get some data */
2185 for (i = disks; i--; ) {
2186 struct r5dev *dev = &sh->dev[i];
2187 if ((dev->towrite || i == sh->pd_idx) &&
2188 !test_bit(R5_LOCKED, &dev->flags) &&
2189 !(test_bit(R5_UPTODATE, &dev->flags) ||
2190 test_bit(R5_Wantcompute, &dev->flags)) &&
2191 test_bit(R5_Insync, &dev->flags)) {
2193 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2194 pr_debug("Read_old block "
2195 "%d for r-m-w\n", i);
2196 set_bit(R5_LOCKED, &dev->flags);
2197 set_bit(R5_Wantread, &dev->flags);
2198 if (!test_and_set_bit(
2199 STRIPE_OP_IO, &sh->ops.pending))
2203 set_bit(STRIPE_DELAYED, &sh->state);
2204 set_bit(STRIPE_HANDLE, &sh->state);
2208 if (rcw <= rmw && rcw > 0)
2209 /* want reconstruct write, but need to get some data */
2210 for (i = disks; i--; ) {
2211 struct r5dev *dev = &sh->dev[i];
2212 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2214 !test_bit(R5_LOCKED, &dev->flags) &&
2215 !(test_bit(R5_UPTODATE, &dev->flags) ||
2216 test_bit(R5_Wantcompute, &dev->flags)) &&
2217 test_bit(R5_Insync, &dev->flags)) {
2219 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2220 pr_debug("Read_old block "
2221 "%d for Reconstruct\n", i);
2222 set_bit(R5_LOCKED, &dev->flags);
2223 set_bit(R5_Wantread, &dev->flags);
2224 if (!test_and_set_bit(
2225 STRIPE_OP_IO, &sh->ops.pending))
2229 set_bit(STRIPE_DELAYED, &sh->state);
2230 set_bit(STRIPE_HANDLE, &sh->state);
2234 /* now if nothing is locked, and if we have enough data,
2235 * we can start a write request
2237 /* since handle_stripe can be called at any time we need to handle the
2238 * case where a compute block operation has been submitted and then a
2239 * subsequent call wants to start a write request. raid5_run_ops only
2240 * handles the case where compute block and postxor are requested
2241 * simultaneously. If this is not the case then new writes need to be
2242 * held off until the compute completes.
2244 if ((s->req_compute ||
2245 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) &&
2246 (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2247 !test_bit(STRIPE_BIT_DELAY, &sh->state)))
2248 s->locked += handle_write_operations5(sh, rcw == 0, 0);
2251 static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
2252 struct stripe_head *sh, struct stripe_head_state *s,
2253 struct r6_state *r6s, int disks)
2255 int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i;
2256 int qd_idx = r6s->qd_idx;
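/*
 * Unlike the raid5 path there is no read-modify-write option here: the
 * stripe is brought fully up to date and P/Q are regenerated with a
 * reconstruct-write.  Blocks that sit on failed devices cannot be read,
 * so they are counted in must_compute and rebuilt from the surviving
 * devices before the parity is recalculated.
 */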
2257 for (i = disks; i--; ) {
2258 struct r5dev *dev = &sh->dev[i];
2259 /* Would I have to read this buffer for reconstruct_write */
2260 if (!test_bit(R5_OVERWRITE, &dev->flags)
2261 && i != pd_idx && i != qd_idx
2262 && (!test_bit(R5_LOCKED, &dev->flags)
2264 !test_bit(R5_UPTODATE, &dev->flags)) {
2265 if (test_bit(R5_Insync, &dev->flags)) rcw++;
2267 pr_debug("raid6: must_compute: "
2268 "disk %d flags=%#lx\n", i, dev->flags);
2273 pr_debug("for sector %llu, rcw=%d, must_compute=%d\n",
2274 (unsigned long long)sh->sector, rcw, must_compute);
2275 set_bit(STRIPE_HANDLE, &sh->state);
2278 /* want reconstruct write, but need to get some data */
2279 for (i = disks; i--; ) {
2280 struct r5dev *dev = &sh->dev[i];
2281 if (!test_bit(R5_OVERWRITE, &dev->flags)
2282 && !(s->failed == 0 && (i == pd_idx || i == qd_idx))
2283 && !test_bit(R5_LOCKED, &dev->flags) &&
2284 !test_bit(R5_UPTODATE, &dev->flags) &&
2285 test_bit(R5_Insync, &dev->flags)) {
if (
2287 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2288 pr_debug("Read_old stripe %llu "
2289 "block %d for Reconstruct\n",
2290 (unsigned long long)sh->sector, i);
2291 set_bit(R5_LOCKED, &dev->flags);
2292 set_bit(R5_Wantread, &dev->flags);
2295 pr_debug("Request delayed stripe %llu "
2296 "block %d for Reconstruct\n",
2297 (unsigned long long)sh->sector, i);
2298 set_bit(STRIPE_DELAYED, &sh->state);
2299 set_bit(STRIPE_HANDLE, &sh->state);
2303 /* now if nothing is locked, and if we have enough data, we can start a
 * write request
 */
2306 if (s->locked == 0 && rcw == 0 &&
2307 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
2308 if (must_compute > 0) {
2309 /* We have failed blocks and need to compute them */
2310 switch (s->failed) {
2314 compute_block_1(sh, r6s->failed_num[0], 0);
2317 compute_block_2(sh, r6s->failed_num[0],
2318 r6s->failed_num[1]);
2320 default: /* This request should have been failed? */
2325 pr_debug("Computing parity for stripe %llu\n",
2326 (unsigned long long)sh->sector);
2327 compute_parity6(sh, RECONSTRUCT_WRITE);
2328 /* now every locked buffer is ready to be written */
2329 for (i = disks; i--; )
2330 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
2331 pr_debug("Writing stripe %llu block %d\n",
2332 (unsigned long long)sh->sector, i);
2334 set_bit(R5_Wantwrite, &sh->dev[i].flags);
2336 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
2337 set_bit(STRIPE_INSYNC, &sh->state);
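/* This stripe no longer needs pre-reads; once the number of stripes
 * still waiting on pre-reads drops below IO_THRESHOLD, wake the array's
 * management thread so delayed stripes can be activated.
 */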
2339 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2340 atomic_dec(&conf->preread_active_stripes);
2341 if (atomic_read(&conf->preread_active_stripes) <
IO_THRESHOLD)
2343 md_wakeup_thread(conf->mddev->thread);
2348 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2349 struct stripe_head_state *s, int disks)
2351 set_bit(STRIPE_HANDLE, &sh->state);
2352 /* Take one of the following actions:
2353 * 1/ start a check parity operation if (uptodate == disks)
2354 * 2/ finish a check parity operation and act on the result
2355 * 3/ skip to the writeback section if we previously
2356 * initiated a recovery operation
 */
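/* The check request moves through sh->ops.pending -> ack -> complete as
 * raid5_run_ops issues it and its callback finishes; once the result in
 * sh->ops.zero_sum_result has been acted on, all three bits are cleared
 * again below.
 */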
2358 if (s->failed == 0 &&
2359 !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
2360 if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
2361 BUG_ON(s->uptodate != disks);
2362 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
2366 test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) {
2367 clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
2368 clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
2370 if (sh->ops.zero_sum_result == 0)
2371 /* parity is correct (on disc,
2372 * not in buffer any more)
2374 set_bit(STRIPE_INSYNC, &sh->state);
else {
2376 conf->mddev->resync_mismatches +=
STRIPE_SECTORS;
if (test_bit(
2379 MD_RECOVERY_CHECK, &conf->mddev->recovery))
2380 /* don't try to repair!! */
2381 set_bit(STRIPE_INSYNC, &sh->state);
else {
2383 set_bit(STRIPE_OP_COMPUTE_BLK,
&sh->ops.pending);
2385 set_bit(STRIPE_OP_MOD_REPAIR_PD,
&sh->ops.pending);
2387 set_bit(R5_Wantcompute,
2388 &sh->dev[sh->pd_idx].flags);
2389 sh->ops.target = sh->pd_idx;
2397 /* check if we can clear a parity disk reconstruct */
2398 if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
2399 test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
2401 clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending);
2402 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
2403 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
2404 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
2407 /* Wait for check parity and compute block operations to complete
 * before write-back
 */
2410 if (!test_bit(STRIPE_INSYNC, &sh->state) &&
2411 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) &&
2412 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) {
struct r5dev *dev;
2414 /* either failed parity check, or recovery is happening */
if (s->failed == 0)
2416 s->failed_num = sh->pd_idx;
2417 dev = &sh->dev[s->failed_num];
2418 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2419 BUG_ON(s->uptodate != disks);
2421 set_bit(R5_LOCKED, &dev->flags);
2422 set_bit(R5_Wantwrite, &dev->flags);
2423 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2426 clear_bit(STRIPE_DEGRADED, &sh->state);
2428 set_bit(STRIPE_INSYNC, &sh->state);
2433 static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2434 struct stripe_head_state *s,
2435 struct r6_state *r6s, struct page *tmp_page,
2438 int update_p = 0, update_q = 0;
2440 int pd_idx = sh->pd_idx;
2441 int qd_idx = r6s->qd_idx;
2443 set_bit(STRIPE_HANDLE, &sh->state);
2445 BUG_ON(s->failed > 2);
2446 BUG_ON(s->uptodate < disks);
2447 /* Want to check and possibly repair P and Q.
2448 * However there could be one 'failed' device, in which
2449 * case we can only check one of them, possibly using the
2450 * other to generate missing data
2453 /* If !tmp_page, we cannot do the calculations,
2454 * but as we have set STRIPE_HANDLE, we will soon be called
2455 * by handle_stripe with a tmp_page - just wait until then.
2458 if (s->failed == r6s->q_failed) {
2459 /* The only possible failed device holds 'Q', so it
2460 * makes sense to check P (If anything else were failed,
2461 * we would have used P to recreate it).
2463 compute_block_1(sh, pd_idx, 1);
2464 if (!page_is_zero(sh->dev[pd_idx].page)) {
2465 compute_block_1(sh, pd_idx, 0);
2469 if (!r6s->q_failed && s->failed < 2) {
2470 /* q is not failed, and we didn't use it to generate
2471 * anything, so it makes sense to check it
2473 memcpy(page_address(tmp_page),
2474 page_address(sh->dev[qd_idx].page),
2476 compute_parity6(sh, UPDATE_PARITY);
2477 if (memcmp(page_address(tmp_page),
2478 page_address(sh->dev[qd_idx].page),
2479 STRIPE_SIZE) != 0) {
2480 clear_bit(STRIPE_INSYNC, &sh->state);
2484 if (update_p || update_q) {
2485 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2486 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2487 /* don't try to repair!! */
2488 update_p = update_q = 0;
2491 /* now write out any block on a failed drive,
2492 * or P or Q if they need it
2495 if (s->failed == 2) {
2496 dev = &sh->dev[r6s->failed_num[1]];
2498 set_bit(R5_LOCKED, &dev->flags);
2499 set_bit(R5_Wantwrite, &dev->flags);
2501 if (s->failed >= 1) {
2502 dev = &sh->dev[r6s->failed_num[0]];
2504 set_bit(R5_LOCKED, &dev->flags);
2505 set_bit(R5_Wantwrite, &dev->flags);
2509 dev = &sh->dev[pd_idx];
2511 set_bit(R5_LOCKED, &dev->flags);
2512 set_bit(R5_Wantwrite, &dev->flags);
2515 dev = &sh->dev[qd_idx];
2517 set_bit(R5_LOCKED, &dev->flags);
2518 set_bit(R5_Wantwrite, &dev->flags);
2520 clear_bit(STRIPE_DEGRADED, &sh->state);
2522 set_bit(STRIPE_INSYNC, &sh->state);
2526 static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2527 struct r6_state *r6s)
2531 /* We have read all the blocks in this stripe and now we need to
2532 * copy some of them into a target stripe for expand.
 */
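/* Each source block is converted to its logical block number in the old
 * geometry (compute_blocknr) and then mapped to a sector, device index
 * and parity index in the expanded geometry (raid5_compute_sector)
 * before being copied into the matching target stripe.
 */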
2534 struct dma_async_tx_descriptor *tx = NULL;
2535 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2536 for (i = 0; i < sh->disks; i++)
2537 if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
2538 int dd_idx, pd_idx, j;
2539 struct stripe_head *sh2;
2541 sector_t bn = compute_blocknr(sh, i);
2542 sector_t s = raid5_compute_sector(bn, conf->raid_disks,
conf->raid_disks -
2544 conf->max_degraded, &dd_idx,
&pd_idx, conf);
2546 sh2 = get_active_stripe(conf, s, conf->raid_disks,
2549 /* so far only the early blocks of this stripe
2550 * have been requested. When later blocks
2551 * get requested, we will try again
2554 if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2555 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
2556 /* must have already done this block */
2557 release_stripe(sh2);
2561 /* place all the copies on one channel */
2562 tx = async_memcpy(sh2->dev[dd_idx].page,
2563 sh->dev[i].page, 0, 0, STRIPE_SIZE,
2564 ASYNC_TX_DEP_ACK, tx, NULL, NULL);
2566 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
2567 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
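/* Check whether every data block of the target stripe now carries
 * R5_Expanded; parity (and Q for raid6) is skipped because it will be
 * recomputed for the new layout.  If so, the target stripe is ready to
 * be handled and written out.
 */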
2568 for (j = 0; j < conf->raid_disks; j++)
2569 if (j != sh2->pd_idx &&
2570 (!r6s || j != raid6_next_disk(sh2->pd_idx,
2572 !test_bit(R5_Expanded, &sh2->dev[j].flags))
2574 if (j == conf->raid_disks) {
2575 set_bit(STRIPE_EXPAND_READY, &sh2->state);
2576 set_bit(STRIPE_HANDLE, &sh2->state);
2578 release_stripe(sh2);
2581 /* done submitting copies, wait for them to complete */
2584 dma_wait_for_async_tx(tx);
2589 * handle_stripe - do things to a stripe.
2591 * We lock the stripe and then examine the state of various bits
2592 * to see what needs to be done.
 * Possible results:
2594 * return some read requests which now have data
2595 * return some write requests which are safely on disc
2596 * schedule a read on some buffers
2597 * schedule a write of some buffers
2598 * return confirmation of parity correctness
2600 * buffers are taken off read_list or write_list, and bh_cache buffers
2601 * get BH_Lock set before the stripe lock is released.
2605 static void handle_stripe5(struct stripe_head *sh)
2607 raid5_conf_t *conf = sh->raid_conf;
2608 int disks = sh->disks, i;
2609 struct bio *return_bi = NULL;
2610 struct stripe_head_state s;
2612 unsigned long pending = 0;
2614 memset(&s, 0, sizeof(s));
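/* stripe_head_state collects the per-call summary (locked, uptodate,
 * to_read, to_write, failed counts and so on) that the decision logic
 * below works from.
 */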
2615 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d "
2616 "ops=%lx:%lx:%lx\n", (unsigned long long)sh->sector, sh->state,
2617 atomic_read(&sh->count), sh->pd_idx,
2618 sh->ops.pending, sh->ops.ack, sh->ops.complete);
2620 spin_lock(&sh->lock);
2621 clear_bit(STRIPE_HANDLE, &sh->state);
2622 clear_bit(STRIPE_DELAYED, &sh->state);
2624 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
2625 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2626 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
2627 /* Now to look around and see what can be done */
2629 /* clean-up completed biofill operations */
2630 if (test_bit(STRIPE_OP_BIOFILL, &sh->ops.complete)) {
2631 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending);
2632 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack);
2633 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
2637 for (i=disks; i--; ) {
2639 struct r5dev *dev = &sh->dev[i];
2640 clear_bit(R5_Insync, &dev->flags);
2642 pr_debug("check %d: state 0x%lx toread %p read %p write %p "
2643 "written %p\n", i, dev->flags, dev->toread, dev->read,
2644 dev->towrite, dev->written);
2646 /* maybe we can request a biofill operation
2648 * new wantfill requests are only permitted while
2649 * STRIPE_OP_BIOFILL is clear
2651 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
2652 !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
2653 set_bit(R5_Wantfill, &dev->flags);
2655 /* now count some things */
2656 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
2657 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
2658 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
2660 if (test_bit(R5_Wantfill, &dev->flags))
2662 else if (dev->toread)
2666 if (!test_bit(R5_OVERWRITE, &dev->flags))
2671 rdev = rcu_dereference(conf->disks[i].rdev);
2672 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
2673 /* The ReadError flag will just be confusing now */
2674 clear_bit(R5_ReadError, &dev->flags);
2675 clear_bit(R5_ReWrite, &dev->flags);
2677 if (!rdev || !test_bit(In_sync, &rdev->flags)
2678 || test_bit(R5_ReadError, &dev->flags)) {
2682 set_bit(R5_Insync, &dev->flags);
2686 if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
2689 pr_debug("locked=%d uptodate=%d to_read=%d"
2690 " to_write=%d failed=%d failed_num=%d\n",
2691 s.locked, s.uptodate, s.to_read, s.to_write,
2692 s.failed, s.failed_num);
2693 /* check if the array has lost two devices and, if so, some requests might
 * need to be failed
 */
2696 if (s.failed > 1 && s.to_read+s.to_write+s.written)
2697 handle_requests_to_failed_array(conf, sh, &s, disks,
&return_bi);
2699 if (s.failed > 1 && s.syncing) {
2700 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
2701 clear_bit(STRIPE_SYNCING, &sh->state);
2705 /* might be able to return some write requests if the parity block
2706 * is safe, or on a failed drive
2708 dev = &sh->dev[sh->pd_idx];
2710 ((test_bit(R5_Insync, &dev->flags) &&
2711 !test_bit(R5_LOCKED, &dev->flags) &&
2712 test_bit(R5_UPTODATE, &dev->flags)) ||
2713 (s.failed == 1 && s.failed_num == sh->pd_idx)))
2714 handle_completed_write_requests(conf, sh, disks, &return_bi);
2716 /* Now we might consider reading some blocks, either to check/generate
2717 * parity, or to satisfy requests
2718 * or to load a block that is being partially written.
2720 if (s.to_read || s.non_overwrite ||
2721 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding ||
2722 test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
2723 handle_issuing_new_read_requests5(sh, &s, disks);
2725 /* Now we check to see if any write operations have recently
 * completed
 */
2729 /* leave prexor set until postxor is done, allows us to distinguish
2730 * a rmw from a rcw during biodrain
2732 if (test_bit(STRIPE_OP_PREXOR, &sh->ops.complete) &&
2733 test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) {
2735 clear_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
2736 clear_bit(STRIPE_OP_PREXOR, &sh->ops.ack);
2737 clear_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
2739 for (i = disks; i--; )
2740 clear_bit(R5_Wantprexor, &sh->dev[i].flags);
2743 /* if only POSTXOR is set then this is an 'expand' postxor */
2744 if (test_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete) &&
2745 test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) {
2747 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete);
2748 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.ack);
2749 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
2751 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
2752 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
2753 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
2755 /* All the 'written' buffers and the parity block are ready to
2756 * be written back to disk
2758 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
2759 for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
2761 if (test_bit(R5_LOCKED, &dev->flags) &&
2762 (i == sh->pd_idx || dev->written)) {
2763 pr_debug("Writing block %d\n", i);
2764 set_bit(R5_Wantwrite, &dev->flags);
2765 if (!test_and_set_bit(
2766 STRIPE_OP_IO, &sh->ops.pending))
2768 if (!test_bit(R5_Insync, &dev->flags) ||
2769 (i == sh->pd_idx && s.failed == 0))
2770 set_bit(STRIPE_INSYNC, &sh->state);
2773 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2774 atomic_dec(&conf->preread_active_stripes);
2775 if (atomic_read(&conf->preread_active_stripes) <
IO_THRESHOLD)
2777 md_wakeup_thread(conf->mddev->thread);
2781 /* Now to consider new write requests and what else, if anything
2782 * should be read. We do not handle new writes when:
2783 * 1/ A 'write' operation (copy+xor) is already in flight.
2784 * 2/ A 'check' operation is in flight, as it may clobber the parity
 *    block.
 */
2787 if (s.to_write && !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending) &&
2788 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
2789 handle_issuing_new_write_requests5(conf, sh, &s, disks);
2791 /* maybe we need to check and possibly fix the parity for this stripe
2792 * Any reads will already have been scheduled, so we just see if enough
2793 * data is available. The parity check is held off while parity
2794 * dependent operations are in flight.
2796 if ((s.syncing && s.locked == 0 &&
2797 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
2798 !test_bit(STRIPE_INSYNC, &sh->state)) ||
2799 test_bit(STRIPE_OP_CHECK, &sh->ops.pending) ||
2800 test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending))
2801 handle_parity_checks5(conf, sh, &s, disks);
2803 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
2804 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
2805 clear_bit(STRIPE_SYNCING, &sh->state);
2808 /* If the failed drive is just a ReadError, then we might need to progress
2809 * the repair/check process
2811 if (s.failed == 1 && !conf->mddev->ro &&
2812 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
2813 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
2814 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
2816 dev = &sh->dev[s.failed_num];
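/* First write the recomputed data back over the bad block (R5_ReWrite);
 * on a later pass, read it back to confirm that the rewrite cleared the
 * medium error.
 */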
2817 if (!test_bit(R5_ReWrite, &dev->flags)) {
2818 set_bit(R5_Wantwrite, &dev->flags);
2819 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2821 set_bit(R5_ReWrite, &dev->flags);
2822 set_bit(R5_LOCKED, &dev->flags);
2825 /* let's read it back */
2826 set_bit(R5_Wantread, &dev->flags);
2827 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2829 set_bit(R5_LOCKED, &dev->flags);
2834 /* Finish postxor operations initiated by the expansion
 * process
 */
2837 if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) &&
2838 !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) {
2840 clear_bit(STRIPE_EXPANDING, &sh->state);
2842 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
2843 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
2844 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
2846 for (i = conf->raid_disks; i--; ) {
2847 set_bit(R5_Wantwrite, &sh->dev[i].flags);
2848 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2853 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
2854 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
2855 /* Need to write out all blocks after computing parity */
2856 sh->disks = conf->raid_disks;
2857 sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
2859 s.locked += handle_write_operations5(sh, 1, 1);
2860 } else if (s.expanded &&
2861 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
2862 clear_bit(STRIPE_EXPAND_READY, &sh->state);
2863 atomic_dec(&conf->reshape_stripes);
2864 wake_up(&conf->wait_for_overlap);
2865 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
2868 if (s.expanding && s.locked == 0 &&
2869 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
2870 handle_stripe_expansion(conf, sh, NULL);
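/* Any operations requested above are latched from sh->ops.pending by
 * get_stripe_work() while the stripe lock is still held, and are issued
 * asynchronously by raid5_run_ops() once the lock has been dropped.
 */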
2873 pending = get_stripe_work(sh);
2875 spin_unlock(&sh->lock);
2878 raid5_run_ops(sh, pending);
2880 return_io(return_bi);
2884 static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2886 raid6_conf_t *conf = sh->raid_conf;
2887 int disks = sh->disks;
2888 struct bio *return_bi = NULL;
2889 int i, pd_idx = sh->pd_idx;
2890 struct stripe_head_state s;
2891 struct r6_state r6s;
2892 struct r5dev *dev, *pdev, *qdev;
2894 r6s.qd_idx = raid6_next_disk(pd_idx, disks);
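/* In this layout the Q syndrome always occupies the disk immediately
 * after the P parity disk, wrapping around at the last device; that is
 * what raid6_next_disk() computes.
 */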
2895 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
2896 "pd_idx=%d, qd_idx=%d\n",
2897 (unsigned long long)sh->sector, sh->state,
2898 atomic_read(&sh->count), pd_idx, r6s.qd_idx);
2899 memset(&s, 0, sizeof(s));
2901 spin_lock(&sh->lock);
2902 clear_bit(STRIPE_HANDLE, &sh->state);
2903 clear_bit(STRIPE_DELAYED, &sh->state);
2905 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
2906 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2907 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
2908 /* Now to look around and see what can be done */
2911 for (i=disks; i--; ) {
dev = &sh->dev[i];
2914 clear_bit(R5_Insync, &dev->flags);
2916 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
2917 i, dev->flags, dev->toread, dev->towrite, dev->written);
2918 /* maybe we can reply to a read */
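/* Unlike handle_stripe5, which queues a biofill operation, the raid6
 * path satisfies reads synchronously here by copying the up-to-date
 * page into the waiting bios.
 */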
2919 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
2920 struct bio *rbi, *rbi2;
2921 pr_debug("Return read for disc %d\n", i);
2922 spin_lock_irq(&conf->device_lock);
2925 if (test_and_clear_bit(R5_Overlap, &dev->flags))
2926 wake_up(&conf->wait_for_overlap);
2927 spin_unlock_irq(&conf->device_lock);
2928 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
2929 copy_data(0, rbi, dev->page, dev->sector);
2930 rbi2 = r5_next_bio(rbi, dev->sector);
2931 spin_lock_irq(&conf->device_lock);
2932 if (--rbi->bi_phys_segments == 0) {
2933 rbi->bi_next = return_bi;
2936 spin_unlock_irq(&conf->device_lock);
2941 /* now count some things */
2942 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
2943 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
2950 if (!test_bit(R5_OVERWRITE, &dev->flags))
2955 rdev = rcu_dereference(conf->disks[i].rdev);
2956 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
2957 /* The ReadError flag will just be confusing now */
2958 clear_bit(R5_ReadError, &dev->flags);
2959 clear_bit(R5_ReWrite, &dev->flags);
2961 if (!rdev || !test_bit(In_sync, &rdev->flags)
2962 || test_bit(R5_ReadError, &dev->flags)) {
2964 r6s.failed_num[s.failed] = i;
2967 set_bit(R5_Insync, &dev->flags);
2970 pr_debug("locked=%d uptodate=%d to_read=%d"
2971 " to_write=%d failed=%d failed_num=%d,%d\n",
2972 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
2973 r6s.failed_num[0], r6s.failed_num[1]);
2974 /* check if the array has lost >2 devices and, if so, some requests
2975 * might need to be failed