1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2012 Fusion-io All rights reserved.
4 * Copyright (C) 2012 Intel Corp. All rights reserved.
7 #include <linux/sched.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/raid/pq.h>
12 #include <linux/hash.h>
13 #include <linux/list_sort.h>
14 #include <linux/raid/xor.h>
22 #include "async-thread.h"
24 /* set when additional merges to this rbio are not allowed */
25 #define RBIO_RMW_LOCKED_BIT 1
28 * set when this rbio is sitting in the hash, but it is just a cache
29 * of past RMW
31 #define RBIO_CACHE_BIT 2
34 * set when it is safe to trust the stripe_pages for caching
36 #define RBIO_CACHE_READY_BIT 3
38 #define RBIO_CACHE_SIZE 1024
40 #define BTRFS_STRIPE_HASH_TABLE_BITS 11
42 /* Used by the raid56 code to lock stripes for read/modify/write */
43 struct btrfs_stripe_hash {
44 struct list_head hash_list;
48 /* Used by the raid56 code to lock stripes for read/modify/write */
49 struct btrfs_stripe_hash_table {
50 struct list_head stripe_cache;
51 spinlock_t cache_lock;
53 struct btrfs_stripe_hash table[];
57 * A bvec-like structure to represent a sector inside a page.
59 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
63 unsigned int pgoff:24;
64 unsigned int uptodate:8;
67 static void rmw_rbio_work(struct work_struct *work);
68 static void rmw_rbio_work_locked(struct work_struct *work);
69 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
70 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
72 static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check);
73 static void scrub_rbio_work_locked(struct work_struct *work);
75 static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
77 bitmap_free(rbio->error_bitmap);
78 kfree(rbio->stripe_pages);
79 kfree(rbio->bio_sectors);
80 kfree(rbio->stripe_sectors);
81 kfree(rbio->finish_pointers);
84 static void free_raid_bio(struct btrfs_raid_bio *rbio)
88 if (!refcount_dec_and_test(&rbio->refs))
91 WARN_ON(!list_empty(&rbio->stripe_cache));
92 WARN_ON(!list_empty(&rbio->hash_list));
93 WARN_ON(!bio_list_empty(&rbio->bio_list));
95 for (i = 0; i < rbio->nr_pages; i++) {
96 if (rbio->stripe_pages[i]) {
97 __free_page(rbio->stripe_pages[i]);
98 rbio->stripe_pages[i] = NULL;
102 btrfs_put_bioc(rbio->bioc);
103 free_raid_bio_pointers(rbio);
107 static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
109 INIT_WORK(&rbio->work, work_func);
110 queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
114 * the stripe hash table is used for locking, and to collect
115 * bios in hopes of making a full stripe
117 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
119 struct btrfs_stripe_hash_table *table;
120 struct btrfs_stripe_hash_table *x;
121 struct btrfs_stripe_hash *cur;
122 struct btrfs_stripe_hash *h;
123 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
126 if (info->stripe_hash_table)
130 * The table is large: it starts at order 4 and can go as high as
131 * order 7 when lock debugging is turned on.
133 * Try harder to allocate and fall back to vmalloc to lower the chance
134 * of a failing mount.
136 table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
140 spin_lock_init(&table->cache_lock);
141 INIT_LIST_HEAD(&table->stripe_cache);
145 for (i = 0; i < num_entries; i++) {
147 INIT_LIST_HEAD(&cur->hash_list);
148 spin_lock_init(&cur->lock);
151 x = cmpxchg(&info->stripe_hash_table, NULL, table);
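/*
 * Editor's note (sketch, assumption about the elided tail): cmpxchg()
 * atomically publishes the new table only if info->stripe_hash_table is
 * still NULL, so two racing mounts cannot both install one; whichever
 * allocation lost the race is redundant and is expected to be freed.
 */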
157 * caching an rbio means copying everything from the
158 * bio_sectors array into the stripe_pages array. We
159 * use the page uptodate bit in the stripe cache array
160 * to indicate if it has valid data.
162 * once the caching is done, we set the cache ready
163 * bit.
165 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
170 ret = alloc_rbio_pages(rbio);
174 for (i = 0; i < rbio->nr_sectors; i++) {
175 /* Some range not covered by bio (partial write), skip it */
176 if (!rbio->bio_sectors[i].page) {
178 * Even if the sector is not covered by bio, if it is
179 * a data sector it should still be uptodate as it is
182 if (i < rbio->nr_data * rbio->stripe_nsectors)
183 ASSERT(rbio->stripe_sectors[i].uptodate);
187 ASSERT(rbio->stripe_sectors[i].page);
188 memcpy_page(rbio->stripe_sectors[i].page,
189 rbio->stripe_sectors[i].pgoff,
190 rbio->bio_sectors[i].page,
191 rbio->bio_sectors[i].pgoff,
192 rbio->bioc->fs_info->sectorsize);
193 rbio->stripe_sectors[i].uptodate = 1;
195 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
199 * we hash on the first logical address of the stripe
201 static int rbio_bucket(struct btrfs_raid_bio *rbio)
203 u64 num = rbio->bioc->raid_map[0];
206 * we shift down quite a bit. We're using byte
207 * addressing, and most of the lower bits are zeros.
208 * This tends to upset hash_64, and it consistently
209 * returns just one or two different values.
211 * shifting off the lower bits fixes things.
213 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
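/*
 * Worked example (editor's sketch, values assumed): a full stripe
 * starting at logical 1GiB has raid_map[0] = 0x40000000, whose low 16
 * bits are all zero. Feeding that raw value to hash_64() yields only a
 * few distinct buckets; after the >> 16 shift we hash 0x4000 instead and
 * get a well spread index into the 1 << BTRFS_STRIPE_HASH_TABLE_BITS
 * (2048) buckets.
 */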
216 static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
217 unsigned int page_nr)
219 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
220 const u32 sectors_per_page = PAGE_SIZE / sectorsize;
223 ASSERT(page_nr < rbio->nr_pages);
225 for (i = sectors_per_page * page_nr;
226 i < sectors_per_page * page_nr + sectors_per_page;
228 if (!rbio->stripe_sectors[i].uptodate)
235 * Update the stripe_sectors[] array to use correct page and pgoff
237 * Should be called every time any page pointer in stripe_pages[] is modified.
239 static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
241 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
245 for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
246 int page_index = offset >> PAGE_SHIFT;
248 ASSERT(page_index < rbio->nr_pages);
249 rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
250 rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
254 static void steal_rbio_page(struct btrfs_raid_bio *src,
255 struct btrfs_raid_bio *dest, int page_nr)
257 const u32 sectorsize = src->bioc->fs_info->sectorsize;
258 const u32 sectors_per_page = PAGE_SIZE / sectorsize;
261 if (dest->stripe_pages[page_nr])
262 __free_page(dest->stripe_pages[page_nr]);
263 dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
264 src->stripe_pages[page_nr] = NULL;
266 /* Also update the sector->uptodate bits. */
267 for (i = sectors_per_page * page_nr;
268 i < sectors_per_page * page_nr + sectors_per_page; i++)
269 dest->stripe_sectors[i].uptodate = true;
272 static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
274 const int sector_nr = (page_nr << PAGE_SHIFT) >>
275 rbio->bioc->fs_info->sectorsize_bits;
278 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
279 * we won't have a page which is half data half parity.
281 * Thus if the first sector of the page belongs to data stripes, then
282 * the full page belongs to data stripes.
284 return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
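/*
 * Example (editor's sketch, assuming 4KiB pages and 4KiB sectors): page
 * and sector numbers then map 1:1, so on an rbio with nr_data = 3 and
 * stripe_nsectors = 16 (48 data sectors), page_nr 17 is a data page
 * while page_nr 50 belongs to P or Q.
 */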
288 * Stealing an rbio means taking all the uptodate pages from the stripe array
289 * in the source rbio and putting them into the destination rbio.
291 * This will also update the involved stripe_sectors[] which are referring to
294 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
298 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
301 for (i = 0; i < dest->nr_pages; i++) {
302 struct page *p = src->stripe_pages[i];
305 * We don't need to steal P/Q pages as they will always be
306 * regenerated for RMW or full write anyway.
308 if (!is_data_stripe_page(src, i))
312 * If @src already has RBIO_CACHE_READY_BIT, it should have
313 * all data stripe pages present and uptodate.
316 ASSERT(full_page_sectors_uptodate(src, i));
317 steal_rbio_page(src, dest, i);
319 index_stripe_sectors(dest);
320 index_stripe_sectors(src);
324 * merging means we take the bio_list from the victim and
325 * splice it into the destination. The victim should
326 * be discarded afterwards.
328 * must be called with dest->rbio_list_lock held
330 static void merge_rbio(struct btrfs_raid_bio *dest,
331 struct btrfs_raid_bio *victim)
333 bio_list_merge(&dest->bio_list, &victim->bio_list);
334 dest->bio_list_bytes += victim->bio_list_bytes;
335 /* Also inherit the bitmaps from @victim. */
336 bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
337 dest->stripe_nsectors);
338 bio_list_init(&victim->bio_list);
342 * used to prune items that are in the cache. The caller
343 * must hold the hash table lock.
345 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
347 int bucket = rbio_bucket(rbio);
348 struct btrfs_stripe_hash_table *table;
349 struct btrfs_stripe_hash *h;
353 * check the bit again under the hash table lock.
355 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
358 table = rbio->bioc->fs_info->stripe_hash_table;
359 h = table->table + bucket;
361 /* hold the lock for the bucket because we may be
362 * removing it from the hash table
367 * hold the lock for the bio list because we need
368 * to make sure the bio list is empty
370 spin_lock(&rbio->bio_list_lock);
372 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
373 list_del_init(&rbio->stripe_cache);
374 table->cache_size -= 1;
377 /* if the bio list isn't empty, this rbio is
378 * still involved in an IO. We take it out
379 * of the cache list, and drop the ref that
380 * was held for the list.
382 * If the bio_list was empty, we also remove
383 * the rbio from the hash_table, and drop
384 * the corresponding ref
386 if (bio_list_empty(&rbio->bio_list)) {
387 if (!list_empty(&rbio->hash_list)) {
388 list_del_init(&rbio->hash_list);
389 refcount_dec(&rbio->refs);
390 BUG_ON(!list_empty(&rbio->plug_list));
395 spin_unlock(&rbio->bio_list_lock);
396 spin_unlock(&h->lock);
403 * prune a given rbio from the cache
405 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
407 struct btrfs_stripe_hash_table *table;
410 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
413 table = rbio->bioc->fs_info->stripe_hash_table;
415 spin_lock_irqsave(&table->cache_lock, flags);
416 __remove_rbio_from_cache(rbio);
417 spin_unlock_irqrestore(&table->cache_lock, flags);
421 * remove everything in the cache
423 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
425 struct btrfs_stripe_hash_table *table;
427 struct btrfs_raid_bio *rbio;
429 table = info->stripe_hash_table;
431 spin_lock_irqsave(&table->cache_lock, flags);
432 while (!list_empty(&table->stripe_cache)) {
433 rbio = list_entry(table->stripe_cache.next,
434 struct btrfs_raid_bio,
436 __remove_rbio_from_cache(rbio);
438 spin_unlock_irqrestore(&table->cache_lock, flags);
442 * remove all cached entries and free the hash table
445 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
447 if (!info->stripe_hash_table)
449 btrfs_clear_rbio_cache(info);
450 kvfree(info->stripe_hash_table);
451 info->stripe_hash_table = NULL;
455 * insert an rbio into the stripe cache. It
456 * must have already been prepared by calling
457 * cache_rbio_pages
459 * If this rbio was already cached, it gets
460 * moved to the front of the lru.
462 * If the size of the rbio cache is too big, we
463 * prune an item.
465 static void cache_rbio(struct btrfs_raid_bio *rbio)
467 struct btrfs_stripe_hash_table *table;
470 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
473 table = rbio->bioc->fs_info->stripe_hash_table;
475 spin_lock_irqsave(&table->cache_lock, flags);
476 spin_lock(&rbio->bio_list_lock);
478 /* bump our ref if we were not in the list before */
479 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
480 refcount_inc(&rbio->refs);
482 if (!list_empty(&rbio->stripe_cache)) {
483 list_move(&rbio->stripe_cache, &table->stripe_cache);
485 list_add(&rbio->stripe_cache, &table->stripe_cache);
486 table->cache_size += 1;
489 spin_unlock(&rbio->bio_list_lock);
491 if (table->cache_size > RBIO_CACHE_SIZE) {
492 struct btrfs_raid_bio *found;
494 found = list_entry(table->stripe_cache.prev,
495 struct btrfs_raid_bio,
499 __remove_rbio_from_cache(found);
502 spin_unlock_irqrestore(&table->cache_lock, flags);
506 * helper function to run the xor_blocks api. It is only
507 * able to do MAX_XOR_BLOCKS at a time, so we need to
508 * loop through.
510 static void run_xor(void **pages, int src_cnt, ssize_t len)
514 void *dest = pages[src_cnt];
517 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
518 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
520 src_cnt -= xor_src_cnt;
521 src_off += xor_src_cnt;
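/*
 * Usage sketch (editor's addition): the destination pages[src_cnt] must
 * be pre-seeded with the first source, as the memcpy() before each
 * run_xor() call below does. Assuming MAX_XOR_BLOCKS is 4, a call with
 * src_cnt = 10 issues three xor_blocks() calls, folding sources [0,3],
 * [4,7] and [8,9] into the destination in turn.
 */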
526 * Returns true if the bio list inside this rbio covers an entire stripe (no
529 static int rbio_is_full(struct btrfs_raid_bio *rbio)
532 unsigned long size = rbio->bio_list_bytes;
535 spin_lock_irqsave(&rbio->bio_list_lock, flags);
536 if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
537 ret = 0;
538 BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
539 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
545 * returns 1 if it is safe to merge two rbios together.
546 * The merging is safe if the two rbios correspond to
547 * the same stripe and if they are both going in the same
548 * direction (read vs write), and if neither one is
549 * locked for final IO
551 * The caller is responsible for locking such that
552 * rmw_locked is safe to test
554 static int rbio_can_merge(struct btrfs_raid_bio *last,
555 struct btrfs_raid_bio *cur)
557 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
558 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
562 * we can't merge with cached rbios, since the
563 * idea is that when we merge, the destination
564 * rbio is going to run our IO for us. We can
565 * steal from cached rbios though, other functions
566 * handle that.
568 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
569 test_bit(RBIO_CACHE_BIT, &cur->flags))
572 if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
575 /* we can't merge with different operations */
576 if (last->operation != cur->operation)
579 * We read the full stripe from the drive, then check and
580 * repair the parity and write the new results.
582 * We're not allowed to add any new bios to the
583 * bio list here, anyone else that wants to
584 * change this stripe needs to do their own rmw.
586 if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
589 if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
590 last->operation == BTRFS_RBIO_READ_REBUILD)
596 static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
597 unsigned int stripe_nr,
598 unsigned int sector_nr)
600 ASSERT(stripe_nr < rbio->real_stripes);
601 ASSERT(sector_nr < rbio->stripe_nsectors);
603 return stripe_nr * rbio->stripe_nsectors + sector_nr;
606 /* Return a sector from rbio->stripe_sectors, not from the bio list */
607 static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
608 unsigned int stripe_nr,
609 unsigned int sector_nr)
611 return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
615 /* Grab a sector inside P stripe */
616 static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
617 unsigned int sector_nr)
619 return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
622 /* Grab a sector inside Q stripe, return NULL if not RAID6 */
623 static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
624 unsigned int sector_nr)
626 if (rbio->nr_data + 1 == rbio->real_stripes)
627 return NULL;
628 return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
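/*
 * Layout sketch (editor's addition): stripe_sectors[] is indexed
 * stripe-major. With 3 data stripes + P + Q (real_stripes = 5) and
 * stripe_nsectors = 16, indices 0-47 are data, 48-63 are P and 64-79
 * are Q, so rbio_stripe_sector_index(rbio, 4, 2) = 4 * 16 + 2 = 66, a
 * Q-stripe sector.
 */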
632 * The first stripe in the table for a logical address
633 * has the lock. rbios are added in one of three ways:
635 * 1) Nobody has the stripe locked yet. The rbio is given
636 * the lock and 0 is returned. The caller must start the IO
639 * 2) Someone has the stripe locked, but we're able to merge
640 * with the lock owner. The rbio is freed and the IO will
641 * start automatically along with the existing rbio. 1 is returned.
643 * 3) Someone has the stripe locked, but we're not able to merge.
644 * The rbio is added to the lock owner's plug list, or merged into
645 * an rbio already on the plug list. When the lock owner unlocks,
646 * the next rbio on the list is run and the IO is started automatically.
649 * If we return 0, the caller still owns the rbio and must continue with
650 * IO submission. If we return 1, the caller must assume the rbio has
651 * already been freed.
653 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
655 struct btrfs_stripe_hash *h;
656 struct btrfs_raid_bio *cur;
657 struct btrfs_raid_bio *pending;
659 struct btrfs_raid_bio *freeit = NULL;
660 struct btrfs_raid_bio *cache_drop = NULL;
663 h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
665 spin_lock_irqsave(&h->lock, flags);
666 list_for_each_entry(cur, &h->hash_list, hash_list) {
667 if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
670 spin_lock(&cur->bio_list_lock);
672 /* Can we steal this cached rbio's pages? */
673 if (bio_list_empty(&cur->bio_list) &&
674 list_empty(&cur->plug_list) &&
675 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
676 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
677 list_del_init(&cur->hash_list);
678 refcount_dec(&cur->refs);
680 steal_rbio(cur, rbio);
682 spin_unlock(&cur->bio_list_lock);
687 /* Can we merge into the lock owner? */
688 if (rbio_can_merge(cur, rbio)) {
689 merge_rbio(cur, rbio);
690 spin_unlock(&cur->bio_list_lock);
698 * We couldn't merge with the running rbio, see if we can merge
699 * with the pending ones. We don't have to check for rmw_locked
700 * because there is no way they are inside finish_rmw right now
702 list_for_each_entry(pending, &cur->plug_list, plug_list) {
703 if (rbio_can_merge(pending, rbio)) {
704 merge_rbio(pending, rbio);
705 spin_unlock(&cur->bio_list_lock);
713 * No merging, put us on the tail of the plug list, our rbio
714 * will be started when the currently running rbio unlocks
716 list_add_tail(&rbio->plug_list, &cur->plug_list);
717 spin_unlock(&cur->bio_list_lock);
722 refcount_inc(&rbio->refs);
723 list_add(&rbio->hash_list, &h->hash_list);
725 spin_unlock_irqrestore(&h->lock, flags);
727 remove_rbio_from_cache(cache_drop);
729 free_raid_bio(freeit);
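/*
 * Typical caller pattern (editor's sketch, mirroring recover_rbio_work()
 * below): only submit when we were granted the lock.
 *
 *	if (lock_stripe_add(rbio) == 0)
 *		start_async_work(rbio, rmw_rbio_work);
 *
 * On a non-zero return the rbio was merged or queued on a plug list and
 * must not be touched again by the caller.
 */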
733 static void recover_rbio_work_locked(struct work_struct *work);
736 * called as rmw or parity rebuild is completed. If the plug list has more
737 * rbios waiting for this stripe, the next one on the list will be started
739 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
742 struct btrfs_stripe_hash *h;
746 bucket = rbio_bucket(rbio);
747 h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;
749 if (list_empty(&rbio->plug_list))
752 spin_lock_irqsave(&h->lock, flags);
753 spin_lock(&rbio->bio_list_lock);
755 if (!list_empty(&rbio->hash_list)) {
757 * if we're still cached and there is no other IO
758 * to perform, just leave this rbio here for others
759 * to steal from later
761 if (list_empty(&rbio->plug_list) &&
762 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
764 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
765 BUG_ON(!bio_list_empty(&rbio->bio_list));
769 list_del_init(&rbio->hash_list);
770 refcount_dec(&rbio->refs);
773 * we use the plug list to hold all the rbios
774 * waiting for the chance to lock this stripe.
775 * hand the lock over to one of them.
777 if (!list_empty(&rbio->plug_list)) {
778 struct btrfs_raid_bio *next;
779 struct list_head *head = rbio->plug_list.next;
781 next = list_entry(head, struct btrfs_raid_bio,
784 list_del_init(&rbio->plug_list);
786 list_add(&next->hash_list, &h->hash_list);
787 refcount_inc(&next->refs);
788 spin_unlock(&rbio->bio_list_lock);
789 spin_unlock_irqrestore(&h->lock, flags);
791 if (next->operation == BTRFS_RBIO_READ_REBUILD)
792 start_async_work(next, recover_rbio_work_locked);
793 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
794 steal_rbio(rbio, next);
795 start_async_work(next, recover_rbio_work_locked);
796 } else if (next->operation == BTRFS_RBIO_WRITE) {
797 steal_rbio(rbio, next);
798 start_async_work(next, rmw_rbio_work_locked);
799 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
800 steal_rbio(rbio, next);
801 start_async_work(next, scrub_rbio_work_locked);
808 spin_unlock(&rbio->bio_list_lock);
809 spin_unlock_irqrestore(&h->lock, flags);
813 remove_rbio_from_cache(rbio);
816 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
823 cur->bi_status = err;
830 * this frees the rbio and runs through all the bios in the
831 * bio_list and calls end_io on them
833 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
835 struct bio *cur = bio_list_get(&rbio->bio_list);
839 * Clear the data bitmap, as the rbio may be cached for later usage.
840 * Do this before unlock_stripe() so there will be no new bio
841 * for this rbio.
843 bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);
846 * At this moment, rbio->bio_list is empty, however since rbio does not
847 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
848 * hash list, rbio may be merged with others so that rbio->bio_list
849 * can be non-empty.
850 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
851 * more and we can call bio_endio() on all queued bios.
854 extra = bio_list_get(&rbio->bio_list);
857 rbio_endio_bio_list(cur, err);
859 rbio_endio_bio_list(extra, err);
863 * Get a sector pointer specified by its @stripe_nr and @sector_nr.
865 * @rbio: The raid bio
866 * @stripe_nr: Stripe number, valid range [0, real_stripes)
867 * @sector_nr: Sector number inside the stripe,
868 * valid range [0, stripe_nsectors)
869 * @bio_list_only: Whether to use sectors inside the bio list only.
871 * The read/modify/write code wants to reuse the original bio page as much
872 * as possible, and only use stripe_sectors as a fallback.
874 static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
875 int stripe_nr, int sector_nr,
878 struct sector_ptr *sector;
881 ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
882 ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
884 index = stripe_nr * rbio->stripe_nsectors + sector_nr;
885 ASSERT(index >= 0 && index < rbio->nr_sectors);
887 spin_lock_irq(&rbio->bio_list_lock);
888 sector = &rbio->bio_sectors[index];
889 if (sector->page || bio_list_only) {
890 /* Don't return sector without a valid page pointer */
893 spin_unlock_irq(&rbio->bio_list_lock);
896 spin_unlock_irq(&rbio->bio_list_lock);
898 return &rbio->stripe_sectors[index];
902 * allocation and initial setup for the btrfs_raid_bio. Note that
903 * this does not allocate any pages for rbio->stripe_pages.
905 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
906 struct btrfs_io_context *bioc)
908 const unsigned int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
909 const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
910 const unsigned int num_pages = stripe_npages * real_stripes;
911 const unsigned int stripe_nsectors =
912 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
913 const unsigned int num_sectors = stripe_nsectors * real_stripes;
914 struct btrfs_raid_bio *rbio;
916 /* PAGE_SIZE must also be aligned to sectorsize for subpage support */
917 ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
919 * Our current stripe len should be fixed to 64k thus stripe_nsectors
920 * (at most 16) should be no larger than BITS_PER_LONG.
922 ASSERT(stripe_nsectors <= BITS_PER_LONG);
924 rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
926 return ERR_PTR(-ENOMEM);
927 rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
929 rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
931 rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
933 rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
934 rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
936 if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
937 !rbio->finish_pointers || !rbio->error_bitmap) {
938 free_raid_bio_pointers(rbio);
940 return ERR_PTR(-ENOMEM);
943 bio_list_init(&rbio->bio_list);
944 init_waitqueue_head(&rbio->io_wait);
945 INIT_LIST_HEAD(&rbio->plug_list);
946 spin_lock_init(&rbio->bio_list_lock);
947 INIT_LIST_HEAD(&rbio->stripe_cache);
948 INIT_LIST_HEAD(&rbio->hash_list);
949 btrfs_get_bioc(bioc);
951 rbio->nr_pages = num_pages;
952 rbio->nr_sectors = num_sectors;
953 rbio->real_stripes = real_stripes;
954 rbio->stripe_npages = stripe_npages;
955 rbio->stripe_nsectors = stripe_nsectors;
956 refcount_set(&rbio->refs, 1);
957 atomic_set(&rbio->stripes_pending, 0);
959 ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
960 rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);
965 /* allocate pages for all the stripes in the bio, including parity */
966 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
970 ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
973 /* Mapping all sectors */
974 index_stripe_sectors(rbio);
978 /* only allocate pages for p/q stripes */
979 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
981 const int data_pages = rbio->nr_data * rbio->stripe_npages;
984 ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
985 rbio->stripe_pages + data_pages);
989 index_stripe_sectors(rbio);
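/*
 * Worked example (editor's sketch): BTRFS_STRIPE_LEN is 64KiB, so with
 * 4KiB pages stripe_npages = 16. For a 4-disk RAID5 (nr_data = 3),
 * data_pages = 48 and only stripe_pages[48..63], the P stripe, are
 * allocated here; the data pages stay NULL until a sub-stripe write
 * needs them (alloc_rbio_data_pages()).
 */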
994 * Return the total number of errors found in the vertical stripe of @sector_nr.
996 * @faila and @failb will also be updated to the first and second stripe
997 * number of the errors.
999 static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
1000 int *faila, int *failb)
1003 int found_errors = 0;
1005 if (faila || failb) {
1007 * Both @faila and @failb should be valid pointers if any of
1008 * them is specified.
1010 ASSERT(faila && failb);
1015 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1016 int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;
1018 if (test_bit(total_sector_nr, rbio->error_bitmap)) {
1021 /* Update faila and failb. */
1024 else if (*failb < 0)
1029 return found_errors;
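/*
 * Worked example (editor's sketch): a "vertical" stripe is the set of
 * sectors sharing the same sector_nr across all stripes. For
 * sector_nr = 5 on a 5-stripe RAID6 with stripe_nsectors = 16, the bits
 * checked in error_bitmap are 5, 21, 37, 53 and 69; with bits 21 and 53
 * set this returns 2 with *faila = 1 and *failb = 3.
 */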
1033 * Add a single sector @sector into our list of bios for IO.
1035 * Return 0 if everything went well.
1036 * Return <0 for error.
1038 static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
1039 struct bio_list *bio_list,
1040 struct sector_ptr *sector,
1041 unsigned int stripe_nr,
1042 unsigned int sector_nr,
1045 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1046 struct bio *last = bio_list->tail;
1049 struct btrfs_io_stripe *stripe;
1053 * Note: here stripe_nr has taken device replace into consideration,
1054 * thus it can be larger than rbio->real_stripes.
1055 * So here we check against bioc->num_stripes, not rbio->real_stripes.
1057 ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
1058 ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
1059 ASSERT(sector->page);
1061 stripe = &rbio->bioc->stripes[stripe_nr];
1062 disk_start = stripe->physical + sector_nr * sectorsize;
1064 /* if the device is missing, just fail this stripe */
1065 if (!stripe->dev->bdev) {
1068 set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
1069 rbio->error_bitmap);
1071 /* Check if we have reached tolerance early. */
1072 found_errors = get_rbio_veritical_errors(rbio, sector_nr,
1074 if (found_errors > rbio->bioc->max_errors)
1079 /* see if we can add this page onto our existing bio */
1081 u64 last_end = last->bi_iter.bi_sector << 9;
1082 last_end += last->bi_iter.bi_size;
1085 * we can't merge these if they are from different
1086 * devices or if they are not contiguous
1088 if (last_end == disk_start && !last->bi_status &&
1089 last->bi_bdev == stripe->dev->bdev) {
1090 ret = bio_add_page(last, sector->page, sectorsize,
1092 if (ret == sectorsize)
1097 /* put a new bio on the list */
1098 bio = bio_alloc(stripe->dev->bdev,
1099 max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
1101 bio->bi_iter.bi_sector = disk_start >> 9;
1102 bio->bi_private = rbio;
1104 bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
1105 bio_list_add(bio_list, bio);
1109 static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
1111 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1112 struct bio_vec bvec;
1113 struct bvec_iter iter;
1114 u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1115 rbio->bioc->raid_map[0];
1117 bio_for_each_segment(bvec, bio, iter) {
1120 for (bvec_offset = 0; bvec_offset < bvec.bv_len;
1121 bvec_offset += sectorsize, offset += sectorsize) {
1122 int index = offset / sectorsize;
1123 struct sector_ptr *sector = &rbio->bio_sectors[index];
1125 sector->page = bvec.bv_page;
1126 sector->pgoff = bvec.bv_offset + bvec_offset;
1127 ASSERT(sector->pgoff < PAGE_SIZE);
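/*
 * Worked example (editor's sketch): with sectorsize = 4KiB, a bio whose
 * bi_sector starts 8KiB past raid_map[0] begins at offset = 8KiB, so its
 * first sector lands in bio_sectors[2]; every further sectorsize chunk
 * of the bvec advances the index by one.
 */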
1133 * helper function to walk our bio list and populate the bio_sectors array
1134 * with the result. This seems expensive, but it is faster than constantly
1135 * searching through the bio list as we set up the IO in finish_rmw or stripe
1136 * reconstruction.
1138 * This must be called before you trust the answers from page_in_rbio
1140 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1144 spin_lock_irq(&rbio->bio_list_lock);
1145 bio_list_for_each(bio, &rbio->bio_list)
1146 index_one_bio(rbio, bio);
1148 spin_unlock_irq(&rbio->bio_list_lock);
1151 static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
1152 struct raid56_bio_trace_info *trace_info)
1154 const struct btrfs_io_context *bioc = rbio->bioc;
1159 /* We rely on bio->bi_bdev to find the stripe number. */
1163 for (i = 0; i < bioc->num_stripes; i++) {
1164 if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
1166 trace_info->stripe_nr = i;
1167 trace_info->devid = bioc->stripes[i].dev->devid;
1168 trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1169 bioc->stripes[i].physical;
1174 trace_info->devid = -1;
1175 trace_info->offset = -1;
1176 trace_info->stripe_nr = -1;
1179 /* Generate PQ for one vertical stripe. */
1180 static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
1182 void **pointers = rbio->finish_pointers;
1183 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1184 struct sector_ptr *sector;
1186 const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;
1188 /* First collect one sector from each data stripe */
1189 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1190 sector = sector_in_rbio(rbio, stripe, sectornr, 0);
1191 pointers[stripe] = kmap_local_page(sector->page) +
1195 /* Then add the parity stripe */
1196 sector = rbio_pstripe_sector(rbio, sectornr);
1197 sector->uptodate = 1;
1198 pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;
1202 * RAID6, add the qstripe and call the library function
1203 * to fill in our p/q
1205 sector = rbio_qstripe_sector(rbio, sectornr);
1206 sector->uptodate = 1;
1207 pointers[stripe++] = kmap_local_page(sector->page) +
1210 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
1214 memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
1215 run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
1217 for (stripe = stripe - 1; stripe >= 0; stripe--)
1218 kunmap_local(pointers[stripe]);
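/*
 * Pointer layout sketch (editor's addition): for RAID6 the array handed
 * to raid6_call.gen_syndrome() is
 *
 *	pointers[0 .. nr_data-1]  data sectors (from bio list or cache)
 *	pointers[nr_data]         P sector
 *	pointers[nr_data + 1]     Q sector
 *
 * while RAID5 seeds P with data[0] via the memcpy() and xors the
 * remaining nr_data - 1 data pointers into it with run_xor().
 */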
1221 static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
1222 struct bio_list *bio_list)
1225 /* The total sector number inside the full stripe. */
1226 int total_sector_nr;
1231 ASSERT(bio_list_size(bio_list) == 0);
1233 /* We should have at least one data sector. */
1234 ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));
1237 * Reset errors, as we may have errors inherited from the degraded
1238 * write.
1240 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
1243 * Start assembly. Make bios for everything from the higher layers (the
1244 * bio_list in our rbio) and our P/Q. Ignore everything else.
1246 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1247 total_sector_nr++) {
1248 struct sector_ptr *sector;
1250 stripe = total_sector_nr / rbio->stripe_nsectors;
1251 sectornr = total_sector_nr % rbio->stripe_nsectors;
1253 /* This vertical stripe has no data, skip it. */
1254 if (!test_bit(sectornr, &rbio->dbitmap))
1257 if (stripe < rbio->nr_data) {
1258 sector = sector_in_rbio(rbio, stripe, sectornr, 1);
1262 sector = rbio_stripe_sector(rbio, stripe, sectornr);
1265 ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
1266 sectornr, REQ_OP_WRITE);
1271 if (likely(!rbio->bioc->num_tgtdevs))
1272 return 0;
1274 /* Make a copy for the replace target device. */
1275 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1276 total_sector_nr++) {
1277 struct sector_ptr *sector;
1279 stripe = total_sector_nr / rbio->stripe_nsectors;
1280 sectornr = total_sector_nr % rbio->stripe_nsectors;
1282 if (!rbio->bioc->tgtdev_map[stripe]) {
1284 * We can skip the whole stripe completely, note
1285 * total_sector_nr will be increased by one anyway.
1287 ASSERT(sectornr == 0);
1288 total_sector_nr += rbio->stripe_nsectors - 1;
1292 /* This vertical stripe has no data, skip it. */
1293 if (!test_bit(sectornr, &rbio->dbitmap))
1296 if (stripe < rbio->nr_data) {
1297 sector = sector_in_rbio(rbio, stripe, sectornr, 1);
1301 sector = rbio_stripe_sector(rbio, stripe, sectornr);
1304 ret = rbio_add_io_sector(rbio, bio_list, sector,
1305 rbio->bioc->tgtdev_map[stripe],
1306 sectornr, REQ_OP_WRITE);
1313 while ((bio = bio_list_pop(bio_list)))
1318 static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
1320 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1321 u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1322 rbio->bioc->raid_map[0];
1323 int total_nr_sector = offset >> fs_info->sectorsize_bits;
1325 ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);
1327 bitmap_set(rbio->error_bitmap, total_nr_sector,
1328 bio->bi_iter.bi_size >> fs_info->sectorsize_bits);
1331 * Special handling for raid56_alloc_missing_rbio() used by
1332 * scrub/replace. Unlike the call path in raid56_parity_recover(), they
1333 * pass an empty bio here. Thus we have to find out the missing device
1334 * and mark the stripe error instead.
1336 if (bio->bi_iter.bi_size == 0) {
1337 bool found_missing = false;
1340 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1341 if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
1342 found_missing = true;
1343 bitmap_set(rbio->error_bitmap,
1344 stripe_nr * rbio->stripe_nsectors,
1345 rbio->stripe_nsectors);
1348 ASSERT(found_missing);
1353 * For the subpage case, we can no longer set page Uptodate directly for
1354 * stripe_pages[], thus we need to locate the sector.
1356 static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
1362 for (i = 0; i < rbio->nr_sectors; i++) {
1363 struct sector_ptr *sector = &rbio->stripe_sectors[i];
1365 if (sector->page == page && sector->pgoff == pgoff)
1372 * this sets each page in the bio uptodate. It should only be used on private
1373 * rbio pages, nothing that comes in from the higher layers
1375 static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
1377 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1378 struct bio_vec *bvec;
1379 struct bvec_iter_all iter_all;
1381 ASSERT(!bio_flagged(bio, BIO_CLONED));
1383 bio_for_each_segment_all(bvec, bio, iter_all) {
1384 struct sector_ptr *sector;
1387 for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
1388 pgoff += sectorsize) {
1389 sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
1392 sector->uptodate = 1;
1397 static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
1399 struct bio_vec *bv = bio_first_bvec_all(bio);
1402 for (i = 0; i < rbio->nr_sectors; i++) {
1403 struct sector_ptr *sector;
1405 sector = &rbio->stripe_sectors[i];
1406 if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
1408 sector = &rbio->bio_sectors[i];
1409 if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
1412 ASSERT(i < rbio->nr_sectors);
1416 static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
1418 int total_sector_nr = get_bio_sector_nr(rbio, bio);
1420 struct bio_vec *bvec;
1421 struct bvec_iter_all iter_all;
1423 bio_for_each_segment_all(bvec, bio, iter_all)
1424 bio_size += bvec->bv_len;
1426 bitmap_set(rbio->error_bitmap, total_sector_nr,
1427 bio_size >> rbio->bioc->fs_info->sectorsize_bits);
1430 static void raid_wait_read_end_io(struct bio *bio)
1432 struct btrfs_raid_bio *rbio = bio->bi_private;
1435 rbio_update_error_bitmap(rbio, bio);
1437 set_bio_pages_uptodate(rbio, bio);
1440 if (atomic_dec_and_test(&rbio->stripes_pending))
1441 wake_up(&rbio->io_wait);
1444 static void submit_read_bios(struct btrfs_raid_bio *rbio,
1445 struct bio_list *bio_list)
1449 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
1450 while ((bio = bio_list_pop(bio_list))) {
1451 bio->bi_end_io = raid_wait_read_end_io;
1453 if (trace_raid56_scrub_read_recover_enabled()) {
1454 struct raid56_bio_trace_info trace_info = { 0 };
1456 bio_get_trace_info(rbio, bio, &trace_info);
1457 trace_raid56_scrub_read_recover(rbio, bio, &trace_info);
1463 static int rmw_assemble_read_bios(struct btrfs_raid_bio *rbio,
1464 struct bio_list *bio_list)
1466 const int nr_data_sectors = rbio->stripe_nsectors * rbio->nr_data;
1468 int total_sector_nr;
1471 ASSERT(bio_list_size(bio_list) == 0);
1473 /* Build a list of bios to read all the missing data sectors. */
1474 for (total_sector_nr = 0; total_sector_nr < nr_data_sectors;
1475 total_sector_nr++) {
1476 struct sector_ptr *sector;
1477 int stripe = total_sector_nr / rbio->stripe_nsectors;
1478 int sectornr = total_sector_nr % rbio->stripe_nsectors;
1481 * We want to find all the sectors missing from the rbio and
1482 * read them from the disk. If sector_in_rbio() finds a page
1483 * in the bio list we don't need to read it off the stripe.
1485 sector = sector_in_rbio(rbio, stripe, sectornr, 1);
1489 sector = rbio_stripe_sector(rbio, stripe, sectornr);
1491 * The bio cache may have handed us an uptodate page. If so,
1492 * use it.
1494 if (sector->uptodate)
1497 ret = rbio_add_io_sector(rbio, bio_list, sector,
1498 stripe, sectornr, REQ_OP_READ);
1505 while ((bio = bio_list_pop(bio_list)))
1510 static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
1512 const int data_pages = rbio->nr_data * rbio->stripe_npages;
1515 ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages);
1519 index_stripe_sectors(rbio);
1524 * We use plugging callbacks to collect full stripes.
1525 * Any time we get a partial stripe write while plugged
1526 * we collect it into a list. When the unplug comes down,
1527 * we sort the list by logical block number and merge
1528 * everything we can into the same rbios
1530 struct btrfs_plug_cb {
1531 struct blk_plug_cb cb;
1532 struct btrfs_fs_info *info;
1533 struct list_head rbio_list;
1534 struct work_struct work;
1538 * rbios on the plug list are sorted for easier merging.
1540 static int plug_cmp(void *priv, const struct list_head *a,
1541 const struct list_head *b)
1543 const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1545 const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1547 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1548 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1550 if (a_sector < b_sector)
1552 if (a_sector > b_sector)
1557 static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1559 struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
1560 struct btrfs_raid_bio *cur;
1561 struct btrfs_raid_bio *last = NULL;
1563 list_sort(NULL, &plug->rbio_list, plug_cmp);
1565 while (!list_empty(&plug->rbio_list)) {
1566 cur = list_entry(plug->rbio_list.next,
1567 struct btrfs_raid_bio, plug_list);
1568 list_del_init(&cur->plug_list);
1570 if (rbio_is_full(cur)) {
1571 /* We have a full stripe, queue it down. */
1572 start_async_work(cur, rmw_rbio_work);
1576 if (rbio_can_merge(last, cur)) {
1577 merge_rbio(last, cur);
1581 start_async_work(last, rmw_rbio_work);
1586 start_async_work(last, rmw_rbio_work);
1590 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1591 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1593 const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1594 const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1595 const u64 full_stripe_start = rbio->bioc->raid_map[0];
1596 const u32 orig_len = orig_bio->bi_iter.bi_size;
1597 const u32 sectorsize = fs_info->sectorsize;
1600 ASSERT(orig_logical >= full_stripe_start &&
1601 orig_logical + orig_len <= full_stripe_start +
1602 rbio->nr_data * BTRFS_STRIPE_LEN);
1604 bio_list_add(&rbio->bio_list, orig_bio);
1605 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1607 /* Update the dbitmap. */
1608 for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1609 cur_logical += sectorsize) {
1610 int bit = ((u32)(cur_logical - full_stripe_start) >>
1611 fs_info->sectorsize_bits) % rbio->stripe_nsectors;
1613 set_bit(bit, &rbio->dbitmap);
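/*
 * Worked example (editor's sketch): sectorsize = 4KiB (sectorsize_bits =
 * 12), stripe_nsectors = 16, nr_data = 2. A 4KiB write at
 * full_stripe_start + 68KiB lies in the second data stripe, and
 * bit = (68KiB >> 12) % 16 = 17 % 16 = 1: dbitmap records only the
 * vertical position, not which data stripe holds the sector.
 */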
1618 * our main entry point for writes from the rest of the FS.
1620 void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
1622 struct btrfs_fs_info *fs_info = bioc->fs_info;
1623 struct btrfs_raid_bio *rbio;
1624 struct btrfs_plug_cb *plug = NULL;
1625 struct blk_plug_cb *cb;
1628 rbio = alloc_rbio(fs_info, bioc);
1630 ret = PTR_ERR(rbio);
1633 rbio->operation = BTRFS_RBIO_WRITE;
1634 rbio_add_bio(rbio, bio);
1637 * Don't plug on full rbios, just get them out the door
1638 * as quickly as we can
1640 if (rbio_is_full(rbio))
1643 cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
1645 plug = container_of(cb, struct btrfs_plug_cb, cb);
1647 plug->info = fs_info;
1648 INIT_LIST_HEAD(&plug->rbio_list);
1650 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1655 * Either we don't have any existing plug, or we're doing a full stripe,
1656 * so we can queue the rmw work now.
1658 start_async_work(rbio, rmw_rbio_work);
1663 bio->bi_status = errno_to_blk_status(ret);
1668 * Recover a vertical stripe specified by @sector_nr.
1669 * @*pointers are the pre-allocated pointers by the caller, so we don't
1670 * need to allocate/free the pointers again and again.
1672 static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
1673 void **pointers, void **unmap_array)
1675 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1676 struct sector_ptr *sector;
1677 const u32 sectorsize = fs_info->sectorsize;
1684 * Now we just use the bitmap to mark the horizontal stripes in
1685 * which we have data when doing parity scrub.
1687 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1688 !test_bit(sector_nr, &rbio->dbitmap))
1691 found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
1694 * No errors in the vertical stripe, skip it. Can happen for recovery
1695 * where only part of a stripe failed the csum check.
1700 if (found_errors > rbio->bioc->max_errors)
1704 * Set up our array of pointers with sectors from each stripe
1706 * NOTE: store a duplicate array of pointers to preserve the
1707 * pointer order.
1709 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1711 * If we're rebuilding a read, we have to use pages from the
1712 * bio list if possible.
1714 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1715 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
1716 sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
1718 sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
1720 ASSERT(sector->page);
1721 pointers[stripe_nr] = kmap_local_page(sector->page) +
1723 unmap_array[stripe_nr] = pointers[stripe_nr];
1726 /* All raid6 handling here */
1727 if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1728 /* Single failure, rebuild from parity raid5 style */
1730 if (faila == rbio->nr_data)
1732 * Just the P stripe has failed, without
1733 * a bad data or Q stripe.
1734 * We have nothing to do, just skip the
1735 * recovery for this stripe.
1739 * a single failure in raid6 is rebuilt
1740 * in the pstripe code below
1746 * If the Q stripe is failed, do a pstripe reconstruction from
1747 * the xors.
1748 * If both the Q stripe and the P stripe are failed, we're
1749 * here due to a crc mismatch and we can't give them the
1750 * data they want.
1752 if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
1753 if (rbio->bioc->raid_map[faila] ==
1756 * Only P and Q are corrupted.
1757 * We only care about data stripes recovery,
1758 * can skip this vertical stripe.
1762 * Otherwise we have one bad data stripe and
1763 * a good P stripe. raid5!
1768 if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
1769 raid6_datap_recov(rbio->real_stripes, sectorsize,
1772 raid6_2data_recov(rbio->real_stripes, sectorsize,
1773 faila, failb, pointers);
1778 /* Rebuild from P stripe here (raid5 or raid6). */
1779 ASSERT(failb == -1);
1781 /* Copy parity block into failed block to start with */
1782 memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
1784 /* Rearrange the pointer array */
1785 p = pointers[faila];
1786 for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
1788 pointers[stripe_nr] = pointers[stripe_nr + 1];
1789 pointers[rbio->nr_data - 1] = p;
1791 /* Xor in the rest */
1792 run_xor(pointers, rbio->nr_data - 1, sectorsize);
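/*
 * Rearrangement sketch (editor's addition): for RAID5 with nr_data = 4
 * and faila = 1, pointers[1] (already seeded with the P copy above) is
 * rotated to the last data slot, giving [d0, d2, d3, P]; run_xor() then
 * folds d0 ^ d2 ^ d3 into that slot, and since P = d0 ^ d1 ^ d2 ^ d3
 * the failed block d1 reappears there.
 */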
1797 * Whether this is an RMW or a recovery, we should have all
1798 * failed sectors repaired in the vertical stripe, thus they are now
1799 * uptodate.
1800 * Especially if we determine to cache the rbio, we need to
1801 * have at least all data sectors uptodate.
1804 sector = rbio_stripe_sector(rbio, faila, sector_nr);
1805 sector->uptodate = 1;
1808 sector = rbio_stripe_sector(rbio, failb, sector_nr);
1809 sector->uptodate = 1;
1813 for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
1814 kunmap_local(unmap_array[stripe_nr]);
1818 static int recover_sectors(struct btrfs_raid_bio *rbio)
1820 void **pointers = NULL;
1821 void **unmap_array = NULL;
1826 * @pointers array stores the pointer for each sector.
1828 * @unmap_array stores copy of pointers that does not get reordered
1829 * during reconstruction so that kunmap_local works.
1831 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1832 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1833 if (!pointers || !unmap_array) {
1838 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1839 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1840 spin_lock_irq(&rbio->bio_list_lock);
1841 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1842 spin_unlock_irq(&rbio->bio_list_lock);
1845 index_rbio_pages(rbio);
1847 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
1848 ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
1859 static int recover_assemble_read_bios(struct btrfs_raid_bio *rbio,
1860 struct bio_list *bio_list)
1863 int total_sector_nr;
1866 ASSERT(bio_list_size(bio_list) == 0);
1868 * Read everything that hasn't failed. However this time we will
1869 * not trust any cached sector.
1870 * As we may read out some stale data, but the higher layer is not
1871 * reading that stale part.
1873 * So here we always re-read everything in recovery path.
1875 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1876 total_sector_nr++) {
1877 int stripe = total_sector_nr / rbio->stripe_nsectors;
1878 int sectornr = total_sector_nr % rbio->stripe_nsectors;
1879 struct sector_ptr *sector;
1882 * Skip the range which has error. It can be a range which is
1883 * marked error (for csum mismatch), or it can be a missing
1884 * device.
1886 if (!rbio->bioc->stripes[stripe].dev->bdev ||
1887 test_bit(total_sector_nr, rbio->error_bitmap)) {
1889 * Also set the error bit for missing device, which
1890 * may not yet have its error bit set.
1892 set_bit(total_sector_nr, rbio->error_bitmap);
1896 sector = rbio_stripe_sector(rbio, stripe, sectornr);
1897 ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
1898 sectornr, REQ_OP_READ);
1904 while ((bio = bio_list_pop(bio_list)))
1910 static int recover_rbio(struct btrfs_raid_bio *rbio)
1912 struct bio_list bio_list;
1917 * Either we're doing recovery for a read failure or a degraded write;
1918 * the caller should have set the error bitmap correctly.
1920 ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));
1921 bio_list_init(&bio_list);
1923 /* For recovery, we need to read all sectors including P/Q. */
1924 ret = alloc_rbio_pages(rbio);
1928 index_rbio_pages(rbio);
1930 ret = recover_assemble_read_bios(rbio, &bio_list);
1934 submit_read_bios(rbio, &bio_list);
1935 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
1937 ret = recover_sectors(rbio);
1940 while ((bio = bio_list_pop(&bio_list)))
1946 static void recover_rbio_work(struct work_struct *work)
1948 struct btrfs_raid_bio *rbio;
1951 rbio = container_of(work, struct btrfs_raid_bio, work);
1953 ret = lock_stripe_add(rbio);
1955 ret = recover_rbio(rbio);
1956 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
1960 static void recover_rbio_work_locked(struct work_struct *work)
1962 struct btrfs_raid_bio *rbio;
1965 rbio = container_of(work, struct btrfs_raid_bio, work);
1967 ret = recover_rbio(rbio);
1968 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
1971 static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
1977 * This is for RAID6 extra recovery tries, thus the mirror number should
1978 * be larger than 2.
1979 * Mirror 1 means read from data stripes. Mirror 2 means rebuild using
1980 * RAID5 methods.
1982 ASSERT(mirror_num > 2);
1983 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
1988 found_errors = get_rbio_veritical_errors(rbio, sector_nr,
1990 /* This vertical stripe doesn't have errors. */
1995 * If we found errors, there should be only one error marked
1996 * by previous set_rbio_range_error().
1998 ASSERT(found_errors == 1);
2001 /* Now select another stripe to mark as error. */
2002 failb = rbio->real_stripes - (mirror_num - 1);
2006 /* Set the extra bit in error bitmap. */
2008 set_bit(failb * rbio->stripe_nsectors + sector_nr,
2009 rbio->error_bitmap);
2012 /* We should have found at least one vertical stripe with an error. */
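/*
 * Worked example (editor's sketch): with real_stripes = 6 (4 data + P +
 * Q), mirror_num = 3 yields failb = 6 - 2 = 4, i.e. the P stripe is
 * additionally marked bad so the rebuild must go through Q; each higher
 * mirror_num moves failb one stripe lower.
 */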
2017 * the main entry point for reads from the higher layers. This
2018 * is really only called when the normal read path had a failure,
2019 * so we assume the bio they send down corresponds to a failed part
2020 * of the disk.
2022 void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
2025 struct btrfs_fs_info *fs_info = bioc->fs_info;
2026 struct btrfs_raid_bio *rbio;
2028 rbio = alloc_rbio(fs_info, bioc);
2030 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
2035 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2036 rbio_add_bio(rbio, bio);
2038 set_rbio_range_error(rbio, bio);
2042 * for 'mirror_num == 2', reconstruct from all other stripes.
2043 * for 'mirror_num > 2', select a stripe to fail on every retry.
2046 set_rbio_raid6_extra_error(rbio, mirror_num);
2048 start_async_work(rbio, recover_rbio_work);
2051 static int rmw_read_and_wait(struct btrfs_raid_bio *rbio)
2053 struct bio_list bio_list;
2057 bio_list_init(&bio_list);
2059 ret = rmw_assemble_read_bios(rbio, &bio_list);
2063 submit_read_bios(rbio, &bio_list);
2064 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2067 while ((bio = bio_list_pop(&bio_list)))
2073 static void raid_wait_write_end_io(struct bio *bio)
2075 struct btrfs_raid_bio *rbio = bio->bi_private;
2076 blk_status_t err = bio->bi_status;
2079 rbio_update_error_bitmap(rbio, bio);
2081 if (atomic_dec_and_test(&rbio->stripes_pending))
2082 wake_up(&rbio->io_wait);
2085 static void submit_write_bios(struct btrfs_raid_bio *rbio,
2086 struct bio_list *bio_list)
2090 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
2091 while ((bio = bio_list_pop(bio_list))) {
2092 bio->bi_end_io = raid_wait_write_end_io;
2094 if (trace_raid56_write_stripe_enabled()) {
2095 struct raid56_bio_trace_info trace_info = { 0 };
2097 bio_get_trace_info(rbio, bio, &trace_info);
2098 trace_raid56_write_stripe(rbio, bio, &trace_info);
2104 static int rmw_rbio(struct btrfs_raid_bio *rbio)
2106 struct bio_list bio_list;
2111 * Allocate the pages for parity first, as P/Q pages will always be
2112 * needed for both full-stripe and sub-stripe writes.
2114 ret = alloc_rbio_parity_pages(rbio);
2118 /* Full stripe write, can write the full stripe right now. */
2119 if (rbio_is_full(rbio))
2122 * Now we're doing a sub-stripe write, so we also need all data
2123 * stripes to do the full RMW.
2125 ret = alloc_rbio_data_pages(rbio);
2129 index_rbio_pages(rbio);
2131 ret = rmw_read_and_wait(rbio);
2135 /* We have read errors, try recovery path. */
2136 if (!bitmap_empty(rbio->error_bitmap, rbio->nr_sectors)) {
2137 ret = recover_rbio(rbio);
2143 * At this stage we're not allowed to add any new bios to the
2144 * bio list any more, anyone else that wants to change this stripe
2145 * needs to do their own rmw.
2147 spin_lock_irq(&rbio->bio_list_lock);
2148 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
2149 spin_unlock_irq(&rbio->bio_list_lock);
2151 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2153 index_rbio_pages(rbio);
2156 * We don't cache full rbios because we're assuming
2157 * the higher layers are unlikely to use this area of
2158 * the disk again soon. If they do use it again,
2159 * hopefully they will send another full bio.
2161 if (!rbio_is_full(rbio))
2162 cache_rbio_pages(rbio);
2164 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2166 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
2167 generate_pq_vertical(rbio, sectornr);
2169 bio_list_init(&bio_list);
2170 ret = rmw_assemble_write_bios(rbio, &bio_list);
2174 /* We should have at least one bio assembled. */
2175 ASSERT(bio_list_size(&bio_list));
2176 submit_write_bios(rbio, &bio_list);
2177 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2179 /* We may have more errors than our tolerance during the read. */
2180 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
2183 found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
2184 if (found_errors > rbio->bioc->max_errors) {
2192 static void rmw_rbio_work(struct work_struct *work)
2194 struct btrfs_raid_bio *rbio;
2197 rbio = container_of(work, struct btrfs_raid_bio, work);
2199 ret = lock_stripe_add(rbio);
2201 ret = rmw_rbio(rbio);
2202 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2206 static void rmw_rbio_work_locked(struct work_struct *work)
2208 struct btrfs_raid_bio *rbio;
2211 rbio = container_of(work, struct btrfs_raid_bio, work);
2213 ret = rmw_rbio(rbio);
2214 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2218 * The following code is used to scrub/replace the parity stripe
2220 * Caller must have already increased bio_counter for getting @bioc.
2222 * Note: We need to make sure all the pages added into the scrub/replace
2223 * raid bio are correct and will not be changed during the scrub/replace.
2224 * That is, those pages just hold metadata or file data with checksum.
2227 struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
2228 struct btrfs_io_context *bioc,
2229 struct btrfs_device *scrub_dev,
2230 unsigned long *dbitmap, int stripe_nsectors)
2232 struct btrfs_fs_info *fs_info = bioc->fs_info;
2233 struct btrfs_raid_bio *rbio;
2236 rbio = alloc_rbio(fs_info, bioc);
2239 bio_list_add(&rbio->bio_list, bio);
2241 * This is a special bio which is used to hold the completion handler
2242 * and make the scrub rbio similar to the other types
2244 ASSERT(!bio->bi_iter.bi_size);
2245 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2248 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
2249 * to the end position, so this search can start from the first parity
2250 * stripe.
2252 for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2253 if (bioc->stripes[i].dev == scrub_dev) {
2258 ASSERT(i < rbio->real_stripes);
2260 bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
2264 /* Used for both parity scrub and missing. */
2265 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2266 unsigned int pgoff, u64 logical)
2268 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2272 ASSERT(logical >= rbio->bioc->raid_map[0]);
2273 ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
2274 BTRFS_STRIPE_LEN * rbio->nr_data);
2275 stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
2276 index = stripe_offset / sectorsize;
2277 rbio->bio_sectors[index].page = page;
2278 rbio->bio_sectors[index].pgoff = pgoff;
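/*
 * Worked example (editor's sketch): with sectorsize = 4KiB and
 * raid_map[0] = 1GiB, a scrub page for logical 1GiB + 72KiB gives
 * stripe_offset = 72KiB and index = 18; with stripe_nsectors = 16 that
 * is vertical slot 2 of the second data stripe in bio_sectors[].
 */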
2282 * We just scrub the parity for which we have correct data on the same
2283 * horizontal, so we don't need to allocate all pages for all the stripes.
2285 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2287 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2288 int total_sector_nr;
2290 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2291 total_sector_nr++) {
2293 int sectornr = total_sector_nr % rbio->stripe_nsectors;
2294 int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;
2296 if (!test_bit(sectornr, &rbio->dbitmap))
2298 if (rbio->stripe_pages[index])
2300 page = alloc_page(GFP_NOFS);
2303 rbio->stripe_pages[index] = page;
2305 index_stripe_sectors(rbio);
static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check)
{
	struct btrfs_io_context *bioc = rbio->bioc;
	const u32 sectorsize = bioc->fs_info->sectorsize;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = &rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int sectornr;
	bool has_qstripe;
	struct sector_ptr p_sector = { 0 };
	struct sector_ptr q_sector = { 0 };
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
	}

	/*
	 * Because the higher layers (the scrubber) are unlikely to use this
	 * area of the disk again soon, don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_sector.page = alloc_page(GFP_NOFS);
	if (!p_sector.page)
		return -ENOMEM;
	p_sector.uptodate = 1;

	if (has_qstripe) {
		/* RAID6, allocate and map temp space for the Q stripe */
		q_sector.page = alloc_page(GFP_NOFS);
		if (!q_sector.page) {
			__free_page(p_sector.page);
			p_sector.page = NULL;
			return -ENOMEM;
		}
		q_sector.uptodate = 1;
		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
	}

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	/* Map the parity stripe just once */
	pointers[nr_data] = kmap_local_page(p_sector.page);

	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
			pointers[stripe] = kmap_local_page(sector->page) +
					   sector->pgoff;
		}

		if (has_qstripe) {
			/* RAID6, call the library function to fill in our P/Q */
			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], sectorsize);
			run_xor(pointers + 1, nr_data - 1, sectorsize);
		}
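
		/*
		 * For RAID5 the parity P above is the byte-wise XOR of all
		 * data sectors (P = D0 ^ D1 ^ ... ^ Dn-1), computed in place
		 * by the memcpy + run_xor pair. For RAID6 the library
		 * gen_syndrome() call fills both P and the Reed-Solomon Q
		 * syndrome in one pass over the data pointers.
		 */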
		/* Check scrubbing parity and repair it */
		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		parity = kmap_local_page(sector->page) + sector->pgoff;
		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
			memcpy(parity, pointers[rbio->scrubp], sectorsize);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(&rbio->dbitmap, sectornr, 1);
		kunmap_local(parity);

		for (stripe = nr_data - 1; stripe >= 0; stripe--)
			kunmap_local(pointers[stripe]);
	}

	kunmap_local(pointers[nr_data]);
	__free_page(p_sector.page);
	p_sector.page = NULL;
	if (q_sector.page) {
		kunmap_local(pointers[rbio->real_stripes - 1]);
		__free_page(q_sector.page);
		q_sector.page = NULL;
	}

writeback:
	/*
	 * Time to start writing. Make bios for everything from the higher
	 * layers (the bio_list in our rbio) and our P/Q. Ignore everything
	 * else.
	 */
	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	/* Also write the repaired parity to the dev-replace target device. */
	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector,
					 bioc->tgtdev_map[rbio->scrubp],
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

submit_write:
	submit_write_bios(rbio, &bio_list);
	return 0;

cleanup:
	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
	return ret;
}
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	void **pointers = NULL;
	void **unmap_array = NULL;
	int sector_nr;
	int ret = 0;

	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores copy of pointers that does not get reordered
	 * during reconstruction so that kunmap_local works.
	 */
	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers || !unmap_array) {
		ret = -ENOMEM;
		goto out;
	}

	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int dfail = 0, failp = -1;
		int faila, failb;
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 &faila, &failb);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			goto out;
		}
		if (found_errors == 0)
			continue;

		/* We should have at least one error here. */
		ASSERT(faila >= 0 || failb >= 0);

		if (is_data_stripe(rbio, faila))
			dfail++;
		else if (is_parity_stripe(faila))
			failp = faila;

		if (is_data_stripe(rbio, failb))
			dfail++;
		else if (is_parity_stripe(failb))
			failp = failb;
		/*
		 * Because we can not use the scrubbing parity to repair the
		 * data, the repair capability is reduced. (In the case of
		 * RAID5, we can not repair anything.)
		 */
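		/*
		 * For example (illustrative only): on RAID6 max_errors is 2,
		 * so with the scrubbing parity unusable at most one failed
		 * data stripe can be tolerated (dfail <= 1); on RAID5
		 * max_errors is 1 and no failed data stripe can be repaired.
		 */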
		if (dfail > rbio->bioc->max_errors - 1) {
			ret = -EIO;
			goto out;
		}
		/*
		 * If all data is good and only the parity is bad, just
		 * repair the parity; no need to recover data stripes.
		 */
		if (dfail == 0)
			continue;

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6. If the corrupted parity is the scrubbing
		 * parity, we can luckily use the other one to repair the
		 * data; otherwise the data stripe can not be repaired.
		 */
		if (failp != rbio->scrubp) {
			ret = -EIO;
			goto out;
		}

		ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
		if (ret < 0)
			goto out;
	}
out:
	kfree(pointers);
	kfree(unmap_array);
	return ret;
}
static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio,
				    struct bio_list *bio_list)
{
	struct bio *bio;
	int total_sector_nr;
	int ret = 0;

	ASSERT(bio_list_size(bio_list) == 0);

	/* Build a list of bios to read all the missing parts. */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		struct sector_ptr *sector;

		/* No data in the vertical stripe, no need to read. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		/*
		 * We want to find all the sectors missing from the rbio and
		 * read them from the disk. If sector_in_rbio() finds a sector
		 * in the bio list we don't need to read it off the stripe.
		 */
		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
		if (sector)
			continue;

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		/*
		 * The bio cache may have handed us an uptodate sector. If so,
		 * use it.
		 */
		if (sector->uptodate)
			continue;

		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
					 sectornr, REQ_OP_READ);
		if (ret)
			goto error;
	}
	return 0;
error:
	while ((bio = bio_list_pop(bio_list)))
		bio_put(bio);
	return ret;
}
static int scrub_rbio(struct btrfs_raid_bio *rbio)
{
	bool need_check = false;
	struct bio_list bio_list;
	int sector_nr;
	int ret;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	ret = scrub_assemble_read_bios(rbio, &bio_list);
	if (ret < 0)
		goto cleanup;

	submit_read_bios(rbio, &bio_list);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);

	/* We may have some failures, recover the failed sectors first. */
	ret = recover_scrub_rbio(rbio);
	if (ret < 0)
		goto cleanup;

	/*
	 * We have every sector properly prepared and can finish the scrub
	 * and write back the good content.
	 */
	ret = finish_parity_scrub(rbio, need_check);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 NULL, NULL);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			break;
		}
	}
	return ret;

cleanup:
	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
	return ret;
}
static void scrub_rbio_work_locked(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	ret = scrub_rbio(rbio);
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}
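
/*
 * lock_stripe_add() returns 0 when this rbio acquired the stripe lock, and
 * nonzero when it was queued behind an existing lock holder; in the latter
 * case the scrub work is started later, when the lock is handed over on
 * unlock.
 */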
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_rbio_work_locked);
}
/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);
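
	/*
	 * Mark the bio's range as errors so that the recovery path rebuilds
	 * every covered sector of the missing device from the remaining
	 * devices.
	 */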
	set_rbio_range_error(rbio, bio);

	return rbio;
}
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	start_async_work(rbio, recover_rbio_work);
}