// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "scrub.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_ctx;

/*
 * The following value only influences the performance.
 *
 * This determines the batch size of stripes submitted in one go.
 */
#define SCRUB_STRIPES_PER_SCTX  8       /* That's 8 64K stripes per device. */

/*
 * This value needs to be large enough to hold the number of sectors in the
 * largest supported node/leaf, assuming the smallest supported (4K) sector
 * size.
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK     (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)

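/*
 * A worked example under the current limits: BTRFS_MAX_METADATA_BLOCKSIZE
 * is 64K, so this evaluates to 64K / 4K = 16 sectors per block.
 */
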
/* Represents one sector and the info needed to verify its content. */
struct scrub_sector_verification {
        bool is_metadata;

        union {
                /*
                 * Csum pointer for data csum verification.  Should point to a
                 * sector csum inside scrub_stripe::csums.
                 *
                 * NULL if this data sector has no csum.
                 */
                u8 *csum;

                /*
                 * Extra info for metadata verification.  All sectors inside a
                 * tree block share the same generation.
                 */
                u64 generation;
        };
};

enum scrub_stripe_flags {
        /* Set when @mirror_num, @dev, @physical and @logical are set. */
        SCRUB_STRIPE_FLAG_INITIALIZED,

        /* Set when the read-repair is finished. */
        SCRUB_STRIPE_FLAG_REPAIR_DONE,

        /*
         * Set for data stripes if the scrub is triggered from a P/Q stripe.
         * During such a scrub, we should not report errors in data stripes,
         * nor update the accounting.
         */
        SCRUB_STRIPE_FLAG_NO_REPORT,
};

#define SCRUB_STRIPE_PAGES              (BTRFS_STRIPE_LEN / PAGE_SIZE)

/*
 * Represents one contiguous range with a length of BTRFS_STRIPE_LEN.
 */
struct scrub_stripe {
        struct scrub_ctx *sctx;
        struct btrfs_block_group *bg;

        struct page *pages[SCRUB_STRIPE_PAGES];
        struct scrub_sector_verification *sectors;

        struct btrfs_device *dev;
        u64 logical;
        u64 physical;

        u16 mirror_num;

        /* Should be BTRFS_STRIPE_LEN / sectorsize. */
        u16 nr_sectors;

        /*
         * How many data/meta extents are in this stripe.  Only for scrub
         * status reporting purposes.
         */
        u16 nr_data_extents;
        u16 nr_meta_extents;

        atomic_t pending_io;
        wait_queue_head_t io_wait;
        wait_queue_head_t repair_wait;

        /*
         * Indicates the state of the stripe.  Bits are defined in the
         * scrub_stripe_flags enum.
         */
        unsigned long state;

        /* Indicates which sectors are covered by extent items. */
        unsigned long extent_sector_bitmap;

        /*
         * The errors hit during the initial read of the stripe.
         *
         * Used for error reporting and repair.
         *
         * The init_nr_* counters record the number of errors hit, and are
         * only used for error reporting.
         */
        unsigned long init_error_bitmap;
        unsigned int init_nr_io_errors;
        unsigned int init_nr_csum_errors;
        unsigned int init_nr_meta_errors;

        /*
         * The following error bitmaps are all for the current status.
         * Every time we submit a new read, these bitmaps may be updated.
         *
         * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
         *
         * IO and csum errors can happen for both metadata and data.
         */
        unsigned long error_bitmap;
        unsigned long io_error_bitmap;
        unsigned long csum_error_bitmap;
        unsigned long meta_error_bitmap;

        /* For writeback (repair or replace) error reporting. */
        unsigned long write_error_bitmap;

        /* Writeback can be concurrent, thus we need to protect the bitmap. */
        spinlock_t write_error_lock;

        /*
         * Checksum for the whole stripe if this stripe is inside a data block
         * group.
         */
        u8 *csums;

        struct work_struct work;
};

struct scrub_ctx {
        struct scrub_stripe     stripes[SCRUB_STRIPES_PER_SCTX];
        struct scrub_stripe     *raid56_data_stripes;
        struct btrfs_fs_info    *fs_info;
        int                     first_free;
        int                     cur_stripe;
        atomic_t                cancel_req;
        int                     readonly;
        int                     sectors_per_bio;

        /* State of IO submission throttling affecting the associated device */
        ktime_t                 throttle_deadline;
        u64                     throttle_sent;

        int                     is_dev_replace;
        u64                     write_pointer;

        struct mutex            wr_lock;
        struct btrfs_device     *wr_tgtdev;

        /*
         * statistics
         */
        struct btrfs_scrub_progress stat;
        spinlock_t              stat_lock;

        /*
         * Use a ref counter to avoid use-after-free issues. Scrub workers
         * decrement bios_in_flight and workers_pending and then do a wakeup
         * on the list_wait wait queue. We must ensure the main scrub task
         * doesn't free the scrub context before or while the workers are
         * doing the wakeup() call.
         */
        refcount_t              refs;
};

struct scrub_warning {
        struct btrfs_path       *path;
        u64                     extent_item_size;
        const char              *errstr;
        u64                     physical;
        u64                     logical;
        struct btrfs_device     *dev;
};

static void release_scrub_stripe(struct scrub_stripe *stripe)
{
        if (!stripe)
                return;

        for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
                if (stripe->pages[i])
                        __free_page(stripe->pages[i]);
                stripe->pages[i] = NULL;
        }
        kfree(stripe->sectors);
        kfree(stripe->csums);
        stripe->sectors = NULL;
        stripe->csums = NULL;
        stripe->sctx = NULL;
        stripe->state = 0;
}

static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
                             struct scrub_stripe *stripe)
{
        int ret;

        memset(stripe, 0, sizeof(*stripe));

        stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
        stripe->state = 0;

        init_waitqueue_head(&stripe->io_wait);
        init_waitqueue_head(&stripe->repair_wait);
        atomic_set(&stripe->pending_io, 0);
        spin_lock_init(&stripe->write_error_lock);

        ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages);
        if (ret < 0)
                goto error;

        stripe->sectors = kcalloc(stripe->nr_sectors,
                                  sizeof(struct scrub_sector_verification),
                                  GFP_KERNEL);
        if (!stripe->sectors)
                goto error;

        stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
                                fs_info->csum_size, GFP_KERNEL);
        if (!stripe->csums)
                goto error;
        return 0;
error:
        release_scrub_stripe(stripe);
        return -ENOMEM;
}

static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
{
        wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
}

static void scrub_put_ctx(struct scrub_ctx *sctx);

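/*
 * Pause handshake: the pausing side sets fs_info->scrub_pause_req and waits
 * for running scrubs to account themselves as paused; each scrub blocks
 * below (with scrub_lock dropped) until the request is cleared again.
 */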
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
        while (atomic_read(&fs_info->scrub_pause_req)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                   atomic_read(&fs_info->scrub_pause_req) == 0);
                mutex_lock(&fs_info->scrub_lock);
        }
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
        atomic_inc(&fs_info->scrubs_paused);
        wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
        mutex_lock(&fs_info->scrub_lock);
        __scrub_blocked_if_needed(fs_info);
        atomic_dec(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);

        wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
        scrub_pause_on(fs_info);
        scrub_pause_off(fs_info);
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
        int i;

        if (!sctx)
                return;

        for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++)
                release_scrub_stripe(&sctx->stripes[i]);

        kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
        if (refcount_dec_and_test(&sctx->refs))
                scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
                struct btrfs_fs_info *fs_info, int is_dev_replace)
{
        struct scrub_ctx *sctx;
        int             i;

        sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
        if (!sctx)
                goto nomem;
        refcount_set(&sctx->refs, 1);
        sctx->is_dev_replace = is_dev_replace;
        sctx->fs_info = fs_info;
        for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++) {
                int ret;

                ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
                if (ret < 0)
                        goto nomem;
                sctx->stripes[i].sctx = sctx;
        }
        sctx->first_free = 0;
        atomic_set(&sctx->cancel_req, 0);

        spin_lock_init(&sctx->stat_lock);
        sctx->throttle_deadline = 0;

        mutex_init(&sctx->wr_lock);
        if (is_dev_replace) {
                WARN_ON(!fs_info->dev_replace.tgtdev);
                sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
        }

        return sctx;

nomem:
        scrub_free_ctx(sctx);
        return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
                                     u64 root, void *warn_ctx)
{
        u32 nlink;
        int ret;
        int i;
        unsigned nofs_flag;
        struct extent_buffer *eb;
        struct btrfs_inode_item *inode_item;
        struct scrub_warning *swarn = warn_ctx;
        struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
        struct inode_fs_paths *ipath = NULL;
        struct btrfs_root *local_root;
        struct btrfs_key key;

        local_root = btrfs_get_fs_root(fs_info, root, true);
        if (IS_ERR(local_root)) {
                ret = PTR_ERR(local_root);
                goto err;
        }

        /*
         * This makes the path point to (inum INODE_ITEM ioff).
         */
        key.objectid = inum;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;

        ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
        if (ret) {
                btrfs_put_root(local_root);
                btrfs_release_path(swarn->path);
                goto err;
        }

        eb = swarn->path->nodes[0];
        inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
                                        struct btrfs_inode_item);
        nlink = btrfs_inode_nlink(eb, inode_item);
        btrfs_release_path(swarn->path);

        /*
         * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
         * uses GFP_NOFS in this context, so we keep it consistent but it does
         * not seem to be strictly necessary.
         */
        nofs_flag = memalloc_nofs_save();
        ipath = init_ipath(4096, local_root, swarn->path);
        memalloc_nofs_restore(nofs_flag);
        if (IS_ERR(ipath)) {
                btrfs_put_root(local_root);
                ret = PTR_ERR(ipath);
                ipath = NULL;
                goto err;
        }
        ret = paths_from_inode(inum, ipath);

        if (ret < 0)
                goto err;

        /*
         * We deliberately ignore the fact that ipath might have been too
         * small to hold all of the paths here.
         */
        for (i = 0; i < ipath->fspath->elem_cnt; ++i)
                btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
                                  swarn->errstr, swarn->logical,
                                  btrfs_dev_name(swarn->dev),
                                  swarn->physical,
                                  root, inum, offset,
                                  fs_info->sectorsize, nlink,
                                  (char *)(unsigned long)ipath->fspath->val[i]);

        btrfs_put_root(local_root);
        free_ipath(ipath);
        return 0;

err:
        btrfs_warn_in_rcu(fs_info,
                          "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
                          swarn->errstr, swarn->logical,
                          btrfs_dev_name(swarn->dev),
                          swarn->physical,
                          root, inum, offset, ret);

        free_ipath(ipath);
        return 0;
}

static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
                                       bool is_super, u64 logical, u64 physical)
{
        struct btrfs_fs_info *fs_info = dev->fs_info;
        struct btrfs_path *path;
        struct btrfs_key found_key;
        struct extent_buffer *eb;
        struct btrfs_extent_item *ei;
        struct scrub_warning swarn;
        u64 flags = 0;
        u32 item_size;
        int ret;

        /* Super block error, no need to search extent tree. */
        if (is_super) {
                btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
                                  errstr, btrfs_dev_name(dev), physical);
                return;
        }
        path = btrfs_alloc_path();
        if (!path)
                return;

        swarn.physical = physical;
        swarn.logical = logical;
        swarn.errstr = errstr;
        swarn.dev = NULL;

        ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
                                  &flags);
        if (ret < 0)
                goto out;

        swarn.extent_item_size = found_key.offset;

        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size(eb, path->slots[0]);

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                unsigned long ptr = 0;
                u8 ref_level;
                u64 ref_root;

                while (true) {
                        ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
                                                      item_size, &ref_root,
                                                      &ref_level);
                        if (ret < 0) {
                                btrfs_warn(fs_info,
                                "failed to resolve tree backref for logical %llu: %d",
                                                  swarn.logical, ret);
                                break;
                        }
                        if (ret > 0)
                                break;
                        btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
                                errstr, swarn.logical, btrfs_dev_name(dev),
                                swarn.physical, (ref_level ? "node" : "leaf"),
                                ref_level, ref_root);
                }
                btrfs_release_path(path);
        } else {
                struct btrfs_backref_walk_ctx ctx = { 0 };

                btrfs_release_path(path);

                ctx.bytenr = found_key.objectid;
                ctx.extent_item_pos = swarn.logical - found_key.objectid;
                ctx.fs_info = fs_info;

                swarn.path = path;
                swarn.dev = dev;

                iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
        }

out:
        btrfs_free_path(path);
}

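/*
 * On zoned devices the replace target must be written sequentially.  If the
 * position we are about to write is ahead of the zone's write pointer (e.g.
 * pointer at 1M, next write at 2M), zero-fill the gap first so the device
 * write pointer catches up.
 */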
static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
{
        int ret = 0;
        u64 length;

        if (!btrfs_is_zoned(sctx->fs_info))
                return 0;

        if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
                return 0;

        if (sctx->write_pointer < physical) {
                length = physical - sctx->write_pointer;

                ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
                                                sctx->write_pointer, length);
                if (!ret)
                        sctx->write_pointer = physical;
        }
        return ret;
}

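/*
 * Helpers mapping a sector number inside the stripe to its backing page and
 * in-page offset: the byte offset is sector_nr << sectorsize_bits, which is
 * then split into a page index and an offset within that page.
 */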
static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;

        return stripe->pages[page_index];
}

static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
                                                 int sector_nr)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;

        return offset_in_page(sector_nr << fs_info->sectorsize_bits);
}

static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
        const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
        const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
        const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
        u8 on_disk_csum[BTRFS_CSUM_SIZE];
        u8 calculated_csum[BTRFS_CSUM_SIZE];
        struct btrfs_header *header;

        /*
         * Here we don't have a good way to attach the pages (and subpages)
         * to a dummy extent buffer, thus we have to grab the members from
         * the pages directly.
         */
        header = (struct btrfs_header *)(page_address(first_page) + first_off);
        memcpy(on_disk_csum, header->csum, fs_info->csum_size);

        if (logical != btrfs_stack_header_bytenr(header)) {
                bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
                bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
                btrfs_warn_rl(fs_info,
                "tree block %llu mirror %u has bad bytenr, has %llu want %llu",
                              logical, stripe->mirror_num,
                              btrfs_stack_header_bytenr(header), logical);
                return;
        }
        if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
                   BTRFS_FSID_SIZE) != 0) {
                bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
                bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
                btrfs_warn_rl(fs_info,
                "tree block %llu mirror %u has bad fsid, has %pU want %pU",
                              logical, stripe->mirror_num,
                              header->fsid, fs_info->fs_devices->fsid);
                return;
        }
        if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
                   BTRFS_UUID_SIZE) != 0) {
                bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
                bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
                btrfs_warn_rl(fs_info,
                "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
                              logical, stripe->mirror_num,
                              header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
                return;
        }

        /* Now check the tree block csum. */
        shash->tfm = fs_info->csum_shash;
        crypto_shash_init(shash);
        crypto_shash_update(shash, page_address(first_page) + first_off +
                            BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);

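        /*
         * The checksum covers the whole tree block except the csum field
         * itself, so feed the remaining sectors of the node into the hash
         * as well.
         */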
        for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
                struct page *page = scrub_stripe_get_page(stripe, i);
                unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);

                crypto_shash_update(shash, page_address(page) + page_off,
                                    fs_info->sectorsize);
        }

        crypto_shash_final(shash, calculated_csum);
        if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
                bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
                bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
                btrfs_warn_rl(fs_info,
                "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
                              logical, stripe->mirror_num,
                              CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
                              CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
                return;
        }
        if (stripe->sectors[sector_nr].generation !=
            btrfs_stack_header_generation(header)) {
                bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
                bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
                btrfs_warn_rl(fs_info,
                "tree block %llu mirror %u has bad generation, has %llu want %llu",
                              logical, stripe->mirror_num,
                              btrfs_stack_header_generation(header),
                              stripe->sectors[sector_nr].generation);
                return;
        }
        bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
        bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
        bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
}

static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
        const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
        struct page *page = scrub_stripe_get_page(stripe, sector_nr);
        unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
        u8 csum_buf[BTRFS_CSUM_SIZE];
        int ret;

        ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);

        /* Sector not utilized, skip it. */
        if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
                return;

        /* IO error, no need to check. */
        if (test_bit(sector_nr, &stripe->io_error_bitmap))
                return;

        /* Metadata, verify the full tree block. */
        if (sector->is_metadata) {
                /*
                 * Check if the tree block crosses the stripe boundary.  If it
                 * does, we cannot verify it, only give a warning.
                 *
                 * This can only happen on a very old filesystem where chunks
                 * are not ensured to be stripe aligned.
                 */
                if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
                        btrfs_warn_rl(fs_info,
                        "tree block at %llu crosses stripe boundary %llu",
                                      stripe->logical +
                                      (sector_nr << fs_info->sectorsize_bits),
                                      stripe->logical);
                        return;
                }
                scrub_verify_one_metadata(stripe, sector_nr);
                return;
        }

        /*
         * Data is easier, we just verify the data csum (if we have it).  For
         * cases without a csum, we have no other choice but to trust it.
         */
        if (!sector->csum) {
                clear_bit(sector_nr, &stripe->error_bitmap);
                return;
        }

        ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
        if (ret < 0) {
                set_bit(sector_nr, &stripe->csum_error_bitmap);
                set_bit(sector_nr, &stripe->error_bitmap);
        } else {
                clear_bit(sector_nr, &stripe->csum_error_bitmap);
                clear_bit(sector_nr, &stripe->error_bitmap);
        }
}

/* Verify specified sectors of a stripe. */
static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
        int sector_nr;

        for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
                scrub_verify_one_sector(stripe, sector_nr);
                if (stripe->sectors[sector_nr].is_metadata)
                        sector_nr += sectors_per_tree - 1;
        }
}

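/*
 * Map the first bio_vec of a bio back to the sector number inside the
 * stripe, by matching its page pointer and in-page offset against the
 * stripe's own pages.
 */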
static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
{
        int i;

        for (i = 0; i < stripe->nr_sectors; i++) {
                if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
                    scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
                        break;
        }
        ASSERT(i < stripe->nr_sectors);
        return i;
}

/*
 * Repair read is different from the regular read:
 *
 * - Only reads the failed sectors
 * - May have extra blocksize limits
 */
static void scrub_repair_read_endio(struct btrfs_bio *bbio)
{
        struct scrub_stripe *stripe = bbio->private;
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct bio_vec *bvec;
        int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
        u32 bio_size = 0;
        int i;

        ASSERT(sector_nr < stripe->nr_sectors);

        bio_for_each_bvec_all(bvec, &bbio->bio, i)
                bio_size += bvec->bv_len;

        if (bbio->bio.bi_status) {
                bitmap_set(&stripe->io_error_bitmap, sector_nr,
                           bio_size >> fs_info->sectorsize_bits);
                bitmap_set(&stripe->error_bitmap, sector_nr,
                           bio_size >> fs_info->sectorsize_bits);
        } else {
                bitmap_clear(&stripe->io_error_bitmap, sector_nr,
                             bio_size >> fs_info->sectorsize_bits);
        }
        bio_put(&bbio->bio);
        if (atomic_dec_and_test(&stripe->pending_io))
                wake_up(&stripe->io_wait);
}

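/*
 * Pick the next mirror to try, wrapping around.  E.g. with num_copies == 3,
 * starting from mirror 1 the sequence is 1 -> 2 -> 3 -> 1, so every copy is
 * visited exactly once before returning to the starting mirror.
 */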
static int calc_next_mirror(int mirror, int num_copies)
{
        ASSERT(mirror <= num_copies);
        return (mirror + 1 > num_copies) ? 1 : mirror + 1;
}

static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
                                            int mirror, int blocksize, bool wait)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct btrfs_bio *bbio = NULL;
        const unsigned long old_error_bitmap = stripe->error_bitmap;
        int i;

        ASSERT(stripe->mirror_num >= 1);
        ASSERT(atomic_read(&stripe->pending_io) == 0);

        for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
                struct page *page;
                int pgoff;
                int ret;

                page = scrub_stripe_get_page(stripe, i);
                pgoff = scrub_stripe_get_page_offset(stripe, i);

                /* The current sector cannot be merged, submit the bio. */
                if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
                             bbio->bio.bi_iter.bi_size >= blocksize)) {
                        ASSERT(bbio->bio.bi_iter.bi_size);
                        atomic_inc(&stripe->pending_io);
                        btrfs_submit_bio(bbio, mirror);
                        if (wait)
                                wait_scrub_stripe_io(stripe);
                        bbio = NULL;
                }

                if (!bbio) {
                        bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
                                fs_info, scrub_repair_read_endio, stripe);
                        bbio->bio.bi_iter.bi_sector = (stripe->logical +
                                (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
                }

                ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
                ASSERT(ret == fs_info->sectorsize);
        }
        if (bbio) {
                ASSERT(bbio->bio.bi_iter.bi_size);
                atomic_inc(&stripe->pending_io);
                btrfs_submit_bio(bbio, mirror);
                if (wait)
                        wait_scrub_stripe_io(stripe);
        }
}

static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
                                       struct scrub_stripe *stripe)
{
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
        struct btrfs_fs_info *fs_info = sctx->fs_info;
        struct btrfs_device *dev = NULL;
        u64 physical = 0;
        int nr_data_sectors = 0;
        int nr_meta_sectors = 0;
        int nr_nodatacsum_sectors = 0;
        int nr_repaired_sectors = 0;
        int sector_nr;

        if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
                return;

        /*
         * Init the needed info for error reporting.
         *
         * Although our scrub_stripe infrastructure is mostly based on
         * btrfs_submit_bio() and thus needs no dev/physical itself, error
         * reporting still needs the dev and physical.
         */
        if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
                u64 mapped_len = fs_info->sectorsize;
                struct btrfs_io_context *bioc = NULL;
                int stripe_index = stripe->mirror_num - 1;
                int ret;

                /* For scrub, our mirror_num should always start at 1. */
                ASSERT(stripe->mirror_num >= 1);
                ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
                                      stripe->logical, &mapped_len, &bioc,
                                      NULL, NULL, 1);
                /*
                 * If we failed, dev will be NULL, and later detailed reports
                 * will just be skipped.
                 */
                if (ret < 0)
                        goto skip;
                physical = bioc->stripes[stripe_index].physical;
                dev = bioc->stripes[stripe_index].dev;
                btrfs_put_bioc(bioc);
        }

skip:
        for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
                bool repaired = false;

                if (stripe->sectors[sector_nr].is_metadata) {
                        nr_meta_sectors++;
                } else {
                        nr_data_sectors++;
                        if (!stripe->sectors[sector_nr].csum)
                                nr_nodatacsum_sectors++;
                }

                if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
                    !test_bit(sector_nr, &stripe->error_bitmap)) {
                        nr_repaired_sectors++;
                        repaired = true;
                }

                /* Good sector from the beginning, nothing needs to be done. */
                if (!test_bit(sector_nr, &stripe->init_error_bitmap))
                        continue;

                /*
                 * Report errors for the corrupted sectors.  If a sector was
                 * repaired, just output a message saying so.
                 */
                if (repaired) {
                        if (dev) {
                                btrfs_err_rl_in_rcu(fs_info,
                        "fixed up error at logical %llu on dev %s physical %llu",
                                            stripe->logical, btrfs_dev_name(dev),
                                            physical);
                        } else {
                                btrfs_err_rl_in_rcu(fs_info,
                        "fixed up error at logical %llu on mirror %u",
                                            stripe->logical, stripe->mirror_num);
                        }
                        continue;
                }

                /* The remaining sectors are all unrepaired. */
                if (dev) {
                        btrfs_err_rl_in_rcu(fs_info,
        "unable to fixup (regular) error at logical %llu on dev %s physical %llu",
                                            stripe->logical, btrfs_dev_name(dev),
                                            physical);
                } else {
                        btrfs_err_rl_in_rcu(fs_info,
        "unable to fixup (regular) error at logical %llu on mirror %u",
                                            stripe->logical, stripe->mirror_num);
                }

                if (test_bit(sector_nr, &stripe->io_error_bitmap))
                        if (__ratelimit(&rs) && dev)
                                scrub_print_common_warning("i/o error", dev, false,
                                                     stripe->logical, physical);
                if (test_bit(sector_nr, &stripe->csum_error_bitmap))
                        if (__ratelimit(&rs) && dev)
                                scrub_print_common_warning("checksum error", dev, false,
                                                     stripe->logical, physical);
                if (test_bit(sector_nr, &stripe->meta_error_bitmap))
                        if (__ratelimit(&rs) && dev)
                                scrub_print_common_warning("header error", dev, false,
                                                     stripe->logical, physical);
        }

        spin_lock(&sctx->stat_lock);
        sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
        sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
        sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
        sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
        sctx->stat.no_csum += nr_nodatacsum_sectors;
        sctx->stat.read_errors += stripe->init_nr_io_errors;
        sctx->stat.csum_errors += stripe->init_nr_csum_errors;
        sctx->stat.verify_errors += stripe->init_nr_meta_errors;
        sctx->stat.uncorrectable_errors +=
                bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
        sctx->stat.corrected_errors += nr_repaired_sectors;
        spin_unlock(&sctx->stat_lock);
}

/*
 * The main entry point for all read-related scrub work, including:
 *
 * - Wait for the initial read to finish
 * - Verify and locate any bad sectors
 * - Go through the remaining mirrors and try to read in as large a blocksize
 *   as possible
 * - Go through all mirrors (including the failed mirror) sector-by-sector
 *
 * Writeback does not happen here, it needs extra synchronization.
 */
static void scrub_stripe_read_repair_worker(struct work_struct *work)
{
        struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
                                          stripe->bg->length);
        int mirror;
        int i;

        ASSERT(stripe->mirror_num > 0);

        wait_scrub_stripe_io(stripe);
        scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
        /* Save the initial failed bitmap for later repair and report usage. */
        stripe->init_error_bitmap = stripe->error_bitmap;
        stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
                                                  stripe->nr_sectors);
        stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
                                                    stripe->nr_sectors);
        stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
                                                    stripe->nr_sectors);

        if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
                goto out;

        /*
         * Try all remaining mirrors.
         *
         * Here we still try to read as large a block as possible, as this is
         * faster and we have extra safety nets to rely on.
         */
        for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
             mirror != stripe->mirror_num;
             mirror = calc_next_mirror(mirror, num_copies)) {
                const unsigned long old_error_bitmap = stripe->error_bitmap;

                scrub_stripe_submit_repair_read(stripe, mirror,
                                                BTRFS_STRIPE_LEN, false);
                wait_scrub_stripe_io(stripe);
                scrub_verify_one_stripe(stripe, old_error_bitmap);
                if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
                        goto out;
        }

        /*
         * Last safety net, try re-checking all mirrors, including the failed
         * one, sector-by-sector.
         *
         * If one sector failed the drive's internal csum, the whole read
         * containing the offending sector would be marked as an error.
         * Thus here we do sector-by-sector reads.
         *
         * This can be slow, thus we only try it as the last resort.
         */

        for (i = 0, mirror = stripe->mirror_num;
             i < num_copies;
             i++, mirror = calc_next_mirror(mirror, num_copies)) {
                const unsigned long old_error_bitmap = stripe->error_bitmap;

                scrub_stripe_submit_repair_read(stripe, mirror,
                                                fs_info->sectorsize, true);
                wait_scrub_stripe_io(stripe);
                scrub_verify_one_stripe(stripe, old_error_bitmap);
                if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
                        goto out;
        }
out:
        scrub_stripe_report_errors(stripe->sctx, stripe);
        set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
        wake_up(&stripe->repair_wait);
}

static void scrub_read_endio(struct btrfs_bio *bbio)
{
        struct scrub_stripe *stripe = bbio->private;

        if (bbio->bio.bi_status) {
                bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
                bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors);
        } else {
                bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
        }
        bio_put(&bbio->bio);
        if (atomic_dec_and_test(&stripe->pending_io)) {
                wake_up(&stripe->io_wait);
                INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
                queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
        }
}

static void scrub_write_endio(struct btrfs_bio *bbio)
{
        struct scrub_stripe *stripe = bbio->private;
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct bio_vec *bvec;
        int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
        u32 bio_size = 0;
        int i;

        bio_for_each_bvec_all(bvec, &bbio->bio, i)
                bio_size += bvec->bv_len;

        if (bbio->bio.bi_status) {
                unsigned long flags;

                spin_lock_irqsave(&stripe->write_error_lock, flags);
                bitmap_set(&stripe->write_error_bitmap, sector_nr,
                           bio_size >> fs_info->sectorsize_bits);
                spin_unlock_irqrestore(&stripe->write_error_lock, flags);
        }
        bio_put(&bbio->bio);

        if (atomic_dec_and_test(&stripe->pending_io))
                wake_up(&stripe->io_wait);
}

static void scrub_submit_write_bio(struct scrub_ctx *sctx,
                                   struct scrub_stripe *stripe,
                                   struct btrfs_bio *bbio, bool dev_replace)
{
        struct btrfs_fs_info *fs_info = sctx->fs_info;
        u32 bio_len = bbio->bio.bi_iter.bi_size;
        u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
                      stripe->logical;

        fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
        atomic_inc(&stripe->pending_io);
        btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
        if (!btrfs_is_zoned(fs_info))
                return;
        /*
         * For zoned writeback, the queue depth must be 1, thus we must wait
         * for the write to finish before submitting the next one.
         */
        wait_scrub_stripe_io(stripe);

        /*
         * Also update the write pointer if the write finished successfully.
         */
        if (!test_bit(bio_off >> fs_info->sectorsize_bits,
                      &stripe->write_error_bitmap))
                sctx->write_pointer += bio_len;
}

/*
 * Submit the write bio(s) for the sectors specified by @write_bitmap.
 *
 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
 *
 * - Only needs logical bytenr and mirror_num
 *   Just like the scrub read path
 *
 * - Would only result in writes to the specified mirror
 *   Unlike the regular writeback path, which would write back to all stripes
 *
 * - Handles dev-replace and read-repair writeback differently
 */
static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
                                unsigned long write_bitmap, bool dev_replace)
{
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct btrfs_bio *bbio = NULL;
        int sector_nr;

        for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
                struct page *page = scrub_stripe_get_page(stripe, sector_nr);
                unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
                int ret;

                /* We should only write back sectors covered by an extent. */
                ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));

                /* Cannot merge with previous sector, submit the current one. */
                if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
                        scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
                        bbio = NULL;
                }
                if (!bbio) {
                        bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
                                               fs_info, scrub_write_endio, stripe);
                        bbio->bio.bi_iter.bi_sector = (stripe->logical +
                                (sector_nr << fs_info->sectorsize_bits)) >>
                                SECTOR_SHIFT;
                }
                ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
                ASSERT(ret == fs_info->sectorsize);
        }
        if (bbio)
                scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
}

/*
 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 * second.  Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max.
 */
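/*
 * A worked example: assuming a 64M/s limit, the slice is split into
 * min(64, 64M / 16M) = 4 intervals of 250ms each, and within one interval
 * at most 64M / 4 = 16M may be submitted before this function sleeps out
 * the remainder of the interval.
 */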
static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
                                  unsigned int bio_size)
{
        const int time_slice = 1000;
        s64 delta;
        ktime_t now;
        u32 div;
        u64 bwlimit;

        bwlimit = READ_ONCE(device->scrub_speed_max);
        if (bwlimit == 0)
                return;

        /*
         * The slice is divided into intervals in which the IO is submitted;
         * the interval count is derived from bwlimit and capped at 64.
         */
        div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
        div = min_t(u32, 64, div);

        /* Start new epoch, set deadline */
        now = ktime_get();
        if (sctx->throttle_deadline == 0) {
                sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
                sctx->throttle_sent = 0;
        }

        /* Still in the time to send? */
        if (ktime_before(now, sctx->throttle_deadline)) {
                /* If current bio is within the limit, send it */
                sctx->throttle_sent += bio_size;
                if (sctx->throttle_sent <= div_u64(bwlimit, div))
                        return;

                /* We're over the limit, sleep until the rest of the slice */
                delta = ktime_ms_delta(sctx->throttle_deadline, now);
        } else {
                /* New request after deadline, start new epoch */
                delta = 0;
        }

        if (delta) {
                long timeout;

                timeout = div_u64(delta * HZ, 1000);
                schedule_timeout_interruptible(timeout);
        }

        /* Next call will start the deadline period */
        sctx->throttle_deadline = 0;
}

/*
 * Given a physical address, this will calculate its logical offset.  If this
 * is a parity stripe, it will return the left-most data stripe's logical
 * offset.
 *
 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
 */
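/*
 * Example (RAID5 over 3 devices, i.e. 2 data stripes): in the first full
 * stripe, device 0 holds the data at logical offset 0, device 1 the data at
 * logical 64K, and device 2 the parity, for which this returns 1.
 */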
static int get_raid56_logic_offset(u64 physical, int num,
                                   struct map_lookup *map, u64 *offset,
                                   u64 *stripe_start)
{
        int i;
        int j = 0;
        u64 last_offset;
        const int data_stripes = nr_data_stripes(map);

        last_offset = (physical - map->stripes[num].physical) * data_stripes;
        if (stripe_start)
                *stripe_start = last_offset;

        *offset = last_offset;
        for (i = 0; i < data_stripes; i++) {
                u32 stripe_nr;
                u32 stripe_index;
                u32 rot;

                *offset = last_offset + (i << BTRFS_STRIPE_LEN_SHIFT);

                stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;

                /* Work out the disk rotation on this stripe-set */
                rot = stripe_nr % map->num_stripes;
                stripe_nr /= map->num_stripes;
                /* Calculate which stripe this data is located on */
                rot += i;
                stripe_index = rot % map->num_stripes;
                if (stripe_index == num)
                        return 0;
                if (stripe_index < num)
                        j++;
        }
        *offset = last_offset + (j << BTRFS_STRIPE_LEN_SHIFT);
        return 1;
}

/*
 * Return 0 if the extent item range covers any byte of the range.
 * Return <0 if the extent item is before @search_start.
 * Return >0 if the extent item is after @search_start + @search_len.
 */
static int compare_extent_item_range(struct btrfs_path *path,
                                     u64 search_start, u64 search_len)
{
        struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
        u64 len;
        struct btrfs_key key;

        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
        ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
               key.type == BTRFS_METADATA_ITEM_KEY);
        if (key.type == BTRFS_METADATA_ITEM_KEY)
                len = fs_info->nodesize;
        else
                len = key.offset;

        if (key.objectid + len <= search_start)
                return -1;
        if (key.objectid >= search_start + search_len)
                return 1;
        return 0;
}

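/*
 * E.g. for search range [1M, 1M + 64K): a 16K data extent starting at
 * 1M - 8K still overlaps the range, so it compares equal (returns 0).
 */
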
/*
 * Locate one extent item which covers any byte in the range
 * [@search_start, @search_start + @search_length)
 *
 * If the path is not initialized, we will initialize the search by doing
 * a btrfs_search_slot().
 * If the path is already initialized, we will use the path as the initial
 * slot, to avoid duplicated btrfs_search_slot() calls.
 *
 * NOTE: If an extent item starts before @search_start, we will still
 * return the extent item. This is for data extents crossing the stripe
 * boundary.
 *
 * Return 0 if we found such an extent item, and @path will point to it.
 * Return >0 if no such extent item can be found, and @path will be released.
 * Return <0 if we hit a fatal error, and @path will be released.
 */
1321 static int find_first_extent_item(struct btrfs_root *extent_root,
1322                                   struct btrfs_path *path,
1323                                   u64 search_start, u64 search_len)
1324 {
1325         struct btrfs_fs_info *fs_info = extent_root->fs_info;
1326         struct btrfs_key key;
1327         int ret;
1328
1329         /* Continue using the existing path */
1330         if (path->nodes[0])
1331                 goto search_forward;
1332
1333         if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1334                 key.type = BTRFS_METADATA_ITEM_KEY;
1335         else
1336                 key.type = BTRFS_EXTENT_ITEM_KEY;
1337         key.objectid = search_start;
1338         key.offset = (u64)-1;
1339
1340         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1341         if (ret < 0)
1342                 return ret;
1343
1344         ASSERT(ret > 0);
1345         /*
1346          * Here we intentionally pass 0 as @min_objectid, as there could be
1347          * an extent item starting before @search_start.
1348          */
1349         ret = btrfs_previous_extent_item(extent_root, path, 0);
1350         if (ret < 0)
1351                 return ret;
1352         /*
1353          * No matter whether we have found an extent item, the next loop will
1354          * properly do every check on the key.
1355          */
1356 search_forward:
1357         while (true) {
1358                 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1359                 if (key.objectid >= search_start + search_len)
1360                         break;
1361                 if (key.type != BTRFS_METADATA_ITEM_KEY &&
1362                     key.type != BTRFS_EXTENT_ITEM_KEY)
1363                         goto next;
1364
1365                 ret = compare_extent_item_range(path, search_start, search_len);
1366                 if (ret == 0)
1367                         return ret;
1368                 if (ret > 0)
1369                         break;
1370 next:
1371                 path->slots[0]++;
1372                 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
1373                         ret = btrfs_next_leaf(extent_root, path);
1374                         if (ret) {
1375                                 /* Either no more items or a fatal error */
1376                                 btrfs_release_path(path);
1377                                 return ret;
1378                         }
1379                 }
1380         }
1381         btrfs_release_path(path);
1382         return 1;
1383 }
1384
1385 static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
1386                             u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
1387 {
1388         struct btrfs_key key;
1389         struct btrfs_extent_item *ei;
1390
1391         btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1392         ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
1393                key.type == BTRFS_EXTENT_ITEM_KEY);
1394         *extent_start_ret = key.objectid;
1395         if (key.type == BTRFS_METADATA_ITEM_KEY)
1396                 *size_ret = path->nodes[0]->fs_info->nodesize;
1397         else
1398                 *size_ret = key.offset;
1399         ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
1400         *flags_ret = btrfs_extent_flags(path->nodes[0], ei);
1401         *generation_ret = btrfs_extent_generation(path->nodes[0], ei);
1402 }
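
/*
 * Illustrative sketch, not part of the original source: the intended way to
 * combine find_first_extent_item() and get_extent_info() to visit every
 * extent item overlapping [@start, @start + @len), reusing @path across
 * calls.  scrub_find_fill_first_stripe() below follows this pattern; the
 * handle_extent() callback is hypothetical.
 */
static int for_each_extent_item_sketch(struct btrfs_root *extent_root,
                                       u64 start, u64 len,
                                       void (*handle_extent)(u64 start, u64 len,
                                                             u64 flags, u64 gen))
{
        struct btrfs_path path = { 0 };
        u64 cur = start;
        int ret = 0;

        path.search_commit_root = 1;
        path.skip_locking = 1;
        while (cur < start + len) {
                u64 extent_start, extent_len, flags, gen;

                ret = find_first_extent_item(extent_root, &path, cur,
                                             start + len - cur);
                if (ret)        /* <0 for error, >0 for no more items. */
                        break;
                get_extent_info(&path, &extent_start, &extent_len, &flags, &gen);
                handle_extent(extent_start, extent_len, flags, gen);
                /* The extent may start before @cur; always move past its end. */
                cur = extent_start + extent_len;
        }
        btrfs_release_path(&path);
        return ret < 0 ? ret : 0;
}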
1403
1404 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
1405                                         u64 physical, u64 physical_end)
1406 {
1407         struct btrfs_fs_info *fs_info = sctx->fs_info;
1408         int ret = 0;
1409
1410         if (!btrfs_is_zoned(fs_info))
1411                 return 0;
1412
1413         mutex_lock(&sctx->wr_lock);
1414         if (sctx->write_pointer < physical_end) {
1415                 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1416                                                     physical,
1417                                                     sctx->write_pointer);
1418                 if (ret)
1419                         btrfs_err(fs_info,
1420                                   "zoned: failed to recover write pointer");
1421         }
1422         mutex_unlock(&sctx->wr_lock);
1423         btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1424
1425         return ret;
1426 }
1427
1428 static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
1429                                  struct scrub_stripe *stripe,
1430                                  u64 extent_start, u64 extent_len,
1431                                  u64 extent_flags, u64 extent_gen)
1432 {
1433         for (u64 cur_logical = max(stripe->logical, extent_start);
1434              cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
1435                                extent_start + extent_len);
1436              cur_logical += fs_info->sectorsize) {
1437                 const int nr_sector = (cur_logical - stripe->logical) >>
1438                                       fs_info->sectorsize_bits;
1439                 struct scrub_sector_verification *sector =
1440                                                 &stripe->sectors[nr_sector];
1441
1442                 set_bit(nr_sector, &stripe->extent_sector_bitmap);
1443                 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1444                         sector->is_metadata = true;
1445                         sector->generation = extent_gen;
1446                 }
1447         }
1448 }
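
/*
 * Worked example (illustrative numbers only): with a 4K sectorsize
 * (sectorsize_bits == 12) and stripe->logical at 1M, an extent spanning
 * [1M + 8K, 1M + 24K) sets bits 2-5 of extent_sector_bitmap, i.e.
 * (8K >> 12) through ((24K - 4K) >> 12).
 */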
1449
1450 static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
1451 {
1452         stripe->extent_sector_bitmap = 0;
1453         stripe->init_error_bitmap = 0;
1454         stripe->init_nr_io_errors = 0;
1455         stripe->init_nr_csum_errors = 0;
1456         stripe->init_nr_meta_errors = 0;
1457         stripe->error_bitmap = 0;
1458         stripe->io_error_bitmap = 0;
1459         stripe->csum_error_bitmap = 0;
1460         stripe->meta_error_bitmap = 0;
1461 }
1462
1463 /*
1464  * Locate one stripe which has at least one extent in its range.
1465  *
1466  * Return 0 if found such stripe, and store its info into @stripe.
1467  * Return >0 if there is no such stripe in the specified range.
1468  * Return <0 for error.
1469  */
1470 static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
1471                                         struct btrfs_device *dev, u64 physical,
1472                                         int mirror_num, u64 logical_start,
1473                                         u32 logical_len,
1474                                         struct scrub_stripe *stripe)
1475 {
1476         struct btrfs_fs_info *fs_info = bg->fs_info;
1477         struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
1478         struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
1479         const u64 logical_end = logical_start + logical_len;
1480         struct btrfs_path path = { 0 };
1481         u64 cur_logical = logical_start;
1482         u64 stripe_end;
1483         u64 extent_start;
1484         u64 extent_len;
1485         u64 extent_flags;
1486         u64 extent_gen;
1487         int ret;
1488
1489         memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
1490                                    stripe->nr_sectors);
1491         scrub_stripe_reset_bitmaps(stripe);
1492
1493         /* The range must be inside the bg. */
1494         ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
1495
1496         path.search_commit_root = 1;
1497         path.skip_locking = 1;
1498
1499         ret = find_first_extent_item(extent_root, &path, logical_start, logical_len);
1500         /* Either error or not found. */
1501         if (ret)
1502                 goto out;
1503         get_extent_info(&path, &extent_start, &extent_len, &extent_flags, &extent_gen);
1504         if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1505                 stripe->nr_meta_extents++;
1506         if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1507                 stripe->nr_data_extents++;
1508         cur_logical = max(extent_start, cur_logical);
1509
1510         /*
1511          * Round down to stripe boundary.
1512          *
1513          * The extra calculation against bg->start is to handle block groups
1514          * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
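         *
         * E.g. (illustrative numbers): with bg->start = 1M + 32K and
         * cur_logical = bg->start + 100K, round_down(100K, 64K) = 64K, so
         * stripe->logical becomes bg->start + 64K.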
1515          */
1516         stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
1517                           bg->start;
1518         stripe->physical = physical + stripe->logical - logical_start;
1519         stripe->dev = dev;
1520         stripe->bg = bg;
1521         stripe->mirror_num = mirror_num;
1522         stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;
1523
1524         /* Fill the first extent info into stripe->sectors[] array. */
1525         fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1526                              extent_flags, extent_gen);
1527         cur_logical = extent_start + extent_len;
1528
1529         /* Fill the extent info for the remaining sectors. */
1530         while (cur_logical <= stripe_end) {
1531                 ret = find_first_extent_item(extent_root, &path, cur_logical,
1532                                              stripe_end - cur_logical + 1);
1533                 if (ret < 0)
1534                         goto out;
1535                 if (ret > 0) {
1536                         ret = 0;
1537                         break;
1538                 }
1539                 get_extent_info(&path, &extent_start, &extent_len,
1540                                 &extent_flags, &extent_gen);
1541                 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1542                         stripe->nr_meta_extents++;
1543                 if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1544                         stripe->nr_data_extents++;
1545                 fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1546                                      extent_flags, extent_gen);
1547                 cur_logical = extent_start + extent_len;
1548         }
1549
1550         /* Now fill the data csum. */
1551         if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
1552                 int sector_nr;
1553                 unsigned long csum_bitmap = 0;
1554
1555                 /* Csum space should have already been allocated. */
1556                 ASSERT(stripe->csums);
1557
1558                 /*
1559                  * Our csum bitmap should be large enough, as one BTRFS_STRIPE_LEN
1560                  * stripe contains at most 16 sectors (64K / 4K minimum sectorsize).
1561                  */
1562                 ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1563
1564                 ret = btrfs_lookup_csums_bitmap(csum_root, stripe->logical,
1565                                                 stripe_end, stripe->csums,
1566                                                 &csum_bitmap, true);
1567                 if (ret < 0)
1568                         goto out;
1569                 if (ret > 0)
1570                         ret = 0;
1571
1572                 for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
1573                         stripe->sectors[sector_nr].csum = stripe->csums +
1574                                 sector_nr * fs_info->csum_size;
1575                 }
1576         }
1577         set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1578 out:
1579         btrfs_release_path(&path);
1580         return ret;
1581 }
1582
1583 static void scrub_reset_stripe(struct scrub_stripe *stripe)
1584 {
1585         scrub_stripe_reset_bitmaps(stripe);
1586
1587         stripe->nr_meta_extents = 0;
1588         stripe->nr_data_extents = 0;
1589         stripe->state = 0;
1590
1591         for (int i = 0; i < stripe->nr_sectors; i++) {
1592                 stripe->sectors[i].is_metadata = false;
1593                 stripe->sectors[i].csum = NULL;
1594                 stripe->sectors[i].generation = 0;
1595         }
1596 }
1597
1598 static void scrub_submit_initial_read(struct scrub_ctx *sctx,
1599                                       struct scrub_stripe *stripe)
1600 {
1601         struct btrfs_fs_info *fs_info = sctx->fs_info;
1602         struct btrfs_bio *bbio;
1603         int mirror = stripe->mirror_num;
1604
1605         ASSERT(stripe->bg);
1606         ASSERT(stripe->mirror_num > 0);
1607         ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1608
1609         bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
1610                                scrub_read_endio, stripe);
1611
1612         /* Read the whole stripe. */
1613         bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
1614         for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) {
1615                 int ret;
1616
1617                 ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0);
1618                 /* We should have allocated enough bio vectors. */
1619                 ASSERT(ret == PAGE_SIZE);
1620         }
1621         atomic_inc(&stripe->pending_io);
1622
1623         /*
1624          * For dev-replace, if the user asks to avoid the source dev, or
1625          * if the device is missing, we try the next mirror instead.
1626          */
1627         if (sctx->is_dev_replace &&
1628             (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
1629              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
1630              !stripe->dev->bdev)) {
1631                 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1632                                                   stripe->bg->length);
1633
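                /*
                 * calc_next_mirror() is assumed to advance to the next copy,
                 * wrapping past the last one: e.g. with RAID1 (2 copies) and
                 * mirror 1, the read is retried on mirror 2 instead.
                 */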
1634                 mirror = calc_next_mirror(mirror, num_copies);
1635         }
1636         btrfs_submit_bio(bbio, mirror);
1637 }
1638
1639 static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
1640 {
1641         int i;
1642
1643         for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
1644                 if (stripe->sectors[i].is_metadata) {
1645                         struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1646
1647                         btrfs_err(fs_info,
1648                         "stripe %llu has unrepaired metadata sector at %llu",
1649                                   stripe->logical,
1650                                   stripe->logical + (i << fs_info->sectorsize_bits));
1651                         return true;
1652                 }
1653         }
1654         return false;
1655 }
1656
1657 static int flush_scrub_stripes(struct scrub_ctx *sctx)
1658 {
1659         struct btrfs_fs_info *fs_info = sctx->fs_info;
1660         struct scrub_stripe *stripe;
1661         const int nr_stripes = sctx->cur_stripe;
1662         int ret = 0;
1663
1664         if (!nr_stripes)
1665                 return 0;
1666
1667         ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
1668
1669         scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1670                               nr_stripes << BTRFS_STRIPE_LEN_SHIFT);
1671         for (int i = 0; i < nr_stripes; i++) {
1672                 stripe = &sctx->stripes[i];
1673                 scrub_submit_initial_read(sctx, stripe);
1674         }
1675
1676         for (int i = 0; i < nr_stripes; i++) {
1677                 stripe = &sctx->stripes[i];
1678
1679                 wait_event(stripe->repair_wait,
1680                            test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1681         }
1682
1683         /*
1684          * Submit the repaired sectors.  For the zoned case, we cannot do the
1685          * repair in-place, but queue the bg to be relocated instead.
1686          */
1687         if (btrfs_is_zoned(fs_info)) {
1688                 for (int i = 0; i < nr_stripes; i++) {
1689                         stripe = &sctx->stripes[i];
1690
1691                         if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) {
1692                                 btrfs_repair_one_zone(fs_info,
1693                                                       sctx->stripes[0].bg->start);
1694                                 break;
1695                         }
1696                 }
1697         } else if (!sctx->readonly) {
1698                 for (int i = 0; i < nr_stripes; i++) {
1699                         unsigned long repaired;
1700
1701                         stripe = &sctx->stripes[i];
1702
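                        /*
                         * Sectors set in the initial error bitmap but now
                         * clear in the final one were repaired: e.g. an
                         * initial bitmap of 0b0110 with remaining errors
                         * 0b0100 yields a repaired bitmap of 0b0010.
                         */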
1703                         bitmap_andnot(&repaired, &stripe->init_error_bitmap,
1704                                       &stripe->error_bitmap, stripe->nr_sectors);
1705                         scrub_write_sectors(sctx, stripe, repaired, false);
1706                 }
1707         }
1708
1709         /* Submit for dev-replace. */
1710         if (sctx->is_dev_replace) {
1711                 /*
1712                  * For dev-replace, if we know there is something wrong with
1713                  * metadata, we should immediately abort.
1714                  */
1715                 for (int i = 0; i < nr_stripes; i++) {
1716                         if (stripe_has_metadata_error(&sctx->stripes[i])) {
1717                                 ret = -EIO;
1718                                 goto out;
1719                         }
1720                 }
1721                 for (int i = 0; i < nr_stripes; i++) {
1722                         unsigned long good;
1723
1724                         stripe = &sctx->stripes[i];
1725
1726                         ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
1727
1728                         bitmap_andnot(&good, &stripe->extent_sector_bitmap,
1729                                       &stripe->error_bitmap, stripe->nr_sectors);
1730                         scrub_write_sectors(sctx, stripe, good, true);
1731                 }
1732         }
1733
1734         /* Wait for the above writebacks to finish. */
1735         for (int i = 0; i < nr_stripes; i++) {
1736                 stripe = &sctx->stripes[i];
1737
1738                 wait_scrub_stripe_io(stripe);
1739                 scrub_reset_stripe(stripe);
1740         }
1741 out:
1742         sctx->cur_stripe = 0;
1743         return ret;
1744 }
1745
1746 static void raid56_scrub_wait_endio(struct bio *bio)
1747 {
1748         complete(bio->bi_private);
1749 }
1750
1751 static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1752                               struct btrfs_device *dev, int mirror_num,
1753                               u64 logical, u32 length, u64 physical)
1754 {
1755         struct scrub_stripe *stripe;
1756         int ret;
1757
1758         /* No available slot, submit all stripes and wait for them. */
1759         if (sctx->cur_stripe >= SCRUB_STRIPES_PER_SCTX) {
1760                 ret = flush_scrub_stripes(sctx);
1761                 if (ret < 0)
1762                         return ret;
1763         }
1764
1765         stripe = &sctx->stripes[sctx->cur_stripe];
1766
1767         /* We can queue one stripe using the remaining slot. */
1768         scrub_reset_stripe(stripe);
1769         ret = scrub_find_fill_first_stripe(bg, dev, physical, mirror_num,
1770                                            logical, length, stripe);
1771         /* Either >0 for no more extents or <0 for error. */
1772         if (ret)
1773                 return ret;
1774         sctx->cur_stripe++;
1775         return 0;
1776 }
1777
1778 static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1779                                       struct btrfs_device *scrub_dev,
1780                                       struct btrfs_block_group *bg,
1781                                       struct map_lookup *map,
1782                                       u64 full_stripe_start)
1783 {
1784         DECLARE_COMPLETION_ONSTACK(io_done);
1785         struct btrfs_fs_info *fs_info = sctx->fs_info;
1786         struct btrfs_raid_bio *rbio;
1787         struct btrfs_io_context *bioc = NULL;
1788         struct bio *bio;
1789         struct scrub_stripe *stripe;
1790         bool all_empty = true;
1791         const int data_stripes = nr_data_stripes(map);
1792         unsigned long extent_bitmap = 0;
1793         u64 length = data_stripes << BTRFS_STRIPE_LEN_SHIFT;
1794         int ret;
1795
1796         ASSERT(sctx->raid56_data_stripes);
1797
1798         for (int i = 0; i < data_stripes; i++) {
1799                 int stripe_index;
1800                 int rot;
1801                 u64 physical;
1802
1803                 stripe = &sctx->raid56_data_stripes[i];
1804                 rot = div_u64(full_stripe_start - bg->start,
1805                               data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
1806                 stripe_index = (i + rot) % map->num_stripes;
1807                 physical = map->stripes[stripe_index].physical +
1808                            (rot << BTRFS_STRIPE_LEN_SHIFT);
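                /*
                 * Worked example (illustrative numbers only): RAID5 over 3
                 * devices (data_stripes == 2) with full_stripe_start 256K
                 * into the bg: rot = (256K / 2) >> 16 = 2, so data stripe
                 * i = 0 maps to stripe_index (0 + 2) % 3 = 2, at an extra
                 * 2 * 64K of physical offset.
                 */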
1809
1810                 scrub_reset_stripe(stripe);
1811                 set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
1812                 ret = scrub_find_fill_first_stripe(bg,
1813                                 map->stripes[stripe_index].dev, physical, 1,
1814                                 full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT),
1815                                 BTRFS_STRIPE_LEN, stripe);
1816                 if (ret < 0)
1817                         goto out;
1818                 /*
1819                  * No extent in this data stripe; manually mark it
1820                  * initialized to make the later read submission happy.
1821                  */
1822                 if (ret > 0) {
1823                         stripe->logical = full_stripe_start +
1824                                           (i << BTRFS_STRIPE_LEN_SHIFT);
1825                         stripe->dev = map->stripes[stripe_index].dev;
1826                         stripe->mirror_num = 1;
1827                         set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1828                 }
1829         }
1830
1831         /* Check if all data stripes are empty. */
1832         for (int i = 0; i < data_stripes; i++) {
1833                 stripe = &sctx->raid56_data_stripes[i];
1834                 if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
1835                         all_empty = false;
1836                         break;
1837                 }
1838         }
1839         if (all_empty) {
1840                 ret = 0;
1841                 goto out;
1842         }
1843
1844         for (int i = 0; i < data_stripes; i++) {
1845                 stripe = &sctx->raid56_data_stripes[i];
1846                 scrub_submit_initial_read(sctx, stripe);
1847         }
1848         for (int i = 0; i < data_stripes; i++) {
1849                 stripe = &sctx->raid56_data_stripes[i];
1850
1851                 wait_event(stripe->repair_wait,
1852                            test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1853         }
1854         /* For now, no zoned support for RAID56. */
1855         ASSERT(!btrfs_is_zoned(sctx->fs_info));
1856
1857         /* Writeback for the repaired sectors. */
1858         for (int i = 0; i < data_stripes; i++) {
1859                 unsigned long repaired;
1860
1861                 stripe = &sctx->raid56_data_stripes[i];
1862
1863                 bitmap_andnot(&repaired, &stripe->init_error_bitmap,
1864                               &stripe->error_bitmap, stripe->nr_sectors);
1865                 scrub_write_sectors(sctx, stripe, repaired, false);
1866         }
1867
1868         /* Wait for the above writebacks to finish. */
1869         for (int i = 0; i < data_stripes; i++) {
1870                 stripe = &sctx->raid56_data_stripes[i];
1871
1872                 wait_scrub_stripe_io(stripe);
1873         }
1874
1875         /*
1876          * Now all data stripes are properly verified. Check if we have any
1877          * unrepaired sectors; if so, abort immediately, or we could
1878          * further corrupt the P/Q stripes.
1879          *
1880          * During the loop, also populate extent_bitmap.
1881          */
1882         for (int i = 0; i < data_stripes; i++) {
1883                 unsigned long error;
1884
1885                 stripe = &sctx->raid56_data_stripes[i];
1886
1887                 /*
1888                  * We should only check errors where there is an extent,
1889                  * as we may hit an empty data stripe whose device is missing.
1890                  */
1891                 bitmap_and(&error, &stripe->error_bitmap,
1892                            &stripe->extent_sector_bitmap, stripe->nr_sectors);
1893                 if (!bitmap_empty(&error, stripe->nr_sectors)) {
1894                         btrfs_err(fs_info,
1895 "unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
1896                                   full_stripe_start, i, stripe->nr_sectors,
1897                                   &error);
1898                         ret = -EIO;
1899                         goto out;
1900                 }
1901                 bitmap_or(&extent_bitmap, &extent_bitmap,
1902                           &stripe->extent_sector_bitmap, stripe->nr_sectors);
1903         }
1904
1905         /* Now we can check and regenerate the P/Q stripe. */
1906         bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
1907         bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
1908         bio->bi_private = &io_done;
1909         bio->bi_end_io = raid56_scrub_wait_endio;
1910
1911         btrfs_bio_counter_inc_blocked(fs_info);
1912         ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
1913                               &length, &bioc, NULL, NULL, 1);
1914         if (ret < 0) {
1915                 btrfs_put_bioc(bioc);
1916                 btrfs_bio_counter_dec(fs_info);
1917                 goto out;
1918         }
1919         rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
1920                                 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1921         btrfs_put_bioc(bioc);
1922         if (!rbio) {
1923                 ret = -ENOMEM;
1924                 btrfs_bio_counter_dec(fs_info);
1925                 goto out;
1926         }
1927         /* Use the recovered stripes as cache to avoid reading them from disk again. */
1928         for (int i = 0; i < data_stripes; i++) {
1929                 stripe = &sctx->raid56_data_stripes[i];
1930
1931                 raid56_parity_cache_data_pages(rbio, stripe->pages,
1932                                 full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
1933         }
1934         raid56_parity_submit_scrub_rbio(rbio);
1935         wait_for_completion_io(&io_done);
1936         ret = blk_status_to_errno(bio->bi_status);
1937         bio_put(bio);
1938         btrfs_bio_counter_dec(fs_info);
1939
1940 out:
1941         return ret;
1942 }
1943
1944 /*
1945  * Scrub one range which can only have a simple mirror based profile.
1946  * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
1947  *  RAID0/RAID10).
1948  *
1949  * Since we may need to handle a subset of a block group, we need the
1950  * @logical_start and @logical_length parameters.
1951  */
1952 static int scrub_simple_mirror(struct scrub_ctx *sctx,
1953                                struct btrfs_block_group *bg,
1954                                struct map_lookup *map,
1955                                u64 logical_start, u64 logical_length,
1956                                struct btrfs_device *device,
1957                                u64 physical, int mirror_num)
1958 {
1959         struct btrfs_fs_info *fs_info = sctx->fs_info;
1960         const u64 logical_end = logical_start + logical_length;
1961         /* An artificial limit, inherited from the old scrub behavior */
1962         struct btrfs_path path = { 0 };
1963         u64 cur_logical = logical_start;
1964         int ret;
1965
1966         /* The range must be inside the bg */
1967         ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
1968
1969         path.search_commit_root = 1;
1970         path.skip_locking = 1;
1971         /* Go through each extent item inside the logical range */
1972         while (cur_logical < logical_end) {
1973                 u64 cur_physical = physical + cur_logical - logical_start;
1974
1975                 /* Canceled? */
1976                 if (atomic_read(&fs_info->scrub_cancel_req) ||
1977                     atomic_read(&sctx->cancel_req)) {
1978                         ret = -ECANCELED;
1979                         break;
1980                 }
1981                 /* Paused? */
1982                 if (atomic_read(&fs_info->scrub_pause_req)) {
1983                         /* Push queued extents */
1984                         scrub_blocked_if_needed(fs_info);
1985                 }
1986                 /* Block group removed? */
1987                 spin_lock(&bg->lock);
1988                 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
1989                         spin_unlock(&bg->lock);
1990                         ret = 0;
1991                         break;
1992                 }
1993                 spin_unlock(&bg->lock);
1994
1995                 ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
1996                                          cur_logical, logical_end - cur_logical,
1997                                          cur_physical);
1998                 if (ret > 0) {
1999                         /* No more extents, just update the accounting */
2000                         sctx->stat.last_physical = physical + logical_length;
2001                         ret = 0;
2002                         break;
2003                 }
2004                 if (ret < 0)
2005                         break;
2006
2007                 ASSERT(sctx->cur_stripe > 0);
2008                 cur_logical = sctx->stripes[sctx->cur_stripe - 1].logical
2009                               + BTRFS_STRIPE_LEN;
2010
2011                 /* Don't hold the CPU for too long */
2012                 cond_resched();
2013         }
2014         btrfs_release_path(&path);
2015         return ret;
2016 }
2017
2018 /* Calculate the full stripe length for simple stripe based profiles */
2019 static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
2020 {
2021         ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2022                             BTRFS_BLOCK_GROUP_RAID10));
2023
2024         return (map->num_stripes / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT;
2025 }
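
/*
 * E.g. (illustrative): RAID0 over 4 devices yields a 4 * 64K full stripe,
 * while RAID10 with num_stripes = 4 and sub_stripes = 2 yields 2 * 64K,
 * as each mirrored pair of stripes counts only once.
 */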
2026
2027 /* Get the logical bytenr for the stripe */
2028 static u64 simple_stripe_get_logical(struct map_lookup *map,
2029                                      struct btrfs_block_group *bg,
2030                                      int stripe_index)
2031 {
2032         ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2033                             BTRFS_BLOCK_GROUP_RAID10));
2034         ASSERT(stripe_index < map->num_stripes);
2035
2036         /*
2037          * (stripe_index / sub_stripes) gives how many data stripes we need to
2038          * skip.
2039          */
2040         return ((stripe_index / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT) +
2041                bg->start;
2042 }
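
/*
 * E.g. (illustrative): for RAID10 with sub_stripes = 2, stripe_index 2 or 3
 * maps to the second data stripe, i.e. bg->start + 64K.
 */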
2043
2044 /* Get the mirror number for the stripe */
2045 static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
2046 {
2047         ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2048                             BTRFS_BLOCK_GROUP_RAID10));
2049         ASSERT(stripe_index < map->num_stripes);
2050
2051         /* For RAID0 it's fixed to 1; for RAID10 it alternates 1,2,1,2... */
2052         return stripe_index % map->sub_stripes + 1;
2053 }
2054
2055 static int scrub_simple_stripe(struct scrub_ctx *sctx,
2056                                struct btrfs_block_group *bg,
2057                                struct map_lookup *map,
2058                                struct btrfs_device *device,
2059                                int stripe_index)
2060 {
2061         const u64 logical_increment = simple_stripe_full_stripe_len(map);
2062         const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
2063         const u64 orig_physical = map->stripes[stripe_index].physical;
2064         const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
2065         u64 cur_logical = orig_logical;
2066         u64 cur_physical = orig_physical;
2067         int ret = 0;
2068
2069         while (cur_logical < bg->start + bg->length) {
2070                 /*
2071                  * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
2072                  * just RAID1, so we can reuse scrub_simple_mirror() to scrub
2073                  * this stripe.
2074                  */
2075                 ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
2076                                           BTRFS_STRIPE_LEN, device, cur_physical,
2077                                           mirror_num);
2078                 if (ret)
2079                         return ret;
2080                 /* Skip to next stripe which belongs to the target device */
2081                 cur_logical += logical_increment;
2082                 /* For physical offset, we just go to next stripe */
2083                 cur_physical += BTRFS_STRIPE_LEN;
2084         }
2085         return ret;
2086 }
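
/*
 * Illustrative walk-through: for RAID0 over two devices, stripe_index 0
 * scrubs logical bg->start, bg->start + 128K, bg->start + 256K, ..., while
 * its physical cursor advances only 64K per iteration, since the other 64K
 * of each full stripe lives on the other device.
 */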
2087
2088 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2089                                            struct btrfs_block_group *bg,
2090                                            struct extent_map *em,
2091                                            struct btrfs_device *scrub_dev,
2092                                            int stripe_index)
2093 {
2094         struct btrfs_fs_info *fs_info = sctx->fs_info;
2095         struct map_lookup *map = em->map_lookup;
2096         const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2097         const u64 chunk_logical = bg->start;
2098         int ret;
2099         int ret2;
2100         u64 physical = map->stripes[stripe_index].physical;
2101         const u64 dev_stripe_len = btrfs_calc_stripe_length(em);
2102         const u64 physical_end = physical + dev_stripe_len;
2103         u64 logical;
2104         u64 logic_end;
2105         /* The logical increment after finishing one stripe */
2106         u64 increment;
2107         /* Offset inside the chunk */
2108         u64 offset;
2109         u64 stripe_logical;
2110         int stop_loop = 0;
2111
2112         scrub_blocked_if_needed(fs_info);
2113
2114         if (sctx->is_dev_replace &&
2115             btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2116                 mutex_lock(&sctx->wr_lock);
2117                 sctx->write_pointer = physical;
2118                 mutex_unlock(&sctx->wr_lock);
2119         }
2120
2121         /* Prepare the extra data stripes used by RAID56. */
2122         if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2123                 ASSERT(sctx->raid56_data_stripes == NULL);
2124
2125                 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2126                                                     sizeof(struct scrub_stripe),
2127                                                     GFP_KERNEL);
2128                 if (!sctx->raid56_data_stripes) {
2129                         ret = -ENOMEM;
2130                         goto out;
2131                 }
2132                 for (int i = 0; i < nr_data_stripes(map); i++) {
2133                         ret = init_scrub_stripe(fs_info,
2134                                                 &sctx->raid56_data_stripes[i]);
2135                         if (ret < 0)
2136                                 goto out;
2137                         sctx->raid56_data_stripes[i].bg = bg;
2138                         sctx->raid56_data_stripes[i].sctx = sctx;
2139                 }
2140         }
2141         /*
2142          * There used to be a big double loop to handle all profiles using the
2143          * same routine, which grew larger and more convoluted over time.
2144          *
2145          * So here we handle each profile differently, so that simpler profiles
2146          * have simpler scrubbing functions.
2147          */
2148         if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
2149                          BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2150                 /*
2151                  * The above check rules out all complex profiles; the remaining
2152                  * profiles are SINGLE|DUP|RAID1|RAID1C*, which is simple
2153                  * mirrored duplication without striping.
2154                  *
2155                  * Only @physical and @mirror_num need to be calculated using
2156                  * @stripe_index.
2157                  */
2158                 ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
2159                                 scrub_dev, map->stripes[stripe_index].physical,
2160                                 stripe_index + 1);
2161                 offset = 0;
2162                 goto out;
2163         }
2164         if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
2165                 ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
2166                 offset = (stripe_index / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT;
2167                 goto out;
2168         }
2169
2170         /* Only RAID56 goes through the old code */
2171         ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
2172         ret = 0;
2173
2174         /* Calculate the logical end of the stripe */
2175         get_raid56_logic_offset(physical_end, stripe_index,
2176                                 map, &logic_end, NULL);
2177         logic_end += chunk_logical;
2178
2179         /* Initialize @offset in case we need to go to out: label */
2180         get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
2181         increment = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT;
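        /*
         * E.g. (illustrative): RAID5 over 3 devices has 2 data stripes, so
         * finishing one 64K device stripe advances the logical address by
         * 2 * 64K = 128K.
         */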
2182
2183         /*
2184          * Due to the rotation, for RAID56 it's better to iterate each stripe
2185          * using its physical offset.
2186          */
2187         while (physical < physical_end) {
2188                 ret = get_raid56_logic_offset(physical, stripe_index, map,
2189                                               &logical, &stripe_logical);
2190                 logical += chunk_logical;
2191                 if (ret) {
2192                         /* It is a parity stripe */
2193                         stripe_logical += chunk_logical;
2194                         ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2195                                                          map, stripe_logical);
2196                         if (ret)
2197                                 goto out;
2198                         goto next;
2199                 }
2200
2201                 /*
2202                  * Now we're at a data stripe, scrub each extent in the range.
2203                  *
2204                  * At this stage, if we ignore the repair part, inside each data
2205                  * stripe it is no different from the SINGLE profile.
2206                  * We can reuse scrub_simple_mirror() here, as the repair part
2207                  * is still based on @mirror_num.
2208                  */
2209                 ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
2210                                           scrub_dev, physical, 1);
2211                 if (ret < 0)
2212                         goto out;
2213 next:
2214                 logical += increment;
2215                 physical += BTRFS_STRIPE_LEN;
2216                 spin_lock(&sctx->stat_lock);
2217                 if (stop_loop)
2218                         sctx->stat.last_physical =
2219                                 map->stripes[stripe_index].physical + dev_stripe_len;
2220                 else
2221                         sctx->stat.last_physical = physical;
2222                 spin_unlock(&sctx->stat_lock);
2223                 if (stop_loop)
2224                         break;
2225         }
2226 out:
2227         ret2 = flush_scrub_stripes(sctx);
2228         if (!ret)
2229                 ret = ret2;
2230         if (sctx->raid56_data_stripes) {
2231                 for (int i = 0; i < nr_data_stripes(map); i++)
2232                         release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2233                 kfree(sctx->raid56_data_stripes);
2234                 sctx->raid56_data_stripes = NULL;
2235         }
2236
2237         if (sctx->is_dev_replace && ret >= 0) {
2238                 int ret2;
2239
2240                 ret2 = sync_write_pointer_for_zoned(sctx,
2241                                 chunk_logical + offset,
2242                                 map->stripes[stripe_index].physical,
2243                                 physical_end);
2244                 if (ret2)
2245                         ret = ret2;
2246         }
2247
2248         return ret < 0 ? ret : 0;
2249 }
2250
2251 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2252                                           struct btrfs_block_group *bg,
2253                                           struct btrfs_device *scrub_dev,
2254                                           u64 dev_offset,
2255                                           u64 dev_extent_len)
2256 {
2257         struct btrfs_fs_info *fs_info = sctx->fs_info;
2258         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
2259         struct map_lookup *map;
2260         struct extent_map *em;
2261         int i;
2262         int ret = 0;
2263
2264         read_lock(&map_tree->lock);
2265         em = lookup_extent_mapping(map_tree, bg->start, bg->length);
2266         read_unlock(&map_tree->lock);
2267
2268         if (!em) {
2269                 /*
2270                  * Might have been an unused block group deleted by the cleaner
2271                  * kthread or relocation.
2272                  */
2273                 spin_lock(&bg->lock);
2274                 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
2275                         ret = -EINVAL;
2276                 spin_unlock(&bg->lock);
2277
2278                 return ret;
2279         }
2280         if (em->start != bg->start)
2281                 goto out;
2282         if (em->len < dev_extent_len)
2283                 goto out;
2284
2285         map = em->map_lookup;
2286         for (i = 0; i < map->num_stripes; ++i) {
2287                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2288                     map->stripes[i].physical == dev_offset) {
2289                         ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
2290                         if (ret)
2291                                 goto out;
2292                 }
2293         }
2294 out:
2295         free_extent_map(em);
2296
2297         return ret;
2298 }
2299
2300 static int finish_extent_writes_for_zoned(struct btrfs_root *root,
2301                                           struct btrfs_block_group *cache)
2302 {
2303         struct btrfs_fs_info *fs_info = cache->fs_info;
2304         struct btrfs_trans_handle *trans;
2305
2306         if (!btrfs_is_zoned(fs_info))
2307                 return 0;
2308
2309         btrfs_wait_block_group_reservations(cache);
2310         btrfs_wait_nocow_writers(cache);
2311         btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
2312
2313         trans = btrfs_join_transaction(root);
2314         if (IS_ERR(trans))
2315                 return PTR_ERR(trans);
2316         return btrfs_commit_transaction(trans);
2317 }
2318
2319 static noinline_for_stack
2320 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2321                            struct btrfs_device *scrub_dev, u64 start, u64 end)
2322 {
2323         struct btrfs_dev_extent *dev_extent = NULL;
2324         struct btrfs_path *path;
2325         struct btrfs_fs_info *fs_info = sctx->fs_info;
2326         struct btrfs_root *root = fs_info->dev_root;
2327         u64 chunk_offset;
2328         int ret = 0;
2329         int ro_set;
2330         int slot;
2331         struct extent_buffer *l;
2332         struct btrfs_key key;
2333         struct btrfs_key found_key;
2334         struct btrfs_block_group *cache;
2335         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2336
2337         path = btrfs_alloc_path();
2338         if (!path)
2339                 return -ENOMEM;
2340
2341         path->reada = READA_FORWARD;
2342         path->search_commit_root = 1;
2343         path->skip_locking = 1;
2344
2345         key.objectid = scrub_dev->devid;
2346         key.offset = 0ull;
2347         key.type = BTRFS_DEV_EXTENT_KEY;
2348
2349         while (1) {
2350                 u64 dev_extent_len;
2351
2352                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2353                 if (ret < 0)
2354                         break;
2355                 if (ret > 0) {
2356                         if (path->slots[0] >=
2357                             btrfs_header_nritems(path->nodes[0])) {
2358                                 ret = btrfs_next_leaf(root, path);
2359                                 if (ret < 0)
2360                                         break;
2361                                 if (ret > 0) {
2362                                         ret = 0;
2363                                         break;
2364                                 }
2365                         } else {
2366                                 ret = 0;
2367                         }
2368                 }
2369
2370                 l = path->nodes[0];
2371                 slot = path->slots[0];
2372
2373                 btrfs_item_key_to_cpu(l, &found_key, slot);
2374
2375                 if (found_key.objectid != scrub_dev->devid)
2376                         break;
2377
2378                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
2379                         break;
2380
2381                 if (found_key.offset >= end)
2382                         break;
2383
2384                 if (found_key.offset < key.offset)
2385                         break;
2386
2387                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2388                 dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
2389
2390                 if (found_key.offset + dev_extent_len <= start)
2391                         goto skip;
2392
2393                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2394
2395                 /*
2396                  * get a reference on the corresponding block group to prevent
2397                  * the chunk from going away while we scrub it
2398                  */
2399                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2400
2401                 /* Some chunks are removed but not committed to disk yet,
2402                  * continue scrubbing. */
2403                 if (!cache)
2404                         goto skip;
2405
2406                 ASSERT(cache->start <= chunk_offset);
2407                 /*
2408                  * We are using the commit root to search for device extents, so
2409                  * that means we could have found a device extent item from a
2410                  * block group that was deleted in the current transaction. The
2411                  * logical start offset of the deleted block group, stored at
2412                  * @chunk_offset, might be part of the logical address range of
2413                  * a new block group (which uses different physical extents).
2414                  * In this case btrfs_lookup_block_group() has returned the new
2415                  * block group, and its start address is less than @chunk_offset.
2416                  *
2417                  * We skip such new block groups, because it's pointless to
2418                  * process them, as we won't find their extents because we search
2419                  * for them using the commit root of the extent tree. For a device
2420                  * replace it's also fine to skip it, we won't miss copying them
2421                  * to the target device because we have the write duplication
2422                  * setup through the regular write path (by btrfs_map_block()),
2423                  * and we have committed a transaction when we started the device
2424                  * replace, right after setting up the device replace state.
2425                  */
2426                 if (cache->start < chunk_offset) {
2427                         btrfs_put_block_group(cache);
2428                         goto skip;
2429                 }
2430
2431                 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2432                         if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
2433                                 btrfs_put_block_group(cache);
2434                                 goto skip;
2435                         }
2436                 }
2437
2438                 /*
2439                  * Make sure that while we are scrubbing the corresponding block
2440                  * group doesn't get its logical address and its device extents
2441                  * reused for another block group, which can possibly be of a
2442                  * different type and different profile. We do this to prevent
2443                  * false error detections and crashes due to bogus attempts to
2444                  * repair extents.
2445                  */
2446                 spin_lock(&cache->lock);
2447                 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2448                         spin_unlock(&cache->lock);
2449                         btrfs_put_block_group(cache);
2450                         goto skip;
2451                 }
2452                 btrfs_freeze_block_group(cache);
2453                 spin_unlock(&cache->lock);
2454
2455                 /*
2456                  * we need to call btrfs_inc_block_group_ro() with scrubs paused,
2457                  * to avoid deadlock caused by:
2458                  * btrfs_inc_block_group_ro()
2459                  * -> btrfs_wait_for_commit()
2460                  * -> btrfs_commit_transaction()
2461                  * -> btrfs_scrub_pause()
2462                  */
2463                 scrub_pause_on(fs_info);
2464
2465                 /*
2466                  * Don't do chunk preallocation for scrub.
2467                  *
2468                  * This is especially important for SYSTEM bgs, or we can hit
2469                  * -EFBIG from btrfs_finish_chunk_alloc() like:
2470                  * 1. The only SYSTEM bg is marked RO.
2471                  *    Since SYSTEM bg is small, that's pretty common.
2472                  * 2. A new SYSTEM bg will be allocated,
2473                  *    because the regular path allocates a new chunk.
2474                  * 3. New SYSTEM bg is empty and will get cleaned up
2475                  *    Before cleanup really happens, it's marked RO again.
2476                  * 4. The empty SYSTEM bg gets scrubbed.
2477                  *    We go back to 2.
2478                  *
2479                  * This can easily boost the number of SYSTEM chunks if the
2480                  * cleaner thread can't be triggered fast enough, and use up
2481                  * all the space of btrfs_super_block::sys_chunk_array.
2482                  *
2483                  * For dev-replace, however, we need to try our best to mark the
2484                  * block group RO, to prevent a race between:
2485                  * - Write duplication
2486                  *   Contains latest data
2487                  * - Scrub copy
2488                  *   Contains data from commit tree
2489                  *
2490                  * If the target block group is not marked RO, nocow writes can
2491                  * be overwritten by the scrub copy, causing data corruption.
2492                  * So for dev-replace, it's not allowed to continue if a block
2493                  * group is not RO.
2494                  */
2495                 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2496                 if (!ret && sctx->is_dev_replace) {
2497                         ret = finish_extent_writes_for_zoned(root, cache);
2498                         if (ret) {
2499                                 btrfs_dec_block_group_ro(cache);
2500                                 scrub_pause_off(fs_info);
2501                                 btrfs_put_block_group(cache);
2502                                 break;
2503                         }
2504                 }
2505
2506                 if (ret == 0) {
2507                         ro_set = 1;
2508                 } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2509                            !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
2510                         /*
2511                          * btrfs_inc_block_group_ro() returns -ENOSPC when it
2512                          * fails to create a new chunk for metadata.
2513                          * It is not a problem for scrub, because
2514                          * metadata is always COWed, and our scrub pauses
2515                          * commit transactions.
2516                          *
2517                          * For RAID56 chunks, we have to mark them read-only
2518                          * for scrub, as later we would use our own cache
2519                          * out of the RAID56 realm.
2520                          * Thus we want the RAID56 bg to be marked RO to
2521                          * prevent RMW from screwing up our cache.
2522                          */
2523                         ro_set = 0;
2524                 } else if (ret == -ETXTBSY) {
2525                         btrfs_warn(fs_info,
2526                    "skipping scrub of block group %llu due to active swapfile",
2527                                    cache->start);
2528                         scrub_pause_off(fs_info);
2529                         ret = 0;
2530                         goto skip_unfreeze;
2531                 } else {
2532                         btrfs_warn(fs_info,
2533                                    "failed setting block group ro: %d", ret);
2534                         btrfs_unfreeze_block_group(cache);
2535                         btrfs_put_block_group(cache);
2536                         scrub_pause_off(fs_info);
2537                         break;
2538                 }
2539
2540                 /*
2541                  * Now the target block group is marked RO, wait for nocow
2542                  * writes to finish before dev-replace.
2543                  * COW is fine, as COW never overwrites extents in the commit tree.
2544                  */
2545                 if (sctx->is_dev_replace) {
2546                         btrfs_wait_nocow_writers(cache);
2547                         btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
2548                                         cache->length);
2549                 }
2550
2551                 scrub_pause_off(fs_info);
2552                 down_write(&dev_replace->rwsem);
2553                 dev_replace->cursor_right = found_key.offset + dev_extent_len;
2554                 dev_replace->cursor_left = found_key.offset;
2555                 dev_replace->item_needs_writeback = 1;
2556                 up_write(&dev_replace->rwsem);
2557
2558                 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2559                                   dev_extent_len);
2560                 if (sctx->is_dev_replace &&
2561                     !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
2562                                                       cache, found_key.offset))
2563                         ro_set = 0;
2564
2565                 down_write(&dev_replace->rwsem);
2566                 dev_replace->cursor_left = dev_replace->cursor_right;
2567                 dev_replace->item_needs_writeback = 1;
2568                 up_write(&dev_replace->rwsem);
2569
2570                 if (ro_set)
2571                         btrfs_dec_block_group_ro(cache);
2572
2573                 /*
2574                  * We might have prevented the cleaner kthread from deleting
2575                  * this block group if it was already unused because we raced
2576                  * and set it to RO mode first. So add it back to the unused
2577                  * list, otherwise it might not ever be deleted unless a manual
2578                  * balance is triggered or it becomes used and unused again.
2579                  */
2580                 spin_lock(&cache->lock);
2581                 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
2582                     !cache->ro && cache->reserved == 0 && cache->used == 0) {
2583                         spin_unlock(&cache->lock);
2584                         if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2585                                 btrfs_discard_queue_work(&fs_info->discard_ctl,
2586                                                          cache);
2587                         else
2588                                 btrfs_mark_bg_unused(cache);
2589                 } else {
2590                         spin_unlock(&cache->lock);
2591                 }
2592 skip_unfreeze:
2593                 btrfs_unfreeze_block_group(cache);
2594                 btrfs_put_block_group(cache);
2595                 if (ret)
2596                         break;
2597                 if (sctx->is_dev_replace &&
2598                     atomic64_read(&dev_replace->num_write_errors) > 0) {
2599                         ret = -EIO;
2600                         break;
2601                 }
2602                 if (sctx->stat.malloc_errors > 0) {
2603                         ret = -ENOMEM;
2604                         break;
2605                 }
2606 skip:
2607                 key.offset = found_key.offset + dev_extent_len;
2608                 btrfs_release_path(path);
2609         }
2610
2611         btrfs_free_path(path);
2612
2613         return ret;
2614 }
2615
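     /*
      * Read the super block copy at @physical on @dev into @page and verify
      * its checksum, its generation against @generation, and its overall
      * validity.
      */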
2616 static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2617                            struct page *page, u64 physical, u64 generation)
2618 {
2619         struct btrfs_fs_info *fs_info = sctx->fs_info;
2620         struct bio_vec bvec;
2621         struct bio bio;
2622         struct btrfs_super_block *sb = page_address(page);
2623         int ret;
2624
2625         bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
2626         bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
2627         __bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
2628         ret = submit_bio_wait(&bio);
2629         bio_uninit(&bio);
2630
2631         if (ret < 0)
2632                 return ret;
2633         ret = btrfs_check_super_csum(fs_info, sb);
2634         if (ret != 0) {
2635                 btrfs_err_rl(fs_info,
2636                         "super block at physical %llu devid %llu has bad csum",
2637                         physical, dev->devid);
2638                 return -EIO;
2639         }
2640         if (btrfs_super_generation(sb) != generation) {
2641                 btrfs_err_rl(fs_info,
2642 "super block at physical %llu devid %llu has bad generation %llu expect %llu",
2643                              physical, dev->devid,
2644                              btrfs_super_generation(sb), generation);
2645                 return -EUCLEAN;
2646         }
2647
2648         return btrfs_validate_super(fs_info, sb, -1);
2649 }
2650
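     /*
      * Scrub all super block copies of @scrub_dev.  Per-copy errors only bump
      * sctx->stat.super_errors; any repair is left to the transaction commit
      * issued later by btrfs_scrub_dev().  Only setup failures (e.g. -ENOMEM,
      * -EROFS) are returned.
      */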
2651 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2652                                            struct btrfs_device *scrub_dev)
2653 {
2654         int     i;
2655         u64     bytenr;
2656         u64     gen;
2657         int ret = 0;
2658         struct page *page;
2659         struct btrfs_fs_info *fs_info = sctx->fs_info;
2660
2661         if (BTRFS_FS_ERROR(fs_info))
2662                 return -EROFS;
2663
2664         page = alloc_page(GFP_KERNEL);
2665         if (!page) {
2666                 spin_lock(&sctx->stat_lock);
2667                 sctx->stat.malloc_errors++;
2668                 spin_unlock(&sctx->stat_lock);
2669                 return -ENOMEM;
2670         }
2671
2672         /* Seed devices of a new filesystem have their own generation. */
2673         if (scrub_dev->fs_devices != fs_info->fs_devices)
2674                 gen = scrub_dev->generation;
2675         else
2676                 gen = fs_info->last_trans_committed;
2677
2678         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2679                 bytenr = btrfs_sb_offset(i);
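                     /*
                      * Skip super block copies that would be located beyond
                      * the committed size of the device.
                      */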
2680                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
2681                     scrub_dev->commit_total_bytes)
2682                         break;
2683                 if (!btrfs_check_super_location(scrub_dev, bytenr))
2684                         continue;
2685
2686                 ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2687                 if (ret) {
2688                         spin_lock(&sctx->stat_lock);
2689                         sctx->stat.super_errors++;
2690                         spin_unlock(&sctx->stat_lock);
2691                 }
2692         }
2693         __free_page(page);
2694         return 0;
2695 }
2696
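     /*
      * Drop one reference on the scrub workqueue; the last reference also
      * destroys the workqueue.
      */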
2697 static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2698 {
2699         if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2700                                         &fs_info->scrub_lock)) {
2701                 struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
2702
2703                 fs_info->scrub_workers = NULL;
2704                 mutex_unlock(&fs_info->scrub_lock);
2705
2706                 if (scrub_workers)
2707                         destroy_workqueue(scrub_workers);
2708         }
2709 }
2710
2711 /*
2712  * Get a reference on fs_info->scrub_workers; start the workers if necessary.
2713  */
2714 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
2715                                                 int is_dev_replace)
2716 {
2717         struct workqueue_struct *scrub_workers = NULL;
2718         unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
2719         int max_active = fs_info->thread_pool_size;
2720         int ret = -ENOMEM;
2721
2722         if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2723                 return 0;
2724
2725         if (is_dev_replace)
2726                 scrub_workers = alloc_ordered_workqueue("btrfs-scrub", flags);
2727         else
2728                 scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
2729         if (!scrub_workers)
2730                 return -ENOMEM;
2731
2732         mutex_lock(&fs_info->scrub_lock);
2733         if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
2734                 ASSERT(fs_info->scrub_workers == NULL);
2735                 fs_info->scrub_workers = scrub_workers;
2736                 refcount_set(&fs_info->scrub_workers_refcnt, 1);
2737                 mutex_unlock(&fs_info->scrub_lock);
2738                 return 0;
2739         }
2740         /* Another thread raced in and created the workers for us. */
2741         refcount_inc(&fs_info->scrub_workers_refcnt);
2742         mutex_unlock(&fs_info->scrub_lock);
2743
2744         ret = 0;
2745
2746         destroy_workqueue(scrub_workers);
2747         return ret;
2748 }
2749
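     /*
      * Entry point for both scrub and the scrub part of dev-replace: scrub
      * the range [@start, @end] on the device @devid and report the result
      * through @progress.
      *
      * Returns 0 on success, -EAGAIN if the filesystem is closing,
      * -EINPROGRESS if the device is already being scrubbed or a replace is
      * running, or another negative errno on failure.
      */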
2750 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2751                     u64 end, struct btrfs_scrub_progress *progress,
2752                     int readonly, int is_dev_replace)
2753 {
2754         struct btrfs_dev_lookup_args args = { .devid = devid };
2755         struct scrub_ctx *sctx;
2756         int ret;
2757         struct btrfs_device *dev;
2758         unsigned int nofs_flag;
2759         bool need_commit = false;
2760
2761         if (btrfs_fs_closing(fs_info))
2762                 return -EAGAIN;
2763
2764         /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
2765         ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
2766
2767         /*
2768          * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
2769          * value (max nodesize / min sectorsize), thus nodesize should always
2770          * be fine.
2771          */
2772         ASSERT(fs_info->nodesize <=
2773                SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
2774
2775         /* Allocate outside of device_list_mutex */
2776         sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2777         if (IS_ERR(sctx))
2778                 return PTR_ERR(sctx);
2779
2780         ret = scrub_workers_get(fs_info, is_dev_replace);
2781         if (ret)
2782                 goto out_free_ctx;
2783
2784         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2785         dev = btrfs_find_device(fs_info->fs_devices, &args);
2786         if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
2787                      !is_dev_replace)) {
2788                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2789                 ret = -ENODEV;
2790                 goto out;
2791         }
2792
2793         if (!is_dev_replace && !readonly &&
2794             !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2795                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2796                 btrfs_err_in_rcu(fs_info,
2797                         "scrub on devid %llu: filesystem on %s is not writable",
2798                                  devid, btrfs_dev_name(dev));
2799                 ret = -EROFS;
2800                 goto out;
2801         }
2802
2803         mutex_lock(&fs_info->scrub_lock);
2804         if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
2805             test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
2806                 mutex_unlock(&fs_info->scrub_lock);
2807                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2808                 ret = -EIO;
2809                 goto out;
2810         }
2811
2812         down_read(&fs_info->dev_replace.rwsem);
2813         if (dev->scrub_ctx ||
2814             (!is_dev_replace &&
2815              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2816                 up_read(&fs_info->dev_replace.rwsem);
2817                 mutex_unlock(&fs_info->scrub_lock);
2818                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2819                 ret = -EINPROGRESS;
2820                 goto out;
2821         }
2822         up_read(&fs_info->dev_replace.rwsem);
2823
2824         sctx->readonly = readonly;
2825         dev->scrub_ctx = sctx;
2826         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2827
2828         /*
2829          * By checking @scrub_pause_req here, we can avoid racing between
2830          * transaction commit and scrubbing.
2831          */
2832         __scrub_blocked_if_needed(fs_info);
2833         atomic_inc(&fs_info->scrubs_running);
2834         mutex_unlock(&fs_info->scrub_lock);
2835
2836         /*
2837          * In order to avoid deadlock with reclaim when there is a transaction
2838          * trying to pause scrub, make sure we use GFP_NOFS for all the
2839          * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
2840          * invoked by our callees. The pausing request is done when the
2841          * transaction commit starts, and it blocks the transaction until scrub
2842          * is paused (done at specific points in scrub_stripe(), or right
2843          * above, before incrementing fs_info->scrubs_running).
2844          */
2845         nofs_flag = memalloc_nofs_save();
2846         if (!is_dev_replace) {
2847                 u64 old_super_errors;
2848
2849                 spin_lock(&sctx->stat_lock);
2850                 old_super_errors = sctx->stat.super_errors;
2851                 spin_unlock(&sctx->stat_lock);
2852
2853                 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
2854                 /*
2855                  * Hold the device list mutex so we do not race with super
2856                  * block writes (e.g. those kicked off by a log tree sync).
2857                  */
2858                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2859                 ret = scrub_supers(sctx, dev);
2860                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2861
2862                 spin_lock(&sctx->stat_lock);
2863                 /*
2864                  * Super block errors found, but we cannot commit a transaction
2865                  * in the current context, since btrfs_commit_transaction() needs
2866                  * to pause the currently running scrub (held by ourselves).
2867                  */
2868                 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
2869                         need_commit = true;
2870                 spin_unlock(&sctx->stat_lock);
2871         }
2872
2873         if (!ret)
2874                 ret = scrub_enumerate_chunks(sctx, dev, start, end);
2875         memalloc_nofs_restore(nofs_flag);
2876
2877         atomic_dec(&fs_info->scrubs_running);
2878         wake_up(&fs_info->scrub_pause_wait);
2879
2880         if (progress)
2881                 memcpy(progress, &sctx->stat, sizeof(*progress));
2882
2883         if (!is_dev_replace)
2884                 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
2885                         ret ? "not finished" : "finished", devid, ret);
2886
2887         mutex_lock(&fs_info->scrub_lock);
2888         dev->scrub_ctx = NULL;
2889         mutex_unlock(&fs_info->scrub_lock);
2890
2891         scrub_workers_put(fs_info);
2892         scrub_put_ctx(sctx);
2893
2894         /*
2895          * We found super block errors earlier.  Now that scrub has finished,
2896          * force a transaction commit so all super block copies are rewritten.
2897          */
2898         if (need_commit) {
2899                 struct btrfs_trans_handle *trans;
2900
2901                 trans = btrfs_start_transaction(fs_info->tree_root, 0);
2902                 if (IS_ERR(trans)) {
2903                         ret = PTR_ERR(trans);
2904                         btrfs_err(fs_info,
2905         "scrub: failed to start transaction to fix super block errors: %d", ret);
2906                         return ret;
2907                 }
2908                 ret = btrfs_commit_transaction(trans);
2909                 if (ret < 0)
2910                         btrfs_err(fs_info,
2911         "scrub: failed to commit transaction to fix super block errors: %d", ret);
2912         }
2913         return ret;
2914 out:
2915         scrub_workers_put(fs_info);
2916 out_free_ctx:
2917         scrub_free_ctx(sctx);
2918
2919         return ret;
2920 }
2921
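     /*
      * Pause all running scrubs and wait until every scrub thread has
      * actually parked.  Typically paired with btrfs_scrub_continue() around
      * sections that must not race with scrub, e.g.:
      *
      *		btrfs_scrub_pause(fs_info);
      *		... critical work such as a transaction commit ...
      *		btrfs_scrub_continue(fs_info);
      */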
2922 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
2923 {
2924         mutex_lock(&fs_info->scrub_lock);
2925         atomic_inc(&fs_info->scrub_pause_req);
2926         while (atomic_read(&fs_info->scrubs_paused) !=
2927                atomic_read(&fs_info->scrubs_running)) {
2928                 mutex_unlock(&fs_info->scrub_lock);
2929                 wait_event(fs_info->scrub_pause_wait,
2930                            atomic_read(&fs_info->scrubs_paused) ==
2931                            atomic_read(&fs_info->scrubs_running));
2932                 mutex_lock(&fs_info->scrub_lock);
2933         }
2934         mutex_unlock(&fs_info->scrub_lock);
2935 }
2936
2937 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
2938 {
2939         atomic_dec(&fs_info->scrub_pause_req);
2940         wake_up(&fs_info->scrub_pause_wait);
2941 }
2942
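     /*
      * Cancel all running scrubs and wait for them to exit.  Returns
      * -ENOTCONN if no scrub was running.
      */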
2943 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
2944 {
2945         mutex_lock(&fs_info->scrub_lock);
2946         if (!atomic_read(&fs_info->scrubs_running)) {
2947                 mutex_unlock(&fs_info->scrub_lock);
2948                 return -ENOTCONN;
2949         }
2950
2951         atomic_inc(&fs_info->scrub_cancel_req);
2952         while (atomic_read(&fs_info->scrubs_running)) {
2953                 mutex_unlock(&fs_info->scrub_lock);
2954                 wait_event(fs_info->scrub_pause_wait,
2955                            atomic_read(&fs_info->scrubs_running) == 0);
2956                 mutex_lock(&fs_info->scrub_lock);
2957         }
2958         atomic_dec(&fs_info->scrub_cancel_req);
2959         mutex_unlock(&fs_info->scrub_lock);
2960
2961         return 0;
2962 }
2963
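     /*
      * Cancel the scrub running on @dev, if any, and wait until the device is
      * no longer being scrubbed.  Returns -ENOTCONN if no scrub was running.
      */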
2964 int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
2965 {
2966         struct btrfs_fs_info *fs_info = dev->fs_info;
2967         struct scrub_ctx *sctx;
2968
2969         mutex_lock(&fs_info->scrub_lock);
2970         sctx = dev->scrub_ctx;
2971         if (!sctx) {
2972                 mutex_unlock(&fs_info->scrub_lock);
2973                 return -ENOTCONN;
2974         }
2975         atomic_inc(&sctx->cancel_req);
2976         while (dev->scrub_ctx) {
2977                 mutex_unlock(&fs_info->scrub_lock);
2978                 wait_event(fs_info->scrub_pause_wait,
2979                            dev->scrub_ctx == NULL);
2980                 mutex_lock(&fs_info->scrub_lock);
2981         }
2982         mutex_unlock(&fs_info->scrub_lock);
2983
2984         return 0;
2985 }
2986
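     /*
      * Copy the current scrub statistics for @devid into @progress.  Returns
      * -ENODEV if the device cannot be found and -ENOTCONN if it is not being
      * scrubbed.
      */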
2987 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
2988                          struct btrfs_scrub_progress *progress)
2989 {
2990         struct btrfs_dev_lookup_args args = { .devid = devid };
2991         struct btrfs_device *dev;
2992         struct scrub_ctx *sctx = NULL;
2993
2994         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2995         dev = btrfs_find_device(fs_info->fs_devices, &args);
2996         if (dev)
2997                 sctx = dev->scrub_ctx;
2998         if (sctx)
2999                 memcpy(progress, &sctx->stat, sizeof(*progress));
3000         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3001
3002         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3003 }