fs/btrfs/scrub.c
1 /*
2  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include <linux/sched/mm.h>
22 #include "ctree.h"
23 #include "volumes.h"
24 #include "disk-io.h"
25 #include "ordered-data.h"
26 #include "transaction.h"
27 #include "backref.h"
28 #include "extent_io.h"
29 #include "dev-replace.h"
30 #include "check-integrity.h"
31 #include "rcu-string.h"
32 #include "raid56.h"
33
34 /*
35  * This is only the first step towards a full-featured scrub. It reads all
36  * extents and super blocks and verifies the checksums. In case a bad checksum
37  * is found or the extent cannot be read, good data will be written back if
38  * any can be found.
39  *
40  * Future enhancements:
41  *  - In case an unrepairable extent is encountered, track which files are
42  *    affected and report them
43  *  - track and record media errors, throw out bad devices
44  *  - add a mode to also read unallocated space
45  */
46
47 struct scrub_block;
48 struct scrub_ctx;
49
50 /*
51  * the following three values only influence the performance.
52  * The last one configures the number of parallel and outstanding I/O
53  * operations. The first two values configure an upper limit for the number
54  * of (dynamically allocated) pages that are added to a bio.
55  */
56 #define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
57 #define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
58 #define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
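/*
 * With 4KiB pages, the limits above work out to 32 x 4KiB = 128KiB per bio
 * and 64 x 128KiB = 8MiB of I/O in flight per device, as the comments next
 * to the defines note.
 */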
59
60 /*
61  * the following value times PAGE_SIZE needs to be large enough to match the
62  * largest node/leaf/sector size that shall be supported.
63  * Values larger than BTRFS_STRIPE_LEN are not supported.
64  */
65 #define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
66
67 struct scrub_recover {
68         refcount_t              refs;
69         struct btrfs_bio        *bbio;
70         u64                     map_length;
71 };
72
73 struct scrub_page {
74         struct scrub_block      *sblock;
75         struct page             *page;
76         struct btrfs_device     *dev;
77         struct list_head        list;
78         u64                     flags;  /* extent flags */
79         u64                     generation;
80         u64                     logical;
81         u64                     physical;
82         u64                     physical_for_dev_replace;
83         atomic_t                refs;
84         struct {
85                 unsigned int    mirror_num:8;
86                 unsigned int    have_csum:1;
87                 unsigned int    io_error:1;
88         };
89         u8                      csum[BTRFS_CSUM_SIZE];
90
91         struct scrub_recover    *recover;
92 };
93
94 struct scrub_bio {
95         int                     index;
96         struct scrub_ctx        *sctx;
97         struct btrfs_device     *dev;
98         struct bio              *bio;
99         blk_status_t            status;
100         u64                     logical;
101         u64                     physical;
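        /* pagev[] is sized for the larger of the read and write per-bio page limits */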
102 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
103         struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
104 #else
105         struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
106 #endif
107         int                     page_count;
108         int                     next_free;
109         struct btrfs_work       work;
110 };
111
112 struct scrub_block {
113         struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
114         int                     page_count;
115         atomic_t                outstanding_pages;
116         refcount_t              refs; /* free mem on transition to zero */
117         struct scrub_ctx        *sctx;
118         struct scrub_parity     *sparity;
119         struct {
120                 unsigned int    header_error:1;
121                 unsigned int    checksum_error:1;
122                 unsigned int    no_io_error_seen:1;
123                 unsigned int    generation_error:1; /* also sets header_error */
124
125                 /* The following is for the data used to check parity */
126                 /* It is for the data with checksum */
127                 unsigned int    data_corrected:1;
128         };
129         struct btrfs_work       work;
130 };
131
132 /* Used for the chunks with parity stripe such as RAID5/6 */
133 struct scrub_parity {
134         struct scrub_ctx        *sctx;
135
136         struct btrfs_device     *scrub_dev;
137
138         u64                     logic_start;
139
140         u64                     logic_end;
141
142         int                     nsectors;
143
144         u64                     stripe_len;
145
146         refcount_t              refs;
147
148         struct list_head        spages;
149
150         /* Work of parity check and repair */
151         struct btrfs_work       work;
152
153         /* Mark the parity blocks which have data */
154         unsigned long           *dbitmap;
155
156         /*
157          * Mark the parity blocks which have data, but on which errors
158          * happened when reading or checking the data
159          */
160         unsigned long           *ebitmap;
161
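        /* Backing storage that dbitmap and ebitmap point into */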
162         unsigned long           bitmap[0];
163 };
164
165 struct scrub_ctx {
166         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
167         struct btrfs_fs_info    *fs_info;
168         int                     first_free;
169         int                     curr;
170         atomic_t                bios_in_flight;
171         atomic_t                workers_pending;
172         spinlock_t              list_lock;
173         wait_queue_head_t       list_wait;
174         u16                     csum_size;
175         struct list_head        csum_list;
176         atomic_t                cancel_req;
177         int                     readonly;
178         int                     pages_per_rd_bio;
179
180         int                     is_dev_replace;
181
182         struct scrub_bio        *wr_curr_bio;
183         struct mutex            wr_lock;
184         int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
185         struct btrfs_device     *wr_tgtdev;
186         bool                    flush_all_writes;
187
188         /*
189          * statistics
190          */
191         struct btrfs_scrub_progress stat;
192         spinlock_t              stat_lock;
193
194         /*
195          * Use a ref counter to avoid use-after-free issues. Scrub workers
196          * decrement bios_in_flight and workers_pending and then do a wakeup
197          * on the list_wait wait queue. We must ensure the main scrub task
198          * doesn't free the scrub context before or while the workers are
199          * doing the wakeup() call.
200          */
201         refcount_t              refs;
202 };
203
204 struct scrub_fixup_nodatasum {
205         struct scrub_ctx        *sctx;
206         struct btrfs_device     *dev;
207         u64                     logical;
208         struct btrfs_root       *root;
209         struct btrfs_work       work;
210         int                     mirror_num;
211 };
212
213 struct scrub_nocow_inode {
214         u64                     inum;
215         u64                     offset;
216         u64                     root;
217         struct list_head        list;
218 };
219
220 struct scrub_copy_nocow_ctx {
221         struct scrub_ctx        *sctx;
222         u64                     logical;
223         u64                     len;
224         int                     mirror_num;
225         u64                     physical_for_dev_replace;
226         struct list_head        inodes;
227         struct btrfs_work       work;
228 };
229
230 struct scrub_warning {
231         struct btrfs_path       *path;
232         u64                     extent_item_size;
233         const char              *errstr;
234         u64                     physical;
235         u64                     logical;
236         struct btrfs_device     *dev;
237 };
238
239 struct full_stripe_lock {
240         struct rb_node node;
241         u64 logical;
242         u64 refs;
243         struct mutex mutex;
244 };
245
246 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
247 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
248 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
249 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
250 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
251 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
252                                      struct scrub_block *sblocks_for_recheck);
253 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
254                                 struct scrub_block *sblock,
255                                 int retry_failed_mirror);
256 static void scrub_recheck_block_checksum(struct scrub_block *sblock);
257 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
258                                              struct scrub_block *sblock_good);
259 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
260                                             struct scrub_block *sblock_good,
261                                             int page_num, int force_write);
262 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
263 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
264                                            int page_num);
265 static int scrub_checksum_data(struct scrub_block *sblock);
266 static int scrub_checksum_tree_block(struct scrub_block *sblock);
267 static int scrub_checksum_super(struct scrub_block *sblock);
268 static void scrub_block_get(struct scrub_block *sblock);
269 static void scrub_block_put(struct scrub_block *sblock);
270 static void scrub_page_get(struct scrub_page *spage);
271 static void scrub_page_put(struct scrub_page *spage);
272 static void scrub_parity_get(struct scrub_parity *sparity);
273 static void scrub_parity_put(struct scrub_parity *sparity);
274 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
275                                     struct scrub_page *spage);
276 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
277                        u64 physical, struct btrfs_device *dev, u64 flags,
278                        u64 gen, int mirror_num, u8 *csum, int force,
279                        u64 physical_for_dev_replace);
280 static void scrub_bio_end_io(struct bio *bio);
281 static void scrub_bio_end_io_worker(struct btrfs_work *work);
282 static void scrub_block_complete(struct scrub_block *sblock);
283 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
284                                u64 extent_logical, u64 extent_len,
285                                u64 *extent_physical,
286                                struct btrfs_device **extent_dev,
287                                int *extent_mirror_num);
288 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
289                                     struct scrub_page *spage);
290 static void scrub_wr_submit(struct scrub_ctx *sctx);
291 static void scrub_wr_bio_end_io(struct bio *bio);
292 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
293 static int write_page_nocow(struct scrub_ctx *sctx,
294                             u64 physical_for_dev_replace, struct page *page);
295 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
296                                       struct scrub_copy_nocow_ctx *ctx);
297 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
298                             int mirror_num, u64 physical_for_dev_replace);
299 static void copy_nocow_pages_worker(struct btrfs_work *work);
300 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
301 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
302 static void scrub_put_ctx(struct scrub_ctx *sctx);
303
304
305 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
306 {
307         refcount_inc(&sctx->refs);
308         atomic_inc(&sctx->bios_in_flight);
309 }
310
311 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
312 {
313         atomic_dec(&sctx->bios_in_flight);
314         wake_up(&sctx->list_wait);
315         scrub_put_ctx(sctx);
316 }
317
318 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
319 {
320         while (atomic_read(&fs_info->scrub_pause_req)) {
321                 mutex_unlock(&fs_info->scrub_lock);
322                 wait_event(fs_info->scrub_pause_wait,
323                    atomic_read(&fs_info->scrub_pause_req) == 0);
324                 mutex_lock(&fs_info->scrub_lock);
325         }
326 }
327
328 static void scrub_pause_on(struct btrfs_fs_info *fs_info)
329 {
330         atomic_inc(&fs_info->scrubs_paused);
331         wake_up(&fs_info->scrub_pause_wait);
332 }
333
334 static void scrub_pause_off(struct btrfs_fs_info *fs_info)
335 {
336         mutex_lock(&fs_info->scrub_lock);
337         __scrub_blocked_if_needed(fs_info);
338         atomic_dec(&fs_info->scrubs_paused);
339         mutex_unlock(&fs_info->scrub_lock);
340
341         wake_up(&fs_info->scrub_pause_wait);
342 }
343
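/*
 * Briefly mark this scrub as paused and then wait until any pending pause
 * request (e.g. from a transaction commit) has been served.
 */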
344 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
345 {
346         scrub_pause_on(fs_info);
347         scrub_pause_off(fs_info);
348 }
349
350 /*
351  * Insert new full stripe lock into full stripe locks tree
352  *
353  * Return pointer to existing or newly inserted full_stripe_lock structure if
354  * everything works well.
355  * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
356  *
357  * NOTE: caller must hold full_stripe_locks_root->lock before calling this
358  * function
359  */
360 static struct full_stripe_lock *insert_full_stripe_lock(
361                 struct btrfs_full_stripe_locks_tree *locks_root,
362                 u64 fstripe_logical)
363 {
364         struct rb_node **p;
365         struct rb_node *parent = NULL;
366         struct full_stripe_lock *entry;
367         struct full_stripe_lock *ret;
368
369         WARN_ON(!mutex_is_locked(&locks_root->lock));
370
371         p = &locks_root->root.rb_node;
372         while (*p) {
373                 parent = *p;
374                 entry = rb_entry(parent, struct full_stripe_lock, node);
375                 if (fstripe_logical < entry->logical) {
376                         p = &(*p)->rb_left;
377                 } else if (fstripe_logical > entry->logical) {
378                         p = &(*p)->rb_right;
379                 } else {
380                         entry->refs++;
381                         return entry;
382                 }
383         }
384
385         /* Insert new lock */
386         ret = kmalloc(sizeof(*ret), GFP_KERNEL);
387         if (!ret)
388                 return ERR_PTR(-ENOMEM);
389         ret->logical = fstripe_logical;
390         ret->refs = 1;
391         mutex_init(&ret->mutex);
392
393         rb_link_node(&ret->node, parent, p);
394         rb_insert_color(&ret->node, &locks_root->root);
395         return ret;
396 }
397
398 /*
399  * Search for a full stripe lock of a block group
400  *
401  * Return pointer to existing full stripe lock if found
402  * Return NULL if not found
403  */
404 static struct full_stripe_lock *search_full_stripe_lock(
405                 struct btrfs_full_stripe_locks_tree *locks_root,
406                 u64 fstripe_logical)
407 {
408         struct rb_node *node;
409         struct full_stripe_lock *entry;
410
411         WARN_ON(!mutex_is_locked(&locks_root->lock));
412
413         node = locks_root->root.rb_node;
414         while (node) {
415                 entry = rb_entry(node, struct full_stripe_lock, node);
416                 if (fstripe_logical < entry->logical)
417                         node = node->rb_left;
418                 else if (fstripe_logical > entry->logical)
419                         node = node->rb_right;
420                 else
421                         return entry;
422         }
423         return NULL;
424 }
425
426 /*
427  * Helper to get full stripe logical from a normal bytenr.
428  *
429  * Caller must ensure @cache is a RAID56 block group.
430  */
431 static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
432                                    u64 bytenr)
433 {
434         u64 ret;
435
436         /*
437          * Due to chunk item size limit, full stripe length should not be
438          * larger than U32_MAX. Just a sanity check here.
439          */
440         WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
441
442         /*
443          * round_down() can only handle power of 2, while RAID56 full
444          * stripe length can be 64KiB * n, so we need to manually round down.
445          */
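        /*
         * Example with hypothetical numbers: for full_stripe_len = 192KiB
         * (three 64KiB data stripes) and a bytenr 200KiB past the chunk
         * start, the division yields 1 and the returned full stripe start
         * is the chunk start plus 192KiB.
         */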
446         ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
447                 cache->full_stripe_len + cache->key.objectid;
448         return ret;
449 }
450
451 /*
452  * Lock a full stripe to avoid concurrent recovery and read
453  *
454  * It's only used for profiles with parities (RAID5/6), for other profiles it
455  * does nothing.
456  *
457  * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
458  * The caller must call unlock_full_stripe() from the same context.
459  *
460  * Return <0 on error.
461  */
462 static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
463                             bool *locked_ret)
464 {
465         struct btrfs_block_group_cache *bg_cache;
466         struct btrfs_full_stripe_locks_tree *locks_root;
467         struct full_stripe_lock *existing;
468         u64 fstripe_start;
469         int ret = 0;
470
471         *locked_ret = false;
472         bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
473         if (!bg_cache) {
474                 ASSERT(0);
475                 return -ENOENT;
476         }
477
478         /* Profiles not based on parity don't need full stripe lock */
479         if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
480                 goto out;
481         locks_root = &bg_cache->full_stripe_locks_root;
482
483         fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
484
485         /* Now insert the full stripe lock */
486         mutex_lock(&locks_root->lock);
487         existing = insert_full_stripe_lock(locks_root, fstripe_start);
488         mutex_unlock(&locks_root->lock);
489         if (IS_ERR(existing)) {
490                 ret = PTR_ERR(existing);
491                 goto out;
492         }
493         mutex_lock(&existing->mutex);
494         *locked_ret = true;
495 out:
496         btrfs_put_block_group(bg_cache);
497         return ret;
498 }
499
500 /*
501  * Unlock a full stripe.
502  *
503  * NOTE: Caller must ensure it's the same context calling corresponding
504  * lock_full_stripe().
505  *
506  * Return 0 if we unlock full stripe without problem.
507  * Return <0 for error
508  */
509 static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
510                               bool locked)
511 {
512         struct btrfs_block_group_cache *bg_cache;
513         struct btrfs_full_stripe_locks_tree *locks_root;
514         struct full_stripe_lock *fstripe_lock;
515         u64 fstripe_start;
516         bool freeit = false;
517         int ret = 0;
518
519         /* If we didn't acquire full stripe lock, no need to continue */
520         if (!locked)
521                 return 0;
522
523         bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
524         if (!bg_cache) {
525                 ASSERT(0);
526                 return -ENOENT;
527         }
528         if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
529                 goto out;
530
531         locks_root = &bg_cache->full_stripe_locks_root;
532         fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
533
534         mutex_lock(&locks_root->lock);
535         fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
536         /* Unpaired unlock_full_stripe() detected */
537         if (!fstripe_lock) {
538                 WARN_ON(1);
539                 ret = -ENOENT;
540                 mutex_unlock(&locks_root->lock);
541                 goto out;
542         }
543
544         if (fstripe_lock->refs == 0) {
545                 WARN_ON(1);
546                 btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
547                         fstripe_lock->logical);
548         } else {
549                 fstripe_lock->refs--;
550         }
551
552         if (fstripe_lock->refs == 0) {
553                 rb_erase(&fstripe_lock->node, &locks_root->root);
554                 freeit = true;
555         }
556         mutex_unlock(&locks_root->lock);
557
558         mutex_unlock(&fstripe_lock->mutex);
559         if (freeit)
560                 kfree(fstripe_lock);
561 out:
562         btrfs_put_block_group(bg_cache);
563         return ret;
564 }
565
566 /*
567  * used for workers that require transaction commits (i.e., for the
568  * NOCOW case)
569  */
570 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
571 {
572         struct btrfs_fs_info *fs_info = sctx->fs_info;
573
574         refcount_inc(&sctx->refs);
575         /*
576          * increment scrubs_running to prevent cancel requests from
577          * completing as long as a worker is running. we must also
578          * increment scrubs_paused to prevent deadlocking on pause
579          * requests used for transaction commits (as the worker uses a
580          * transaction context). it is safe to regard the worker
581          * as paused for all practical matters. effectively, we only
582          * prevent cancellation requests from completing.
583          */
584         mutex_lock(&fs_info->scrub_lock);
585         atomic_inc(&fs_info->scrubs_running);
586         atomic_inc(&fs_info->scrubs_paused);
587         mutex_unlock(&fs_info->scrub_lock);
588
589         /*
590          * The check of the @scrubs_running == @scrubs_paused condition
591          * inside wait_event() is not an atomic operation, which means
592          * we may inc/dec @scrubs_running/@scrubs_paused at any time.
593          * Wake up @scrub_pause_wait as often as we can so that a
594          * blocked transaction commit waits as little as possible.
595          */
596         wake_up(&fs_info->scrub_pause_wait);
597
598         atomic_inc(&sctx->workers_pending);
599 }
600
601 /* used for workers that require transaction commits */
602 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
603 {
604         struct btrfs_fs_info *fs_info = sctx->fs_info;
605
606         /*
607          * see scrub_pending_trans_workers_inc() for why we're pretending
608          * to be paused in the scrub counters
609          */
610         mutex_lock(&fs_info->scrub_lock);
611         atomic_dec(&fs_info->scrubs_running);
612         atomic_dec(&fs_info->scrubs_paused);
613         mutex_unlock(&fs_info->scrub_lock);
614         atomic_dec(&sctx->workers_pending);
615         wake_up(&fs_info->scrub_pause_wait);
616         wake_up(&sctx->list_wait);
617         scrub_put_ctx(sctx);
618 }
619
620 static void scrub_free_csums(struct scrub_ctx *sctx)
621 {
622         while (!list_empty(&sctx->csum_list)) {
623                 struct btrfs_ordered_sum *sum;
624                 sum = list_first_entry(&sctx->csum_list,
625                                        struct btrfs_ordered_sum, list);
626                 list_del(&sum->list);
627                 kfree(sum);
628         }
629 }
630
631 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
632 {
633         int i;
634
635         if (!sctx)
636                 return;
637
638         /* this can happen when scrub is cancelled */
639         if (sctx->curr != -1) {
640                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
641
642                 for (i = 0; i < sbio->page_count; i++) {
643                         WARN_ON(!sbio->pagev[i]->page);
644                         scrub_block_put(sbio->pagev[i]->sblock);
645                 }
646                 bio_put(sbio->bio);
647         }
648
649         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
650                 struct scrub_bio *sbio = sctx->bios[i];
651
652                 if (!sbio)
653                         break;
654                 kfree(sbio);
655         }
656
657         kfree(sctx->wr_curr_bio);
658         scrub_free_csums(sctx);
659         kfree(sctx);
660 }
661
662 static void scrub_put_ctx(struct scrub_ctx *sctx)
663 {
664         if (refcount_dec_and_test(&sctx->refs))
665                 scrub_free_ctx(sctx);
666 }
667
668 static noinline_for_stack
669 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
670 {
671         struct scrub_ctx *sctx;
672         int             i;
673         struct btrfs_fs_info *fs_info = dev->fs_info;
674
675         sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
676         if (!sctx)
677                 goto nomem;
678         refcount_set(&sctx->refs, 1);
679         sctx->is_dev_replace = is_dev_replace;
680         sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
681         sctx->curr = -1;
682         sctx->fs_info = dev->fs_info;
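        /* Preallocate the scrub bios and chain them into a free list via next_free */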
683         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
684                 struct scrub_bio *sbio;
685
686                 sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
687                 if (!sbio)
688                         goto nomem;
689                 sctx->bios[i] = sbio;
690
691                 sbio->index = i;
692                 sbio->sctx = sctx;
693                 sbio->page_count = 0;
694                 btrfs_init_work(&sbio->work, btrfs_scrub_helper,
695                                 scrub_bio_end_io_worker, NULL, NULL);
696
697                 if (i != SCRUB_BIOS_PER_SCTX - 1)
698                         sctx->bios[i]->next_free = i + 1;
699                 else
700                         sctx->bios[i]->next_free = -1;
701         }
702         sctx->first_free = 0;
703         atomic_set(&sctx->bios_in_flight, 0);
704         atomic_set(&sctx->workers_pending, 0);
705         atomic_set(&sctx->cancel_req, 0);
706         sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
707         INIT_LIST_HEAD(&sctx->csum_list);
708
709         spin_lock_init(&sctx->list_lock);
710         spin_lock_init(&sctx->stat_lock);
711         init_waitqueue_head(&sctx->list_wait);
712
713         WARN_ON(sctx->wr_curr_bio != NULL);
714         mutex_init(&sctx->wr_lock);
715         sctx->wr_curr_bio = NULL;
716         if (is_dev_replace) {
717                 WARN_ON(!fs_info->dev_replace.tgtdev);
718                 sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
719                 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
720                 sctx->flush_all_writes = false;
721         }
722
723         return sctx;
724
725 nomem:
726         scrub_free_ctx(sctx);
727         return ERR_PTR(-ENOMEM);
728 }
729
730 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
731                                      void *warn_ctx)
732 {
733         u64 isize;
734         u32 nlink;
735         int ret;
736         int i;
737         unsigned nofs_flag;
738         struct extent_buffer *eb;
739         struct btrfs_inode_item *inode_item;
740         struct scrub_warning *swarn = warn_ctx;
741         struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
742         struct inode_fs_paths *ipath = NULL;
743         struct btrfs_root *local_root;
744         struct btrfs_key root_key;
745         struct btrfs_key key;
746
747         root_key.objectid = root;
748         root_key.type = BTRFS_ROOT_ITEM_KEY;
749         root_key.offset = (u64)-1;
750         local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
751         if (IS_ERR(local_root)) {
752                 ret = PTR_ERR(local_root);
753                 goto err;
754         }
755
756         /*
757          * this makes the path point to (inum INODE_ITEM ioff)
758          */
759         key.objectid = inum;
760         key.type = BTRFS_INODE_ITEM_KEY;
761         key.offset = 0;
762
763         ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
764         if (ret) {
765                 btrfs_release_path(swarn->path);
766                 goto err;
767         }
768
769         eb = swarn->path->nodes[0];
770         inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
771                                         struct btrfs_inode_item);
772         isize = btrfs_inode_size(eb, inode_item);
773         nlink = btrfs_inode_nlink(eb, inode_item);
774         btrfs_release_path(swarn->path);
775
776         /*
777          * init_ipath might indirectly call vmalloc, or use GFP_KERNEL. Scrub
778          * uses GFP_NOFS in this context, so we keep it consistent but it does
779          * not seem to be strictly necessary.
780          */
781         nofs_flag = memalloc_nofs_save();
782         ipath = init_ipath(4096, local_root, swarn->path);
783         memalloc_nofs_restore(nofs_flag);
784         if (IS_ERR(ipath)) {
785                 ret = PTR_ERR(ipath);
786                 ipath = NULL;
787                 goto err;
788         }
789         ret = paths_from_inode(inum, ipath);
790
791         if (ret < 0)
792                 goto err;
793
794         /*
795          * we deliberately ignore the fact that ipath might have been too
796          * small to hold all of the paths here
797          */
798         for (i = 0; i < ipath->fspath->elem_cnt; ++i)
799                 btrfs_warn_in_rcu(fs_info,
800 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
801                                   swarn->errstr, swarn->logical,
802                                   rcu_str_deref(swarn->dev->name),
803                                   swarn->physical,
804                                   root, inum, offset,
805                                   min(isize - offset, (u64)PAGE_SIZE), nlink,
806                                   (char *)(unsigned long)ipath->fspath->val[i]);
807
808         free_ipath(ipath);
809         return 0;
810
811 err:
812         btrfs_warn_in_rcu(fs_info,
813                           "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
814                           swarn->errstr, swarn->logical,
815                           rcu_str_deref(swarn->dev->name),
816                           swarn->physical,
817                           root, inum, offset, ret);
818
819         free_ipath(ipath);
820         return 0;
821 }
822
823 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
824 {
825         struct btrfs_device *dev;
826         struct btrfs_fs_info *fs_info;
827         struct btrfs_path *path;
828         struct btrfs_key found_key;
829         struct extent_buffer *eb;
830         struct btrfs_extent_item *ei;
831         struct scrub_warning swarn;
832         unsigned long ptr = 0;
833         u64 extent_item_pos;
834         u64 flags = 0;
835         u64 ref_root;
836         u32 item_size;
837         u8 ref_level = 0;
838         int ret;
839
840         WARN_ON(sblock->page_count < 1);
841         dev = sblock->pagev[0]->dev;
842         fs_info = sblock->sctx->fs_info;
843
844         path = btrfs_alloc_path();
845         if (!path)
846                 return;
847
848         swarn.physical = sblock->pagev[0]->physical;
849         swarn.logical = sblock->pagev[0]->logical;
850         swarn.errstr = errstr;
851         swarn.dev = NULL;
852
853         ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
854                                   &flags);
855         if (ret < 0)
856                 goto out;
857
858         extent_item_pos = swarn.logical - found_key.objectid;
859         swarn.extent_item_size = found_key.offset;
860
861         eb = path->nodes[0];
862         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
863         item_size = btrfs_item_size_nr(eb, path->slots[0]);
864
865         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
866                 do {
867                         ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
868                                                       item_size, &ref_root,
869                                                       &ref_level);
870                         btrfs_warn_in_rcu(fs_info,
871 "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
872                                 errstr, swarn.logical,
873                                 rcu_str_deref(dev->name),
874                                 swarn.physical,
875                                 ref_level ? "node" : "leaf",
876                                 ret < 0 ? -1 : ref_level,
877                                 ret < 0 ? -1 : ref_root);
878                 } while (ret != 1);
879                 btrfs_release_path(path);
880         } else {
881                 btrfs_release_path(path);
882                 swarn.path = path;
883                 swarn.dev = dev;
884                 iterate_extent_inodes(fs_info, found_key.objectid,
885                                         extent_item_pos, 1,
886                                         scrub_print_warning_inode, &swarn, false);
887         }
888
889 out:
890         btrfs_free_path(path);
891 }
892
893 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
894 {
895         struct page *page = NULL;
896         unsigned long index;
897         struct scrub_fixup_nodatasum *fixup = fixup_ctx;
898         int ret;
899         int corrected = 0;
900         struct btrfs_key key;
901         struct inode *inode = NULL;
902         struct btrfs_fs_info *fs_info;
903         u64 end = offset + PAGE_SIZE - 1;
904         struct btrfs_root *local_root;
905         int srcu_index;
906
907         key.objectid = root;
908         key.type = BTRFS_ROOT_ITEM_KEY;
909         key.offset = (u64)-1;
910
911         fs_info = fixup->root->fs_info;
912         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
913
914         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
915         if (IS_ERR(local_root)) {
916                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
917                 return PTR_ERR(local_root);
918         }
919
920         key.type = BTRFS_INODE_ITEM_KEY;
921         key.objectid = inum;
922         key.offset = 0;
923         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
924         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
925         if (IS_ERR(inode))
926                 return PTR_ERR(inode);
927
928         index = offset >> PAGE_SHIFT;
929
930         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
931         if (!page) {
932                 ret = -ENOMEM;
933                 goto out;
934         }
935
936         if (PageUptodate(page)) {
937                 if (PageDirty(page)) {
938                         /*
939                          * we need to write the data to the defective sector. the
940                          * data that was in that sector is not in memory,
941                          * because the page was modified. we must not write the
942                          * modified page to that sector.
943                          *
944                          * TODO: what could be done here: wait for the delalloc
945                          *       runner to write out that page (might involve
946                          *       COW) and see whether the sector is still
947                          *       referenced afterwards.
948                          *
949                          * For the meantime, we'll treat this error as
950                          * uncorrectable, although there is a chance that a
951                          * later scrub will find the bad sector again and that
952                          * there will be no dirty page in memory by then.
953                          */
954                         ret = -EIO;
955                         goto out;
956                 }
957                 ret = repair_io_failure(fs_info, inum, offset, PAGE_SIZE,
958                                         fixup->logical, page,
959                                         offset - page_offset(page),
960                                         fixup->mirror_num);
961                 unlock_page(page);
962                 corrected = !ret;
963         } else {
964                 /*
965                  * we need to get good data first. the general readpage path
966                  * will call repair_io_failure for us; we just have to make
967                  * sure we read the bad mirror.
968                  */
969                 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
970                                         EXTENT_DAMAGED);
971                 if (ret) {
972                         /* set_extent_bits should give proper error */
973                         WARN_ON(ret > 0);
974                         if (ret > 0)
975                                 ret = -EFAULT;
976                         goto out;
977                 }
978
979                 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
980                                                 btrfs_get_extent,
981                                                 fixup->mirror_num);
982                 wait_on_page_locked(page);
983
984                 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
985                                                 end, EXTENT_DAMAGED, 0, NULL);
986                 if (!corrected)
987                         clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
988                                                 EXTENT_DAMAGED);
989         }
990
991 out:
992         if (page)
993                 put_page(page);
994
995         iput(inode);
996
997         if (ret < 0)
998                 return ret;
999
1000         if (ret == 0 && corrected) {
1001                 /*
1002                  * we only need to call readpage for one of the inodes belonging
1003                  * to this extent. so make iterate_extent_inodes stop
1004                  */
1005                 return 1;
1006         }
1007
1008         return -EIO;
1009 }
1010
1011 static void scrub_fixup_nodatasum(struct btrfs_work *work)
1012 {
1013         struct btrfs_fs_info *fs_info;
1014         int ret;
1015         struct scrub_fixup_nodatasum *fixup;
1016         struct scrub_ctx *sctx;
1017         struct btrfs_trans_handle *trans = NULL;
1018         struct btrfs_path *path;
1019         int uncorrectable = 0;
1020
1021         fixup = container_of(work, struct scrub_fixup_nodatasum, work);
1022         sctx = fixup->sctx;
1023         fs_info = fixup->root->fs_info;
1024
1025         path = btrfs_alloc_path();
1026         if (!path) {
1027                 spin_lock(&sctx->stat_lock);
1028                 ++sctx->stat.malloc_errors;
1029                 spin_unlock(&sctx->stat_lock);
1030                 uncorrectable = 1;
1031                 goto out;
1032         }
1033
1034         trans = btrfs_join_transaction(fixup->root);
1035         if (IS_ERR(trans)) {
1036                 uncorrectable = 1;
1037                 goto out;
1038         }
1039
1040         /*
1041          * the idea is to trigger a regular read through the standard path. we
1042          * read a page from the (failed) logical address by specifying the
1043          * corresponding copynum of the failed sector. thus, that readpage is
1044          * expected to fail.
1045          * that is the point where on-the-fly error correction will kick in
1046          * (once it's finished) and rewrite the failed sector if a good copy
1047          * can be found.
1048          */
1049         ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
1050                                           scrub_fixup_readpage, fixup, false);
1051         if (ret < 0) {
1052                 uncorrectable = 1;
1053                 goto out;
1054         }
1055         WARN_ON(ret != 1);
1056
1057         spin_lock(&sctx->stat_lock);
1058         ++sctx->stat.corrected_errors;
1059         spin_unlock(&sctx->stat_lock);
1060
1061 out:
1062         if (trans && !IS_ERR(trans))
1063                 btrfs_end_transaction(trans);
1064         if (uncorrectable) {
1065                 spin_lock(&sctx->stat_lock);
1066                 ++sctx->stat.uncorrectable_errors;
1067                 spin_unlock(&sctx->stat_lock);
1068                 btrfs_dev_replace_stats_inc(
1069                         &fs_info->dev_replace.num_uncorrectable_read_errors);
1070                 btrfs_err_rl_in_rcu(fs_info,
1071                     "unable to fixup (nodatasum) error at logical %llu on dev %s",
1072                         fixup->logical, rcu_str_deref(fixup->dev->name));
1073         }
1074
1075         btrfs_free_path(path);
1076         kfree(fixup);
1077
1078         scrub_pending_trans_workers_dec(sctx);
1079 }
1080
1081 static inline void scrub_get_recover(struct scrub_recover *recover)
1082 {
1083         refcount_inc(&recover->refs);
1084 }
1085
1086 static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
1087                                      struct scrub_recover *recover)
1088 {
1089         if (refcount_dec_and_test(&recover->refs)) {
1090                 btrfs_bio_counter_dec(fs_info);
1091                 btrfs_put_bbio(recover->bbio);
1092                 kfree(recover);
1093         }
1094 }
1095
1096 /*
1097  * scrub_handle_errored_block gets called when either verification of the
1098  * pages failed or the bio failed to read, e.g. with EIO. In the latter
1099  * case, this function handles all pages in the bio, even though only one
1100  * may be bad.
1101  * The goal of this function is to repair the errored block by using the
1102  * contents of one of the mirrors.
1103  */
1104 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
1105 {
1106         struct scrub_ctx *sctx = sblock_to_check->sctx;
1107         struct btrfs_device *dev;
1108         struct btrfs_fs_info *fs_info;
1109         u64 length;
1110         u64 logical;
1111         unsigned int failed_mirror_index;
1112         unsigned int is_metadata;
1113         unsigned int have_csum;
1114         struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
1115         struct scrub_block *sblock_bad;
1116         int ret;
1117         int mirror_index;
1118         int page_num;
1119         int success;
1120         bool full_stripe_locked;
1121         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
1122                                       DEFAULT_RATELIMIT_BURST);
1123
1124         BUG_ON(sblock_to_check->page_count < 1);
1125         fs_info = sctx->fs_info;
1126         if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
1127                 /*
1128                  * if we find an error in a super block, we just report it.
1129                  * Super blocks will get rewritten with the next transaction
1130                  * commit anyway.
1131                  */
1132                 spin_lock(&sctx->stat_lock);
1133                 ++sctx->stat.super_errors;
1134                 spin_unlock(&sctx->stat_lock);
1135                 return 0;
1136         }
1137         length = sblock_to_check->page_count * PAGE_SIZE;
1138         logical = sblock_to_check->pagev[0]->logical;
1139         BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
1140         failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
1141         is_metadata = !(sblock_to_check->pagev[0]->flags &
1142                         BTRFS_EXTENT_FLAG_DATA);
1143         have_csum = sblock_to_check->pagev[0]->have_csum;
1144         dev = sblock_to_check->pagev[0]->dev;
1145
1146         /*
1147          * For RAID5/6, a race can happen between scrub threads of different
1148          * devices. On data corruption, the parity and data threads will
1149          * both try to recover the data.
1150          * The race can lead to a doubly counted csum error, or even an
1151          * unrecoverable error.
1152          */
1153         ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
1154         if (ret < 0) {
1155                 spin_lock(&sctx->stat_lock);
1156                 if (ret == -ENOMEM)
1157                         sctx->stat.malloc_errors++;
1158                 sctx->stat.read_errors++;
1159                 sctx->stat.uncorrectable_errors++;
1160                 spin_unlock(&sctx->stat_lock);
1161                 return ret;
1162         }
1163
1164         if (sctx->is_dev_replace && !is_metadata && !have_csum) {
1165                 sblocks_for_recheck = NULL;
1166                 goto nodatasum_case;
1167         }
1168
1169         /*
1170          * read all mirrors one after the other. This includes
1171          * re-reading the extent or metadata block that failed (the
1172          * reason this fixup code was called), this time page by page,
1173          * in order to know which pages caused I/O errors and which
1174          * ones are good (for all mirrors).
1175          * The goal is to handle the situation when more than one
1176          * mirror contains I/O errors, but the errors do not
1177          * overlap, i.e. the data can be repaired by selecting the
1178          * pages from those mirrors without I/O error on the
1179          * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
1180          * would be that mirror #1 has an I/O error on the first page,
1181          * the second page is good, and mirror #2 has an I/O error on
1182          * the second page, but the first page is good.
1183          * Then the first page of the first mirror can be repaired by
1184          * taking the first page of the second mirror, and the
1185          * second page of the second mirror can be repaired by
1186          * copying the contents of the 2nd page of the 1st mirror.
1187          * One more note: if the pages of one mirror contain I/O
1188          * errors, the checksum cannot be verified. In order to get
1189          * the best data for repairing, the first attempt is to find
1190          * a mirror without I/O errors and with a validated checksum.
1191          * Only if this is not possible, the pages are picked from
1192          * mirrors with I/O errors without considering the checksum.
1193          * If the latter is the case, at the end, the checksum of the
1194          * repaired area is verified in order to correctly maintain
1195          * the statistics.
1196          */
1197
1198         sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
1199                                       sizeof(*sblocks_for_recheck), GFP_NOFS);
1200         if (!sblocks_for_recheck) {
1201                 spin_lock(&sctx->stat_lock);
1202                 sctx->stat.malloc_errors++;
1203                 sctx->stat.read_errors++;
1204                 sctx->stat.uncorrectable_errors++;
1205                 spin_unlock(&sctx->stat_lock);
1206                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1207                 goto out;
1208         }
1209
1210         /* setup the context, map the logical blocks and alloc the pages */
1211         ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
1212         if (ret) {
1213                 spin_lock(&sctx->stat_lock);
1214                 sctx->stat.read_errors++;
1215                 sctx->stat.uncorrectable_errors++;
1216                 spin_unlock(&sctx->stat_lock);
1217                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1218                 goto out;
1219         }
1220         BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
1221         sblock_bad = sblocks_for_recheck + failed_mirror_index;
1222
1223         /* build and submit the bios for the failed mirror, check checksums */
1224         scrub_recheck_block(fs_info, sblock_bad, 1);
1225
1226         if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
1227             sblock_bad->no_io_error_seen) {
1228                 /*
1229                  * the error disappeared after reading page by page, or
1230                  * the area was part of a huge bio and other parts of the
1231                  * bio caused I/O errors, or the block layer merged several
1232                  * read requests into one and the error is caused by a
1233                  * different bio (usually one of the two latter cases is
1234                  * the cause)
1235                  */
1236                 spin_lock(&sctx->stat_lock);
1237                 sctx->stat.unverified_errors++;
1238                 sblock_to_check->data_corrected = 1;
1239                 spin_unlock(&sctx->stat_lock);
1240
1241                 if (sctx->is_dev_replace)
1242                         scrub_write_block_to_dev_replace(sblock_bad);
1243                 goto out;
1244         }
1245
1246         if (!sblock_bad->no_io_error_seen) {
1247                 spin_lock(&sctx->stat_lock);
1248                 sctx->stat.read_errors++;
1249                 spin_unlock(&sctx->stat_lock);
1250                 if (__ratelimit(&_rs))
1251                         scrub_print_warning("i/o error", sblock_to_check);
1252                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1253         } else if (sblock_bad->checksum_error) {
1254                 spin_lock(&sctx->stat_lock);
1255                 sctx->stat.csum_errors++;
1256                 spin_unlock(&sctx->stat_lock);
1257                 if (__ratelimit(&_rs))
1258                         scrub_print_warning("checksum error", sblock_to_check);
1259                 btrfs_dev_stat_inc_and_print(dev,
1260                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
1261         } else if (sblock_bad->header_error) {
1262                 spin_lock(&sctx->stat_lock);
1263                 sctx->stat.verify_errors++;
1264                 spin_unlock(&sctx->stat_lock);
1265                 if (__ratelimit(&_rs))
1266                         scrub_print_warning("checksum/header error",
1267                                             sblock_to_check);
1268                 if (sblock_bad->generation_error)
1269                         btrfs_dev_stat_inc_and_print(dev,
1270                                 BTRFS_DEV_STAT_GENERATION_ERRS);
1271                 else
1272                         btrfs_dev_stat_inc_and_print(dev,
1273                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1274         }
1275
1276         if (sctx->readonly) {
1277                 ASSERT(!sctx->is_dev_replace);
1278                 goto out;
1279         }
1280
1281         if (!is_metadata && !have_csum) {
1282                 struct scrub_fixup_nodatasum *fixup_nodatasum;
1283
1284                 WARN_ON(sctx->is_dev_replace);
1285
1286 nodatasum_case:
1287
1288                 /*
1289                  * !is_metadata and !have_csum means that the data
1290                  * might not be COWed and might be modified
1291                  * concurrently. The general strategy of working on the
1292                  * commit root does not help in the case when COW is not
1293                  * used.
1294                  */
1295                 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1296                 if (!fixup_nodatasum)
1297                         goto did_not_correct_error;
1298                 fixup_nodatasum->sctx = sctx;
1299                 fixup_nodatasum->dev = dev;
1300                 fixup_nodatasum->logical = logical;
1301                 fixup_nodatasum->root = fs_info->extent_root;
1302                 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
1303                 scrub_pending_trans_workers_inc(sctx);
1304                 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1305                                 scrub_fixup_nodatasum, NULL, NULL);
1306                 btrfs_queue_work(fs_info->scrub_workers,
1307                                  &fixup_nodatasum->work);
1308                 goto out;
1309         }
1310
1311         /*
1312          * now build and submit the bios for the other mirrors, check
1313          * checksums.
1314          * First try to pick the mirror which is completely without I/O
1315          * errors and also does not have a checksum error.
1316          * If one is found, and if a checksum is present, the full block
1317          * that is known to contain an error is rewritten. Afterwards
1318          * the block is known to be corrected.
1319          * If a mirror is found which is completely correct, and no
1320          * checksum is present, only those pages are rewritten that had
1321          * an I/O error in the block to be repaired, since it cannot be
1322          * determined which copy of the other pages is better (and it
1323          * could happen otherwise that a correct page would be
1324          * overwritten by a bad one).
1325          */
1326         for (mirror_index = 0;
1327              mirror_index < BTRFS_MAX_MIRRORS &&
1328              sblocks_for_recheck[mirror_index].page_count > 0;
1329              mirror_index++) {
1330                 struct scrub_block *sblock_other;
1331
1332                 if (mirror_index == failed_mirror_index)
1333                         continue;
1334                 sblock_other = sblocks_for_recheck + mirror_index;
1335
1336                 /* build and submit the bios, check checksums */
1337                 scrub_recheck_block(fs_info, sblock_other, 0);
1338
1339                 if (!sblock_other->header_error &&
1340                     !sblock_other->checksum_error &&
1341                     sblock_other->no_io_error_seen) {
1342                         if (sctx->is_dev_replace) {
1343                                 scrub_write_block_to_dev_replace(sblock_other);
1344                                 goto corrected_error;
1345                         } else {
1346                                 ret = scrub_repair_block_from_good_copy(
1347                                                 sblock_bad, sblock_other);
1348                                 if (!ret)
1349                                         goto corrected_error;
1350                         }
1351                 }
1352         }
1353
1354         if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1355                 goto did_not_correct_error;
1356
1357         /*
1358          * In case of I/O errors in the area that is supposed to be
1359          * repaired, continue by picking good copies of those pages.
1360          * Select the good pages from mirrors to rewrite bad pages from
1361          * the area to fix. Afterwards verify the checksum of the block
1362          * that is supposed to be repaired. This verification step is
1363          * only done for the purpose of statistics counting and for the
1364          * final scrub report on whether errors remain.
1365          * A perfect algorithm could make use of the checksum and try
1366          * all possible combinations of pages from the different mirrors
1367          * until the checksum verification succeeds. For example, when
1368          * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1369          * of mirror #2 is readable but the final checksum test fails,
1370          * then the 2nd page of mirror #3 could be tried, whether now
1371          * the final checksum succeeds. But this would be a rare
1372          * exception and is therefore not implemented. At least it is
1373          * avoided that the good copy is overwritten.
1374          * A more useful improvement would be to pick the sectors
1375          * without I/O error based on sector sizes (512 bytes on legacy
1376          * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
1377          * mirror could be repaired by taking 512 byte of a different
1378          * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1379          * area are unreadable.
1380          */
1381         success = 1;
1382         for (page_num = 0; page_num < sblock_bad->page_count;
1383              page_num++) {
1384                 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1385                 struct scrub_block *sblock_other = NULL;
1386
1387                 /* skip pages without I/O errors in plain scrub */
1388                 if (!page_bad->io_error && !sctx->is_dev_replace)
1389                         continue;
1390
1391                 /* try to find a mirror where this page has no I/O error */
1392                 if (page_bad->io_error) {
1393                         for (mirror_index = 0;
1394                              mirror_index < BTRFS_MAX_MIRRORS &&
1395                              sblocks_for_recheck[mirror_index].page_count > 0;
1396                              mirror_index++) {
1397                                 if (!sblocks_for_recheck[mirror_index].
1398                                     pagev[page_num]->io_error) {
1399                                         sblock_other = sblocks_for_recheck +
1400                                                        mirror_index;
1401                                         break;
1402                                 }
1403                         }
1404                         if (!sblock_other)
1405                                 success = 0;
1406                 }
1407
1408                 if (sctx->is_dev_replace) {
1409                         /*
1410                          * did not find a mirror to fetch the page
1411                          * from. scrub_write_page_to_dev_replace()
1412                          * handles this case (page->io_error), by
1413                          * filling the block with zeros before
1414                          * submitting the write request
1415                          */
1416                         if (!sblock_other)
1417                                 sblock_other = sblock_bad;
1418
1419                         if (scrub_write_page_to_dev_replace(sblock_other,
1420                                                             page_num) != 0) {
1421                                 btrfs_dev_replace_stats_inc(
1422                                         &fs_info->dev_replace.num_write_errors);
1423                                 success = 0;
1424                         }
1425                 } else if (sblock_other) {
1426                         ret = scrub_repair_page_from_good_copy(sblock_bad,
1427                                                                sblock_other,
1428                                                                page_num, 0);
1429                         if (!ret)
1430                                 page_bad->io_error = 0;
1431                         else
1432                                 success = 0;
1433                 }
1434         }
1435
1436         if (success && !sctx->is_dev_replace) {
1437                 if (is_metadata || have_csum) {
1438                         /*
1439                          * need to verify the checksum now that all
1440                          * sectors on disk are repaired (the write
1441                          * request for data to be repaired is on its way).
1442                          * Just be lazy and use scrub_recheck_block()
1443                          * which re-reads the data before the checksum
1444                          * is verified, but most likely the data comes out
1445                          * of the page cache.
1446                          */
1447                         scrub_recheck_block(fs_info, sblock_bad, 1);
1448                         if (!sblock_bad->header_error &&
1449                             !sblock_bad->checksum_error &&
1450                             sblock_bad->no_io_error_seen)
1451                                 goto corrected_error;
1452                         else
1453                                 goto did_not_correct_error;
1454                 } else {
1455 corrected_error:
1456                         spin_lock(&sctx->stat_lock);
1457                         sctx->stat.corrected_errors++;
1458                         sblock_to_check->data_corrected = 1;
1459                         spin_unlock(&sctx->stat_lock);
1460                         btrfs_err_rl_in_rcu(fs_info,
1461                                 "fixed up error at logical %llu on dev %s",
1462                                 logical, rcu_str_deref(dev->name));
1463                 }
1464         } else {
1465 did_not_correct_error:
1466                 spin_lock(&sctx->stat_lock);
1467                 sctx->stat.uncorrectable_errors++;
1468                 spin_unlock(&sctx->stat_lock);
1469                 btrfs_err_rl_in_rcu(fs_info,
1470                         "unable to fixup (regular) error at logical %llu on dev %s",
1471                         logical, rcu_str_deref(dev->name));
1472         }
1473
1474 out:
1475         if (sblocks_for_recheck) {
1476                 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1477                      mirror_index++) {
1478                         struct scrub_block *sblock = sblocks_for_recheck +
1479                                                      mirror_index;
1480                         struct scrub_recover *recover;
1481                         int page_index;
1482
1483                         for (page_index = 0; page_index < sblock->page_count;
1484                              page_index++) {
1485                                 sblock->pagev[page_index]->sblock = NULL;
1486                                 recover = sblock->pagev[page_index]->recover;
1487                                 if (recover) {
1488                                         scrub_put_recover(fs_info, recover);
1489                                         sblock->pagev[page_index]->recover =
1490                                                                         NULL;
1491                                 }
1492                                 scrub_page_put(sblock->pagev[page_index]);
1493                         }
1494                 }
1495                 kfree(sblocks_for_recheck);
1496         }
1497
1498         ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1499         if (ret < 0)
1500                 return ret;
1501         return 0;
1502 }
1503
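/*
 * Number of distinct ways a block can be read back: a RAID5 block can be
 * read in two ways (the data stripe itself or rebuilt from the remaining
 * stripes plus parity), a RAID6 block in three; for all other profiles
 * every stripe of the bbio is a separate copy.
 */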
1504 static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1505 {
1506         if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1507                 return 2;
1508         else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1509                 return 3;
1510         else
1511                 return (int)bbio->num_stripes;
1512 }
1513
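/*
 * Map a logical address and mirror number to a stripe index and the offset
 * within that stripe of the given bbio. For RAID5/6 the raid_map is searched
 * for the data stripe that covers the logical address (P/Q stripes are
 * skipped); for all other profiles the mirror number directly selects the
 * stripe and the offset is 0.
 */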
1514 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1515                                                  u64 *raid_map,
1516                                                  u64 mapped_length,
1517                                                  int nstripes, int mirror,
1518                                                  int *stripe_index,
1519                                                  u64 *stripe_offset)
1520 {
1521         int i;
1522
1523         if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1524                 /* RAID5/6 */
1525                 for (i = 0; i < nstripes; i++) {
1526                         if (raid_map[i] == RAID6_Q_STRIPE ||
1527                             raid_map[i] == RAID5_P_STRIPE)
1528                                 continue;
1529
1530                         if (logical >= raid_map[i] &&
1531                             logical < raid_map[i] + mapped_length)
1532                                 break;
1533                 }
1534
1535                 *stripe_index = i;
1536                 *stripe_offset = logical - raid_map[i];
1537         } else {
1538                 /* The other RAID types: the mirror number selects the stripe */
1539                 *stripe_index = mirror;
1540                 *stripe_offset = 0;
1541         }
1542 }
1543
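/*
 * Build one scrub_block per mirror for the failed block, page by page, so
 * that scrub_recheck_block() can later read and verify each copy
 * independently of the others.
 */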
1544 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1545                                      struct scrub_block *sblocks_for_recheck)
1546 {
1547         struct scrub_ctx *sctx = original_sblock->sctx;
1548         struct btrfs_fs_info *fs_info = sctx->fs_info;
1549         u64 length = original_sblock->page_count * PAGE_SIZE;
1550         u64 logical = original_sblock->pagev[0]->logical;
1551         u64 generation = original_sblock->pagev[0]->generation;
1552         u64 flags = original_sblock->pagev[0]->flags;
1553         u64 have_csum = original_sblock->pagev[0]->have_csum;
1554         struct scrub_recover *recover;
1555         struct btrfs_bio *bbio;
1556         u64 sublen;
1557         u64 mapped_length;
1558         u64 stripe_offset;
1559         int stripe_index;
1560         int page_index = 0;
1561         int mirror_index;
1562         int nmirrors;
1563         int ret;
1564
1565         /*
1566          * note: the two members refs and outstanding_pages
1567          * are not used (and not set) in the blocks that are used for
1568          * the recheck procedure
1569          */
1570
1571         while (length > 0) {
1572                 sublen = min_t(u64, length, PAGE_SIZE);
1573                 mapped_length = sublen;
1574                 bbio = NULL;
1575
1576                 /*
1577                  * with a length of PAGE_SIZE, each returned stripe
1578                  * represents one mirror
1579                  */
1580                 btrfs_bio_counter_inc_blocked(fs_info);
1581                 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1582                                 logical, &mapped_length, &bbio);
1583                 if (ret || !bbio || mapped_length < sublen) {
1584                         btrfs_put_bbio(bbio);
1585                         btrfs_bio_counter_dec(fs_info);
1586                         return -EIO;
1587                 }
1588
1589                 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1590                 if (!recover) {
1591                         btrfs_put_bbio(bbio);
1592                         btrfs_bio_counter_dec(fs_info);
1593                         return -ENOMEM;
1594                 }
1595
1596                 refcount_set(&recover->refs, 1);
1597                 recover->bbio = bbio;
1598                 recover->map_length = mapped_length;
1599
1600                 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
1601
1602                 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1603
1604                 for (mirror_index = 0; mirror_index < nmirrors;
1605                      mirror_index++) {
1606                         struct scrub_block *sblock;
1607                         struct scrub_page *page;
1608
1609                         sblock = sblocks_for_recheck + mirror_index;
1610                         sblock->sctx = sctx;
1611
1612                         page = kzalloc(sizeof(*page), GFP_NOFS);
1613                         if (!page) {
1614 leave_nomem:
1615                                 spin_lock(&sctx->stat_lock);
1616                                 sctx->stat.malloc_errors++;
1617                                 spin_unlock(&sctx->stat_lock);
1618                                 scrub_put_recover(fs_info, recover);
1619                                 return -ENOMEM;
1620                         }
1621                         scrub_page_get(page);
1622                         sblock->pagev[page_index] = page;
1623                         page->sblock = sblock;
1624                         page->flags = flags;
1625                         page->generation = generation;
1626                         page->logical = logical;
1627                         page->have_csum = have_csum;
1628                         if (have_csum)
1629                                 memcpy(page->csum,
1630                                        original_sblock->pagev[0]->csum,
1631                                        sctx->csum_size);
1632
1633                         scrub_stripe_index_and_offset(logical,
1634                                                       bbio->map_type,
1635                                                       bbio->raid_map,
1636                                                       mapped_length,
1637                                                       bbio->num_stripes -
1638                                                       bbio->num_tgtdevs,
1639                                                       mirror_index,
1640                                                       &stripe_index,
1641                                                       &stripe_offset);
1642                         page->physical = bbio->stripes[stripe_index].physical +
1643                                          stripe_offset;
1644                         page->dev = bbio->stripes[stripe_index].dev;
1645
1646                         BUG_ON(page_index >= original_sblock->page_count);
1647                         page->physical_for_dev_replace =
1648                                 original_sblock->pagev[page_index]->
1649                                 physical_for_dev_replace;
1650                         /* for missing devices, dev->bdev is NULL */
1651                         page->mirror_num = mirror_index + 1;
1652                         sblock->page_count++;
1653                         page->page = alloc_page(GFP_NOFS);
1654                         if (!page->page)
1655                                 goto leave_nomem;
1656
1657                         scrub_get_recover(recover);
1658                         page->recover = recover;
1659                 }
1660                 scrub_put_recover(fs_info, recover);
1661                 length -= sublen;
1662                 logical += sublen;
1663                 page_index++;
1664         }
1665
1666         return 0;
1667 }
1668
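/*
 * Small completion helper used to wait synchronously for a RAID5/6 rebuild
 * read that is submitted via raid56_parity_recover().
 */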
1669 struct scrub_bio_ret {
1670         struct completion event;
1671         blk_status_t status;
1672 };
1673
1674 static void scrub_bio_wait_endio(struct bio *bio)
1675 {
1676         struct scrub_bio_ret *ret = bio->bi_private;
1677
1678         ret->status = bio->bi_status;
1679         complete(&ret->event);
1680 }
1681
1682 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1683 {
1684         return page->recover &&
1685                (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
1686 }
1687
1688 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1689                                         struct bio *bio,
1690                                         struct scrub_page *page)
1691 {
1692         struct scrub_bio_ret done;
1693         int ret;
1694
1695         init_completion(&done.event);
1696         done.status = 0;
1697         bio->bi_iter.bi_sector = page->logical >> 9;
1698         bio->bi_private = &done;
1699         bio->bi_end_io = scrub_bio_wait_endio;
1700
1701         ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
1702                                     page->recover->map_length,
1703                                     page->mirror_num, 0);
1704         if (ret)
1705                 return ret;
1706
1707         wait_for_completion_io(&done.event);
1708         if (done.status)
1709                 return -EIO;
1710
1711         return 0;
1712 }
1713
1714 /*
1715  * This function checks the on-disk data for checksum errors, header
1716  * errors and read I/O errors. If any I/O error happens, the exact
1717  * pages that failed are marked as bad. The goal is to enable scrub
1718  * to take the non-failing pages from all the mirrors so that the
1719  * pages that failed in the just handled mirror can be repaired.
1720  */
1721 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1722                                 struct scrub_block *sblock,
1723                                 int retry_failed_mirror)
1724 {
1725         int page_num;
1726
1727         sblock->no_io_error_seen = 1;
1728
1729         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1730                 struct bio *bio;
1731                 struct scrub_page *page = sblock->pagev[page_num];
1732
1733                 if (page->dev->bdev == NULL) {
1734                         page->io_error = 1;
1735                         sblock->no_io_error_seen = 0;
1736                         continue;
1737                 }
1738
1739                 WARN_ON(!page->page);
1740                 bio = btrfs_io_bio_alloc(1);
1741                 bio_set_dev(bio, page->dev->bdev);
1742
1743                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1744                 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1745                         if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) {
1746                                 page->io_error = 1;
1747                                 sblock->no_io_error_seen = 0;
1748                         }
1749                 } else {
1750                         bio->bi_iter.bi_sector = page->physical >> 9;
1751                         bio_set_op_attrs(bio, REQ_OP_READ, 0);
1752
1753                         if (btrfsic_submit_bio_wait(bio)) {
1754                                 page->io_error = 1;
1755                                 sblock->no_io_error_seen = 0;
1756                         }
1757                 }
1758
1759                 bio_put(bio);
1760         }
1761
1762         if (sblock->no_io_error_seen)
1763                 scrub_recheck_block_checksum(sblock);
1764 }
1765
1766 static inline int scrub_check_fsid(u8 fsid[],
1767                                    struct scrub_page *spage)
1768 {
1769         struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1770         int ret;
1771
1772         ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1773         return !ret;
1774 }
1775
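/*
 * Recompute and verify the checksum (and header fields for metadata) after
 * a block has been re-read; the results are stored in the sblock error
 * flags instead of being returned.
 */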
1776 static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1777 {
1778         sblock->header_error = 0;
1779         sblock->checksum_error = 0;
1780         sblock->generation_error = 0;
1781
1782         if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1783                 scrub_checksum_data(sblock);
1784         else
1785                 scrub_checksum_tree_block(sblock);
1786 }
1787
1788 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1789                                              struct scrub_block *sblock_good)
1790 {
1791         int page_num;
1792         int ret = 0;
1793
1794         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1795                 int ret_sub;
1796
1797                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1798                                                            sblock_good,
1799                                                            page_num, 1);
1800                 if (ret_sub)
1801                         ret = ret_sub;
1802         }
1803
1804         return ret;
1805 }
1806
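/*
 * Rewrite a single page of the bad mirror with the corresponding page of
 * the good mirror. The write goes synchronously to the bad page's physical
 * location; with force_write the page is rewritten even if the block showed
 * no error.
 */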
1807 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1808                                             struct scrub_block *sblock_good,
1809                                             int page_num, int force_write)
1810 {
1811         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1812         struct scrub_page *page_good = sblock_good->pagev[page_num];
1813         struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1814
1815         BUG_ON(page_bad->page == NULL);
1816         BUG_ON(page_good->page == NULL);
1817         if (force_write || sblock_bad->header_error ||
1818             sblock_bad->checksum_error || page_bad->io_error) {
1819                 struct bio *bio;
1820                 int ret;
1821
1822                 if (!page_bad->dev->bdev) {
1823                         btrfs_warn_rl(fs_info,
1824                                 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1825                         return -EIO;
1826                 }
1827
1828                 bio = btrfs_io_bio_alloc(1);
1829                 bio_set_dev(bio, page_bad->dev->bdev);
1830                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1831                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1832
1833                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1834                 if (PAGE_SIZE != ret) {
1835                         bio_put(bio);
1836                         return -EIO;
1837                 }
1838
1839                 if (btrfsic_submit_bio_wait(bio)) {
1840                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1841                                 BTRFS_DEV_STAT_WRITE_ERRS);
1842                         btrfs_dev_replace_stats_inc(
1843                                 &fs_info->dev_replace.num_write_errors);
1844                         bio_put(bio);
1845                         return -EIO;
1846                 }
1847                 bio_put(bio);
1848         }
1849
1850         return 0;
1851 }
1852
1853 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1854 {
1855         struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1856         int page_num;
1857
1858         /*
1859          * This block is used for checking the parity on the source device,
1860          * so the data need not be written to the destination device.
1861          */
1862         if (sblock->sparity)
1863                 return;
1864
1865         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1866                 int ret;
1867
1868                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1869                 if (ret)
1870                         btrfs_dev_replace_stats_inc(
1871                                 &fs_info->dev_replace.num_write_errors);
1872         }
1873 }
1874
1875 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1876                                            int page_num)
1877 {
1878         struct scrub_page *spage = sblock->pagev[page_num];
1879
1880         BUG_ON(spage->page == NULL);
1881         if (spage->io_error) {
1882                 void *mapped_buffer = kmap_atomic(spage->page);
1883
1884                 clear_page(mapped_buffer);
1885                 flush_dcache_page(spage->page);
1886                 kunmap_atomic(mapped_buffer);
1887         }
1888         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1889 }
1890
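/*
 * Queue a page for writing to the dev-replace target. Pages are collected
 * into a write bio as long as they are physically and logically contiguous
 * with the previous one; otherwise the current bio is submitted and a new
 * one is started. A full bio is submitted right away.
 */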
1891 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1892                                     struct scrub_page *spage)
1893 {
1894         struct scrub_bio *sbio;
1895         int ret;
1896
1897         mutex_lock(&sctx->wr_lock);
1898 again:
1899         if (!sctx->wr_curr_bio) {
1900                 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1901                                               GFP_KERNEL);
1902                 if (!sctx->wr_curr_bio) {
1903                         mutex_unlock(&sctx->wr_lock);
1904                         return -ENOMEM;
1905                 }
1906                 sctx->wr_curr_bio->sctx = sctx;
1907                 sctx->wr_curr_bio->page_count = 0;
1908         }
1909         sbio = sctx->wr_curr_bio;
1910         if (sbio->page_count == 0) {
1911                 struct bio *bio;
1912
1913                 sbio->physical = spage->physical_for_dev_replace;
1914                 sbio->logical = spage->logical;
1915                 sbio->dev = sctx->wr_tgtdev;
1916                 bio = sbio->bio;
1917                 if (!bio) {
1918                         bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
1919                         sbio->bio = bio;
1920                 }
1921
1922                 bio->bi_private = sbio;
1923                 bio->bi_end_io = scrub_wr_bio_end_io;
1924                 bio_set_dev(bio, sbio->dev->bdev);
1925                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1926                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1927                 sbio->status = 0;
1928         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1929                    spage->physical_for_dev_replace ||
1930                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1931                    spage->logical) {
1932                 scrub_wr_submit(sctx);
1933                 goto again;
1934         }
1935
1936         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1937         if (ret != PAGE_SIZE) {
1938                 if (sbio->page_count < 1) {
1939                         bio_put(sbio->bio);
1940                         sbio->bio = NULL;
1941                         mutex_unlock(&sctx->wr_lock);
1942                         return -EIO;
1943                 }
1944                 scrub_wr_submit(sctx);
1945                 goto again;
1946         }
1947
1948         sbio->pagev[sbio->page_count] = spage;
1949         scrub_page_get(spage);
1950         sbio->page_count++;
1951         if (sbio->page_count == sctx->pages_per_wr_bio)
1952                 scrub_wr_submit(sctx);
1953         mutex_unlock(&sctx->wr_lock);
1954
1955         return 0;
1956 }
1957
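/*
 * Submit the currently accumulated dev-replace write bio, if there is one.
 */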
1958 static void scrub_wr_submit(struct scrub_ctx *sctx)
1959 {
1960         struct scrub_bio *sbio;
1961
1962         if (!sctx->wr_curr_bio)
1963                 return;
1964
1965         sbio = sctx->wr_curr_bio;
1966         sctx->wr_curr_bio = NULL;
1967         WARN_ON(!sbio->bio->bi_disk);
1968         scrub_pending_bio_inc(sctx);
1969         /* Process all writes in a single worker thread. The block layer
1970          * then orders the requests before sending them to the driver,
1971          * which doubled the write performance on spinning disks when
1972          * measured with Linux 3.5 */
1973         btrfsic_submit_bio(sbio->bio);
1974 }
1975
1976 static void scrub_wr_bio_end_io(struct bio *bio)
1977 {
1978         struct scrub_bio *sbio = bio->bi_private;
1979         struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1980
1981         sbio->status = bio->bi_status;
1982         sbio->bio = bio;
1983
1984         btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1985                          scrub_wr_bio_end_io_worker, NULL, NULL);
1986         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1987 }
1988
1989 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1990 {
1991         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1992         struct scrub_ctx *sctx = sbio->sctx;
1993         int i;
1994
1995         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1996         if (sbio->status) {
1997                 struct btrfs_dev_replace *dev_replace =
1998                         &sbio->sctx->fs_info->dev_replace;
1999
2000                 for (i = 0; i < sbio->page_count; i++) {
2001                         struct scrub_page *spage = sbio->pagev[i];
2002
2003                         spage->io_error = 1;
2004                         btrfs_dev_replace_stats_inc(&dev_replace->
2005                                                     num_write_errors);
2006                 }
2007         }
2008
2009         for (i = 0; i < sbio->page_count; i++)
2010                 scrub_page_put(sbio->pagev[i]);
2011
2012         bio_put(sbio->bio);
2013         kfree(sbio);
2014         scrub_pending_bio_dec(sctx);
2015 }
2016
2017 static int scrub_checksum(struct scrub_block *sblock)
2018 {
2019         u64 flags;
2020         int ret;
2021
2022         /*
2023          * No need to initialize these stats currently,
2024          * because this function only uses the return value
2025          * instead of these stat values.
2026          *
2027          * Todo:
2028          * always use the stats
2029          */
2030         sblock->header_error = 0;
2031         sblock->generation_error = 0;
2032         sblock->checksum_error = 0;
2033
2034         WARN_ON(sblock->page_count < 1);
2035         flags = sblock->pagev[0]->flags;
2036         ret = 0;
2037         if (flags & BTRFS_EXTENT_FLAG_DATA)
2038                 ret = scrub_checksum_data(sblock);
2039         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2040                 ret = scrub_checksum_tree_block(sblock);
2041         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
2042                 (void)scrub_checksum_super(sblock);
2043         else
2044                 WARN_ON(1);
2045         if (ret)
2046                 scrub_handle_errored_block(sblock);
2047
2048         return ret;
2049 }
2050
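/*
 * Verify the data checksum of a block: map the block's pages one by one,
 * feed them through btrfs_csum_data() and compare the result against the
 * on-disk checksum stashed in pagev[0]->csum.
 */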
2051 static int scrub_checksum_data(struct scrub_block *sblock)
2052 {
2053         struct scrub_ctx *sctx = sblock->sctx;
2054         u8 csum[BTRFS_CSUM_SIZE];
2055         u8 *on_disk_csum;
2056         struct page *page;
2057         void *buffer;
2058         u32 crc = ~(u32)0;
2059         u64 len;
2060         int index;
2061
2062         BUG_ON(sblock->page_count < 1);
2063         if (!sblock->pagev[0]->have_csum)
2064                 return 0;
2065
2066         on_disk_csum = sblock->pagev[0]->csum;
2067         page = sblock->pagev[0]->page;
2068         buffer = kmap_atomic(page);
2069
2070         len = sctx->fs_info->sectorsize;
2071         index = 0;
2072         for (;;) {
2073                 u64 l = min_t(u64, len, PAGE_SIZE);
2074
2075                 crc = btrfs_csum_data(buffer, crc, l);
2076                 kunmap_atomic(buffer);
2077                 len -= l;
2078                 if (len == 0)
2079                         break;
2080                 index++;
2081                 BUG_ON(index >= sblock->page_count);
2082                 BUG_ON(!sblock->pagev[index]->page);
2083                 page = sblock->pagev[index]->page;
2084                 buffer = kmap_atomic(page);
2085         }
2086
2087         btrfs_csum_final(crc, csum);
2088         if (memcmp(csum, on_disk_csum, sctx->csum_size))
2089                 sblock->checksum_error = 1;
2090
2091         return sblock->checksum_error;
2092 }
2093
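/*
 * Verify a metadata block: check bytenr, generation, fsid and chunk tree
 * uuid in the header against the expected values, then checksum everything
 * behind the csum field and compare it with the checksum stored in the
 * header.
 */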
2094 static int scrub_checksum_tree_block(struct scrub_block *sblock)
2095 {
2096         struct scrub_ctx *sctx = sblock->sctx;
2097         struct btrfs_header *h;
2098         struct btrfs_fs_info *fs_info = sctx->fs_info;
2099         u8 calculated_csum[BTRFS_CSUM_SIZE];
2100         u8 on_disk_csum[BTRFS_CSUM_SIZE];
2101         struct page *page;
2102         void *mapped_buffer;
2103         u64 mapped_size;
2104         void *p;
2105         u32 crc = ~(u32)0;
2106         u64 len;
2107         int index;
2108
2109         BUG_ON(sblock->page_count < 1);
2110         page = sblock->pagev[0]->page;
2111         mapped_buffer = kmap_atomic(page);
2112         h = (struct btrfs_header *)mapped_buffer;
2113         memcpy(on_disk_csum, h->csum, sctx->csum_size);
2114
2115         /*
2116          * we don't use the getter functions here, as we
2117          * a) don't have an extent buffer and
2118          * b) the page is already kmapped
2119          */
2120         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
2121                 sblock->header_error = 1;
2122
2123         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
2124                 sblock->header_error = 1;
2125                 sblock->generation_error = 1;
2126         }
2127
2128         if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
2129                 sblock->header_error = 1;
2130
2131         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
2132                    BTRFS_UUID_SIZE))
2133                 sblock->header_error = 1;
2134
2135         len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
2136         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2137         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2138         index = 0;
2139         for (;;) {
2140                 u64 l = min_t(u64, len, mapped_size);
2141
2142                 crc = btrfs_csum_data(p, crc, l);
2143                 kunmap_atomic(mapped_buffer);
2144                 len -= l;
2145                 if (len == 0)
2146                         break;
2147                 index++;
2148                 BUG_ON(index >= sblock->page_count);
2149                 BUG_ON(!sblock->pagev[index]->page);
2150                 page = sblock->pagev[index]->page;
2151                 mapped_buffer = kmap_atomic(page);
2152                 mapped_size = PAGE_SIZE;
2153                 p = mapped_buffer;
2154         }
2155
2156         btrfs_csum_final(crc, calculated_csum);
2157         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
2158                 sblock->checksum_error = 1;
2159
2160         return sblock->header_error || sblock->checksum_error;
2161 }
2162
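/*
 * Verify a super block copy: check bytenr, generation and fsid, then the
 * checksum over the rest of the super block. Errors are only counted and
 * reported here, they are not repaired.
 */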
2163 static int scrub_checksum_super(struct scrub_block *sblock)
2164 {
2165         struct btrfs_super_block *s;
2166         struct scrub_ctx *sctx = sblock->sctx;
2167         u8 calculated_csum[BTRFS_CSUM_SIZE];
2168         u8 on_disk_csum[BTRFS_CSUM_SIZE];
2169         struct page *page;
2170         void *mapped_buffer;
2171         u64 mapped_size;
2172         void *p;
2173         u32 crc = ~(u32)0;
2174         int fail_gen = 0;
2175         int fail_cor = 0;
2176         u64 len;
2177         int index;
2178
2179         BUG_ON(sblock->page_count < 1);
2180         page = sblock->pagev[0]->page;
2181         mapped_buffer = kmap_atomic(page);
2182         s = (struct btrfs_super_block *)mapped_buffer;
2183         memcpy(on_disk_csum, s->csum, sctx->csum_size);
2184
2185         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
2186                 ++fail_cor;
2187
2188         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
2189                 ++fail_gen;
2190
2191         if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
2192                 ++fail_cor;
2193
2194         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2195         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2196         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2197         index = 0;
2198         for (;;) {
2199                 u64 l = min_t(u64, len, mapped_size);
2200
2201                 crc = btrfs_csum_data(p, crc, l);
2202                 kunmap_atomic(mapped_buffer);
2203                 len -= l;
2204                 if (len == 0)
2205                         break;
2206                 index++;
2207                 BUG_ON(index >= sblock->page_count);
2208                 BUG_ON(!sblock->pagev[index]->page);
2209                 page = sblock->pagev[index]->page;
2210                 mapped_buffer = kmap_atomic(page);
2211                 mapped_size = PAGE_SIZE;
2212                 p = mapped_buffer;
2213         }
2214
2215         btrfs_csum_final(crc, calculated_csum);
2216         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
2217                 ++fail_cor;
2218
2219         if (fail_cor + fail_gen) {
2220                 /*
2221                  * If we find an error in a super block, we just report it.
2222                  * Super blocks get rewritten with the next transaction
2223                  * commit anyway.
2224                  */
2225                 spin_lock(&sctx->stat_lock);
2226                 ++sctx->stat.super_errors;
2227                 spin_unlock(&sctx->stat_lock);
2228                 if (fail_cor)
2229                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2230                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2231                 else
2232                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2233                                 BTRFS_DEV_STAT_GENERATION_ERRS);
2234         }
2235
2236         return fail_cor + fail_gen;
2237 }
2238
2239 static void scrub_block_get(struct scrub_block *sblock)
2240 {
2241         refcount_inc(&sblock->refs);
2242 }
2243
2244 static void scrub_block_put(struct scrub_block *sblock)
2245 {
2246         if (refcount_dec_and_test(&sblock->refs)) {
2247                 int i;
2248
2249                 if (sblock->sparity)
2250                         scrub_parity_put(sblock->sparity);
2251
2252                 for (i = 0; i < sblock->page_count; i++)
2253                         scrub_page_put(sblock->pagev[i]);
2254                 kfree(sblock);
2255         }
2256 }
2257
2258 static void scrub_page_get(struct scrub_page *spage)
2259 {
2260         atomic_inc(&spage->refs);
2261 }
2262
2263 static void scrub_page_put(struct scrub_page *spage)
2264 {
2265         if (atomic_dec_and_test(&spage->refs)) {
2266                 if (spage->page)
2267                         __free_page(spage->page);
2268                 kfree(spage);
2269         }
2270 }
2271
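/*
 * Submit the currently accumulated read bio, if any. The bio slot is put
 * back on the free list by scrub_bio_end_io_worker() once the read
 * completes.
 */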
2272 static void scrub_submit(struct scrub_ctx *sctx)
2273 {
2274         struct scrub_bio *sbio;
2275
2276         if (sctx->curr == -1)
2277                 return;
2278
2279         sbio = sctx->bios[sctx->curr];
2280         sctx->curr = -1;
2281         scrub_pending_bio_inc(sctx);
2282         btrfsic_submit_bio(sbio->bio);
2283 }
2284
2285 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2286                                     struct scrub_page *spage)
2287 {
2288         struct scrub_block *sblock = spage->sblock;
2289         struct scrub_bio *sbio;
2290         int ret;
2291
2292 again:
2293         /*
2294          * grab a fresh bio or wait for one to become available
2295          */
2296         while (sctx->curr == -1) {
2297                 spin_lock(&sctx->list_lock);
2298                 sctx->curr = sctx->first_free;
2299                 if (sctx->curr != -1) {
2300                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
2301                         sctx->bios[sctx->curr]->next_free = -1;
2302                         sctx->bios[sctx->curr]->page_count = 0;
2303                         spin_unlock(&sctx->list_lock);
2304                 } else {
2305                         spin_unlock(&sctx->list_lock);
2306                         wait_event(sctx->list_wait, sctx->first_free != -1);
2307                 }
2308         }
2309         sbio = sctx->bios[sctx->curr];
2310         if (sbio->page_count == 0) {
2311                 struct bio *bio;
2312
2313                 sbio->physical = spage->physical;
2314                 sbio->logical = spage->logical;
2315                 sbio->dev = spage->dev;
2316                 bio = sbio->bio;
2317                 if (!bio) {
2318                         bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
2319                         sbio->bio = bio;
2320                 }
2321
2322                 bio->bi_private = sbio;
2323                 bio->bi_end_io = scrub_bio_end_io;
2324                 bio_set_dev(bio, sbio->dev->bdev);
2325                 bio->bi_iter.bi_sector = sbio->physical >> 9;
2326                 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2327                 sbio->status = 0;
2328         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2329                    spage->physical ||
2330                    sbio->logical + sbio->page_count * PAGE_SIZE !=
2331                    spage->logical ||
2332                    sbio->dev != spage->dev) {
2333                 scrub_submit(sctx);
2334                 goto again;
2335         }
2336
2337         sbio->pagev[sbio->page_count] = spage;
2338         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2339         if (ret != PAGE_SIZE) {
2340                 if (sbio->page_count < 1) {
2341                         bio_put(sbio->bio);
2342                         sbio->bio = NULL;
2343                         return -EIO;
2344                 }
2345                 scrub_submit(sctx);
2346                 goto again;
2347         }
2348
2349         scrub_block_get(sblock); /* one for the page added to the bio */
2350         atomic_inc(&sblock->outstanding_pages);
2351         sbio->page_count++;
2352         if (sbio->page_count == sctx->pages_per_rd_bio)
2353                 scrub_submit(sctx);
2354
2355         return 0;
2356 }
2357
2358 static void scrub_missing_raid56_end_io(struct bio *bio)
2359 {
2360         struct scrub_block *sblock = bio->bi_private;
2361         struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2362
2363         if (bio->bi_status)
2364                 sblock->no_io_error_seen = 0;
2365
2366         bio_put(bio);
2367
2368         btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2369 }
2370
2371 static void scrub_missing_raid56_worker(struct btrfs_work *work)
2372 {
2373         struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2374         struct scrub_ctx *sctx = sblock->sctx;
2375         struct btrfs_fs_info *fs_info = sctx->fs_info;
2376         u64 logical;
2377         struct btrfs_device *dev;
2378
2379         logical = sblock->pagev[0]->logical;
2380         dev = sblock->pagev[0]->dev;
2381
2382         if (sblock->no_io_error_seen)
2383                 scrub_recheck_block_checksum(sblock);
2384
2385         if (!sblock->no_io_error_seen) {
2386                 spin_lock(&sctx->stat_lock);
2387                 sctx->stat.read_errors++;
2388                 spin_unlock(&sctx->stat_lock);
2389                 btrfs_err_rl_in_rcu(fs_info,
2390                         "IO error rebuilding logical %llu for dev %s",
2391                         logical, rcu_str_deref(dev->name));
2392         } else if (sblock->header_error || sblock->checksum_error) {
2393                 spin_lock(&sctx->stat_lock);
2394                 sctx->stat.uncorrectable_errors++;
2395                 spin_unlock(&sctx->stat_lock);
2396                 btrfs_err_rl_in_rcu(fs_info,
2397                         "failed to rebuild valid logical %llu for dev %s",
2398                         logical, rcu_str_deref(dev->name));
2399         } else {
2400                 scrub_write_block_to_dev_replace(sblock);
2401         }
2402
2403         scrub_block_put(sblock);
2404
2405         if (sctx->is_dev_replace && sctx->flush_all_writes) {
2406                 mutex_lock(&sctx->wr_lock);
2407                 scrub_wr_submit(sctx);
2408                 mutex_unlock(&sctx->wr_lock);
2409         }
2410
2411         scrub_pending_bio_dec(sctx);
2412 }
2413
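/*
 * The device holding this block is missing (only expected during RAID5/6
 * dev-replace): instead of reading the block, rebuild it from the remaining
 * stripes through the raid56 code; scrub_missing_raid56_worker() then
 * verifies the result and writes it to the replace target.
 */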
2414 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2415 {
2416         struct scrub_ctx *sctx = sblock->sctx;
2417         struct btrfs_fs_info *fs_info = sctx->fs_info;
2418         u64 length = sblock->page_count * PAGE_SIZE;
2419         u64 logical = sblock->pagev[0]->logical;
2420         struct btrfs_bio *bbio = NULL;
2421         struct bio *bio;
2422         struct btrfs_raid_bio *rbio;
2423         int ret;
2424         int i;
2425
2426         btrfs_bio_counter_inc_blocked(fs_info);
2427         ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2428                         &length, &bbio);
2429         if (ret || !bbio || !bbio->raid_map)
2430                 goto bbio_out;
2431
2432         if (WARN_ON(!sctx->is_dev_replace ||
2433                     !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2434                 /*
2435                  * We shouldn't be scrubbing a missing device. Even for dev
2436                  * replace, we should only get here for RAID 5/6. We either
2437                  * managed to mount something with no mirrors remaining or
2438                  * there's a bug in scrub_remap_extent()/btrfs_map_block().
2439                  */
2440                 goto bbio_out;
2441         }
2442
2443         bio = btrfs_io_bio_alloc(0);
2444         bio->bi_iter.bi_sector = logical >> 9;
2445         bio->bi_private = sblock;
2446         bio->bi_end_io = scrub_missing_raid56_end_io;
2447
2448         rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
2449         if (!rbio)
2450                 goto rbio_out;
2451
2452         for (i = 0; i < sblock->page_count; i++) {
2453                 struct scrub_page *spage = sblock->pagev[i];
2454
2455                 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2456         }
2457
2458         btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2459                         scrub_missing_raid56_worker, NULL, NULL);
2460         scrub_block_get(sblock);
2461         scrub_pending_bio_inc(sctx);
2462         raid56_submit_missing_rbio(rbio);
2463         return;
2464
2465 rbio_out:
2466         bio_put(bio);
2467 bbio_out:
2468         btrfs_bio_counter_dec(fs_info);
2469         btrfs_put_bbio(bbio);
2470         spin_lock(&sctx->stat_lock);
2471         sctx->stat.malloc_errors++;
2472         spin_unlock(&sctx->stat_lock);
2473 }
2474
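/*
 * Split an extent range into page-sized scrub_pages attached to a single
 * scrub_block, attach the checksum (if any) to each page, and queue the
 * pages for reading; blocks on missing devices take the raid56 rebuild
 * path instead. With force set, the read bio is submitted immediately.
 */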
2475 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2476                        u64 physical, struct btrfs_device *dev, u64 flags,
2477                        u64 gen, int mirror_num, u8 *csum, int force,
2478                        u64 physical_for_dev_replace)
2479 {
2480         struct scrub_block *sblock;
2481         int index;
2482
2483         sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2484         if (!sblock) {
2485                 spin_lock(&sctx->stat_lock);
2486                 sctx->stat.malloc_errors++;
2487                 spin_unlock(&sctx->stat_lock);
2488                 return -ENOMEM;
2489         }
2490
2491         /* one ref inside this function, plus one for each page added to
2492          * a bio later on */
2493         refcount_set(&sblock->refs, 1);
2494         sblock->sctx = sctx;
2495         sblock->no_io_error_seen = 1;
2496
2497         for (index = 0; len > 0; index++) {
2498                 struct scrub_page *spage;
2499                 u64 l = min_t(u64, len, PAGE_SIZE);
2500
2501                 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2502                 if (!spage) {
2503 leave_nomem:
2504                         spin_lock(&sctx->stat_lock);
2505                         sctx->stat.malloc_errors++;
2506                         spin_unlock(&sctx->stat_lock);
2507                         scrub_block_put(sblock);
2508                         return -ENOMEM;
2509                 }
2510                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2511                 scrub_page_get(spage);
2512                 sblock->pagev[index] = spage;
2513                 spage->sblock = sblock;
2514                 spage->dev = dev;
2515                 spage->flags = flags;
2516                 spage->generation = gen;
2517                 spage->logical = logical;
2518                 spage->physical = physical;
2519                 spage->physical_for_dev_replace = physical_for_dev_replace;
2520                 spage->mirror_num = mirror_num;
2521                 if (csum) {
2522                         spage->have_csum = 1;
2523                         memcpy(spage->csum, csum, sctx->csum_size);
2524                 } else {
2525                         spage->have_csum = 0;
2526                 }
2527                 sblock->page_count++;
2528                 spage->page = alloc_page(GFP_KERNEL);
2529                 if (!spage->page)
2530                         goto leave_nomem;
2531                 len -= l;
2532                 logical += l;
2533                 physical += l;
2534                 physical_for_dev_replace += l;
2535         }
2536
2537         WARN_ON(sblock->page_count == 0);
2538         if (dev->missing) {
2539                 /*
2540                  * This case should only be hit for RAID 5/6 device replace. See
2541                  * the comment in scrub_missing_raid56_pages() for details.
2542                  */
2543                 scrub_missing_raid56_pages(sblock);
2544         } else {
2545                 for (index = 0; index < sblock->page_count; index++) {
2546                         struct scrub_page *spage = sblock->pagev[index];
2547                         int ret;
2548
2549                         ret = scrub_add_page_to_rd_bio(sctx, spage);
2550                         if (ret) {
2551                                 scrub_block_put(sblock);
2552                                 return ret;
2553                         }
2554                 }
2555
2556                 if (force)
2557                         scrub_submit(sctx);
2558         }
2559
2560         /* last one frees, either here or in bio completion for last page */
2561         scrub_block_put(sblock);
2562         return 0;
2563 }
2564
2565 static void scrub_bio_end_io(struct bio *bio)
2566 {
2567         struct scrub_bio *sbio = bio->bi_private;
2568         struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2569
2570         sbio->status = bio->bi_status;
2571         sbio->bio = bio;
2572
2573         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2574 }
2575
2576 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2577 {
2578         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2579         struct scrub_ctx *sctx = sbio->sctx;
2580         int i;
2581
2582         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2583         if (sbio->status) {
2584                 for (i = 0; i < sbio->page_count; i++) {
2585                         struct scrub_page *spage = sbio->pagev[i];
2586
2587                         spage->io_error = 1;
2588                         spage->sblock->no_io_error_seen = 0;
2589                 }
2590         }
2591
2592         /* now complete the scrub_block items that have all pages completed */
2593         for (i = 0; i < sbio->page_count; i++) {
2594                 struct scrub_page *spage = sbio->pagev[i];
2595                 struct scrub_block *sblock = spage->sblock;
2596
2597                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2598                         scrub_block_complete(sblock);
2599                 scrub_block_put(sblock);
2600         }
2601
2602         bio_put(sbio->bio);
2603         sbio->bio = NULL;
2604         spin_lock(&sctx->list_lock);
2605         sbio->next_free = sctx->first_free;
2606         sctx->first_free = sbio->index;
2607         spin_unlock(&sctx->list_lock);
2608
2609         if (sctx->is_dev_replace && sctx->flush_all_writes) {
2610                 mutex_lock(&sctx->wr_lock);
2611                 scrub_wr_submit(sctx);
2612                 mutex_unlock(&sctx->wr_lock);
2613         }
2614
2615         scrub_pending_bio_dec(sctx);
2616 }
2617
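/*
 * Mark the sectors covered by [start, start + len) in the given per-stripe
 * bitmap. The range is expressed relative to the parity stripe, so it may
 * wrap around the end of the stripe and continue at sector 0.
 */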
2618 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2619                                        unsigned long *bitmap,
2620                                        u64 start, u64 len)
2621 {
2622         u64 offset;
2623         u64 nsectors64;
2624         u32 nsectors;
2625         int sectorsize = sparity->sctx->fs_info->sectorsize;
2626
2627         if (len >= sparity->stripe_len) {
2628                 bitmap_set(bitmap, 0, sparity->nsectors);
2629                 return;
2630         }
2631
2632         start -= sparity->logic_start;
2633         start = div64_u64_rem(start, sparity->stripe_len, &offset);
2634         offset = div_u64(offset, sectorsize);
2635         nsectors64 = div_u64(len, sectorsize);
2636
2637         ASSERT(nsectors64 < UINT_MAX);
2638         nsectors = (u32)nsectors64;
2639
2640         if (offset + nsectors <= sparity->nsectors) {
2641                 bitmap_set(bitmap, offset, nsectors);
2642                 return;
2643         }
2644
2645         bitmap_set(bitmap, offset, sparity->nsectors - offset);
2646         bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2647 }
2648
2649 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2650                                                    u64 start, u64 len)
2651 {
2652         __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2653 }
2654
2655 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2656                                                   u64 start, u64 len)
2657 {
2658         __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2659 }
2660
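/*
 * Called once all pages of a scrub_block have completed their reads: verify
 * the block, hand corrupted blocks to the repair code, write intact blocks
 * to the target device in the dev-replace case and record corrupted sectors
 * in the parity error bitmap when scrubbing a parity stripe.
 */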
2661 static void scrub_block_complete(struct scrub_block *sblock)
2662 {
2663         int corrupted = 0;
2664
2665         if (!sblock->no_io_error_seen) {
2666                 corrupted = 1;
2667                 scrub_handle_errored_block(sblock);
2668         } else {
2669                 /*
2670                  * In the dev-replace case, a block with a checksum error
2671                  * is written via the repair mechanism, otherwise it is
2672                  * written out here.
2673                  */
2674                 corrupted = scrub_checksum(sblock);
2675                 if (!corrupted && sblock->sctx->is_dev_replace)
2676                         scrub_write_block_to_dev_replace(sblock);
2677         }
2678
2679         if (sblock->sparity && corrupted && !sblock->data_corrected) {
2680                 u64 start = sblock->pagev[0]->logical;
2681                 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2682                           PAGE_SIZE;
2683
2684                 scrub_parity_mark_sectors_error(sblock->sparity,
2685                                                 start, end - start);
2686         }
2687 }
2688
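/*
 * Look up the checksum for @logical in sctx->csum_list. Sums that end at or
 * before @logical are dropped (and counted as csum_discards) on the way; on
 * a hit the checksum is copied to @csum and 1 is returned, otherwise 0.
 */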
2689 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2690 {
2691         struct btrfs_ordered_sum *sum = NULL;
2692         unsigned long index;
2693         unsigned long num_sectors;
2694
2695         while (!list_empty(&sctx->csum_list)) {
2696                 sum = list_first_entry(&sctx->csum_list,
2697                                        struct btrfs_ordered_sum, list);
2698                 if (sum->bytenr > logical)
2699                         return 0;
2700                 if (sum->bytenr + sum->len > logical)
2701                         break;
2702
2703                 ++sctx->stat.csum_discards;
2704                 list_del(&sum->list);
2705                 kfree(sum);
2706                 sum = NULL;
2707         }
2708         if (!sum)
2709                 return 0;
2710
2711         index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
2712         ASSERT(index < UINT_MAX);
2713
2714         num_sectors = sum->len / sctx->fs_info->sectorsize;
2715         memcpy(csum, sum->sums + index, sctx->csum_size);
2716         if (index == num_sectors - 1) {
2717                 list_del(&sum->list);
2718                 kfree(sum);
2719         }
2720         return 1;
2721 }
2722
2723 /* scrub_extent() tries to collect up to 64 kB for each bio */
2724 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2725                         u64 physical, struct btrfs_device *dev, u64 flags,
2726                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2727 {
2728         int ret;
2729         u8 csum[BTRFS_CSUM_SIZE];
2730         u32 blocksize;
2731
2732         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2733                 blocksize = sctx->fs_info->sectorsize;
2734                 spin_lock(&sctx->stat_lock);
2735                 sctx->stat.data_extents_scrubbed++;
2736                 sctx->stat.data_bytes_scrubbed += len;
2737                 spin_unlock(&sctx->stat_lock);
2738         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2739                 blocksize = sctx->fs_info->nodesize;
2740                 spin_lock(&sctx->stat_lock);
2741                 sctx->stat.tree_extents_scrubbed++;
2742                 sctx->stat.tree_bytes_scrubbed += len;
2743                 spin_unlock(&sctx->stat_lock);
2744         } else {
2745                 blocksize = sctx->fs_info->sectorsize;
2746                 WARN_ON(1);
2747         }
2748
2749         while (len) {
2750                 u64 l = min_t(u64, len, blocksize);
2751                 int have_csum = 0;
2752
2753                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2754                         /* push csums to sbio */
2755                         have_csum = scrub_find_csum(sctx, logical, csum);
2756                         if (have_csum == 0)
2757                                 ++sctx->stat.no_csum;
2758                         if (sctx->is_dev_replace && !have_csum) {
2759                                 ret = copy_nocow_pages(sctx, logical, l,
2760                                                        mirror_num,
2761                                                       physical_for_dev_replace);
2762                                 goto behind_scrub_pages;
2763                         }
2764                 }
2765                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2766                                   mirror_num, have_csum ? csum : NULL, 0,
2767                                   physical_for_dev_replace);
2768 behind_scrub_pages:
2769                 if (ret)
2770                         return ret;
2771                 len -= l;
2772                 logical += l;
2773                 physical += l;
2774                 physical_for_dev_replace += l;
2775         }
2776         return 0;
2777 }
2778
2779 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2780                                   u64 logical, u64 len,
2781                                   u64 physical, struct btrfs_device *dev,
2782                                   u64 flags, u64 gen, int mirror_num, u8 *csum)
2783 {
2784         struct scrub_ctx *sctx = sparity->sctx;
2785         struct scrub_block *sblock;
2786         int index;
2787
2788         sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2789         if (!sblock) {
2790                 spin_lock(&sctx->stat_lock);
2791                 sctx->stat.malloc_errors++;
2792                 spin_unlock(&sctx->stat_lock);
2793                 return -ENOMEM;
2794         }
2795
2796         /* one ref inside this function, plus one for each page added to
2797          * a bio later on */
2798         refcount_set(&sblock->refs, 1);
2799         sblock->sctx = sctx;
2800         sblock->no_io_error_seen = 1;
2801         sblock->sparity = sparity;
2802         scrub_parity_get(sparity);
2803
2804         for (index = 0; len > 0; index++) {
2805                 struct scrub_page *spage;
2806                 u64 l = min_t(u64, len, PAGE_SIZE);
2807
2808                 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2809                 if (!spage) {
2810 leave_nomem:
2811                         spin_lock(&sctx->stat_lock);
2812                         sctx->stat.malloc_errors++;
2813                         spin_unlock(&sctx->stat_lock);
2814                         scrub_block_put(sblock);
2815                         return -ENOMEM;
2816                 }
2817                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2818                 /* For scrub block */
2819                 scrub_page_get(spage);
2820                 sblock->pagev[index] = spage;
2821                 /* For scrub parity */
2822                 scrub_page_get(spage);
2823                 list_add_tail(&spage->list, &sparity->spages);
2824                 spage->sblock = sblock;
2825                 spage->dev = dev;
2826                 spage->flags = flags;
2827                 spage->generation = gen;
2828                 spage->logical = logical;
2829                 spage->physical = physical;
2830                 spage->mirror_num = mirror_num;
2831                 if (csum) {
2832                         spage->have_csum = 1;
2833                         memcpy(spage->csum, csum, sctx->csum_size);
2834                 } else {
2835                         spage->have_csum = 0;
2836                 }
2837                 sblock->page_count++;
2838                 spage->page = alloc_page(GFP_KERNEL);
2839                 if (!spage->page)
2840                         goto leave_nomem;
2841                 len -= l;
2842                 logical += l;
2843                 physical += l;
2844         }
2845
2846         WARN_ON(sblock->page_count == 0);
2847         for (index = 0; index < sblock->page_count; index++) {
2848                 struct scrub_page *spage = sblock->pagev[index];
2849                 int ret;
2850
2851                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2852                 if (ret) {
2853                         scrub_block_put(sblock);
2854                         return ret;
2855                 }
2856         }
2857
2858         /* last one frees, either here or in bio completion for last page */
2859         scrub_block_put(sblock);
2860         return 0;
2861 }
2862
2863 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2864                                    u64 logical, u64 len,
2865                                    u64 physical, struct btrfs_device *dev,
2866                                    u64 flags, u64 gen, int mirror_num)
2867 {
2868         struct scrub_ctx *sctx = sparity->sctx;
2869         int ret;
2870         u8 csum[BTRFS_CSUM_SIZE];
2871         u32 blocksize;
2872
2873         if (dev->missing) {
2874                 scrub_parity_mark_sectors_error(sparity, logical, len);
2875                 return 0;
2876         }
2877
2878         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2879                 blocksize = sctx->fs_info->sectorsize;
2880         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2881                 blocksize = sctx->fs_info->nodesize;
2882         } else {
2883                 blocksize = sctx->fs_info->sectorsize;
2884                 WARN_ON(1);
2885         }
2886
2887         while (len) {
2888                 u64 l = min_t(u64, len, blocksize);
2889                 int have_csum = 0;
2890
2891                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2892                         /* push csums to sbio */
2893                         have_csum = scrub_find_csum(sctx, logical, csum);
2894                         if (have_csum == 0)
2895                                 goto skip;
2896                 }
2897                 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2898                                              flags, gen, mirror_num,
2899                                              have_csum ? csum : NULL);
2900                 if (ret)
2901                         return ret;
2902 skip:
2903                 len -= l;
2904                 logical += l;
2905                 physical += l;
2906         }
2907         return 0;
2908 }
2909
2910 /*
2911  * Given a physical address, this will calculate its
2912  * logical offset. If this is a parity stripe, it will return
2913  * the leftmost data stripe's logical offset.
2914  *
2915  * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2916  */
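/*
 * A hypothetical trace of the calculation below: RAID5 over three
 * devices (nr_data_stripes == 2), stripe_len == 64K, num == 2 and a
 * physical offset of 64K into the device extent.  Then
 * last_offset = 64K * 2 = 128K, the start of full stripe row 1.
 * For i == 1: stripe_nr == 1, rot == 1 % 3 == 1, and rot + i == 2 == num,
 * so this device holds data stripe 1 and *offset = 128K + 64K = 192K
 * (return 0).  At physical offset 0 (row 0) no i matches, so the device
 * holds that row's parity and the function returns 1.
 */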
2917 static int get_raid56_logic_offset(u64 physical, int num,
2918                                    struct map_lookup *map, u64 *offset,
2919                                    u64 *stripe_start)
2920 {
2921         int i;
2922         int j = 0;
2923         u64 stripe_nr;
2924         u64 last_offset;
2925         u32 stripe_index;
2926         u32 rot;
2927
2928         last_offset = (physical - map->stripes[num].physical) *
2929                       nr_data_stripes(map);
2930         if (stripe_start)
2931                 *stripe_start = last_offset;
2932
2933         *offset = last_offset;
2934         for (i = 0; i < nr_data_stripes(map); i++) {
2935                 *offset = last_offset + i * map->stripe_len;
2936
2937                 stripe_nr = div64_u64(*offset, map->stripe_len);
2938                 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
2939
2940                 /* Work out the disk rotation on this stripe-set */
2941                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2942                 /* calculate which stripe this data is located on */
2943                 rot += i;
2944                 stripe_index = rot % map->num_stripes;
2945                 if (stripe_index == num)
2946                         return 0;
2947                 if (stripe_index < num)
2948                         j++;
2949         }
2950         *offset = last_offset + j * map->stripe_len;
2951         return 1;
2952 }
2953
2954 static void scrub_free_parity(struct scrub_parity *sparity)
2955 {
2956         struct scrub_ctx *sctx = sparity->sctx;
2957         struct scrub_page *curr, *next;
2958         int nbits;
2959
2960         nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2961         if (nbits) {
2962                 spin_lock(&sctx->stat_lock);
2963                 sctx->stat.read_errors += nbits;
2964                 sctx->stat.uncorrectable_errors += nbits;
2965                 spin_unlock(&sctx->stat_lock);
2966         }
2967
2968         list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2969                 list_del_init(&curr->list);
2970                 scrub_page_put(curr);
2971         }
2972
2973         kfree(sparity);
2974 }
2975
2976 static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2977 {
2978         struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2979                                                     work);
2980         struct scrub_ctx *sctx = sparity->sctx;
2981
2982         scrub_free_parity(sparity);
2983         scrub_pending_bio_dec(sctx);
2984 }
2985
2986 static void scrub_parity_bio_endio(struct bio *bio)
2987 {
2988         struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2989         struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2990
2991         if (bio->bi_status)
2992                 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2993                           sparity->nsectors);
2994
2995         bio_put(bio);
2996
2997         btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2998                         scrub_parity_bio_endio_worker, NULL, NULL);
2999         btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
3000 }
3001
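/*
 * Check and repair the parity of a full stripe: the sectors that are
 * data (dbitmap) and not already known bad (ebitmap) are handed to a
 * scrub rbio, which verifies and, if necessary, rewrites the parity.
 * On any setup failure all of those sectors are marked as errors and
 * the parity context is freed.
 */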
3002 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
3003 {
3004         struct scrub_ctx *sctx = sparity->sctx;
3005         struct btrfs_fs_info *fs_info = sctx->fs_info;
3006         struct bio *bio;
3007         struct btrfs_raid_bio *rbio;
3008         struct btrfs_bio *bbio = NULL;
3009         u64 length;
3010         int ret;
3011
3012         if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
3013                            sparity->nsectors))
3014                 goto out;
3015
3016         length = sparity->logic_end - sparity->logic_start;
3017
3018         btrfs_bio_counter_inc_blocked(fs_info);
3019         ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
3020                                &length, &bbio);
3021         if (ret || !bbio || !bbio->raid_map)
3022                 goto bbio_out;
3023
3024         bio = btrfs_io_bio_alloc(0);
3025         bio->bi_iter.bi_sector = sparity->logic_start >> 9;
3026         bio->bi_private = sparity;
3027         bio->bi_end_io = scrub_parity_bio_endio;
3028
3029         rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
3030                                               length, sparity->scrub_dev,
3031                                               sparity->dbitmap,
3032                                               sparity->nsectors);
3033         if (!rbio)
3034                 goto rbio_out;
3035
3036         scrub_pending_bio_inc(sctx);
3037         raid56_parity_submit_scrub_rbio(rbio);
3038         return;
3039
3040 rbio_out:
3041         bio_put(bio);
3042 bbio_out:
3043         btrfs_bio_counter_dec(fs_info);
3044         btrfs_put_bbio(bbio);
3045         bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
3046                   sparity->nsectors);
3047         spin_lock(&sctx->stat_lock);
3048         sctx->stat.malloc_errors++;
3049         spin_unlock(&sctx->stat_lock);
3050 out:
3051         scrub_free_parity(sparity);
3052 }
3053
3054 static inline int scrub_calc_parity_bitmap_len(int nsectors)
3055 {
3056         return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
3057 }
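/*
 * Example with hypothetical values: a 64K stripe_len and 4K sectors give
 * nsectors == 16, so on a 64-bit machine the result is
 * DIV_ROUND_UP(16, 64) * sizeof(long) == 1 * 8 == 8 bytes per bitmap.
 */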
3058
3059 static void scrub_parity_get(struct scrub_parity *sparity)
3060 {
3061         refcount_inc(&sparity->refs);
3062 }
3063
3064 static void scrub_parity_put(struct scrub_parity *sparity)
3065 {
3066         if (!refcount_dec_and_test(&sparity->refs))
3067                 return;
3068
3069         scrub_parity_check_and_repair(sparity);
3070 }
3071
3072 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
3073                                                   struct map_lookup *map,
3074                                                   struct btrfs_device *sdev,
3075                                                   struct btrfs_path *path,
3076                                                   u64 logic_start,
3077                                                   u64 logic_end)
3078 {
3079         struct btrfs_fs_info *fs_info = sctx->fs_info;
3080         struct btrfs_root *root = fs_info->extent_root;
3081         struct btrfs_root *csum_root = fs_info->csum_root;
3082         struct btrfs_extent_item *extent;
3083         struct btrfs_bio *bbio = NULL;
3084         u64 flags;
3085         int ret;
3086         int slot;
3087         struct extent_buffer *l;
3088         struct btrfs_key key;
3089         u64 generation;
3090         u64 extent_logical;
3091         u64 extent_physical;
3092         u64 extent_len;
3093         u64 mapped_length;
3094         struct btrfs_device *extent_dev;
3095         struct scrub_parity *sparity;
3096         int nsectors;
3097         int bitmap_len;
3098         int extent_mirror_num;
3099         int stop_loop = 0;
3100
3101         nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
3102         bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
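        /*
         * dbitmap and ebitmap live in a single allocation appended to the
         * struct: [struct scrub_parity][dbitmap][ebitmap], each bitmap
         * taking bitmap_len bytes (see the assignments below).
         */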
3103         sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
3104                           GFP_NOFS);
3105         if (!sparity) {
3106                 spin_lock(&sctx->stat_lock);
3107                 sctx->stat.malloc_errors++;
3108                 spin_unlock(&sctx->stat_lock);
3109                 return -ENOMEM;
3110         }
3111
3112         sparity->stripe_len = map->stripe_len;
3113         sparity->nsectors = nsectors;
3114         sparity->sctx = sctx;
3115         sparity->scrub_dev = sdev;
3116         sparity->logic_start = logic_start;
3117         sparity->logic_end = logic_end;
3118         refcount_set(&sparity->refs, 1);
3119         INIT_LIST_HEAD(&sparity->spages);
3120         sparity->dbitmap = sparity->bitmap;
3121         sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
3122
3123         ret = 0;
3124         while (logic_start < logic_end) {
3125                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3126                         key.type = BTRFS_METADATA_ITEM_KEY;
3127                 else
3128                         key.type = BTRFS_EXTENT_ITEM_KEY;
3129                 key.objectid = logic_start;
3130                 key.offset = (u64)-1;
3131
3132                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3133                 if (ret < 0)
3134                         goto out;
3135
3136                 if (ret > 0) {
3137                         ret = btrfs_previous_extent_item(root, path, 0);
3138                         if (ret < 0)
3139                                 goto out;
3140                         if (ret > 0) {
3141                                 btrfs_release_path(path);
3142                                 ret = btrfs_search_slot(NULL, root, &key,
3143                                                         path, 0, 0);
3144                                 if (ret < 0)
3145                                         goto out;
3146                         }
3147                 }
3148
3149                 stop_loop = 0;
3150                 while (1) {
3151                         u64 bytes;
3152
3153                         l = path->nodes[0];
3154                         slot = path->slots[0];
3155                         if (slot >= btrfs_header_nritems(l)) {
3156                                 ret = btrfs_next_leaf(root, path);
3157                                 if (ret == 0)
3158                                         continue;
3159                                 if (ret < 0)
3160                                         goto out;
3161
3162                                 stop_loop = 1;
3163                                 break;
3164                         }
3165                         btrfs_item_key_to_cpu(l, &key, slot);
3166
3167                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3168                             key.type != BTRFS_METADATA_ITEM_KEY)
3169                                 goto next;
3170
3171                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3172                                 bytes = fs_info->nodesize;
3173                         else
3174                                 bytes = key.offset;
3175
3176                         if (key.objectid + bytes <= logic_start)
3177                                 goto next;
3178
3179                         if (key.objectid >= logic_end) {
3180                                 stop_loop = 1;
3181                                 break;
3182                         }
3183
3184                         while (key.objectid >= logic_start + map->stripe_len)
3185                                 logic_start += map->stripe_len;
3186
3187                         extent = btrfs_item_ptr(l, slot,
3188                                                 struct btrfs_extent_item);
3189                         flags = btrfs_extent_flags(l, extent);
3190                         generation = btrfs_extent_generation(l, extent);
3191
3192                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3193                             (key.objectid < logic_start ||
3194                              key.objectid + bytes >
3195                              logic_start + map->stripe_len)) {
3196                                 btrfs_err(fs_info,
3197                                           "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3198                                           key.objectid, logic_start);
3199                                 spin_lock(&sctx->stat_lock);
3200                                 sctx->stat.uncorrectable_errors++;
3201                                 spin_unlock(&sctx->stat_lock);
3202                                 goto next;
3203                         }
3204 again:
3205                         extent_logical = key.objectid;
3206                         extent_len = bytes;
3207
3208                         if (extent_logical < logic_start) {
3209                                 extent_len -= logic_start - extent_logical;
3210                                 extent_logical = logic_start;
3211                         }
3212
3213                         if (extent_logical + extent_len >
3214                             logic_start + map->stripe_len)
3215                                 extent_len = logic_start + map->stripe_len -
3216                                              extent_logical;
3217
3218                         scrub_parity_mark_sectors_data(sparity, extent_logical,
3219                                                        extent_len);
3220
3221                         mapped_length = extent_len;
3222                         bbio = NULL;
3223                         ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
3224                                         extent_logical, &mapped_length, &bbio,
3225                                         0);
3226                         if (!ret) {
3227                                 if (!bbio || mapped_length < extent_len)
3228                                         ret = -EIO;
3229                         }
3230                         if (ret) {
3231                                 btrfs_put_bbio(bbio);
3232                                 goto out;
3233                         }
3234                         extent_physical = bbio->stripes[0].physical;
3235                         extent_mirror_num = bbio->mirror_num;
3236                         extent_dev = bbio->stripes[0].dev;
3237                         btrfs_put_bbio(bbio);
3238
3239                         ret = btrfs_lookup_csums_range(csum_root,
3240                                                 extent_logical,
3241                                                 extent_logical + extent_len - 1,
3242                                                 &sctx->csum_list, 1);
3243                         if (ret)
3244                                 goto out;
3245
3246                         ret = scrub_extent_for_parity(sparity, extent_logical,
3247                                                       extent_len,
3248                                                       extent_physical,
3249                                                       extent_dev, flags,
3250                                                       generation,
3251                                                       extent_mirror_num);
3252
3253                         scrub_free_csums(sctx);
3254
3255                         if (ret)
3256                                 goto out;
3257
3258                         if (extent_logical + extent_len <
3259                             key.objectid + bytes) {
3260                                 logic_start += map->stripe_len;
3261
3262                                 if (logic_start >= logic_end) {
3263                                         stop_loop = 1;
3264                                         break;
3265                                 }
3266
3267                                 if (logic_start < key.objectid + bytes) {
3268                                         cond_resched();
3269                                         goto again;
3270                                 }
3271                         }
3272 next:
3273                         path->slots[0]++;
3274                 }
3275
3276                 btrfs_release_path(path);
3277
3278                 if (stop_loop)
3279                         break;
3280
3281                 logic_start += map->stripe_len;
3282         }
3283 out:
3284         if (ret < 0)
3285                 scrub_parity_mark_sectors_error(sparity, logic_start,
3286                                                 logic_end - logic_start);
3287         scrub_parity_put(sparity);
3288         scrub_submit(sctx);
3289         mutex_lock(&sctx->wr_lock);
3290         scrub_wr_submit(sctx);
3291         mutex_unlock(&sctx->wr_lock);
3292
3293         btrfs_release_path(path);
3294         return ret < 0 ? ret : 0;
3295 }
3296
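/*
 * Scrub one stripe of a chunk: the part of the block group that is
 * stored on @scrub_dev as stripe number @num.  Extents are looked up in
 * the commit root of the extent tree, their csums are collected and each
 * extent is fed to scrub_extent().  On RAID5/6, parity stripes that are
 * encountered are handed over to scrub_raid56_parity().
 */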
3297 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3298                                            struct map_lookup *map,
3299                                            struct btrfs_device *scrub_dev,
3300                                            int num, u64 base, u64 length,
3301                                            int is_dev_replace)
3302 {
3303         struct btrfs_path *path, *ppath;
3304         struct btrfs_fs_info *fs_info = sctx->fs_info;
3305         struct btrfs_root *root = fs_info->extent_root;
3306         struct btrfs_root *csum_root = fs_info->csum_root;
3307         struct btrfs_extent_item *extent;
3308         struct blk_plug plug;
3309         u64 flags;
3310         int ret;
3311         int slot;
3312         u64 nstripes;
3313         struct extent_buffer *l;
3314         u64 physical;
3315         u64 logical;
3316         u64 logic_end;
3317         u64 physical_end;
3318         u64 generation;
3319         int mirror_num;
3320         struct reada_control *reada1;
3321         struct reada_control *reada2;
3322         struct btrfs_key key;
3323         struct btrfs_key key_end;
3324         u64 increment = map->stripe_len;
3325         u64 offset;
3326         u64 extent_logical;
3327         u64 extent_physical;
3328         u64 extent_len;
3329         u64 stripe_logical;
3330         u64 stripe_end;
3331         struct btrfs_device *extent_dev;
3332         int extent_mirror_num;
3333         int stop_loop = 0;
3334
3335         physical = map->stripes[num].physical;
3336         offset = 0;
3337         nstripes = div64_u64(length, map->stripe_len);
3338         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3339                 offset = map->stripe_len * num;
3340                 increment = map->stripe_len * map->num_stripes;
3341                 mirror_num = 1;
3342         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3343                 int factor = map->num_stripes / map->sub_stripes;
3344                 offset = map->stripe_len * (num / map->sub_stripes);
3345                 increment = map->stripe_len * factor;
3346                 mirror_num = num % map->sub_stripes + 1;
3347         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3348                 increment = map->stripe_len;
3349                 mirror_num = num % map->num_stripes + 1;
3350         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3351                 increment = map->stripe_len;
3352                 mirror_num = num % map->num_stripes + 1;
3353         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3354                 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3355                 increment = map->stripe_len * nr_data_stripes(map);
3356                 mirror_num = 1;
3357         } else {
3358                 increment = map->stripe_len;
3359                 mirror_num = 1;
3360         }
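        /*
         * Hypothetical example for the RAID10 branch above: with
         * num_stripes == 4, sub_stripes == 2, stripe_len == 64K and
         * num == 3, factor == 2, offset == 64K * (3 / 2) == 64K,
         * increment == 128K and mirror_num == 3 % 2 + 1 == 2.
         */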
3361
3362         path = btrfs_alloc_path();
3363         if (!path)
3364                 return -ENOMEM;
3365
3366         ppath = btrfs_alloc_path();
3367         if (!ppath) {
3368                 btrfs_free_path(path);
3369                 return -ENOMEM;
3370         }
3371
3372         /*
3373          * work on commit root. The related disk blocks are static as
3374          * long as COW is applied. This means it is safe to rewrite
3375          * them to repair disk errors without any race conditions
3376          */
3377         path->search_commit_root = 1;
3378         path->skip_locking = 1;
3379
3380         ppath->search_commit_root = 1;
3381         ppath->skip_locking = 1;
3382         /*
3383          * trigger the readahead for extent tree and csum tree and wait for
3384          * completion. During readahead, the scrub is officially paused
3385          * to not hold off transaction commits
3386          */
3387         logical = base + offset;
3388         physical_end = physical + nstripes * map->stripe_len;
3389         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3390                 get_raid56_logic_offset(physical_end, num,
3391                                         map, &logic_end, NULL);
3392                 logic_end += base;
3393         } else {
3394                 logic_end = logical + increment * nstripes;
3395         }
3396         wait_event(sctx->list_wait,
3397                    atomic_read(&sctx->bios_in_flight) == 0);
3398         scrub_blocked_if_needed(fs_info);
3399
3400         /* FIXME it might be better to start readahead at commit root */
3401         key.objectid = logical;
3402         key.type = BTRFS_EXTENT_ITEM_KEY;
3403         key.offset = (u64)0;
3404         key_end.objectid = logic_end;
3405         key_end.type = BTRFS_METADATA_ITEM_KEY;
3406         key_end.offset = (u64)-1;
3407         reada1 = btrfs_reada_add(root, &key, &key_end);
3408
3409         key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3410         key.type = BTRFS_EXTENT_CSUM_KEY;
3411         key.offset = logical;
3412         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3413         key_end.type = BTRFS_EXTENT_CSUM_KEY;
3414         key_end.offset = logic_end;
3415         reada2 = btrfs_reada_add(csum_root, &key, &key_end);
3416
3417         if (!IS_ERR(reada1))
3418                 btrfs_reada_wait(reada1);
3419         if (!IS_ERR(reada2))
3420                 btrfs_reada_wait(reada2);
3421
3422
3423         /*
3424          * collect all data csums for the stripe to avoid seeking during
3425          * the scrub. This might currently (crc32) end up being about 1MB
3426          */
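        /*
         * For scale (hypothetical numbers): crc32 takes 4 bytes per 4K
         * sector, so 1GiB of data carries 4 * 262144 = 1MiB of csums.
         */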
3427         blk_start_plug(&plug);
3428
3429         /*
3430          * now find all extents for each stripe and scrub them
3431          */
3432         ret = 0;
3433         while (physical < physical_end) {
3434                 /*
3435                  * canceled?
3436                  */
3437                 if (atomic_read(&fs_info->scrub_cancel_req) ||
3438                     atomic_read(&sctx->cancel_req)) {
3439                         ret = -ECANCELED;
3440                         goto out;
3441                 }
3442                 /*
3443                  * check to see if we have to pause
3444                  */
3445                 if (atomic_read(&fs_info->scrub_pause_req)) {
3446                         /* push queued extents */
3447                         sctx->flush_all_writes = true;
3448                         scrub_submit(sctx);
3449                         mutex_lock(&sctx->wr_lock);
3450                         scrub_wr_submit(sctx);
3451                         mutex_unlock(&sctx->wr_lock);
3452                         wait_event(sctx->list_wait,
3453                                    atomic_read(&sctx->bios_in_flight) == 0);
3454                         sctx->flush_all_writes = false;
3455                         scrub_blocked_if_needed(fs_info);
3456                 }
3457
3458                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3459                         ret = get_raid56_logic_offset(physical, num, map,
3460                                                       &logical,
3461                                                       &stripe_logical);
3462                         logical += base;
3463                         if (ret) {
3464                                 /* it is a parity stripe */
3465                                 stripe_logical += base;
3466                                 stripe_end = stripe_logical + increment;
3467                                 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3468                                                           ppath, stripe_logical,
3469                                                           stripe_end);
3470                                 if (ret)
3471                                         goto out;
3472                                 goto skip;
3473                         }
3474                 }
3475
3476                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3477                         key.type = BTRFS_METADATA_ITEM_KEY;
3478                 else
3479                         key.type = BTRFS_EXTENT_ITEM_KEY;
3480                 key.objectid = logical;
3481                 key.offset = (u64)-1;
3482
3483                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3484                 if (ret < 0)
3485                         goto out;
3486
3487                 if (ret > 0) {
3488                         ret = btrfs_previous_extent_item(root, path, 0);
3489                         if (ret < 0)
3490                                 goto out;
3491                         if (ret > 0) {
3492                                 /* there's no smaller item, so stick with the
3493                                  * larger one */
3494                                 btrfs_release_path(path);
3495                                 ret = btrfs_search_slot(NULL, root, &key,
3496                                                         path, 0, 0);
3497                                 if (ret < 0)
3498                                         goto out;
3499                         }
3500                 }
3501
3502                 stop_loop = 0;
3503                 while (1) {
3504                         u64 bytes;
3505
3506                         l = path->nodes[0];
3507                         slot = path->slots[0];
3508                         if (slot >= btrfs_header_nritems(l)) {
3509                                 ret = btrfs_next_leaf(root, path);
3510                                 if (ret == 0)
3511                                         continue;
3512                                 if (ret < 0)
3513                                         goto out;
3514
3515                                 stop_loop = 1;
3516                                 break;
3517                         }
3518                         btrfs_item_key_to_cpu(l, &key, slot);
3519
3520                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3521                             key.type != BTRFS_METADATA_ITEM_KEY)
3522                                 goto next;
3523
3524                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3525                                 bytes = fs_info->nodesize;
3526                         else
3527                                 bytes = key.offset;
3528
3529                         if (key.objectid + bytes <= logical)
3530                                 goto next;
3531
3532                         if (key.objectid >= logical + map->stripe_len) {
3533                                 /* out of this device extent */
3534                                 if (key.objectid >= logic_end)
3535                                         stop_loop = 1;
3536                                 break;
3537                         }
3538
3539                         extent = btrfs_item_ptr(l, slot,
3540                                                 struct btrfs_extent_item);
3541                         flags = btrfs_extent_flags(l, extent);
3542                         generation = btrfs_extent_generation(l, extent);
3543
3544                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3545                             (key.objectid < logical ||
3546                              key.objectid + bytes >
3547                              logical + map->stripe_len)) {
3548                                 btrfs_err(fs_info,
3549                                            "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3550                                        key.objectid, logical);
3551                                 spin_lock(&sctx->stat_lock);
3552                                 sctx->stat.uncorrectable_errors++;
3553                                 spin_unlock(&sctx->stat_lock);
3554                                 goto next;
3555                         }
3556
3557 again:
3558                         extent_logical = key.objectid;
3559                         extent_len = bytes;
3560
3561                         /*
3562                          * trim extent to this stripe
3563                          */
3564                         if (extent_logical < logical) {
3565                                 extent_len -= logical - extent_logical;
3566                                 extent_logical = logical;
3567                         }
3568                         if (extent_logical + extent_len >
3569                             logical + map->stripe_len) {
3570                                 extent_len = logical + map->stripe_len -
3571                                              extent_logical;
3572                         }
3573
3574                         extent_physical = extent_logical - logical + physical;
3575                         extent_dev = scrub_dev;
3576                         extent_mirror_num = mirror_num;
3577                         if (is_dev_replace)
3578                                 scrub_remap_extent(fs_info, extent_logical,
3579                                                    extent_len, &extent_physical,
3580                                                    &extent_dev,
3581                                                    &extent_mirror_num);
3582
3583                         ret = btrfs_lookup_csums_range(csum_root,
3584                                                        extent_logical,
3585                                                        extent_logical +
3586                                                        extent_len - 1,
3587                                                        &sctx->csum_list, 1);
3588                         if (ret)
3589                                 goto out;
3590
3591                         ret = scrub_extent(sctx, extent_logical, extent_len,
3592                                            extent_physical, extent_dev, flags,
3593                                            generation, extent_mirror_num,
3594                                            extent_logical - logical + physical);
3595
3596                         scrub_free_csums(sctx);
3597
3598                         if (ret)
3599                                 goto out;
3600
3601                         if (extent_logical + extent_len <
3602                             key.objectid + bytes) {
3603                                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3604                                         /*
3605                                          * loop until we find next data stripe
3606                                          * or we have finished all stripes.
3607                                          */
3608 loop:
3609                                         physical += map->stripe_len;
3610                                         ret = get_raid56_logic_offset(physical,
3611                                                         num, map, &logical,
3612                                                         &stripe_logical);
3613                                         logical += base;
3614
3615                                         if (ret && physical < physical_end) {
3616                                                 stripe_logical += base;
3617                                                 stripe_end = stripe_logical +
3618                                                                 increment;
3619                                                 ret = scrub_raid56_parity(sctx,
3620                                                         map, scrub_dev, ppath,
3621                                                         stripe_logical,
3622                                                         stripe_end);
3623                                                 if (ret)
3624                                                         goto out;
3625                                                 goto loop;
3626                                         }
3627                                 } else {
3628                                         physical += map->stripe_len;
3629                                         logical += increment;
3630                                 }
3631                                 if (logical < key.objectid + bytes) {
3632                                         cond_resched();
3633                                         goto again;
3634                                 }
3635
3636                                 if (physical >= physical_end) {
3637                                         stop_loop = 1;
3638                                         break;
3639                                 }
3640                         }
3641 next:
3642                         path->slots[0]++;
3643                 }
3644                 btrfs_release_path(path);
3645 skip:
3646                 logical += increment;
3647                 physical += map->stripe_len;
3648                 spin_lock(&sctx->stat_lock);
3649                 if (stop_loop)
3650                         sctx->stat.last_physical = map->stripes[num].physical +
3651                                                    length;
3652                 else
3653                         sctx->stat.last_physical = physical;
3654                 spin_unlock(&sctx->stat_lock);
3655                 if (stop_loop)
3656                         break;
3657         }
3658 out:
3659         /* push queued extents */
3660         scrub_submit(sctx);
3661         mutex_lock(&sctx->wr_lock);
3662         scrub_wr_submit(sctx);
3663         mutex_unlock(&sctx->wr_lock);
3664
3665         blk_finish_plug(&plug);
3666         btrfs_free_path(path);
3667         btrfs_free_path(ppath);
3668         return ret < 0 ? ret : 0;
3669 }
3670
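/*
 * Scrub the part of the chunk at @chunk_offset that lives on @scrub_dev:
 * look up the chunk mapping and call scrub_stripe() for every stripe of
 * that mapping whose device and physical offset match the dev extent.
 */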
3671 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3672                                           struct btrfs_device *scrub_dev,
3673                                           u64 chunk_offset, u64 length,
3674                                           u64 dev_offset,
3675                                           struct btrfs_block_group_cache *cache,
3676                                           int is_dev_replace)
3677 {
3678         struct btrfs_fs_info *fs_info = sctx->fs_info;
3679         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
3680         struct map_lookup *map;
3681         struct extent_map *em;
3682         int i;
3683         int ret = 0;
3684
3685         read_lock(&map_tree->map_tree.lock);
3686         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3687         read_unlock(&map_tree->map_tree.lock);
3688
3689         if (!em) {
3690                 /*
3691                  * Might have been an unused block group deleted by the cleaner
3692                  * kthread or relocation.
3693                  */
3694                 spin_lock(&cache->lock);
3695                 if (!cache->removed)
3696                         ret = -EINVAL;
3697                 spin_unlock(&cache->lock);
3698
3699                 return ret;
3700         }
3701
3702         map = em->map_lookup;
3703         if (em->start != chunk_offset)
3704                 goto out;
3705
3706         if (em->len < length)
3707                 goto out;
3708
3709         for (i = 0; i < map->num_stripes; ++i) {
3710                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3711                     map->stripes[i].physical == dev_offset) {
3712                         ret = scrub_stripe(sctx, map, scrub_dev, i,
3713                                            chunk_offset, length,
3714                                            is_dev_replace);
3715                         if (ret)
3716                                 goto out;
3717                 }
3718         }
3719 out:
3720         free_extent_map(em);
3721
3722         return ret;
3723 }
3724
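/*
 * Walk the dev extents of @scrub_dev between @start and @end.  For each
 * one, set the corresponding block group read-only, scrub the chunk via
 * scrub_chunk(), wait for all outstanding bios and then make the block
 * group writable again.
 */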
3725 static noinline_for_stack
3726 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3727                            struct btrfs_device *scrub_dev, u64 start, u64 end,
3728                            int is_dev_replace)
3729 {
3730         struct btrfs_dev_extent *dev_extent = NULL;
3731         struct btrfs_path *path;
3732         struct btrfs_fs_info *fs_info = sctx->fs_info;
3733         struct btrfs_root *root = fs_info->dev_root;
3734         u64 length;
3735         u64 chunk_offset;
3736         int ret = 0;
3737         int ro_set;
3738         int slot;
3739         struct extent_buffer *l;
3740         struct btrfs_key key;
3741         struct btrfs_key found_key;
3742         struct btrfs_block_group_cache *cache;
3743         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3744
3745         path = btrfs_alloc_path();
3746         if (!path)
3747                 return -ENOMEM;
3748
3749         path->reada = READA_FORWARD;
3750         path->search_commit_root = 1;
3751         path->skip_locking = 1;
3752
3753         key.objectid = scrub_dev->devid;
3754         key.offset = 0ull;
3755         key.type = BTRFS_DEV_EXTENT_KEY;
3756
3757         while (1) {
3758                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3759                 if (ret < 0)
3760                         break;
3761                 if (ret > 0) {
3762                         if (path->slots[0] >=
3763                             btrfs_header_nritems(path->nodes[0])) {
3764                                 ret = btrfs_next_leaf(root, path);
3765                                 if (ret < 0)
3766                                         break;
3767                                 if (ret > 0) {
3768                                         ret = 0;
3769                                         break;
3770                                 }
3771                         } else {
3772                                 ret = 0;
3773                         }
3774                 }
3775
3776                 l = path->nodes[0];
3777                 slot = path->slots[0];
3778
3779                 btrfs_item_key_to_cpu(l, &found_key, slot);
3780
3781                 if (found_key.objectid != scrub_dev->devid)
3782                         break;
3783
3784                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3785                         break;
3786
3787                 if (found_key.offset >= end)
3788                         break;
3789
3790                 if (found_key.offset < key.offset)
3791                         break;
3792
3793                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3794                 length = btrfs_dev_extent_length(l, dev_extent);
3795
3796                 if (found_key.offset + length <= start)
3797                         goto skip;
3798
3799                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3800
3801                 /*
3802                  * get a reference on the corresponding block group to prevent
3803                  * the chunk from going away while we scrub it
3804                  */
3805                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3806
3807                 /* some chunks are removed but not committed to disk yet,
3808                  * continue scrubbing */
3809                 if (!cache)
3810                         goto skip;
3811
3812                 /*
3813                  * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
3814                  * to avoid deadlock caused by:
3815                  * btrfs_inc_block_group_ro()
3816                  * -> btrfs_wait_for_commit()
3817                  * -> btrfs_commit_transaction()
3818                  * -> btrfs_scrub_pause()
3819                  */
3820                 scrub_pause_on(fs_info);
3821                 ret = btrfs_inc_block_group_ro(fs_info, cache);
3822                 if (!ret && is_dev_replace) {
3823                         /*
3824                          * If we are doing a device replace wait for any tasks
3825                          * that started delalloc right before we set the block
3826                          * group to RO mode, as they might have just allocated
3827                          * an extent from it or decided they could do a nocow
3828                          * write. And if any such tasks did that, wait for their
3829                          * ordered extents to complete and then commit the
3830                          * current transaction, so that we can later see the new
3831                          * extent items in the extent tree - the ordered extents
3832                          * create delayed data references (for cow writes) when
3833                          * they complete, which will be run and insert the
3834                          * corresponding extent items into the extent tree when
3835                          * we commit the transaction they used when running
3836                          * inode.c:btrfs_finish_ordered_io(). We later use
3837                          * the commit root of the extent tree to find extents
3838                          * to copy from the srcdev into the tgtdev, and we don't
3839                          * want to miss any new extents.
3840                          */
3841                         btrfs_wait_block_group_reservations(cache);
3842                         btrfs_wait_nocow_writers(cache);
3843                         ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
3844                                                        cache->key.objectid,
3845                                                        cache->key.offset);
3846                         if (ret > 0) {
3847                                 struct btrfs_trans_handle *trans;
3848
3849                                 trans = btrfs_join_transaction(root);
3850                                 if (IS_ERR(trans))
3851                                         ret = PTR_ERR(trans);
3852                                 else
3853                                         ret = btrfs_commit_transaction(trans);
3854                                 if (ret) {
3855                                         scrub_pause_off(fs_info);
3856                                         btrfs_put_block_group(cache);
3857                                         break;
3858                                 }
3859                         }
3860                 }
3861                 scrub_pause_off(fs_info);
3862
3863                 if (ret == 0) {
3864                         ro_set = 1;
3865                 } else if (ret == -ENOSPC) {
3866                         /*
3867                          * btrfs_inc_block_group_ro() returns -ENOSPC when it
3868                          * fails to create a new chunk for metadata.
3869                          * That is not a problem for scrub/replace, because
3870                          * metadata is always COWed, and our scrub pauses
3871                          * transaction commits.
3872                          */
3873                         ro_set = 0;
3874                 } else {
3875                         btrfs_warn(fs_info,
3876                                    "failed setting block group ro: %d", ret);
3877                         btrfs_put_block_group(cache);
3878                         break;
3879                 }
3880
3881                 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3882                 dev_replace->cursor_right = found_key.offset + length;
3883                 dev_replace->cursor_left = found_key.offset;
3884                 dev_replace->item_needs_writeback = 1;
3885                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3886                 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3887                                   found_key.offset, cache, is_dev_replace);
3888
3889                 /*
3890                  * Flush and submit all pending read and write bios, and
3891                  * afterwards wait for them.
3892                  * Note that in the dev replace case, a read request causes
3893                  * write requests that are submitted in the read completion
3894                  * worker. Therefore in the current situation, it is required
3895                  * that all write requests are flushed, so that all read and
3896                  * write requests are really completed when bios_in_flight
3897                  * changes to 0.
3898                  */
3899                 sctx->flush_all_writes = true;
3900                 scrub_submit(sctx);
3901                 mutex_lock(&sctx->wr_lock);
3902                 scrub_wr_submit(sctx);
3903                 mutex_unlock(&sctx->wr_lock);
3904
3905                 wait_event(sctx->list_wait,
3906                            atomic_read(&sctx->bios_in_flight) == 0);
3907
3908                 scrub_pause_on(fs_info);
3909
3910                 /*
3911                  * This must be called before we decrease @scrub_paused.
3912                  * Make sure we don't block transaction commit while
3913                  * we are waiting for pending workers to finish.
3914                  */
3915                 wait_event(sctx->list_wait,
3916                            atomic_read(&sctx->workers_pending) == 0);
3917                 sctx->flush_all_writes = false;
3918
3919                 scrub_pause_off(fs_info);
3920
3921                 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3922                 dev_replace->cursor_left = dev_replace->cursor_right;
3923                 dev_replace->item_needs_writeback = 1;
3924                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3925
3926                 if (ro_set)
3927                         btrfs_dec_block_group_ro(cache);
3928
3929                 /*
3930                  * We might have prevented the cleaner kthread from deleting
3931                  * this block group if it was already unused because we raced
3932                  * and set it to RO mode first. So add it back to the unused
3933                  * list, otherwise it might not ever be deleted unless a manual
3934                  * balance is triggered or it becomes used and unused again.
3935                  */
3936                 spin_lock(&cache->lock);
3937                 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3938                     btrfs_block_group_used(&cache->item) == 0) {
3939                         spin_unlock(&cache->lock);
3940                         spin_lock(&fs_info->unused_bgs_lock);
3941                         if (list_empty(&cache->bg_list)) {
3942                                 btrfs_get_block_group(cache);
3943                                 list_add_tail(&cache->bg_list,
3944                                               &fs_info->unused_bgs);
3945                         }
3946                         spin_unlock(&fs_info->unused_bgs_lock);
3947                 } else {
3948                         spin_unlock(&cache->lock);
3949                 }
3950
3951                 btrfs_put_block_group(cache);
3952                 if (ret)
3953                         break;
3954                 if (is_dev_replace &&
3955                     atomic64_read(&dev_replace->num_write_errors) > 0) {
3956                         ret = -EIO;
3957                         break;
3958                 }
3959                 if (sctx->stat.malloc_errors > 0) {
3960                         ret = -ENOMEM;
3961                         break;
3962                 }
3963 skip:
3964                 key.offset = found_key.offset + length;
3965                 btrfs_release_path(path);
3966         }
3967
3968         btrfs_free_path(path);
3969
3970         return ret;
3971 }
3972
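/*
 * Scrub all superblock copies of @scrub_dev that fit within the device
 * size recorded at the last commit; each copy is checked against the
 * expected generation.
 */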
3973 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3974                                            struct btrfs_device *scrub_dev)
3975 {
3976         int     i;
3977         u64     bytenr;
3978         u64     gen;
3979         int     ret;
3980         struct btrfs_fs_info *fs_info = sctx->fs_info;
3981
3982         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3983                 return -EIO;
3984
3985         /* Seed devices of a new filesystem have their own generation. */
3986         if (scrub_dev->fs_devices != fs_info->fs_devices)
3987                 gen = scrub_dev->generation;
3988         else
3989                 gen = fs_info->last_trans_committed;
3990
3991         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3992                 bytenr = btrfs_sb_offset(i);
3993                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3994                     scrub_dev->commit_total_bytes)
3995                         break;
3996
3997                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3998                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3999                                   NULL, 1, bytenr);
4000                 if (ret)
4001                         return ret;
4002         }
4003         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4004
4005         return 0;
4006 }
4007
4008 /*
4009  * Get a reference count on fs_info->scrub_workers. Start workers if necessary.
4010  */
4011 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
4012                                                 int is_dev_replace)
4013 {
4014         unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
4015         int max_active = fs_info->thread_pool_size;
4016
4017         if (fs_info->scrub_workers_refcnt == 0) {
4018                 fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
4019                                 flags, is_dev_replace ? 1 : max_active, 4);
4020                 if (!fs_info->scrub_workers)
4021                         goto fail_scrub_workers;
4022
4023                 fs_info->scrub_wr_completion_workers =
4024                         btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
4025                                               max_active, 2);
4026                 if (!fs_info->scrub_wr_completion_workers)
4027                         goto fail_scrub_wr_completion_workers;
4028
4029                 fs_info->scrub_nocow_workers =
4030                         btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
4031                 if (!fs_info->scrub_nocow_workers)
4032                         goto fail_scrub_nocow_workers;
4033                 fs_info->scrub_parity_workers =
4034                         btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
4035                                               max_active, 2);
4036                 if (!fs_info->scrub_parity_workers)
4037                         goto fail_scrub_parity_workers;
4038         }
4039         ++fs_info->scrub_workers_refcnt;
4040         return 0;
4041
4042 fail_scrub_parity_workers:
4043         btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
4044 fail_scrub_nocow_workers:
4045         btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
4046 fail_scrub_wr_completion_workers:
4047         btrfs_destroy_workqueue(fs_info->scrub_workers);
4048 fail_scrub_workers:
4049         return -ENOMEM;
4050 }
4051
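/*
 * Drop a reference on the scrub workqueues; destroy them when the last
 * user is gone.
 */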
4052 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
4053 {
4054         if (--fs_info->scrub_workers_refcnt == 0) {
4055                 btrfs_destroy_workqueue(fs_info->scrub_workers);
4056                 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
4057                 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
4058                 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
4059         }
4060         WARN_ON(fs_info->scrub_workers_refcnt < 0);
4061 }
4062
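/*
 * Scrub (or, for dev-replace, copy) the data of @devid in the byte range
 * [@start, @end]: check the size assumptions, set up workers and a scrub
 * context, scrub the super blocks (unless this is a dev-replace) and then
 * every allocated chunk.
 */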
4063 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4064                     u64 end, struct btrfs_scrub_progress *progress,
4065                     int readonly, int is_dev_replace)
4066 {
4067         struct scrub_ctx *sctx;
4068         int ret;
4069         struct btrfs_device *dev;
4070         struct rcu_string *name;
4071
4072         if (btrfs_fs_closing(fs_info))
4073                 return -EINVAL;
4074
4075         if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
4076                 /*
4077                  * With a node size larger than BTRFS_STRIPE_LEN, scrub
4078                  * cannot calculate checksums the way it is implemented.
4079                  * Do not handle this situation at all, it cannot happen.
4080                  */
4081                 btrfs_err(fs_info,
4082                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
4083                        fs_info->nodesize,
4084                        BTRFS_STRIPE_LEN);
4085                 return -EINVAL;
4086         }
4087
4088         if (fs_info->sectorsize != PAGE_SIZE) {
4089                 /* not supported for data w/o checksums */
4090                 btrfs_err_rl(fs_info,
4091                            "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
4092                        fs_info->sectorsize, PAGE_SIZE);
4093                 return -EINVAL;
4094         }
4095
4096         if (fs_info->nodesize >
4097             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
4098             fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
4099                 /*
4100                  * This would exceed the bounds of the pagev array in
4101                  * struct scrub_block.
4102                  */
4103                 btrfs_err(fs_info,
4104                           "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
4105                        fs_info->nodesize,
4106                        SCRUB_MAX_PAGES_PER_BLOCK,
4107                        fs_info->sectorsize,
4108                        SCRUB_MAX_PAGES_PER_BLOCK);
4109                 return -EINVAL;
4110         }
4111
4113         mutex_lock(&fs_info->fs_devices->device_list_mutex);
4114         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
4115         if (!dev || (dev->missing && !is_dev_replace)) {
4116                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4117                 return -ENODEV;
4118         }
4119
4120         if (!is_dev_replace && !readonly && !dev->writeable) {
4121                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4122                 rcu_read_lock();
4123                 name = rcu_dereference(dev->name);
4124                 btrfs_err(fs_info, "scrub: device %s is not writable",
4125                           name->str);
4126                 rcu_read_unlock();
4127                 return -EROFS;
4128         }
4129
4130         mutex_lock(&fs_info->scrub_lock);
4131         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
4132                 mutex_unlock(&fs_info->scrub_lock);
4133                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4134                 return -EIO;
4135         }
4136
4137         btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
4138         if (dev->scrub_device ||
4139             (!is_dev_replace &&
4140              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
4141                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
4142                 mutex_unlock(&fs_info->scrub_lock);
4143                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4144                 return -EINPROGRESS;
4145         }
4146         btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
4147
4148         ret = scrub_workers_get(fs_info, is_dev_replace);
4149         if (ret) {
4150                 mutex_unlock(&fs_info->scrub_lock);
4151                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4152                 return ret;
4153         }
4154
4155         sctx = scrub_setup_ctx(dev, is_dev_replace);
4156         if (IS_ERR(sctx)) {
4157                 mutex_unlock(&fs_info->scrub_lock);
4158                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4159                 scrub_workers_put(fs_info);
4160                 return PTR_ERR(sctx);
4161         }
4162         sctx->readonly = readonly;
4163         dev->scrub_device = sctx;
4164         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4165
4166         /*
4167          * By checking @scrub_pause_req here, we avoid a race between
4168          * transaction commit and scrubbing.
4169          */
4170         __scrub_blocked_if_needed(fs_info);
4171         atomic_inc(&fs_info->scrubs_running);
4172         mutex_unlock(&fs_info->scrub_lock);
4173
4174         if (!is_dev_replace) {
4175                 /*
4176                  * Hold the device list mutex so that this does not race
4177                  * with the super block writes done by a log tree sync.
4178                  */
4179                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4180                 ret = scrub_supers(sctx, dev);
4181                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4182         }
4183
4184         if (!ret)
4185                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
4186                                              is_dev_replace);
4187
4188         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4189         atomic_dec(&fs_info->scrubs_running);
4190         wake_up(&fs_info->scrub_pause_wait);
4191
4192         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
4193
4194         if (progress)
4195                 memcpy(progress, &sctx->stat, sizeof(*progress));
4196
4197         mutex_lock(&fs_info->scrub_lock);
4198         dev->scrub_device = NULL;
4199         scrub_workers_put(fs_info);
4200         mutex_unlock(&fs_info->scrub_lock);
4201
4202         scrub_put_ctx(sctx);
4203
4204         return ret;
4205 }
4206
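/*
 * Ask all running scrubs to pause and wait until every one of them has
 * reached the paused state.
 */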
4207 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
4208 {
4209         mutex_lock(&fs_info->scrub_lock);
4210         atomic_inc(&fs_info->scrub_pause_req);
4211         while (atomic_read(&fs_info->scrubs_paused) !=
4212                atomic_read(&fs_info->scrubs_running)) {
4213                 mutex_unlock(&fs_info->scrub_lock);
4214                 wait_event(fs_info->scrub_pause_wait,
4215                            atomic_read(&fs_info->scrubs_paused) ==
4216                            atomic_read(&fs_info->scrubs_running));
4217                 mutex_lock(&fs_info->scrub_lock);
4218         }
4219         mutex_unlock(&fs_info->scrub_lock);
4220 }
4221
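/* Let paused scrubs continue. */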
4222 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
4223 {
4224         atomic_dec(&fs_info->scrub_pause_req);
4225         wake_up(&fs_info->scrub_pause_wait);
4226 }
4227
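/*
 * Cancel all running scrubs and wait for them to finish. Returns
 * -ENOTCONN if no scrub was running.
 */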
4228 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
4229 {
4230         mutex_lock(&fs_info->scrub_lock);
4231         if (!atomic_read(&fs_info->scrubs_running)) {
4232                 mutex_unlock(&fs_info->scrub_lock);
4233                 return -ENOTCONN;
4234         }
4235
4236         atomic_inc(&fs_info->scrub_cancel_req);
4237         while (atomic_read(&fs_info->scrubs_running)) {
4238                 mutex_unlock(&fs_info->scrub_lock);
4239                 wait_event(fs_info->scrub_pause_wait,
4240                            atomic_read(&fs_info->scrubs_running) == 0);
4241                 mutex_lock(&fs_info->scrub_lock);
4242         }
4243         atomic_dec(&fs_info->scrub_cancel_req);
4244         mutex_unlock(&fs_info->scrub_lock);
4245
4246         return 0;
4247 }
4248
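/*
 * Cancel the scrub running on @dev and wait until it has stopped.
 * Returns -ENOTCONN if the device is not being scrubbed.
 */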
4249 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
4250                            struct btrfs_device *dev)
4251 {
4252         struct scrub_ctx *sctx;
4253
4254         mutex_lock(&fs_info->scrub_lock);
4255         sctx = dev->scrub_device;
4256         if (!sctx) {
4257                 mutex_unlock(&fs_info->scrub_lock);
4258                 return -ENOTCONN;
4259         }
4260         atomic_inc(&sctx->cancel_req);
4261         while (dev->scrub_device) {
4262                 mutex_unlock(&fs_info->scrub_lock);
4263                 wait_event(fs_info->scrub_pause_wait,
4264                            dev->scrub_device == NULL);
4265                 mutex_lock(&fs_info->scrub_lock);
4266         }
4267         mutex_unlock(&fs_info->scrub_lock);
4268
4269         return 0;
4270 }
4271
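/* Copy the current scrub statistics of @devid into @progress. */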
4272 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
4273                          struct btrfs_scrub_progress *progress)
4274 {
4275         struct btrfs_device *dev;
4276         struct scrub_ctx *sctx = NULL;
4277
4278         mutex_lock(&fs_info->fs_devices->device_list_mutex);
4279         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
4280         if (dev)
4281                 sctx = dev->scrub_device;
4282         if (sctx)
4283                 memcpy(progress, &sctx->stat, sizeof(*progress));
4284         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4285
4286         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4287 }
4288
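/*
 * Map @extent_logical to the physical offset, device and mirror number
 * of the first stripe returned by btrfs_map_block(). On any mapping
 * failure the output parameters are left unchanged.
 */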
4289 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4290                                u64 extent_logical, u64 extent_len,
4291                                u64 *extent_physical,
4292                                struct btrfs_device **extent_dev,
4293                                int *extent_mirror_num)
4294 {
4295         u64 mapped_length;
4296         struct btrfs_bio *bbio = NULL;
4297         int ret;
4298
4299         mapped_length = extent_len;
4300         ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4301                               &mapped_length, &bbio, 0);
4302         if (ret || !bbio || mapped_length < extent_len ||
4303             !bbio->stripes[0].dev->bdev) {
4304                 btrfs_put_bbio(bbio);
4305                 return;
4306         }
4307
4308         *extent_physical = bbio->stripes[0].physical;
4309         *extent_mirror_num = bbio->mirror_num;
4310         *extent_dev = bbio->stripes[0].dev;
4311         btrfs_put_bbio(bbio);
4312 }
4313
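/*
 * Queue a worker that copies the content of a nocow extent to the
 * dev-replace target through the page cache.
 */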
4314 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
4315                             int mirror_num, u64 physical_for_dev_replace)
4316 {
4317         struct scrub_copy_nocow_ctx *nocow_ctx;
4318         struct btrfs_fs_info *fs_info = sctx->fs_info;
4319
4320         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
4321         if (!nocow_ctx) {
4322                 spin_lock(&sctx->stat_lock);
4323                 sctx->stat.malloc_errors++;
4324                 spin_unlock(&sctx->stat_lock);
4325                 return -ENOMEM;
4326         }
4327
4328         scrub_pending_trans_workers_inc(sctx);
4329
4330         nocow_ctx->sctx = sctx;
4331         nocow_ctx->logical = logical;
4332         nocow_ctx->len = len;
4333         nocow_ctx->mirror_num = mirror_num;
4334         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
4335         btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
4336                         copy_nocow_pages_worker, NULL, NULL);
4337         INIT_LIST_HEAD(&nocow_ctx->inodes);
4338         btrfs_queue_work(fs_info->scrub_nocow_workers,
4339                          &nocow_ctx->work);
4340
4341         return 0;
4342 }
4343
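/*
 * Backref walk callback: remember every inode/offset/root combination
 * that references the nocow extent so its pages can be copied later.
 */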
4344 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
4345 {
4346         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
4347         struct scrub_nocow_inode *nocow_inode;
4348
4349         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
4350         if (!nocow_inode)
4351                 return -ENOMEM;
4352         nocow_inode->inum = inum;
4353         nocow_inode->offset = offset;
4354         nocow_inode->root = root;
4355         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
4356         return 0;
4357 }
4358
4359 #define COPY_COMPLETE 1
4360
4361 static void copy_nocow_pages_worker(struct btrfs_work *work)
4362 {
4363         struct scrub_copy_nocow_ctx *nocow_ctx =
4364                 container_of(work, struct scrub_copy_nocow_ctx, work);
4365         struct scrub_ctx *sctx = nocow_ctx->sctx;
4366         struct btrfs_fs_info *fs_info = sctx->fs_info;
4367         struct btrfs_root *root = fs_info->extent_root;
4368         u64 logical = nocow_ctx->logical;
4369         u64 len = nocow_ctx->len;
4370         int mirror_num = nocow_ctx->mirror_num;
4371         u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4372         int ret;
4373         struct btrfs_trans_handle *trans = NULL;
4374         struct btrfs_path *path;
4375         int not_written = 0;
4376
4377         path = btrfs_alloc_path();
4378         if (!path) {
4379                 spin_lock(&sctx->stat_lock);
4380                 sctx->stat.malloc_errors++;
4381                 spin_unlock(&sctx->stat_lock);
4382                 not_written = 1;
4383                 goto out;
4384         }
4385
4386         trans = btrfs_join_transaction(root);
4387         if (IS_ERR(trans)) {
4388                 not_written = 1;
4389                 goto out;
4390         }
4391
4392         ret = iterate_inodes_from_logical(logical, fs_info, path,
4393                         record_inode_for_nocow, nocow_ctx, false);
4394         if (ret != 0 && ret != -ENOENT) {
4395                 btrfs_warn(fs_info,
4396                            "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
4397                            logical, physical_for_dev_replace, len, mirror_num,
4398                            ret);
4399                 not_written = 1;
4400                 goto out;
4401         }
4402
4403         btrfs_end_transaction(trans);
4404         trans = NULL;
4405         while (!list_empty(&nocow_ctx->inodes)) {
4406                 struct scrub_nocow_inode *entry;
4407                 entry = list_first_entry(&nocow_ctx->inodes,
4408                                          struct scrub_nocow_inode,
4409                                          list);
4410                 list_del_init(&entry->list);
4411                 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4412                                                  entry->root, nocow_ctx);
4413                 kfree(entry);
4414                 if (ret == COPY_COMPLETE) {
4415                         ret = 0;
4416                         break;
4417                 } else if (ret) {
4418                         break;
4419                 }
4420         }
4421 out:
4422         while (!list_empty(&nocow_ctx->inodes)) {
4423                 struct scrub_nocow_inode *entry;
4424                 entry = list_first_entry(&nocow_ctx->inodes,
4425                                          struct scrub_nocow_inode,
4426                                          list);
4427                 list_del_init(&entry->list);
4428                 kfree(entry);
4429         }
4430         if (trans && !IS_ERR(trans))
4431                 btrfs_end_transaction(trans);
4432         if (not_written)
4433                 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4434                                             num_uncorrectable_read_errors);
4435
4436         btrfs_free_path(path);
4437         kfree(nocow_ctx);
4438
4439         scrub_pending_trans_workers_dec(sctx);
4440 }
4441
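/*
 * Check that the file range [@start, @start + @len) still maps to the
 * extent at @logical. Returns 0 if it does, 1 if the caller should skip
 * this inode, or a negative error.
 */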
4442 static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
4443                                  u64 logical)
4444 {
4445         struct extent_state *cached_state = NULL;
4446         struct btrfs_ordered_extent *ordered;
4447         struct extent_io_tree *io_tree;
4448         struct extent_map *em;
4449         u64 lockstart = start, lockend = start + len - 1;
4450         int ret = 0;
4451
4452         io_tree = &inode->io_tree;
4453
4454         lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
4455         ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4456         if (ordered) {
4457                 btrfs_put_ordered_extent(ordered);
4458                 ret = 1;
4459                 goto out_unlock;
4460         }
4461
4462         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4463         if (IS_ERR(em)) {
4464                 ret = PTR_ERR(em);
4465                 goto out_unlock;
4466         }
4467
4468         /*
4469          * This extent does not actually cover the logical extent anymore,
4470          * move on to the next inode.
4471          */
4472         if (em->block_start > logical ||
4473             em->block_start + em->block_len < logical + len) {
4474                 free_extent_map(em);
4475                 ret = 1;
4476                 goto out_unlock;
4477         }
4478         free_extent_map(em);
4479
4480 out_unlock:
4481         unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4482                              GFP_NOFS);
4483         return ret;
4484 }
4485
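/*
 * Read the pages backing the nocow extent through the inode's page cache
 * and write each one to the dev-replace target at the corresponding
 * physical offset.
 */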
4486 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4487                                       struct scrub_copy_nocow_ctx *nocow_ctx)
4488 {
4489         struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
4490         struct btrfs_key key;
4491         struct inode *inode;
4492         struct page *page;
4493         struct btrfs_root *local_root;
4494         struct extent_io_tree *io_tree;
4495         u64 physical_for_dev_replace;
4496         u64 nocow_ctx_logical;
4497         u64 len = nocow_ctx->len;
4498         unsigned long index;
4499         int srcu_index;
4500         int ret = 0;
4501         int err = 0;
4502
4503         key.objectid = root;
4504         key.type = BTRFS_ROOT_ITEM_KEY;
4505         key.offset = (u64)-1;
4506
4507         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4508
4509         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
4510         if (IS_ERR(local_root)) {
4511                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4512                 return PTR_ERR(local_root);
4513         }
4514
4515         key.type = BTRFS_INODE_ITEM_KEY;
4516         key.objectid = inum;
4517         key.offset = 0;
4518         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
4519         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4520         if (IS_ERR(inode))
4521                 return PTR_ERR(inode);
4522
4523         /* Avoid racing with truncate/dio/punch hole. */
4524         inode_lock(inode);
4525         inode_dio_wait(inode);
4526
4527         physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4528         io_tree = &BTRFS_I(inode)->io_tree;
4529         nocow_ctx_logical = nocow_ctx->logical;
4530
4531         ret = check_extent_to_block(BTRFS_I(inode), offset, len,
4532                         nocow_ctx_logical);
4533         if (ret) {
4534                 ret = ret > 0 ? 0 : ret;
4535                 goto out;
4536         }
4537
4538         while (len >= PAGE_SIZE) {
4539                 index = offset >> PAGE_SHIFT;
4540 again:
4541                 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4542                 if (!page) {
4543                         btrfs_err(fs_info, "find_or_create_page() failed");
4544                         ret = -ENOMEM;
4545                         goto out;
4546                 }
4547
4548                 if (PageUptodate(page)) {
4549                         if (PageDirty(page))
4550                                 goto next_page;
4551                 } else {
4552                         ClearPageError(page);
4553                         err = extent_read_full_page(io_tree, page,
4554                                                            btrfs_get_extent,
4555                                                            nocow_ctx->mirror_num);
4556                         if (err) {
4557                                 ret = err;
4558                                 goto next_page;
4559                         }
4560
4561                         lock_page(page);
4562                         /*
4563                          * If the page has been removed from the page
4564                          * cache, the data on it is stale: the new data
4565                          * may have been written to a different page in
4566                          * the page cache.
4567                          */
4568                         if (page->mapping != inode->i_mapping) {
4569                                 unlock_page(page);
4570                                 put_page(page);
4571                                 goto again;
4572                         }
4573                         if (!PageUptodate(page)) {
4574                                 ret = -EIO;
4575                                 goto next_page;
4576                         }
4577                 }
4578
4579                 ret = check_extent_to_block(BTRFS_I(inode), offset, len,
4580                                             nocow_ctx_logical);
4581                 if (ret) {
4582                         ret = ret > 0 ? 0 : ret;
4583                         goto next_page;
4584                 }
4585
4586                 err = write_page_nocow(nocow_ctx->sctx,
4587                                        physical_for_dev_replace, page);
4588                 if (err)
4589                         ret = err;
4590 next_page:
4591                 unlock_page(page);
4592                 put_page(page);
4593
4594                 if (ret)
4595                         break;
4596
4597                 offset += PAGE_SIZE;
4598                 physical_for_dev_replace += PAGE_SIZE;
4599                 nocow_ctx_logical += PAGE_SIZE;
4600                 len -= PAGE_SIZE;
4601         }
4602         ret = COPY_COMPLETE;
4603 out:
4604         inode_unlock(inode);
4605         iput(inode);
4606         return ret;
4607 }
4608
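/*
 * Synchronously write a single page to the dev-replace target device at
 * @physical_for_dev_replace.
 */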
4609 static int write_page_nocow(struct scrub_ctx *sctx,
4610                             u64 physical_for_dev_replace, struct page *page)
4611 {
4612         struct bio *bio;
4613         struct btrfs_device *dev;
4614         int ret;
4615
4616         dev = sctx->wr_tgtdev;
4617         if (!dev)
4618                 return -EIO;
4619         if (!dev->bdev) {
4620                 btrfs_warn_rl(dev->fs_info,
4621                         "scrub write_page_nocow(bdev == NULL) is unexpected");
4622                 return -EIO;
4623         }
4624         bio = btrfs_io_bio_alloc(1);
4625         bio->bi_iter.bi_size = 0;
4626         bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4627         bio_set_dev(bio, dev->bdev);
4628         bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
4629         ret = bio_add_page(bio, page, PAGE_SIZE, 0);
4630         if (ret != PAGE_SIZE) {
4631 leave_with_eio:
4632                 bio_put(bio);
4633                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4634                 return -EIO;
4635         }
4636
4637         if (btrfsic_submit_bio_wait(bio))
4638                 goto leave_with_eio;
4639
4640         bio_put(bio);
4641         return 0;
4642 }