fs/btrfs/scrub.c
1 /*
2  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include <linux/sched/mm.h>
22 #include "ctree.h"
23 #include "volumes.h"
24 #include "disk-io.h"
25 #include "ordered-data.h"
26 #include "transaction.h"
27 #include "backref.h"
28 #include "extent_io.h"
29 #include "dev-replace.h"
30 #include "check-integrity.h"
31 #include "rcu-string.h"
32 #include "raid56.h"
33
34 /*
35  * This is only the first step towards a full-featured scrub. It reads all
36  * extents and super blocks and verifies the checksums. In case a bad checksum
37  * is found or the extent cannot be read, good data will be written back if
38  * any can be found.
39  *
40  * Future enhancements:
41  *  - In case an unrepairable extent is encountered, track which files are
42  *    affected and report them
43  *  - track and record media errors, throw out bad devices
44  *  - add a mode to also read unallocated space
45  */
46
47 struct scrub_block;
48 struct scrub_ctx;
49
50 /*
51  * the following three values only influence the performance.
52  * The last one configures the number of parallel and outstanding I/O
53  * operations. The first two values configure an upper limit for the number
54  * of (dynamically allocated) pages that are added to a bio.
55  */
56 #define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
57 #define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
58 #define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
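/*
 * Worked example, assuming 4KiB pages: 64 bios * 32 pages * 4KiB = 8MiB of
 * read I/O that may be in flight per device, matching the figure above.
 */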
59
60 /*
61  * the following value times PAGE_SIZE needs to be large enough to match the
62  * largest node/leaf/sector size that shall be supported.
63  * Values larger than BTRFS_STRIPE_LEN are not supported.
64  */
65 #define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
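/*
 * Worked example, assuming 4KiB pages: 16 * 4KiB = 64KiB, which covers the
 * largest supported nodesize noted above.
 */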
66
67 struct scrub_recover {
68         refcount_t              refs;
69         struct btrfs_bio        *bbio;
70         u64                     map_length;
71 };
72
73 struct scrub_page {
74         struct scrub_block      *sblock;
75         struct page             *page;
76         struct btrfs_device     *dev;
77         struct list_head        list;
78         u64                     flags;  /* extent flags */
79         u64                     generation;
80         u64                     logical;
81         u64                     physical;
82         u64                     physical_for_dev_replace;
83         atomic_t                refs;
84         struct {
85                 unsigned int    mirror_num:8;
86                 unsigned int    have_csum:1;
87                 unsigned int    io_error:1;
88         };
89         u8                      csum[BTRFS_CSUM_SIZE];
90
91         struct scrub_recover    *recover;
92 };
93
94 struct scrub_bio {
95         int                     index;
96         struct scrub_ctx        *sctx;
97         struct btrfs_device     *dev;
98         struct bio              *bio;
99         int                     err;
100         u64                     logical;
101         u64                     physical;
102 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
103         struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
104 #else
105         struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
106 #endif
107         int                     page_count;
108         int                     next_free;
109         struct btrfs_work       work;
110 };
111
112 struct scrub_block {
113         struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
114         int                     page_count;
115         atomic_t                outstanding_pages;
116         refcount_t              refs; /* free mem on transition to zero */
117         struct scrub_ctx        *sctx;
118         struct scrub_parity     *sparity;
119         struct {
120                 unsigned int    header_error:1;
121                 unsigned int    checksum_error:1;
122                 unsigned int    no_io_error_seen:1;
123                 unsigned int    generation_error:1; /* also sets header_error */
124
125                 /* The following is for the data used to check parity. */
126                 /* It is for data that has a checksum. */
127                 unsigned int    data_corrected:1;
128         };
129         struct btrfs_work       work;
130 };
131
132 /* Used for chunks with a parity stripe, such as RAID5/6 */
133 struct scrub_parity {
134         struct scrub_ctx        *sctx;
135
136         struct btrfs_device     *scrub_dev;
137
138         u64                     logic_start;
139
140         u64                     logic_end;
141
142         int                     nsectors;
143
144         u64                     stripe_len;
145
146         refcount_t              refs;
147
148         struct list_head        spages;
149
150         /* Work of parity check and repair */
151         struct btrfs_work       work;
152
153         /* Mark the parity blocks which have data */
154         unsigned long           *dbitmap;
155
156         /*
157          * Mark the parity blocks which have data, but where an error
158          * happened when reading or checking that data
159          */
160         unsigned long           *ebitmap;
161
162         unsigned long           bitmap[0];
163 };
164
165 struct scrub_ctx {
166         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
167         struct btrfs_fs_info    *fs_info;
168         int                     first_free;
169         int                     curr;
170         atomic_t                bios_in_flight;
171         atomic_t                workers_pending;
172         spinlock_t              list_lock;
173         wait_queue_head_t       list_wait;
174         u16                     csum_size;
175         struct list_head        csum_list;
176         atomic_t                cancel_req;
177         int                     readonly;
178         int                     pages_per_rd_bio;
179
180         int                     is_dev_replace;
181
182         struct scrub_bio        *wr_curr_bio;
183         struct mutex            wr_lock;
184         int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
185         atomic_t                flush_all_writes;
186         struct btrfs_device     *wr_tgtdev;
187
188         /*
189          * statistics
190          */
191         struct btrfs_scrub_progress stat;
192         spinlock_t              stat_lock;
193
194         /*
195          * Use a ref counter to avoid use-after-free issues. Scrub workers
196          * decrement bios_in_flight and workers_pending and then do a wakeup
197          * on the list_wait wait queue. We must ensure the main scrub task
198          * doesn't free the scrub context before or while the workers are
199          * doing the wakeup() call.
200          */
201         refcount_t              refs;
202 };
203
204 struct scrub_fixup_nodatasum {
205         struct scrub_ctx        *sctx;
206         struct btrfs_device     *dev;
207         u64                     logical;
208         struct btrfs_root       *root;
209         struct btrfs_work       work;
210         int                     mirror_num;
211 };
212
213 struct scrub_nocow_inode {
214         u64                     inum;
215         u64                     offset;
216         u64                     root;
217         struct list_head        list;
218 };
219
220 struct scrub_copy_nocow_ctx {
221         struct scrub_ctx        *sctx;
222         u64                     logical;
223         u64                     len;
224         int                     mirror_num;
225         u64                     physical_for_dev_replace;
226         struct list_head        inodes;
227         struct btrfs_work       work;
228 };
229
230 struct scrub_warning {
231         struct btrfs_path       *path;
232         u64                     extent_item_size;
233         const char              *errstr;
234         sector_t                sector;
235         u64                     logical;
236         struct btrfs_device     *dev;
237 };
238
239 struct full_stripe_lock {
240         struct rb_node node;
241         u64 logical;
242         u64 refs;
243         struct mutex mutex;
244 };
245
246 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
247 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
248 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
249 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
250 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
251 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
252                                      struct scrub_block *sblocks_for_recheck);
253 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
254                                 struct scrub_block *sblock,
255                                 int retry_failed_mirror);
256 static void scrub_recheck_block_checksum(struct scrub_block *sblock);
257 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
258                                              struct scrub_block *sblock_good);
259 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
260                                             struct scrub_block *sblock_good,
261                                             int page_num, int force_write);
262 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
263 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
264                                            int page_num);
265 static int scrub_checksum_data(struct scrub_block *sblock);
266 static int scrub_checksum_tree_block(struct scrub_block *sblock);
267 static int scrub_checksum_super(struct scrub_block *sblock);
268 static void scrub_block_get(struct scrub_block *sblock);
269 static void scrub_block_put(struct scrub_block *sblock);
270 static void scrub_page_get(struct scrub_page *spage);
271 static void scrub_page_put(struct scrub_page *spage);
272 static void scrub_parity_get(struct scrub_parity *sparity);
273 static void scrub_parity_put(struct scrub_parity *sparity);
274 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
275                                     struct scrub_page *spage);
276 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
277                        u64 physical, struct btrfs_device *dev, u64 flags,
278                        u64 gen, int mirror_num, u8 *csum, int force,
279                        u64 physical_for_dev_replace);
280 static void scrub_bio_end_io(struct bio *bio);
281 static void scrub_bio_end_io_worker(struct btrfs_work *work);
282 static void scrub_block_complete(struct scrub_block *sblock);
283 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
284                                u64 extent_logical, u64 extent_len,
285                                u64 *extent_physical,
286                                struct btrfs_device **extent_dev,
287                                int *extent_mirror_num);
288 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
289                                     struct scrub_page *spage);
290 static void scrub_wr_submit(struct scrub_ctx *sctx);
291 static void scrub_wr_bio_end_io(struct bio *bio);
292 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
293 static int write_page_nocow(struct scrub_ctx *sctx,
294                             u64 physical_for_dev_replace, struct page *page);
295 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
296                                       struct scrub_copy_nocow_ctx *ctx);
297 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
298                             int mirror_num, u64 physical_for_dev_replace);
299 static void copy_nocow_pages_worker(struct btrfs_work *work);
300 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
301 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
302 static void scrub_put_ctx(struct scrub_ctx *sctx);
303
304
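/*
 * Account one in-flight bio against this scrub context. The matching
 * scrub_pending_bio_dec() below wakes up waiters on list_wait and drops
 * the context reference taken here.
 */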
305 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
306 {
307         refcount_inc(&sctx->refs);
308         atomic_inc(&sctx->bios_in_flight);
309 }
310
311 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
312 {
313         atomic_dec(&sctx->bios_in_flight);
314         wake_up(&sctx->list_wait);
315         scrub_put_ctx(sctx);
316 }
317
318 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
319 {
320         while (atomic_read(&fs_info->scrub_pause_req)) {
321                 mutex_unlock(&fs_info->scrub_lock);
322                 wait_event(fs_info->scrub_pause_wait,
323                    atomic_read(&fs_info->scrub_pause_req) == 0);
324                 mutex_lock(&fs_info->scrub_lock);
325         }
326 }
327
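/*
 * scrub_pause_on() and scrub_pause_off() bracket a section during which
 * this scrub counts itself as paused: the "on" side bumps scrubs_paused
 * and wakes scrub_pause_wait, the "off" side waits (via
 * __scrub_blocked_if_needed()) until any pause request is gone before
 * dropping scrubs_paused again.
 */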
328 static void scrub_pause_on(struct btrfs_fs_info *fs_info)
329 {
330         atomic_inc(&fs_info->scrubs_paused);
331         wake_up(&fs_info->scrub_pause_wait);
332 }
333
334 static void scrub_pause_off(struct btrfs_fs_info *fs_info)
335 {
336         mutex_lock(&fs_info->scrub_lock);
337         __scrub_blocked_if_needed(fs_info);
338         atomic_dec(&fs_info->scrubs_paused);
339         mutex_unlock(&fs_info->scrub_lock);
340
341         wake_up(&fs_info->scrub_pause_wait);
342 }
343
344 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
345 {
346         scrub_pause_on(fs_info);
347         scrub_pause_off(fs_info);
348 }
349
350 /*
351  * Insert new full stripe lock into full stripe locks tree
352  *
353  * Return a pointer to the existing or newly inserted full_stripe_lock
354  * structure if everything works well.
355  * Return ERR_PTR(-ENOMEM) if we failed to allocate memory.
356  *
357  * NOTE: caller must hold full_stripe_locks_root->lock before calling this
358  * function
359  */
360 static struct full_stripe_lock *insert_full_stripe_lock(
361                 struct btrfs_full_stripe_locks_tree *locks_root,
362                 u64 fstripe_logical)
363 {
364         struct rb_node **p;
365         struct rb_node *parent = NULL;
366         struct full_stripe_lock *entry;
367         struct full_stripe_lock *ret;
368
369         WARN_ON(!mutex_is_locked(&locks_root->lock));
370
371         p = &locks_root->root.rb_node;
372         while (*p) {
373                 parent = *p;
374                 entry = rb_entry(parent, struct full_stripe_lock, node);
375                 if (fstripe_logical < entry->logical) {
376                         p = &(*p)->rb_left;
377                 } else if (fstripe_logical > entry->logical) {
378                         p = &(*p)->rb_right;
379                 } else {
380                         entry->refs++;
381                         return entry;
382                 }
383         }
384
385         /* Insert new lock */
386         ret = kmalloc(sizeof(*ret), GFP_KERNEL);
387         if (!ret)
388                 return ERR_PTR(-ENOMEM);
389         ret->logical = fstripe_logical;
390         ret->refs = 1;
391         mutex_init(&ret->mutex);
392
393         rb_link_node(&ret->node, parent, p);
394         rb_insert_color(&ret->node, &locks_root->root);
395         return ret;
396 }
397
398 /*
399  * Search for a full stripe lock of a block group
400  *
401  * Return pointer to existing full stripe lock if found
402  * Return NULL if not found
403  */
404 static struct full_stripe_lock *search_full_stripe_lock(
405                 struct btrfs_full_stripe_locks_tree *locks_root,
406                 u64 fstripe_logical)
407 {
408         struct rb_node *node;
409         struct full_stripe_lock *entry;
410
411         WARN_ON(!mutex_is_locked(&locks_root->lock));
412
413         node = locks_root->root.rb_node;
414         while (node) {
415                 entry = rb_entry(node, struct full_stripe_lock, node);
416                 if (fstripe_logical < entry->logical)
417                         node = node->rb_left;
418                 else if (fstripe_logical > entry->logical)
419                         node = node->rb_right;
420                 else
421                         return entry;
422         }
423         return NULL;
424 }
425
426 /*
427  * Helper to get full stripe logical from a normal bytenr.
428  *
429  * Caller must ensure @cache is a RAID56 block group.
430  */
431 static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
432                                    u64 bytenr)
433 {
434         u64 ret;
435
436         /*
437          * Due to chunk item size limit, full stripe length should not be
438          * larger than U32_MAX. Just a sanity check here.
439          */
440         WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
441
442         /*
443          * round_down() can only handle power of 2, while RAID56 full
444          * stripe length can be 64KiB * n, so we need to manually round down.
445          */
446         ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
447                 cache->full_stripe_len + cache->key.objectid;
448         return ret;
449 }
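
/*
 * Worked example with hypothetical numbers: for a block group starting at
 * 1GiB with a full stripe length of 192KiB (e.g. 3 data stripes of 64KiB),
 * a bytenr of 1GiB + 500KiB rounds down to 1GiB + 384KiB.
 */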
450
451 /*
452  * Lock a full stripe to avoid concurrency between recovery and read
453  *
454  * It's only used for profiles with parities (RAID5/6), for other profiles it
455  * does nothing.
456  *
457  * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
458  * The caller must then call unlock_full_stripe() from the same context.
459  *
460  * Return <0 on error.
461  */
462 static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
463                             bool *locked_ret)
464 {
465         struct btrfs_block_group_cache *bg_cache;
466         struct btrfs_full_stripe_locks_tree *locks_root;
467         struct full_stripe_lock *existing;
468         u64 fstripe_start;
469         int ret = 0;
470
471         *locked_ret = false;
472         bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
473         if (!bg_cache) {
474                 ASSERT(0);
475                 return -ENOENT;
476         }
477
478         /* Profiles not based on parity don't need full stripe lock */
479         if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
480                 goto out;
481         locks_root = &bg_cache->full_stripe_locks_root;
482
483         fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
484
485         /* Now insert the full stripe lock */
486         mutex_lock(&locks_root->lock);
487         existing = insert_full_stripe_lock(locks_root, fstripe_start);
488         mutex_unlock(&locks_root->lock);
489         if (IS_ERR(existing)) {
490                 ret = PTR_ERR(existing);
491                 goto out;
492         }
493         mutex_lock(&existing->mutex);
494         *locked_ret = true;
495 out:
496         btrfs_put_block_group(bg_cache);
497         return ret;
498 }
499
500 /*
501  * Unlock a full stripe.
502  *
503  * NOTE: The caller must call this from the same context as the
504  * corresponding lock_full_stripe().
505  *
506  * Return 0 if we unlocked the full stripe without problems.
507  * Return <0 on error.
508  */
509 static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
510                               bool locked)
511 {
512         struct btrfs_block_group_cache *bg_cache;
513         struct btrfs_full_stripe_locks_tree *locks_root;
514         struct full_stripe_lock *fstripe_lock;
515         u64 fstripe_start;
516         bool freeit = false;
517         int ret = 0;
518
519         /* If we didn't acquire full stripe lock, no need to continue */
520         if (!locked)
521                 return 0;
522
523         bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
524         if (!bg_cache) {
525                 ASSERT(0);
526                 return -ENOENT;
527         }
528         if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
529                 goto out;
530
531         locks_root = &bg_cache->full_stripe_locks_root;
532         fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
533
534         mutex_lock(&locks_root->lock);
535         fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
536         /* Unpaired unlock_full_stripe() detected */
537         if (!fstripe_lock) {
538                 WARN_ON(1);
539                 ret = -ENOENT;
540                 mutex_unlock(&locks_root->lock);
541                 goto out;
542         }
543
544         if (fstripe_lock->refs == 0) {
545                 WARN_ON(1);
546                 btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
547                         fstripe_lock->logical);
548         } else {
549                 fstripe_lock->refs--;
550         }
551
552         if (fstripe_lock->refs == 0) {
553                 rb_erase(&fstripe_lock->node, &locks_root->root);
554                 freeit = true;
555         }
556         mutex_unlock(&locks_root->lock);
557
558         mutex_unlock(&fstripe_lock->mutex);
559         if (freeit)
560                 kfree(fstripe_lock);
561 out:
562         btrfs_put_block_group(bg_cache);
563         return ret;
564 }
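
/*
 * Usage sketch for the pair above (scrub_handle_errored_block() is the
 * real caller):
 *
 *        bool locked;
 *        int ret = lock_full_stripe(fs_info, logical, &locked);
 *
 *        if (ret < 0)
 *                return ret;
 *        ... recover or repair within the locked full stripe ...
 *        unlock_full_stripe(fs_info, logical, locked);
 */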
565
566 /*
567  * used for workers that require transaction commits (i.e., for the
568  * NOCOW case)
569  */
570 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
571 {
572         struct btrfs_fs_info *fs_info = sctx->fs_info;
573
574         refcount_inc(&sctx->refs);
575         /*
576          * increment scrubs_running to prevent cancel requests from
577          * completing as long as a worker is running. we must also
578          * increment scrubs_paused to prevent deadlocking on pause
579          * requests used for transaction commits (as the worker uses a
580          * transaction context). it is safe to regard the worker
581          * as paused for all practical matters. effectively, we only
582          * prevent cancellation requests from completing.
583          */
584         mutex_lock(&fs_info->scrub_lock);
585         atomic_inc(&fs_info->scrubs_running);
586         atomic_inc(&fs_info->scrubs_paused);
587         mutex_unlock(&fs_info->scrub_lock);
588
589         /*
590          * the check of @scrubs_running == @scrubs_paused inside
591          * wait_event() is not an atomic operation, which means we
592          * may inc/dec @scrubs_running/@scrubs_paused at any time.
593          * Let's wake up @scrub_pause_wait as much as we can to keep
594          * a blocked transaction commit waiting as briefly as possible.
595          */
596         wake_up(&fs_info->scrub_pause_wait);
597
598         atomic_inc(&sctx->workers_pending);
599 }
600
601 /* used for workers that require transaction commits */
602 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
603 {
604         struct btrfs_fs_info *fs_info = sctx->fs_info;
605
606         /*
607          * see scrub_pending_trans_workers_inc() for why we're pretending
608          * to be paused in the scrub counters
609          */
610         mutex_lock(&fs_info->scrub_lock);
611         atomic_dec(&fs_info->scrubs_running);
612         atomic_dec(&fs_info->scrubs_paused);
613         mutex_unlock(&fs_info->scrub_lock);
614         atomic_dec(&sctx->workers_pending);
615         wake_up(&fs_info->scrub_pause_wait);
616         wake_up(&sctx->list_wait);
617         scrub_put_ctx(sctx);
618 }
619
620 static void scrub_free_csums(struct scrub_ctx *sctx)
621 {
622         while (!list_empty(&sctx->csum_list)) {
623                 struct btrfs_ordered_sum *sum;
624                 sum = list_first_entry(&sctx->csum_list,
625                                        struct btrfs_ordered_sum, list);
626                 list_del(&sum->list);
627                 kfree(sum);
628         }
629 }
630
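/*
 * Tear down a scrub context: release the blocks still referenced by a
 * partially filled read bio, free the pre-allocated scrub_bios, the
 * current write bio and the pending checksum list.
 */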
631 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
632 {
633         int i;
634
635         if (!sctx)
636                 return;
637
638         /* this can happen when scrub is cancelled */
639         if (sctx->curr != -1) {
640                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
641
642                 for (i = 0; i < sbio->page_count; i++) {
643                         WARN_ON(!sbio->pagev[i]->page);
644                         scrub_block_put(sbio->pagev[i]->sblock);
645                 }
646                 bio_put(sbio->bio);
647         }
648
649         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
650                 struct scrub_bio *sbio = sctx->bios[i];
651
652                 if (!sbio)
653                         break;
654                 kfree(sbio);
655         }
656
657         kfree(sctx->wr_curr_bio);
658         scrub_free_csums(sctx);
659         kfree(sctx);
660 }
661
662 static void scrub_put_ctx(struct scrub_ctx *sctx)
663 {
664         if (refcount_dec_and_test(&sctx->refs))
665                 scrub_free_ctx(sctx);
666 }
667
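/*
 * Allocate and initialize a scrub context for one device. All
 * SCRUB_BIOS_PER_SCTX scrub_bios are allocated up front and chained via
 * next_free; in the dev-replace case the write target device is taken
 * from fs_info->dev_replace.tgtdev.
 */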
668 static noinline_for_stack
669 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
670 {
671         struct scrub_ctx *sctx;
672         int             i;
673         struct btrfs_fs_info *fs_info = dev->fs_info;
674
675         sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
676         if (!sctx)
677                 goto nomem;
678         refcount_set(&sctx->refs, 1);
679         sctx->is_dev_replace = is_dev_replace;
680         sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
681         sctx->curr = -1;
682         sctx->fs_info = dev->fs_info;
683         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
684                 struct scrub_bio *sbio;
685
686                 sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
687                 if (!sbio)
688                         goto nomem;
689                 sctx->bios[i] = sbio;
690
691                 sbio->index = i;
692                 sbio->sctx = sctx;
693                 sbio->page_count = 0;
694                 btrfs_init_work(&sbio->work, btrfs_scrub_helper,
695                                 scrub_bio_end_io_worker, NULL, NULL);
696
697                 if (i != SCRUB_BIOS_PER_SCTX - 1)
698                         sctx->bios[i]->next_free = i + 1;
699                 else
700                         sctx->bios[i]->next_free = -1;
701         }
702         sctx->first_free = 0;
703         atomic_set(&sctx->bios_in_flight, 0);
704         atomic_set(&sctx->workers_pending, 0);
705         atomic_set(&sctx->cancel_req, 0);
706         sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
707         INIT_LIST_HEAD(&sctx->csum_list);
708
709         spin_lock_init(&sctx->list_lock);
710         spin_lock_init(&sctx->stat_lock);
711         init_waitqueue_head(&sctx->list_wait);
712
713         WARN_ON(sctx->wr_curr_bio != NULL);
714         mutex_init(&sctx->wr_lock);
715         sctx->wr_curr_bio = NULL;
716         if (is_dev_replace) {
717                 WARN_ON(!fs_info->dev_replace.tgtdev);
718                 sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
719                 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
720                 atomic_set(&sctx->flush_all_writes, 0);
721         }
722
723         return sctx;
724
725 nomem:
726         scrub_free_ctx(sctx);
727         return ERR_PTR(-ENOMEM);
728 }
729
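/*
 * Backref walking callback: given (root, inum, offset) of an inode that
 * references the corrupted extent, resolve its file paths and print one
 * warning line per path.
 */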
730 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
731                                      void *warn_ctx)
732 {
733         u64 isize;
734         u32 nlink;
735         int ret;
736         int i;
737         unsigned nofs_flag;
738         struct extent_buffer *eb;
739         struct btrfs_inode_item *inode_item;
740         struct scrub_warning *swarn = warn_ctx;
741         struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
742         struct inode_fs_paths *ipath = NULL;
743         struct btrfs_root *local_root;
744         struct btrfs_key root_key;
745         struct btrfs_key key;
746
747         root_key.objectid = root;
748         root_key.type = BTRFS_ROOT_ITEM_KEY;
749         root_key.offset = (u64)-1;
750         local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
751         if (IS_ERR(local_root)) {
752                 ret = PTR_ERR(local_root);
753                 goto err;
754         }
755
756         /*
757          * this makes the path point to (inum INODE_ITEM ioff)
758          */
759         key.objectid = inum;
760         key.type = BTRFS_INODE_ITEM_KEY;
761         key.offset = 0;
762
763         ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
764         if (ret) {
765                 btrfs_release_path(swarn->path);
766                 goto err;
767         }
768
769         eb = swarn->path->nodes[0];
770         inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
771                                         struct btrfs_inode_item);
772         isize = btrfs_inode_size(eb, inode_item);
773         nlink = btrfs_inode_nlink(eb, inode_item);
774         btrfs_release_path(swarn->path);
775
776         /*
777          * init_ipath() might indirectly call vmalloc, or use GFP_KERNEL. Scrub
778          * uses GFP_NOFS in this context, so we keep it consistent but it does
779          * not seem to be strictly necessary.
780          */
781         nofs_flag = memalloc_nofs_save();
782         ipath = init_ipath(4096, local_root, swarn->path);
783         memalloc_nofs_restore(nofs_flag);
784         if (IS_ERR(ipath)) {
785                 ret = PTR_ERR(ipath);
786                 ipath = NULL;
787                 goto err;
788         }
789         ret = paths_from_inode(inum, ipath);
790
791         if (ret < 0)
792                 goto err;
793
794         /*
795          * we deliberately ignore the fact that ipath might have been too small
796          * to hold all of the paths here
797          */
798         for (i = 0; i < ipath->fspath->elem_cnt; ++i)
799                 btrfs_warn_in_rcu(fs_info,
800                                   "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
801                                   swarn->errstr, swarn->logical,
802                                   rcu_str_deref(swarn->dev->name),
803                                   (unsigned long long)swarn->sector,
804                                   root, inum, offset,
805                                   min(isize - offset, (u64)PAGE_SIZE), nlink,
806                                   (char *)(unsigned long)ipath->fspath->val[i]);
807
808         free_ipath(ipath);
809         return 0;
810
811 err:
812         btrfs_warn_in_rcu(fs_info,
813                           "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
814                           swarn->errstr, swarn->logical,
815                           rcu_str_deref(swarn->dev->name),
816                           (unsigned long long)swarn->sector,
817                           root, inum, offset, ret);
818
819         free_ipath(ipath);
820         return 0;
821 }
822
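/*
 * Print a warning describing what the corrupted block belongs to: for a
 * tree block the owning tree and level, for data the referencing inodes
 * and file paths (via scrub_print_warning_inode()).
 */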
823 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
824 {
825         struct btrfs_device *dev;
826         struct btrfs_fs_info *fs_info;
827         struct btrfs_path *path;
828         struct btrfs_key found_key;
829         struct extent_buffer *eb;
830         struct btrfs_extent_item *ei;
831         struct scrub_warning swarn;
832         unsigned long ptr = 0;
833         u64 extent_item_pos;
834         u64 flags = 0;
835         u64 ref_root;
836         u32 item_size;
837         u8 ref_level = 0;
838         int ret;
839
840         WARN_ON(sblock->page_count < 1);
841         dev = sblock->pagev[0]->dev;
842         fs_info = sblock->sctx->fs_info;
843
844         path = btrfs_alloc_path();
845         if (!path)
846                 return;
847
848         swarn.sector = (sblock->pagev[0]->physical) >> 9;
849         swarn.logical = sblock->pagev[0]->logical;
850         swarn.errstr = errstr;
851         swarn.dev = NULL;
852
853         ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
854                                   &flags);
855         if (ret < 0)
856                 goto out;
857
858         extent_item_pos = swarn.logical - found_key.objectid;
859         swarn.extent_item_size = found_key.offset;
860
861         eb = path->nodes[0];
862         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
863         item_size = btrfs_item_size_nr(eb, path->slots[0]);
864
865         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
866                 do {
867                         ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
868                                                       item_size, &ref_root,
869                                                       &ref_level);
870                         btrfs_warn_in_rcu(fs_info,
871                                 "%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
872                                 errstr, swarn.logical,
873                                 rcu_str_deref(dev->name),
874                                 (unsigned long long)swarn.sector,
875                                 ref_level ? "node" : "leaf",
876                                 ret < 0 ? -1 : ref_level,
877                                 ret < 0 ? -1 : ref_root);
878                 } while (ret != 1);
879                 btrfs_release_path(path);
880         } else {
881                 btrfs_release_path(path);
882                 swarn.path = path;
883                 swarn.dev = dev;
884                 iterate_extent_inodes(fs_info, found_key.objectid,
885                                         extent_item_pos, 1,
886                                         scrub_print_warning_inode, &swarn);
887         }
888
889 out:
890         btrfs_free_path(path);
891 }
892
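/*
 * Callback used by scrub_fixup_nodatasum() below through
 * iterate_inodes_from_logical(): either rewrite a clean, uptodate copy
 * from the page cache via repair_io_failure(), or force a read of the bad
 * mirror so the generic read-repair path can fix the sector.
 */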
893 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
894 {
895         struct page *page = NULL;
896         unsigned long index;
897         struct scrub_fixup_nodatasum *fixup = fixup_ctx;
898         int ret;
899         int corrected = 0;
900         struct btrfs_key key;
901         struct inode *inode = NULL;
902         struct btrfs_fs_info *fs_info;
903         u64 end = offset + PAGE_SIZE - 1;
904         struct btrfs_root *local_root;
905         int srcu_index;
906
907         key.objectid = root;
908         key.type = BTRFS_ROOT_ITEM_KEY;
909         key.offset = (u64)-1;
910
911         fs_info = fixup->root->fs_info;
912         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
913
914         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
915         if (IS_ERR(local_root)) {
916                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
917                 return PTR_ERR(local_root);
918         }
919
920         key.type = BTRFS_INODE_ITEM_KEY;
921         key.objectid = inum;
922         key.offset = 0;
923         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
924         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
925         if (IS_ERR(inode))
926                 return PTR_ERR(inode);
927
928         index = offset >> PAGE_SHIFT;
929
930         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
931         if (!page) {
932                 ret = -ENOMEM;
933                 goto out;
934         }
935
936         if (PageUptodate(page)) {
937                 if (PageDirty(page)) {
938                         /*
939                          * we need to write the data to the defective sector. the
940                          * data that was in that sector is not in memory,
941                          * because the page was modified. we must not write the
942                          * modified page to that sector.
943                          *
944                          * TODO: what could be done here: wait for the delalloc
945                          *       runner to write out that page (might involve
946                          *       COW) and see whether the sector is still
947                          *       referenced afterwards.
948                          *
949                          * In the meantime, we'll treat this error as
950                          * uncorrectable, although there is a chance that a
951                          * later scrub will find the bad sector again and that
952                          * there's no dirty page in memory, then.
953                          */
954                         ret = -EIO;
955                         goto out;
956                 }
957                 ret = repair_io_failure(fs_info, inum, offset, PAGE_SIZE,
958                                         fixup->logical, page,
959                                         offset - page_offset(page),
960                                         fixup->mirror_num);
961                 unlock_page(page);
962                 corrected = !ret;
963         } else {
964                 /*
965                  * we need to get good data first. the general readpage path
966                  * will call repair_io_failure for us, we just have to make
967                  * sure we read the bad mirror.
968                  */
969                 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
970                                         EXTENT_DAMAGED);
971                 if (ret) {
972                         /* set_extent_bits should return a proper error */
973                         WARN_ON(ret > 0);
974                         if (ret > 0)
975                                 ret = -EFAULT;
976                         goto out;
977                 }
978
979                 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
980                                                 btrfs_get_extent,
981                                                 fixup->mirror_num);
982                 wait_on_page_locked(page);
983
984                 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
985                                                 end, EXTENT_DAMAGED, 0, NULL);
986                 if (!corrected)
987                         clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
988                                                 EXTENT_DAMAGED);
989         }
990
991 out:
992         if (page)
993                 put_page(page);
994
995         iput(inode);
996
997         if (ret < 0)
998                 return ret;
999
1000         if (ret == 0 && corrected) {
1001                 /*
1002                  * we only need to call readpage for one of the inodes belonging
1003                  * to this extent. so make iterate_extent_inodes stop
1004                  */
1005                 return 1;
1006         }
1007
1008         return -EIO;
1009 }
1010
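/*
 * Worker that repairs a data block which has no checksum: join a
 * transaction and iterate over all inodes referencing the bad logical
 * address, letting scrub_fixup_readpage() trigger the regular read-repair
 * path for each of them.
 */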
1011 static void scrub_fixup_nodatasum(struct btrfs_work *work)
1012 {
1013         struct btrfs_fs_info *fs_info;
1014         int ret;
1015         struct scrub_fixup_nodatasum *fixup;
1016         struct scrub_ctx *sctx;
1017         struct btrfs_trans_handle *trans = NULL;
1018         struct btrfs_path *path;
1019         int uncorrectable = 0;
1020
1021         fixup = container_of(work, struct scrub_fixup_nodatasum, work);
1022         sctx = fixup->sctx;
1023         fs_info = fixup->root->fs_info;
1024
1025         path = btrfs_alloc_path();
1026         if (!path) {
1027                 spin_lock(&sctx->stat_lock);
1028                 ++sctx->stat.malloc_errors;
1029                 spin_unlock(&sctx->stat_lock);
1030                 uncorrectable = 1;
1031                 goto out;
1032         }
1033
1034         trans = btrfs_join_transaction(fixup->root);
1035         if (IS_ERR(trans)) {
1036                 uncorrectable = 1;
1037                 goto out;
1038         }
1039
1040         /*
1041          * the idea is to trigger a regular read through the standard path. we
1042          * read a page from the (failed) logical address by specifying the
1043          * corresponding copynum of the failed sector. thus, that readpage is
1044          * expected to fail.
1045          * that is the point where on-the-fly error correction will kick in
1046          * (once it's finished) and rewrite the failed sector if a good copy
1047          * can be found.
1048          */
1049         ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
1050                                           scrub_fixup_readpage, fixup);
1051         if (ret < 0) {
1052                 uncorrectable = 1;
1053                 goto out;
1054         }
1055         WARN_ON(ret != 1);
1056
1057         spin_lock(&sctx->stat_lock);
1058         ++sctx->stat.corrected_errors;
1059         spin_unlock(&sctx->stat_lock);
1060
1061 out:
1062         if (trans && !IS_ERR(trans))
1063                 btrfs_end_transaction(trans);
1064         if (uncorrectable) {
1065                 spin_lock(&sctx->stat_lock);
1066                 ++sctx->stat.uncorrectable_errors;
1067                 spin_unlock(&sctx->stat_lock);
1068                 btrfs_dev_replace_stats_inc(
1069                         &fs_info->dev_replace.num_uncorrectable_read_errors);
1070                 btrfs_err_rl_in_rcu(fs_info,
1071                     "unable to fixup (nodatasum) error at logical %llu on dev %s",
1072                         fixup->logical, rcu_str_deref(fixup->dev->name));
1073         }
1074
1075         btrfs_free_path(path);
1076         kfree(fixup);
1077
1078         scrub_pending_trans_workers_dec(sctx);
1079 }
1080
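/*
 * Reference counting for scrub_recover: the final scrub_put_recover()
 * drops the fs_info bio counter and releases the cached btrfs_bio
 * mapping.
 */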
1081 static inline void scrub_get_recover(struct scrub_recover *recover)
1082 {
1083         refcount_inc(&recover->refs);
1084 }
1085
1086 static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
1087                                      struct scrub_recover *recover)
1088 {
1089         if (refcount_dec_and_test(&recover->refs)) {
1090                 btrfs_bio_counter_dec(fs_info);
1091                 btrfs_put_bbio(recover->bbio);
1092                 kfree(recover);
1093         }
1094 }
1095
1096 /*
1097  * scrub_handle_errored_block gets called when either verification of the
1098  * pages failed or the bio failed to read, e.g. with EIO. In the latter
1099  * case, this function handles all pages in the bio, even though only one
1100  * may be bad.
1101  * The goal of this function is to repair the errored block by using the
1102  * contents of one of the mirrors.
1103  */
1104 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
1105 {
1106         struct scrub_ctx *sctx = sblock_to_check->sctx;
1107         struct btrfs_device *dev;
1108         struct btrfs_fs_info *fs_info;
1109         u64 length;
1110         u64 logical;
1111         unsigned int failed_mirror_index;
1112         unsigned int is_metadata;
1113         unsigned int have_csum;
1114         struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
1115         struct scrub_block *sblock_bad;
1116         int ret;
1117         int mirror_index;
1118         int page_num;
1119         int success;
1120         bool full_stripe_locked;
1121         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
1122                                       DEFAULT_RATELIMIT_BURST);
1123
1124         BUG_ON(sblock_to_check->page_count < 1);
1125         fs_info = sctx->fs_info;
1126         if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
1127                 /*
1128                  * if we find an error in a super block, we just report it.
1129                  * It will get rewritten with the next transaction commit
1130                  * anyway
1131                  */
1132                 spin_lock(&sctx->stat_lock);
1133                 ++sctx->stat.super_errors;
1134                 spin_unlock(&sctx->stat_lock);
1135                 return 0;
1136         }
1137         length = sblock_to_check->page_count * PAGE_SIZE;
1138         logical = sblock_to_check->pagev[0]->logical;
1139         BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
1140         failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
1141         is_metadata = !(sblock_to_check->pagev[0]->flags &
1142                         BTRFS_EXTENT_FLAG_DATA);
1143         have_csum = sblock_to_check->pagev[0]->have_csum;
1144         dev = sblock_to_check->pagev[0]->dev;
1145
1146         /*
1147          * For RAID5/6, a race can happen with a scrub thread for a
1148          * different device. For data corruption, the parity and data
1149          * threads will both try to recover the data.
1150          * The race can lead to doubly added csum errors, or even to an
1151          * unrecoverable error.
1152          */
1153         ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
1154         if (ret < 0) {
1155                 spin_lock(&sctx->stat_lock);
1156                 if (ret == -ENOMEM)
1157                         sctx->stat.malloc_errors++;
1158                 sctx->stat.read_errors++;
1159                 sctx->stat.uncorrectable_errors++;
1160                 spin_unlock(&sctx->stat_lock);
1161                 return ret;
1162         }
1163
1164         if (sctx->is_dev_replace && !is_metadata && !have_csum) {
1165                 sblocks_for_recheck = NULL;
1166                 goto nodatasum_case;
1167         }
1168
1169         /*
1170          * read all mirrors one after the other. This includes
1171          * re-reading the extent or metadata block that failed (that was
1172          * the cause that this fixup code is called) another time,
1173          * page by page this time in order to know which pages
1174          * caused I/O errors and which ones are good (for all mirrors).
1175          * It is the goal to handle the situation when more than one
1176          * mirror contains I/O errors, but the errors do not
1177          * overlap, i.e. the data can be repaired by selecting the
1178          * pages from those mirrors without I/O error on the
1179          * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
1180          * would be that mirror #1 has an I/O error on the first page,
1181          * the second page is good, and mirror #2 has an I/O error on
1182          * the second page, but the first page is good.
1183          * Then the first page of the first mirror can be repaired by
1184          * taking the first page of the second mirror, and the
1185          * second page of the second mirror can be repaired by
1186          * copying the contents of the 2nd page of the 1st mirror.
1187          * One more note: if the pages of one mirror contain I/O
1188          * errors, the checksum cannot be verified. In order to get
1189          * the best data for repairing, the first attempt is to find
1190          * a mirror without I/O errors and with a validated checksum.
1191          * Only if this is not possible, the pages are picked from
1192          * mirrors with I/O errors without considering the checksum.
1193          * If the latter is the case, at the end, the checksum of the
1194          * repaired area is verified in order to correctly maintain
1195          * the statistics.
1196          */
1197
1198         sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
1199                                       sizeof(*sblocks_for_recheck), GFP_NOFS);
1200         if (!sblocks_for_recheck) {
1201                 spin_lock(&sctx->stat_lock);
1202                 sctx->stat.malloc_errors++;
1203                 sctx->stat.read_errors++;
1204                 sctx->stat.uncorrectable_errors++;
1205                 spin_unlock(&sctx->stat_lock);
1206                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1207                 goto out;
1208         }
1209
1210         /* setup the context, map the logical blocks and alloc the pages */
1211         ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
1212         if (ret) {
1213                 spin_lock(&sctx->stat_lock);
1214                 sctx->stat.read_errors++;
1215                 sctx->stat.uncorrectable_errors++;
1216                 spin_unlock(&sctx->stat_lock);
1217                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1218                 goto out;
1219         }
1220         BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
1221         sblock_bad = sblocks_for_recheck + failed_mirror_index;
1222
1223         /* build and submit the bios for the failed mirror, check checksums */
1224         scrub_recheck_block(fs_info, sblock_bad, 1);
1225
1226         if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
1227             sblock_bad->no_io_error_seen) {
1228                 /*
1229                  * the error disappeared after reading page by page, or
1230                  * the area was part of a huge bio and other parts of the
1231                  * bio caused I/O errors, or the block layer merged several
1232                  * read requests into one and the error is caused by a
1233                  * different bio (usually one of the two latter cases is
1234                  * the cause)
1235                  */
1236                 spin_lock(&sctx->stat_lock);
1237                 sctx->stat.unverified_errors++;
1238                 sblock_to_check->data_corrected = 1;
1239                 spin_unlock(&sctx->stat_lock);
1240
1241                 if (sctx->is_dev_replace)
1242                         scrub_write_block_to_dev_replace(sblock_bad);
1243                 goto out;
1244         }
1245
1246         if (!sblock_bad->no_io_error_seen) {
1247                 spin_lock(&sctx->stat_lock);
1248                 sctx->stat.read_errors++;
1249                 spin_unlock(&sctx->stat_lock);
1250                 if (__ratelimit(&_rs))
1251                         scrub_print_warning("i/o error", sblock_to_check);
1252                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1253         } else if (sblock_bad->checksum_error) {
1254                 spin_lock(&sctx->stat_lock);
1255                 sctx->stat.csum_errors++;
1256                 spin_unlock(&sctx->stat_lock);
1257                 if (__ratelimit(&_rs))
1258                         scrub_print_warning("checksum error", sblock_to_check);
1259                 btrfs_dev_stat_inc_and_print(dev,
1260                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
1261         } else if (sblock_bad->header_error) {
1262                 spin_lock(&sctx->stat_lock);
1263                 sctx->stat.verify_errors++;
1264                 spin_unlock(&sctx->stat_lock);
1265                 if (__ratelimit(&_rs))
1266                         scrub_print_warning("checksum/header error",
1267                                             sblock_to_check);
1268                 if (sblock_bad->generation_error)
1269                         btrfs_dev_stat_inc_and_print(dev,
1270                                 BTRFS_DEV_STAT_GENERATION_ERRS);
1271                 else
1272                         btrfs_dev_stat_inc_and_print(dev,
1273                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1274         }
1275
1276         if (sctx->readonly) {
1277                 ASSERT(!sctx->is_dev_replace);
1278                 goto out;
1279         }
1280
1281         if (!is_metadata && !have_csum) {
1282                 struct scrub_fixup_nodatasum *fixup_nodatasum;
1283
1284                 WARN_ON(sctx->is_dev_replace);
1285
1286 nodatasum_case:
1287
1288                 /*
1289                  * !is_metadata and !have_csum, this means that the data
1290                  * might not be COWed, that it might be modified
1291                  * concurrently. The general strategy to work on the
1292                  * commit root does not help in the case when COW is not
1293                  * used.
1294                  */
1295                 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1296                 if (!fixup_nodatasum)
1297                         goto did_not_correct_error;
1298                 fixup_nodatasum->sctx = sctx;
1299                 fixup_nodatasum->dev = dev;
1300                 fixup_nodatasum->logical = logical;
1301                 fixup_nodatasum->root = fs_info->extent_root;
1302                 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
1303                 scrub_pending_trans_workers_inc(sctx);
1304                 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1305                                 scrub_fixup_nodatasum, NULL, NULL);
1306                 btrfs_queue_work(fs_info->scrub_workers,
1307                                  &fixup_nodatasum->work);
1308                 goto out;
1309         }
1310
1311         /*
1312          * now build and submit the bios for the other mirrors, check
1313          * checksums.
1314          * First try to pick the mirror which is completely without I/O
1315          * errors and also does not have a checksum error.
1316          * If one is found, and if a checksum is present, the full block
1317          * that is known to contain an error is rewritten. Afterwards
1318          * the block is known to be corrected.
1319          * If a mirror is found which is completely correct, and no
1320          * checksum is present, only those pages are rewritten that had
1321          * an I/O error in the block to be repaired, since it cannot be
1322          * determined which copy of the other pages is better (and it
1323          * could happen otherwise that a correct page would be
1324          * overwritten by a bad one).
1325          */
1326         for (mirror_index = 0;
1327              mirror_index < BTRFS_MAX_MIRRORS &&
1328              sblocks_for_recheck[mirror_index].page_count > 0;
1329              mirror_index++) {
1330                 struct scrub_block *sblock_other;
1331
1332                 if (mirror_index == failed_mirror_index)
1333                         continue;
1334                 sblock_other = sblocks_for_recheck + mirror_index;
1335
1336                 /* build and submit the bios, check checksums */
1337                 scrub_recheck_block(fs_info, sblock_other, 0);
1338
1339                 if (!sblock_other->header_error &&
1340                     !sblock_other->checksum_error &&
1341                     sblock_other->no_io_error_seen) {
1342                         if (sctx->is_dev_replace) {
1343                                 scrub_write_block_to_dev_replace(sblock_other);
1344                                 goto corrected_error;
1345                         } else {
1346                                 ret = scrub_repair_block_from_good_copy(
1347                                                 sblock_bad, sblock_other);
1348                                 if (!ret)
1349                                         goto corrected_error;
1350                         }
1351                 }
1352         }
1353
1354         if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1355                 goto did_not_correct_error;
1356
1357         /*
1358          * In case of I/O errors in the area that is supposed to be
1359          * repaired, continue by picking good copies of those pages.
1360          * Select the good pages from mirrors to rewrite bad pages from
1361          * the area to fix. Afterwards verify the checksum of the block
1362          * that is supposed to be repaired. This verification step is
1363          * only done for the purpose of statistic counting and for the
1364          * final scrub report on whether errors remain.
1365          * A perfect algorithm could make use of the checksum and try
1366          * all possible combinations of pages from the different mirrors
1367          * until the checksum verification succeeds. For example, when
1368          * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1369          * of mirror #2 is readable but the final checksum test fails,
1370          * then the 2nd page of mirror #3 could be tried, whether now
1371          * the final checksum succeeds. But this would be a rare
1372          * exception and is therefore not implemented. At least it is
1373          * avoided that the good copy is overwritten.
1374          * A more useful improvement would be to pick the sectors
1375          * without I/O error based on sector sizes (512 bytes on legacy
1376          * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
1377          * mirror could be repaired by taking 512 byte of a different
1378          * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1379          * area are unreadable.
1380          */
1381         success = 1;
1382         for (page_num = 0; page_num < sblock_bad->page_count;
1383              page_num++) {
1384                 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1385                 struct scrub_block *sblock_other = NULL;
1386
1387                 /* In plain scrub, pages without I/O errors need no repair */
1388                 if (!page_bad->io_error && !sctx->is_dev_replace)
1389                         continue;
1390
1391                 /* Try to find a mirror where this page has no I/O error */
1392                 if (page_bad->io_error) {
1393                         for (mirror_index = 0;
1394                              mirror_index < BTRFS_MAX_MIRRORS &&
1395                              sblocks_for_recheck[mirror_index].page_count > 0;
1396                              mirror_index++) {
1397                                 if (!sblocks_for_recheck[mirror_index].
1398                                     pagev[page_num]->io_error) {
1399                                         sblock_other = sblocks_for_recheck +
1400                                                        mirror_index;
1401                                         break;
1402                                 }
1403                         }
1404                         if (!sblock_other)
1405                                 success = 0;
1406                 }
1407
1408                 if (sctx->is_dev_replace) {
1409                         /*
1410                          * Did not find a mirror to fetch the page
1411                          * from. scrub_write_page_to_dev_replace()
1412                          * handles this case (page->io_error) by
1413                          * filling the page with zeros before
1414                          * submitting the write request.
1415                          */
1416                         if (!sblock_other)
1417                                 sblock_other = sblock_bad;
1418
1419                         if (scrub_write_page_to_dev_replace(sblock_other,
1420                                                             page_num) != 0) {
1421                                 btrfs_dev_replace_stats_inc(
1422                                         &fs_info->dev_replace.num_write_errors);
1423                                 success = 0;
1424                         }
1425                 } else if (sblock_other) {
1426                         ret = scrub_repair_page_from_good_copy(sblock_bad,
1427                                                                sblock_other,
1428                                                                page_num, 0);
1429                         if (!ret)
1430                                 page_bad->io_error = 0;
1431                         else
1432                                 success = 0;
1433                 }
1434         }
1435
1436         if (success && !sctx->is_dev_replace) {
1437                 if (is_metadata || have_csum) {
1438                         /*
1439                          * Need to verify the checksum now that all
1440                          * sectors on disk are repaired (the write
1441                          * request for the data to be repaired is on
1442                          * its way). Just be lazy and use
1443                          * scrub_recheck_block(), which re-reads the
1444                          * data before the checksum is verified; most
1445                          * likely the data comes out of the page cache.
1446                          */
1447                         scrub_recheck_block(fs_info, sblock_bad, 1);
1448                         if (!sblock_bad->header_error &&
1449                             !sblock_bad->checksum_error &&
1450                             sblock_bad->no_io_error_seen)
1451                                 goto corrected_error;
1452                         else
1453                                 goto did_not_correct_error;
1454                 } else {
1455 corrected_error:
1456                         spin_lock(&sctx->stat_lock);
1457                         sctx->stat.corrected_errors++;
1458                         sblock_to_check->data_corrected = 1;
1459                         spin_unlock(&sctx->stat_lock);
1460                         btrfs_err_rl_in_rcu(fs_info,
1461                                 "fixed up error at logical %llu on dev %s",
1462                                 logical, rcu_str_deref(dev->name));
1463                 }
1464         } else {
1465 did_not_correct_error:
1466                 spin_lock(&sctx->stat_lock);
1467                 sctx->stat.uncorrectable_errors++;
1468                 spin_unlock(&sctx->stat_lock);
1469                 btrfs_err_rl_in_rcu(fs_info,
1470                         "unable to fixup (regular) error at logical %llu on dev %s",
1471                         logical, rcu_str_deref(dev->name));
1472         }
1473
1474 out:
1475         if (sblocks_for_recheck) {
1476                 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1477                      mirror_index++) {
1478                         struct scrub_block *sblock = sblocks_for_recheck +
1479                                                      mirror_index;
1480                         struct scrub_recover *recover;
1481                         int page_index;
1482
1483                         for (page_index = 0; page_index < sblock->page_count;
1484                              page_index++) {
1485                                 sblock->pagev[page_index]->sblock = NULL;
1486                                 recover = sblock->pagev[page_index]->recover;
1487                                 if (recover) {
1488                                         scrub_put_recover(fs_info, recover);
1489                                         sblock->pagev[page_index]->recover =
1490                                                                         NULL;
1491                                 }
1492                                 scrub_page_put(sblock->pagev[page_index]);
1493                         }
1494                 }
1495                 kfree(sblocks_for_recheck);
1496         }
1497
1498         ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1499         if (ret < 0)
1500                 return ret;
1501         return 0;
1502 }
1503
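/*
 * Number of different ways the data of a page can be read back: for
 * RAID5 it can also be rebuilt from the parity stripe (2), for RAID6
 * additionally from the Q stripe (3); for all other profiles every
 * stripe of the bbio is a full mirror.
 */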
1504 static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1505 {
1506         if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1507                 return 2;
1508         else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1509                 return 3;
1510         else
1511                 return (int)bbio->num_stripes;
1512 }
1513
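/*
 * Map a logical address to the stripe of the bbio that contains it.
 * For RAID5/6 the matching data stripe is looked up in raid_map (the
 * P and Q stripes are skipped); for all other profiles the mirror
 * number is used as the stripe index and the offset is 0.
 */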
1514 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1515                                                  u64 *raid_map,
1516                                                  u64 mapped_length,
1517                                                  int nstripes, int mirror,
1518                                                  int *stripe_index,
1519                                                  u64 *stripe_offset)
1520 {
1521         int i;
1522
1523         if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1524                 /* RAID5/6 */
1525                 for (i = 0; i < nstripes; i++) {
1526                         if (raid_map[i] == RAID6_Q_STRIPE ||
1527                             raid_map[i] == RAID5_P_STRIPE)
1528                                 continue;
1529
1530                         if (logical >= raid_map[i] &&
1531                             logical < raid_map[i] + mapped_length)
1532                                 break;
1533                 }
1534
1535                 *stripe_index = i;
1536                 *stripe_offset = logical - raid_map[i];
1537         } else {
1538                 /* The other RAID type */
1539                 *stripe_index = mirror;
1540                 *stripe_offset = 0;
1541         }
1542 }
1543
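/*
 * Build one scrub_block per mirror in sblocks_for_recheck for the block
 * described by original_sblock. Each PAGE_SIZE piece is mapped with
 * btrfs_map_sblock() so that every mirror (or, on RAID5/6, every way to
 * rebuild the data) gets its own pages that can be read independently.
 */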
1544 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1545                                      struct scrub_block *sblocks_for_recheck)
1546 {
1547         struct scrub_ctx *sctx = original_sblock->sctx;
1548         struct btrfs_fs_info *fs_info = sctx->fs_info;
1549         u64 length = original_sblock->page_count * PAGE_SIZE;
1550         u64 logical = original_sblock->pagev[0]->logical;
1551         u64 generation = original_sblock->pagev[0]->generation;
1552         u64 flags = original_sblock->pagev[0]->flags;
1553         u64 have_csum = original_sblock->pagev[0]->have_csum;
1554         struct scrub_recover *recover;
1555         struct btrfs_bio *bbio;
1556         u64 sublen;
1557         u64 mapped_length;
1558         u64 stripe_offset;
1559         int stripe_index;
1560         int page_index = 0;
1561         int mirror_index;
1562         int nmirrors;
1563         int ret;
1564
1565         /*
1566          * note: the two members refs and outstanding_pages
1567          * are not used (and not set) in the blocks that are used for
1568          * the recheck procedure
1569          */
1570
1571         while (length > 0) {
1572                 sublen = min_t(u64, length, PAGE_SIZE);
1573                 mapped_length = sublen;
1574                 bbio = NULL;
1575
1576                 /*
1577                  * with a length of PAGE_SIZE, each returned stripe
1578                  * represents one mirror
1579                  */
1580                 btrfs_bio_counter_inc_blocked(fs_info);
1581                 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1582                                 logical, &mapped_length, &bbio);
1583                 if (ret || !bbio || mapped_length < sublen) {
1584                         btrfs_put_bbio(bbio);
1585                         btrfs_bio_counter_dec(fs_info);
1586                         return -EIO;
1587                 }
1588
1589                 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1590                 if (!recover) {
1591                         btrfs_put_bbio(bbio);
1592                         btrfs_bio_counter_dec(fs_info);
1593                         return -ENOMEM;
1594                 }
1595
1596                 refcount_set(&recover->refs, 1);
1597                 recover->bbio = bbio;
1598                 recover->map_length = mapped_length;
1599
1600                 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
1601
1602                 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1603
1604                 for (mirror_index = 0; mirror_index < nmirrors;
1605                      mirror_index++) {
1606                         struct scrub_block *sblock;
1607                         struct scrub_page *page;
1608
1609                         sblock = sblocks_for_recheck + mirror_index;
1610                         sblock->sctx = sctx;
1611
1612                         page = kzalloc(sizeof(*page), GFP_NOFS);
1613                         if (!page) {
1614 leave_nomem:
1615                                 spin_lock(&sctx->stat_lock);
1616                                 sctx->stat.malloc_errors++;
1617                                 spin_unlock(&sctx->stat_lock);
1618                                 scrub_put_recover(fs_info, recover);
1619                                 return -ENOMEM;
1620                         }
1621                         scrub_page_get(page);
1622                         sblock->pagev[page_index] = page;
1623                         page->sblock = sblock;
1624                         page->flags = flags;
1625                         page->generation = generation;
1626                         page->logical = logical;
1627                         page->have_csum = have_csum;
1628                         if (have_csum)
1629                                 memcpy(page->csum,
1630                                        original_sblock->pagev[0]->csum,
1631                                        sctx->csum_size);
1632
1633                         scrub_stripe_index_and_offset(logical,
1634                                                       bbio->map_type,
1635                                                       bbio->raid_map,
1636                                                       mapped_length,
1637                                                       bbio->num_stripes -
1638                                                       bbio->num_tgtdevs,
1639                                                       mirror_index,
1640                                                       &stripe_index,
1641                                                       &stripe_offset);
1642                         page->physical = bbio->stripes[stripe_index].physical +
1643                                          stripe_offset;
1644                         page->dev = bbio->stripes[stripe_index].dev;
1645
1646                         BUG_ON(page_index >= original_sblock->page_count);
1647                         page->physical_for_dev_replace =
1648                                 original_sblock->pagev[page_index]->
1649                                 physical_for_dev_replace;
1650                         /* for missing devices, dev->bdev is NULL */
1651                         page->mirror_num = mirror_index + 1;
1652                         sblock->page_count++;
1653                         page->page = alloc_page(GFP_NOFS);
1654                         if (!page->page)
1655                                 goto leave_nomem;
1656
1657                         scrub_get_recover(recover);
1658                         page->recover = recover;
1659                 }
1660                 scrub_put_recover(fs_info, recover);
1661                 length -= sublen;
1662                 logical += sublen;
1663                 page_index++;
1664         }
1665
1666         return 0;
1667 }
1668
1669 struct scrub_bio_ret {
1670         struct completion event;
1671         int error;
1672 };
1673
1674 static void scrub_bio_wait_endio(struct bio *bio)
1675 {
1676         struct scrub_bio_ret *ret = bio->bi_private;
1677
1678         ret->error = bio->bi_error;
1679         complete(&ret->event);
1680 }
1681
1682 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1683 {
1684         return page->recover &&
1685                (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
1686 }
1687
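/*
 * Synchronously read a page that sits on a RAID5/6 stripe by going
 * through the parity recovery code and waiting for the result.
 */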
1688 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1689                                         struct bio *bio,
1690                                         struct scrub_page *page)
1691 {
1692         struct scrub_bio_ret done;
1693         int ret;
1694
1695         init_completion(&done.event);
1696         done.error = 0;
1697         bio->bi_iter.bi_sector = page->logical >> 9;
1698         bio->bi_private = &done;
1699         bio->bi_end_io = scrub_bio_wait_endio;
1700
1701         ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
1702                                     page->recover->map_length,
1703                                     page->mirror_num, 0);
1704         if (ret)
1705                 return ret;
1706
1707         wait_for_completion(&done.event);
1708         if (done.error)
1709                 return -EIO;
1710
1711         return 0;
1712 }
1713
1714 /*
1715  * This function checks the on-disk data for checksum errors, header
1716  * errors and read I/O errors. If any I/O error happens, the exact pages
1717  * that failed are marked as bad. The goal is to enable scrub to take
1718  * those pages that are not errored from all the mirrors so that the
1719  * pages that are errored in the just handled mirror can be repaired.
1720  */
1721 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1722                                 struct scrub_block *sblock,
1723                                 int retry_failed_mirror)
1724 {
1725         int page_num;
1726
1727         sblock->no_io_error_seen = 1;
1728
1729         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1730                 struct bio *bio;
1731                 struct scrub_page *page = sblock->pagev[page_num];
1732
1733                 if (page->dev->bdev == NULL) {
1734                         page->io_error = 1;
1735                         sblock->no_io_error_seen = 0;
1736                         continue;
1737                 }
1738
1739                 WARN_ON(!page->page);
1740                 bio = btrfs_io_bio_alloc(1);
1741                 bio->bi_bdev = page->dev->bdev;
1742
1743                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1744                 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1745                         if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) {
1746                                 page->io_error = 1;
1747                                 sblock->no_io_error_seen = 0;
1748                         }
1749                 } else {
1750                         bio->bi_iter.bi_sector = page->physical >> 9;
1751                         bio_set_op_attrs(bio, REQ_OP_READ, 0);
1752
1753                         if (btrfsic_submit_bio_wait(bio)) {
1754                                 page->io_error = 1;
1755                                 sblock->no_io_error_seen = 0;
1756                         }
1757                 }
1758
1759                 bio_put(bio);
1760         }
1761
1762         if (sblock->no_io_error_seen)
1763                 scrub_recheck_block_checksum(sblock);
1764 }
1765
1766 static inline int scrub_check_fsid(u8 fsid[],
1767                                    struct scrub_page *spage)
1768 {
1769         struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1770         int ret;
1771
1772         ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1773         return !ret;
1774 }
1775
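/*
 * Re-verify a block that has just been (re-)read: data blocks are
 * checked against their checksum, tree blocks against their header
 * fields and checksum.
 */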
1776 static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1777 {
1778         sblock->header_error = 0;
1779         sblock->checksum_error = 0;
1780         sblock->generation_error = 0;
1781
1782         if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1783                 scrub_checksum_data(sblock);
1784         else
1785                 scrub_checksum_tree_block(sblock);
1786 }
1787
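/*
 * Rewrite every page of the bad block on disk with the corresponding
 * page of the good mirror (force_write is set, so pages are rewritten
 * even if they individually look fine).
 */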
1788 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1789                                              struct scrub_block *sblock_good)
1790 {
1791         int page_num;
1792         int ret = 0;
1793
1794         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1795                 int ret_sub;
1796
1797                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1798                                                            sblock_good,
1799                                                            page_num, 1);
1800                 if (ret_sub)
1801                         ret = ret_sub;
1802         }
1803
1804         return ret;
1805 }
1806
1807 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1808                                             struct scrub_block *sblock_good,
1809                                             int page_num, int force_write)
1810 {
1811         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1812         struct scrub_page *page_good = sblock_good->pagev[page_num];
1813         struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1814
1815         BUG_ON(page_bad->page == NULL);
1816         BUG_ON(page_good->page == NULL);
1817         if (force_write || sblock_bad->header_error ||
1818             sblock_bad->checksum_error || page_bad->io_error) {
1819                 struct bio *bio;
1820                 int ret;
1821
1822                 if (!page_bad->dev->bdev) {
1823                         btrfs_warn_rl(fs_info,
1824                                 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1825                         return -EIO;
1826                 }
1827
1828                 bio = btrfs_io_bio_alloc(1);
1829                 bio->bi_bdev = page_bad->dev->bdev;
1830                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1831                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1832
1833                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1834                 if (ret != PAGE_SIZE) {
1835                         bio_put(bio);
1836                         return -EIO;
1837                 }
1838
1839                 if (btrfsic_submit_bio_wait(bio)) {
1840                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1841                                 BTRFS_DEV_STAT_WRITE_ERRS);
1842                         btrfs_dev_replace_stats_inc(
1843                                 &fs_info->dev_replace.num_write_errors);
1844                         bio_put(bio);
1845                         return -EIO;
1846                 }
1847                 bio_put(bio);
1848         }
1849
1850         return 0;
1851 }
1852
1853 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1854 {
1855         struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1856         int page_num;
1857
1858         /*
1859          * This block is used for checking the parity on the source device,
1860          * so its data need not be written to the destination device.
1861          */
1862         if (sblock->sparity)
1863                 return;
1864
1865         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1866                 int ret;
1867
1868                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1869                 if (ret)
1870                         btrfs_dev_replace_stats_inc(
1871                                 &fs_info->dev_replace.num_write_errors);
1872         }
1873 }
1874
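/*
 * Queue one page for writing to the dev replace target. Pages that
 * could not be read from any mirror (io_error is set) are zero-filled
 * before they are written out.
 */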
1875 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1876                                            int page_num)
1877 {
1878         struct scrub_page *spage = sblock->pagev[page_num];
1879
1880         BUG_ON(spage->page == NULL);
1881         if (spage->io_error) {
1882                 void *mapped_buffer = kmap_atomic(spage->page);
1883
1884                 clear_page(mapped_buffer);
1885                 flush_dcache_page(spage->page);
1886                 kunmap_atomic(mapped_buffer);
1887         }
1888         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1889 }
1890
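/*
 * Add a page to the current write bio for the dev replace target. The
 * bio is submitted once it is full or when the next page is not
 * physically/logically contiguous with it.
 */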
1891 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1892                                     struct scrub_page *spage)
1893 {
1894         struct scrub_bio *sbio;
1895         int ret;
1896
1897         mutex_lock(&sctx->wr_lock);
1898 again:
1899         if (!sctx->wr_curr_bio) {
1900                 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1901                                               GFP_KERNEL);
1902                 if (!sctx->wr_curr_bio) {
1903                         mutex_unlock(&sctx->wr_lock);
1904                         return -ENOMEM;
1905                 }
1906                 sctx->wr_curr_bio->sctx = sctx;
1907                 sctx->wr_curr_bio->page_count = 0;
1908         }
1909         sbio = sctx->wr_curr_bio;
1910         if (sbio->page_count == 0) {
1911                 struct bio *bio;
1912
1913                 sbio->physical = spage->physical_for_dev_replace;
1914                 sbio->logical = spage->logical;
1915                 sbio->dev = sctx->wr_tgtdev;
1916                 bio = sbio->bio;
1917                 if (!bio) {
1918                         bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
1919                         sbio->bio = bio;
1920                 }
1921
1922                 bio->bi_private = sbio;
1923                 bio->bi_end_io = scrub_wr_bio_end_io;
1924                 bio->bi_bdev = sbio->dev->bdev;
1925                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1926                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1927                 sbio->err = 0;
1928         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1929                    spage->physical_for_dev_replace ||
1930                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1931                    spage->logical) {
1932                 scrub_wr_submit(sctx);
1933                 goto again;
1934         }
1935
1936         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1937         if (ret != PAGE_SIZE) {
1938                 if (sbio->page_count < 1) {
1939                         bio_put(sbio->bio);
1940                         sbio->bio = NULL;
1941                         mutex_unlock(&sctx->wr_lock);
1942                         return -EIO;
1943                 }
1944                 scrub_wr_submit(sctx);
1945                 goto again;
1946         }
1947
1948         sbio->pagev[sbio->page_count] = spage;
1949         scrub_page_get(spage);
1950         sbio->page_count++;
1951         if (sbio->page_count == sctx->pages_per_wr_bio)
1952                 scrub_wr_submit(sctx);
1953         mutex_unlock(&sctx->wr_lock);
1954
1955         return 0;
1956 }
1957
1958 static void scrub_wr_submit(struct scrub_ctx *sctx)
1959 {
1960         struct scrub_bio *sbio;
1961
1962         if (!sctx->wr_curr_bio)
1963                 return;
1964
1965         sbio = sctx->wr_curr_bio;
1966         sctx->wr_curr_bio = NULL;
1967         WARN_ON(!sbio->bio->bi_bdev);
1968         scrub_pending_bio_inc(sctx);
1969         /* Process all writes in a single worker thread so that the block
1970          * layer can order the requests before sending them to the driver;
1971          * this doubled the write performance on spinning disks when
1972          * measured with Linux 3.5. */
1973         btrfsic_submit_bio(sbio->bio);
1974 }
1975
1976 static void scrub_wr_bio_end_io(struct bio *bio)
1977 {
1978         struct scrub_bio *sbio = bio->bi_private;
1979         struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1980
1981         sbio->err = bio->bi_error;
1982         sbio->bio = bio;
1983
1984         btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1985                          scrub_wr_bio_end_io_worker, NULL, NULL);
1986         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1987 }
1988
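/*
 * Completion work for a write bio to the dev replace target: on error,
 * mark all pages of the bio and bump the write error counter; then drop
 * the page references and free the bio and the scrub_bio.
 */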
1989 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1990 {
1991         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1992         struct scrub_ctx *sctx = sbio->sctx;
1993         int i;
1994
1995         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1996         if (sbio->err) {
1997                 struct btrfs_dev_replace *dev_replace =
1998                         &sbio->sctx->fs_info->dev_replace;
1999
2000                 for (i = 0; i < sbio->page_count; i++) {
2001                         struct scrub_page *spage = sbio->pagev[i];
2002
2003                         spage->io_error = 1;
2004                         btrfs_dev_replace_stats_inc(&dev_replace->
2005                                                     num_write_errors);
2006                 }
2007         }
2008
2009         for (i = 0; i < sbio->page_count; i++)
2010                 scrub_page_put(sbio->pagev[i]);
2011
2012         bio_put(sbio->bio);
2013         kfree(sbio);
2014         scrub_pending_bio_dec(sctx);
2015 }
2016
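/*
 * Verify a block according to its extent type (data, tree block or
 * super block). Returns non-zero if the block failed verification, in
 * which case scrub_handle_errored_block() is called to attempt a
 * repair; super block errors are only counted, never repaired here.
 */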
2017 static int scrub_checksum(struct scrub_block *sblock)
2018 {
2019         u64 flags;
2020         int ret;
2021
2022         /*
2023          * No need to initialize these stats currently,
2024          * because this function only uses the return value
2025          * instead of these stats values.
2026          *
2027          * Todo:
2028          * always use stats
2029          */
2030         sblock->header_error = 0;
2031         sblock->generation_error = 0;
2032         sblock->checksum_error = 0;
2033
2034         WARN_ON(sblock->page_count < 1);
2035         flags = sblock->pagev[0]->flags;
2036         ret = 0;
2037         if (flags & BTRFS_EXTENT_FLAG_DATA)
2038                 ret = scrub_checksum_data(sblock);
2039         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2040                 ret = scrub_checksum_tree_block(sblock);
2041         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
2042                 (void)scrub_checksum_super(sblock);
2043         else
2044                 WARN_ON(1);
2045         if (ret)
2046                 scrub_handle_errored_block(sblock);
2047
2048         return ret;
2049 }
2050
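/*
 * Check a data block against the checksum stored in pagev[0]->csum.
 * Returns 1 (and sets checksum_error) on mismatch, 0 otherwise; blocks
 * without a known checksum always pass.
 */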
2051 static int scrub_checksum_data(struct scrub_block *sblock)
2052 {
2053         struct scrub_ctx *sctx = sblock->sctx;
2054         u8 csum[BTRFS_CSUM_SIZE];
2055         u8 *on_disk_csum;
2056         struct page *page;
2057         void *buffer;
2058         u32 crc = ~(u32)0;
2059         u64 len;
2060         int index;
2061
2062         BUG_ON(sblock->page_count < 1);
2063         if (!sblock->pagev[0]->have_csum)
2064                 return 0;
2065
2066         on_disk_csum = sblock->pagev[0]->csum;
2067         page = sblock->pagev[0]->page;
2068         buffer = kmap_atomic(page);
2069
2070         len = sctx->fs_info->sectorsize;
2071         index = 0;
2072         for (;;) {
2073                 u64 l = min_t(u64, len, PAGE_SIZE);
2074
2075                 crc = btrfs_csum_data(buffer, crc, l);
2076                 kunmap_atomic(buffer);
2077                 len -= l;
2078                 if (len == 0)
2079                         break;
2080                 index++;
2081                 BUG_ON(index >= sblock->page_count);
2082                 BUG_ON(!sblock->pagev[index]->page);
2083                 page = sblock->pagev[index]->page;
2084                 buffer = kmap_atomic(page);
2085         }
2086
2087         btrfs_csum_final(crc, csum);
2088         if (memcmp(csum, on_disk_csum, sctx->csum_size))
2089                 sblock->checksum_error = 1;
2090
2091         return sblock->checksum_error;
2092 }
2093
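/*
 * Check a tree block: bytenr, generation, fsid and chunk tree UUID in
 * the header, plus the checksum over the whole node. Returns non-zero
 * if a header or checksum error was found.
 */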
2094 static int scrub_checksum_tree_block(struct scrub_block *sblock)
2095 {
2096         struct scrub_ctx *sctx = sblock->sctx;
2097         struct btrfs_header *h;
2098         struct btrfs_fs_info *fs_info = sctx->fs_info;
2099         u8 calculated_csum[BTRFS_CSUM_SIZE];
2100         u8 on_disk_csum[BTRFS_CSUM_SIZE];
2101         struct page *page;
2102         void *mapped_buffer;
2103         u64 mapped_size;
2104         void *p;
2105         u32 crc = ~(u32)0;
2106         u64 len;
2107         int index;
2108
2109         BUG_ON(sblock->page_count < 1);
2110         page = sblock->pagev[0]->page;
2111         mapped_buffer = kmap_atomic(page);
2112         h = (struct btrfs_header *)mapped_buffer;
2113         memcpy(on_disk_csum, h->csum, sctx->csum_size);
2114
2115         /*
2116          * we don't use the getter functions here, as we
2117          * a) don't have an extent buffer and
2118          * b) the page is already kmapped
2119          */
2120         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
2121                 sblock->header_error = 1;
2122
2123         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
2124                 sblock->header_error = 1;
2125                 sblock->generation_error = 1;
2126         }
2127
2128         if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
2129                 sblock->header_error = 1;
2130
2131         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
2132                    BTRFS_UUID_SIZE))
2133                 sblock->header_error = 1;
2134
2135         len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
2136         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2137         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2138         index = 0;
2139         for (;;) {
2140                 u64 l = min_t(u64, len, mapped_size);
2141
2142                 crc = btrfs_csum_data(p, crc, l);
2143                 kunmap_atomic(mapped_buffer);
2144                 len -= l;
2145                 if (len == 0)
2146                         break;
2147                 index++;
2148                 BUG_ON(index >= sblock->page_count);
2149                 BUG_ON(!sblock->pagev[index]->page);
2150                 page = sblock->pagev[index]->page;
2151                 mapped_buffer = kmap_atomic(page);
2152                 mapped_size = PAGE_SIZE;
2153                 p = mapped_buffer;
2154         }
2155
2156         btrfs_csum_final(crc, calculated_csum);
2157         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
2158                 sblock->checksum_error = 1;
2159
2160         return sblock->header_error || sblock->checksum_error;
2161 }
2162
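/*
 * Check one super block copy: bytenr, generation, fsid and checksum.
 * Errors are only reported via the scrub statistics and the device
 * error counters.
 */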
2163 static int scrub_checksum_super(struct scrub_block *sblock)
2164 {
2165         struct btrfs_super_block *s;
2166         struct scrub_ctx *sctx = sblock->sctx;
2167         u8 calculated_csum[BTRFS_CSUM_SIZE];
2168         u8 on_disk_csum[BTRFS_CSUM_SIZE];
2169         struct page *page;
2170         void *mapped_buffer;
2171         u64 mapped_size;
2172         void *p;
2173         u32 crc = ~(u32)0;
2174         int fail_gen = 0;
2175         int fail_cor = 0;
2176         u64 len;
2177         int index;
2178
2179         BUG_ON(sblock->page_count < 1);
2180         page = sblock->pagev[0]->page;
2181         mapped_buffer = kmap_atomic(page);
2182         s = (struct btrfs_super_block *)mapped_buffer;
2183         memcpy(on_disk_csum, s->csum, sctx->csum_size);
2184
2185         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
2186                 ++fail_cor;
2187
2188         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
2189                 ++fail_gen;
2190
2191         if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
2192                 ++fail_cor;
2193
2194         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2195         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2196         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2197         index = 0;
2198         for (;;) {
2199                 u64 l = min_t(u64, len, mapped_size);
2200
2201                 crc = btrfs_csum_data(p, crc, l);
2202                 kunmap_atomic(mapped_buffer);
2203                 len -= l;
2204                 if (len == 0)
2205                         break;
2206                 index++;
2207                 BUG_ON(index >= sblock->page_count);
2208                 BUG_ON(!sblock->pagev[index]->page);
2209                 page = sblock->pagev[index]->page;
2210                 mapped_buffer = kmap_atomic(page);
2211                 mapped_size = PAGE_SIZE;
2212                 p = mapped_buffer;
2213         }
2214
2215         btrfs_csum_final(crc, calculated_csum);
2216         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
2217                 ++fail_cor;
2218
2219         if (fail_cor + fail_gen) {
2220                 /*
2221                  * If we find an error in a super block, we just report it.
2222                  * The super blocks will be rewritten with the next
2223                  * transaction commit anyway.
2224                  */
2225                 spin_lock(&sctx->stat_lock);
2226                 ++sctx->stat.super_errors;
2227                 spin_unlock(&sctx->stat_lock);
2228                 if (fail_cor)
2229                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2230                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2231                 else
2232                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2233                                 BTRFS_DEV_STAT_GENERATION_ERRS);
2234         }
2235
2236         return fail_cor + fail_gen;
2237 }
2238
2239 static void scrub_block_get(struct scrub_block *sblock)
2240 {
2241         refcount_inc(&sblock->refs);
2242 }
2243
2244 static void scrub_block_put(struct scrub_block *sblock)
2245 {
2246         if (refcount_dec_and_test(&sblock->refs)) {
2247                 int i;
2248
2249                 if (sblock->sparity)
2250                         scrub_parity_put(sblock->sparity);
2251
2252                 for (i = 0; i < sblock->page_count; i++)
2253                         scrub_page_put(sblock->pagev[i]);
2254                 kfree(sblock);
2255         }
2256 }
2257
2258 static void scrub_page_get(struct scrub_page *spage)
2259 {
2260         atomic_inc(&spage->refs);
2261 }
2262
2263 static void scrub_page_put(struct scrub_page *spage)
2264 {
2265         if (atomic_dec_and_test(&spage->refs)) {
2266                 if (spage->page)
2267                         __free_page(spage->page);
2268                 kfree(spage);
2269         }
2270 }
2271
2272 static void scrub_submit(struct scrub_ctx *sctx)
2273 {
2274         struct scrub_bio *sbio;
2275
2276         if (sctx->curr == -1)
2277                 return;
2278
2279         sbio = sctx->bios[sctx->curr];
2280         sctx->curr = -1;
2281         scrub_pending_bio_inc(sctx);
2282         btrfsic_submit_bio(sbio->bio);
2283 }
2284
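/*
 * Add a page to the current read bio, waiting for a free scrub_bio
 * slot if all of them are in flight. The bio is submitted once it is
 * full or when the next page is not contiguous with it (physically,
 * logically or by device).
 */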
2285 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2286                                     struct scrub_page *spage)
2287 {
2288         struct scrub_block *sblock = spage->sblock;
2289         struct scrub_bio *sbio;
2290         int ret;
2291
2292 again:
2293         /*
2294          * grab a fresh bio or wait for one to become available
2295          */
2296         while (sctx->curr == -1) {
2297                 spin_lock(&sctx->list_lock);
2298                 sctx->curr = sctx->first_free;
2299                 if (sctx->curr != -1) {
2300                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
2301                         sctx->bios[sctx->curr]->next_free = -1;
2302                         sctx->bios[sctx->curr]->page_count = 0;
2303                         spin_unlock(&sctx->list_lock);
2304                 } else {
2305                         spin_unlock(&sctx->list_lock);
2306                         wait_event(sctx->list_wait, sctx->first_free != -1);
2307                 }
2308         }
2309         sbio = sctx->bios[sctx->curr];
2310         if (sbio->page_count == 0) {
2311                 struct bio *bio;
2312
2313                 sbio->physical = spage->physical;
2314                 sbio->logical = spage->logical;
2315                 sbio->dev = spage->dev;
2316                 bio = sbio->bio;
2317                 if (!bio) {
2318                         bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
2319                         sbio->bio = bio;
2320                 }
2321
2322                 bio->bi_private = sbio;
2323                 bio->bi_end_io = scrub_bio_end_io;
2324                 bio->bi_bdev = sbio->dev->bdev;
2325                 bio->bi_iter.bi_sector = sbio->physical >> 9;
2326                 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2327                 sbio->err = 0;
2328         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2329                    spage->physical ||
2330                    sbio->logical + sbio->page_count * PAGE_SIZE !=
2331                    spage->logical ||
2332                    sbio->dev != spage->dev) {
2333                 scrub_submit(sctx);
2334                 goto again;
2335         }
2336
2337         sbio->pagev[sbio->page_count] = spage;
2338         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2339         if (ret != PAGE_SIZE) {
2340                 if (sbio->page_count < 1) {
2341                         bio_put(sbio->bio);
2342                         sbio->bio = NULL;
2343                         return -EIO;
2344                 }
2345                 scrub_submit(sctx);
2346                 goto again;
2347         }
2348
2349         scrub_block_get(sblock); /* one for the page added to the bio */
2350         atomic_inc(&sblock->outstanding_pages);
2351         sbio->page_count++;
2352         if (sbio->page_count == sctx->pages_per_rd_bio)
2353                 scrub_submit(sctx);
2354
2355         return 0;
2356 }
2357
2358 static void scrub_missing_raid56_end_io(struct bio *bio)
2359 {
2360         struct scrub_block *sblock = bio->bi_private;
2361         struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2362
2363         if (bio->bi_error)
2364                 sblock->no_io_error_seen = 0;
2365
2366         bio_put(bio);
2367
2368         btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2369 }
2370
2371 static void scrub_missing_raid56_worker(struct btrfs_work *work)
2372 {
2373         struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2374         struct scrub_ctx *sctx = sblock->sctx;
2375         struct btrfs_fs_info *fs_info = sctx->fs_info;
2376         u64 logical;
2377         struct btrfs_device *dev;
2378
2379         logical = sblock->pagev[0]->logical;
2380         dev = sblock->pagev[0]->dev;
2381
2382         if (sblock->no_io_error_seen)
2383                 scrub_recheck_block_checksum(sblock);
2384
2385         if (!sblock->no_io_error_seen) {
2386                 spin_lock(&sctx->stat_lock);
2387                 sctx->stat.read_errors++;
2388                 spin_unlock(&sctx->stat_lock);
2389                 btrfs_err_rl_in_rcu(fs_info,
2390                         "IO error rebuilding logical %llu for dev %s",
2391                         logical, rcu_str_deref(dev->name));
2392         } else if (sblock->header_error || sblock->checksum_error) {
2393                 spin_lock(&sctx->stat_lock);
2394                 sctx->stat.uncorrectable_errors++;
2395                 spin_unlock(&sctx->stat_lock);
2396                 btrfs_err_rl_in_rcu(fs_info,
2397                         "failed to rebuild valid logical %llu for dev %s",
2398                         logical, rcu_str_deref(dev->name));
2399         } else {
2400                 scrub_write_block_to_dev_replace(sblock);
2401         }
2402
2403         scrub_block_put(sblock);
2404
2405         if (sctx->is_dev_replace &&
2406             atomic_read(&sctx->flush_all_writes)) {
2407                 mutex_lock(&sctx->wr_lock);
2408                 scrub_wr_submit(sctx);
2409                 mutex_unlock(&sctx->wr_lock);
2410         }
2411
2412         scrub_pending_bio_dec(sctx);
2413 }
2414
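/*
 * The device holding this block is missing. For RAID5/6 dev replace,
 * rebuild the pages from the remaining stripes by submitting a missing
 * rbio; the rebuilt block is verified and written out in
 * scrub_missing_raid56_worker().
 */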
2415 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2416 {
2417         struct scrub_ctx *sctx = sblock->sctx;
2418         struct btrfs_fs_info *fs_info = sctx->fs_info;
2419         u64 length = sblock->page_count * PAGE_SIZE;
2420         u64 logical = sblock->pagev[0]->logical;
2421         struct btrfs_bio *bbio = NULL;
2422         struct bio *bio;
2423         struct btrfs_raid_bio *rbio;
2424         int ret;
2425         int i;
2426
2427         btrfs_bio_counter_inc_blocked(fs_info);
2428         ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2429                         &length, &bbio);
2430         if (ret || !bbio || !bbio->raid_map)
2431                 goto bbio_out;
2432
2433         if (WARN_ON(!sctx->is_dev_replace ||
2434                     !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2435                 /*
2436                  * We shouldn't be scrubbing a missing device. Even for dev
2437                  * replace, we should only get here for RAID 5/6. We either
2438                  * managed to mount something with no mirrors remaining or
2439                  * there's a bug in scrub_remap_extent()/btrfs_map_block().
2440                  */
2441                 goto bbio_out;
2442         }
2443
2444         bio = btrfs_io_bio_alloc(0);
2445         bio->bi_iter.bi_sector = logical >> 9;
2446         bio->bi_private = sblock;
2447         bio->bi_end_io = scrub_missing_raid56_end_io;
2448
2449         rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
2450         if (!rbio)
2451                 goto rbio_out;
2452
2453         for (i = 0; i < sblock->page_count; i++) {
2454                 struct scrub_page *spage = sblock->pagev[i];
2455
2456                 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2457         }
2458
2459         btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2460                         scrub_missing_raid56_worker, NULL, NULL);
2461         scrub_block_get(sblock);
2462         scrub_pending_bio_inc(sctx);
2463         raid56_submit_missing_rbio(rbio);
2464         return;
2465
2466 rbio_out:
2467         bio_put(bio);
2468 bbio_out:
2469         btrfs_bio_counter_dec(fs_info);
2470         btrfs_put_bbio(bbio);
2471         spin_lock(&sctx->stat_lock);
2472         sctx->stat.malloc_errors++;
2473         spin_unlock(&sctx->stat_lock);
2474 }
2475
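/*
 * Split the range [logical, logical + len) into PAGE_SIZE sized
 * scrub_pages, collect them in a new scrub_block and queue them for
 * reading (or for RAID5/6 reconstruction when the device is missing).
 */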
2476 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2477                        u64 physical, struct btrfs_device *dev, u64 flags,
2478                        u64 gen, int mirror_num, u8 *csum, int force,
2479                        u64 physical_for_dev_replace)
2480 {
2481         struct scrub_block *sblock;
2482         int index;
2483
2484         sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2485         if (!sblock) {
2486                 spin_lock(&sctx->stat_lock);
2487                 sctx->stat.malloc_errors++;
2488                 spin_unlock(&sctx->stat_lock);
2489                 return -ENOMEM;
2490         }
2491
2492         /* one ref inside this function, plus one for each page added to
2493          * a bio later on */
2494         refcount_set(&sblock->refs, 1);
2495         sblock->sctx = sctx;
2496         sblock->no_io_error_seen = 1;
2497
2498         for (index = 0; len > 0; index++) {
2499                 struct scrub_page *spage;
2500                 u64 l = min_t(u64, len, PAGE_SIZE);
2501
2502                 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2503                 if (!spage) {
2504 leave_nomem:
2505                         spin_lock(&sctx->stat_lock);
2506                         sctx->stat.malloc_errors++;
2507                         spin_unlock(&sctx->stat_lock);
2508                         scrub_block_put(sblock);
2509                         return -ENOMEM;
2510                 }
2511                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2512                 scrub_page_get(spage);
2513                 sblock->pagev[index] = spage;
2514                 spage->sblock = sblock;
2515                 spage->dev = dev;
2516                 spage->flags = flags;
2517                 spage->generation = gen;
2518                 spage->logical = logical;
2519                 spage->physical = physical;
2520                 spage->physical_for_dev_replace = physical_for_dev_replace;
2521                 spage->mirror_num = mirror_num;
2522                 if (csum) {
2523                         spage->have_csum = 1;
2524                         memcpy(spage->csum, csum, sctx->csum_size);
2525                 } else {
2526                         spage->have_csum = 0;
2527                 }
2528                 sblock->page_count++;
2529                 spage->page = alloc_page(GFP_KERNEL);
2530                 if (!spage->page)
2531                         goto leave_nomem;
2532                 len -= l;
2533                 logical += l;
2534                 physical += l;
2535                 physical_for_dev_replace += l;
2536         }
2537
2538         WARN_ON(sblock->page_count == 0);
2539         if (dev->missing) {
2540                 /*
2541                  * This case should only be hit for RAID 5/6 device replace. See
2542                  * the comment in scrub_missing_raid56_pages() for details.
2543                  */
2544                 scrub_missing_raid56_pages(sblock);
2545         } else {
2546                 for (index = 0; index < sblock->page_count; index++) {
2547                         struct scrub_page *spage = sblock->pagev[index];
2548                         int ret;
2549
2550                         ret = scrub_add_page_to_rd_bio(sctx, spage);
2551                         if (ret) {
2552                                 scrub_block_put(sblock);
2553                                 return ret;
2554                         }
2555                 }
2556
2557                 if (force)
2558                         scrub_submit(sctx);
2559         }
2560
2561         /* last one frees, either here or in bio completion for last page */
2562         scrub_block_put(sblock);
2563         return 0;
2564 }
2565
2566 static void scrub_bio_end_io(struct bio *bio)
2567 {
2568         struct scrub_bio *sbio = bio->bi_private;
2569         struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2570
2571         sbio->err = bio->bi_error;
2572         sbio->bio = bio;
2573
2574         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2575 }
2576
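/*
 * Completion work for a read bio: propagate an I/O error to all pages
 * of the bio, complete the scrub_blocks whose last outstanding page
 * just finished and put the scrub_bio back on the free list.
 */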
2577 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2578 {
2579         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2580         struct scrub_ctx *sctx = sbio->sctx;
2581         int i;
2582
2583         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2584         if (sbio->err) {
2585                 for (i = 0; i < sbio->page_count; i++) {
2586                         struct scrub_page *spage = sbio->pagev[i];
2587
2588                         spage->io_error = 1;
2589                         spage->sblock->no_io_error_seen = 0;
2590                 }
2591         }
2592
2593         /* now complete the scrub_block items that have all pages completed */
2594         for (i = 0; i < sbio->page_count; i++) {
2595                 struct scrub_page *spage = sbio->pagev[i];
2596                 struct scrub_block *sblock = spage->sblock;
2597
2598                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2599                         scrub_block_complete(sblock);
2600                 scrub_block_put(sblock);
2601         }
2602
2603         bio_put(sbio->bio);
2604         sbio->bio = NULL;
2605         spin_lock(&sctx->list_lock);
2606         sbio->next_free = sctx->first_free;
2607         sctx->first_free = sbio->index;
2608         spin_unlock(&sctx->list_lock);
2609
2610         if (sctx->is_dev_replace &&
2611             atomic_read(&sctx->flush_all_writes)) {
2612                 mutex_lock(&sctx->wr_lock);
2613                 scrub_wr_submit(sctx);
2614                 mutex_unlock(&sctx->wr_lock);
2615         }
2616
2617         scrub_pending_bio_dec(sctx);
2618 }
2619
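/*
 * Mark the sectors covered by [start, start + len) in the given
 * per-stripe bitmap. The range may wrap around the end of the stripe,
 * in which case the bitmap is set in two pieces.
 */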
2620 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2621                                        unsigned long *bitmap,
2622                                        u64 start, u64 len)
2623 {
2624         u64 offset;
2625         int nsectors;
2626         int sectorsize = sparity->sctx->fs_info->sectorsize;
2627
2628         if (len >= sparity->stripe_len) {
2629                 bitmap_set(bitmap, 0, sparity->nsectors);
2630                 return;
2631         }
2632
2633         start -= sparity->logic_start;
2634         start = div64_u64_rem(start, sparity->stripe_len, &offset);
2635         offset = div_u64(offset, sectorsize);
2636         nsectors = (int)len / sectorsize;
2637
2638         if (offset + nsectors <= sparity->nsectors) {
2639                 bitmap_set(bitmap, offset, nsectors);
2640                 return;
2641         }
2642
2643         bitmap_set(bitmap, offset, sparity->nsectors - offset);
2644         bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2645 }
2646
2647 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2648                                                    u64 start, u64 len)
2649 {
2650         __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2651 }
2652
2653 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2654                                                   u64 start, u64 len)
2655 {
2656         __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2657 }
2658
2659 static void scrub_block_complete(struct scrub_block *sblock)
2660 {
2661         int corrupted = 0;
2662
2663         if (!sblock->no_io_error_seen) {
2664                 corrupted = 1;
2665                 scrub_handle_errored_block(sblock);
2666         } else {
2667                 /*
2668                  * In the dev replace case: if the block has a checksum
2669                  * error, it is written via the repair mechanism; otherwise
2670                  * it is written to the target device right here.
2671                  */
2672                 corrupted = scrub_checksum(sblock);
2673                 if (!corrupted && sblock->sctx->is_dev_replace)
2674                         scrub_write_block_to_dev_replace(sblock);
2675         }
2676
2677         if (sblock->sparity && corrupted && !sblock->data_corrected) {
2678                 u64 start = sblock->pagev[0]->logical;
2679                 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2680                           PAGE_SIZE;
2681
2682                 scrub_parity_mark_sectors_error(sblock->sparity,
2683                                                 start, end - start);
2684         }
2685 }
2686
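/*
 * Look up the data checksum for 'logical' in sctx->csum_list. Sums that
 * end at or before 'logical' are dropped from the list. Returns 1 and
 * copies the checksum into 'csum' if one covers 'logical', 0 otherwise.
 */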
2687 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2688 {
2689         struct btrfs_ordered_sum *sum = NULL;
2690         unsigned long index;
2691         unsigned long num_sectors;
2692
2693         while (!list_empty(&sctx->csum_list)) {
2694                 sum = list_first_entry(&sctx->csum_list,
2695                                        struct btrfs_ordered_sum, list);
2696                 if (sum->bytenr > logical)
2697                         return 0;
2698                 if (sum->bytenr + sum->len > logical)
2699                         break;
2700
2701                 ++sctx->stat.csum_discards;
2702                 list_del(&sum->list);
2703                 kfree(sum);
2704                 sum = NULL;
2705         }
2706         if (!sum)
2707                 return 0;
2708
2709         index = ((u32)(logical - sum->bytenr)) / sctx->fs_info->sectorsize;
2710         num_sectors = sum->len / sctx->fs_info->sectorsize;
2711         memcpy(csum, sum->sums + index, sctx->csum_size);
2712         if (index == num_sectors - 1) {
2713                 list_del(&sum->list);
2714                 kfree(sum);
2715         }
2716         return 1;
2717 }
2718
2719 /* scrub extent tries to collect up to 64 kB for each bio */
2720 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2721                         u64 physical, struct btrfs_device *dev, u64 flags,
2722                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2723 {
2724         int ret;
2725         u8 csum[BTRFS_CSUM_SIZE];
2726         u32 blocksize;
2727
2728         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2729                 blocksize = sctx->fs_info->sectorsize;
2730                 spin_lock(&sctx->stat_lock);
2731                 sctx->stat.data_extents_scrubbed++;
2732                 sctx->stat.data_bytes_scrubbed += len;
2733                 spin_unlock(&sctx->stat_lock);
2734         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2735                 blocksize = sctx->fs_info->nodesize;
2736                 spin_lock(&sctx->stat_lock);
2737                 sctx->stat.tree_extents_scrubbed++;
2738                 sctx->stat.tree_bytes_scrubbed += len;
2739                 spin_unlock(&sctx->stat_lock);
2740         } else {
2741                 blocksize = sctx->fs_info->sectorsize;
2742                 WARN_ON(1);
2743         }
2744
2745         while (len) {
2746                 u64 l = min_t(u64, len, blocksize);
2747                 int have_csum = 0;
2748
2749                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2750                         /* push csums to sbio */
2751                         have_csum = scrub_find_csum(sctx, logical, csum);
2752                         if (have_csum == 0)
2753                                 ++sctx->stat.no_csum;
2754                         if (sctx->is_dev_replace && !have_csum) {
2755                                 ret = copy_nocow_pages(sctx, logical, l,
2756                                                        mirror_num,
2757                                                       physical_for_dev_replace);
2758                                 goto behind_scrub_pages;
2759                         }
2760                 }
2761                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2762                                   mirror_num, have_csum ? csum : NULL, 0,
2763                                   physical_for_dev_replace);
2764 behind_scrub_pages:
2765                 if (ret)
2766                         return ret;
2767                 len -= l;
2768                 logical += l;
2769                 physical += l;
2770                 physical_for_dev_replace += l;
2771         }
2772         return 0;
2773 }
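/*
 * Walk-through of the loop above (hypothetical sizes): with a 4K sectorsize,
 * a 48K data extent is split into twelve 4K chunks; each chunk looks up its
 * checksum via scrub_find_csum() and is queued with scrub_pages(), while
 * logical, physical and physical_for_dev_replace all advance by 4K. A
 * metadata extent uses nodesize as the chunk size instead. Data chunks
 * without a checksum bump stat.no_csum and, in the dev replace case, are
 * copied with copy_nocow_pages() instead of being scrubbed.
 */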
2774
2775 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2776                                   u64 logical, u64 len,
2777                                   u64 physical, struct btrfs_device *dev,
2778                                   u64 flags, u64 gen, int mirror_num, u8 *csum)
2779 {
2780         struct scrub_ctx *sctx = sparity->sctx;
2781         struct scrub_block *sblock;
2782         int index;
2783
2784         sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2785         if (!sblock) {
2786                 spin_lock(&sctx->stat_lock);
2787                 sctx->stat.malloc_errors++;
2788                 spin_unlock(&sctx->stat_lock);
2789                 return -ENOMEM;
2790         }
2791
2792         /* one ref inside this function, plus one for each page added to
2793          * a bio later on */
2794         refcount_set(&sblock->refs, 1);
2795         sblock->sctx = sctx;
2796         sblock->no_io_error_seen = 1;
2797         sblock->sparity = sparity;
2798         scrub_parity_get(sparity);
2799
2800         for (index = 0; len > 0; index++) {
2801                 struct scrub_page *spage;
2802                 u64 l = min_t(u64, len, PAGE_SIZE);
2803
2804                 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2805                 if (!spage) {
2806 leave_nomem:
2807                         spin_lock(&sctx->stat_lock);
2808                         sctx->stat.malloc_errors++;
2809                         spin_unlock(&sctx->stat_lock);
2810                         scrub_block_put(sblock);
2811                         return -ENOMEM;
2812                 }
2813                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2814                 /* For scrub block */
2815                 scrub_page_get(spage);
2816                 sblock->pagev[index] = spage;
2817                 /* For scrub parity */
2818                 scrub_page_get(spage);
2819                 list_add_tail(&spage->list, &sparity->spages);
2820                 spage->sblock = sblock;
2821                 spage->dev = dev;
2822                 spage->flags = flags;
2823                 spage->generation = gen;
2824                 spage->logical = logical;
2825                 spage->physical = physical;
2826                 spage->mirror_num = mirror_num;
2827                 if (csum) {
2828                         spage->have_csum = 1;
2829                         memcpy(spage->csum, csum, sctx->csum_size);
2830                 } else {
2831                         spage->have_csum = 0;
2832                 }
2833                 sblock->page_count++;
2834                 spage->page = alloc_page(GFP_KERNEL);
2835                 if (!spage->page)
2836                         goto leave_nomem;
2837                 len -= l;
2838                 logical += l;
2839                 physical += l;
2840         }
2841
2842         WARN_ON(sblock->page_count == 0);
2843         for (index = 0; index < sblock->page_count; index++) {
2844                 struct scrub_page *spage = sblock->pagev[index];
2845                 int ret;
2846
2847                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2848                 if (ret) {
2849                         scrub_block_put(sblock);
2850                         return ret;
2851                 }
2852         }
2853
2854         /* last one frees, either here or in bio completion for last page */
2855         scrub_block_put(sblock);
2856         return 0;
2857 }
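/*
 * Reference counting in the function above: the sblock starts with one ref
 * that is dropped at the end (or by the completion of the last read bio
 * holding its pages), and every scrub_page takes two refs, one for
 * sblock->pagev[] and one for the sparity->spages list, matching the two
 * scrub_page_get() calls per page.
 */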
2858
2859 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2860                                    u64 logical, u64 len,
2861                                    u64 physical, struct btrfs_device *dev,
2862                                    u64 flags, u64 gen, int mirror_num)
2863 {
2864         struct scrub_ctx *sctx = sparity->sctx;
2865         int ret;
2866         u8 csum[BTRFS_CSUM_SIZE];
2867         u32 blocksize;
2868
2869         if (dev->missing) {
2870                 scrub_parity_mark_sectors_error(sparity, logical, len);
2871                 return 0;
2872         }
2873
2874         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2875                 blocksize = sctx->fs_info->sectorsize;
2876         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2877                 blocksize = sctx->fs_info->nodesize;
2878         } else {
2879                 blocksize = sctx->fs_info->sectorsize;
2880                 WARN_ON(1);
2881         }
2882
2883         while (len) {
2884                 u64 l = min_t(u64, len, blocksize);
2885                 int have_csum = 0;
2886
2887                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2888                         /* push csums to sbio */
2889                         have_csum = scrub_find_csum(sctx, logical, csum);
2890                         if (have_csum == 0)
2891                                 goto skip;
2892                 }
2893                 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2894                                              flags, gen, mirror_num,
2895                                              have_csum ? csum : NULL);
2896                 if (ret)
2897                         return ret;
2898 skip:
2899                 len -= l;
2900                 logical += l;
2901                 physical += l;
2902         }
2903         return 0;
2904 }
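/*
 * Note on the skip path above: data sectors without a checksum cannot be
 * verified by the parity scrub and are simply skipped, while a missing
 * device marks the whole requested range as error sectors, which are
 * excluded from the later parity check and accounted as read/uncorrectable
 * errors.
 */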
2905
2906 /*
2907  * Given a physical address, this calculates its logical offset.
2908  * If the address falls on a parity stripe, it returns the
2909  * left-most data stripe's logical offset.
2910  *
2911  * Returns 0 for a data stripe, 1 for a parity stripe.
2912  */
2913 static int get_raid56_logic_offset(u64 physical, int num,
2914                                    struct map_lookup *map, u64 *offset,
2915                                    u64 *stripe_start)
2916 {
2917         int i;
2918         int j = 0;
2919         u64 stripe_nr;
2920         u64 last_offset;
2921         u32 stripe_index;
2922         u32 rot;
2923
2924         last_offset = (physical - map->stripes[num].physical) *
2925                       nr_data_stripes(map);
2926         if (stripe_start)
2927                 *stripe_start = last_offset;
2928
2929         *offset = last_offset;
2930         for (i = 0; i < nr_data_stripes(map); i++) {
2931                 *offset = last_offset + i * map->stripe_len;
2932
2933                 stripe_nr = div64_u64(*offset, map->stripe_len);
2934                 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
2935
2936                 /* Work out the disk rotation on this stripe-set */
2937                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2938                 /* calculate which stripe this data is located on */
2939                 rot += i;
2940                 stripe_index = rot % map->num_stripes;
2941                 if (stripe_index == num)
2942                         return 0;
2943                 if (stripe_index < num)
2944                         j++;
2945         }
2946         *offset = last_offset + j * map->stripe_len;
2947         return 1;
2948 }
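/*
 * Worked example (hypothetical layout): RAID5 with num_stripes = 3,
 * nr_data_stripes = 2, stripe_len = 64K, scrubbing stripe num = 2 at a
 * physical address 64K into its device extent, so
 * last_offset = 64K * 2 = 128K. For i = 0: *offset = 128K,
 * stripe_nr = 128K / 64K / 2 = 1, rot = (1 % 3) + 0 = 1, and
 * stripe_index = 1 != num. For i = 1: *offset = 192K,
 * stripe_nr = 192K / 64K / 2 = 1, rot = (1 % 3) + 1 = 2, and
 * stripe_index = 2 == num, so the function returns 0 (data stripe) with
 * *offset = 192K and *stripe_start = 128K. If no i matches, the address
 * sits on a parity stripe and 1 is returned with
 * *offset = last_offset + j * stripe_len.
 */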
2949
2950 static void scrub_free_parity(struct scrub_parity *sparity)
2951 {
2952         struct scrub_ctx *sctx = sparity->sctx;
2953         struct scrub_page *curr, *next;
2954         int nbits;
2955
2956         nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2957         if (nbits) {
2958                 spin_lock(&sctx->stat_lock);
2959                 sctx->stat.read_errors += nbits;
2960                 sctx->stat.uncorrectable_errors += nbits;
2961                 spin_unlock(&sctx->stat_lock);
2962         }
2963
2964         list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2965                 list_del_init(&curr->list);
2966                 scrub_page_put(curr);
2967         }
2968
2969         kfree(sparity);
2970 }
2971
2972 static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2973 {
2974         struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2975                                                     work);
2976         struct scrub_ctx *sctx = sparity->sctx;
2977
2978         scrub_free_parity(sparity);
2979         scrub_pending_bio_dec(sctx);
2980 }
2981
2982 static void scrub_parity_bio_endio(struct bio *bio)
2983 {
2984         struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2985         struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2986
2987         if (bio->bi_error)
2988                 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2989                           sparity->nsectors);
2990
2991         bio_put(bio);
2992
2993         btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2994                         scrub_parity_bio_endio_worker, NULL, NULL);
2995         btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
2996 }
2997
2998 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2999 {
3000         struct scrub_ctx *sctx = sparity->sctx;
3001         struct btrfs_fs_info *fs_info = sctx->fs_info;
3002         struct bio *bio;
3003         struct btrfs_raid_bio *rbio;
3004         struct btrfs_bio *bbio = NULL;
3005         u64 length;
3006         int ret;
3007
3008         if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
3009                            sparity->nsectors))
3010                 goto out;
3011
3012         length = sparity->logic_end - sparity->logic_start;
3013
3014         btrfs_bio_counter_inc_blocked(fs_info);
3015         ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
3016                                &length, &bbio);
3017         if (ret || !bbio || !bbio->raid_map)
3018                 goto bbio_out;
3019
3020         bio = btrfs_io_bio_alloc(0);
3021         bio->bi_iter.bi_sector = sparity->logic_start >> 9;
3022         bio->bi_private = sparity;
3023         bio->bi_end_io = scrub_parity_bio_endio;
3024
3025         rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
3026                                               length, sparity->scrub_dev,
3027                                               sparity->dbitmap,
3028                                               sparity->nsectors);
3029         if (!rbio)
3030                 goto rbio_out;
3031
3032         scrub_pending_bio_inc(sctx);
3033         raid56_parity_submit_scrub_rbio(rbio);
3034         return;
3035
3036 rbio_out:
3037         bio_put(bio);
3038 bbio_out:
3039         btrfs_bio_counter_dec(fs_info);
3040         btrfs_put_bbio(bbio);
3041         bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
3042                   sparity->nsectors);
3043         spin_lock(&sctx->stat_lock);
3044         sctx->stat.malloc_errors++;
3045         spin_unlock(&sctx->stat_lock);
3046 out:
3047         scrub_free_parity(sparity);
3048 }
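/*
 * Summary of the error handling above: sectors already known bad (ebitmap)
 * are removed from dbitmap before the parity check is submitted; if nothing
 * remains, or if mapping the stripe or building the rbio fails, the dbitmap
 * sectors are folded back into ebitmap and later accounted as
 * read/uncorrectable errors by scrub_free_parity().
 */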
3049
3050 static inline int scrub_calc_parity_bitmap_len(int nsectors)
3051 {
3052         return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
3053 }
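/*
 * Example (hypothetical geometry): with stripe_len = 64K and a 4K
 * sectorsize, nsectors = 16; on a 64-bit machine DIV_ROUND_UP(16, 64) = 1,
 * so each bitmap needs a single unsigned long (8 bytes) and
 * scrub_raid56_parity() allocates 2 * bitmap_len trailing bytes to hold
 * dbitmap and ebitmap together.
 */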
3054
3055 static void scrub_parity_get(struct scrub_parity *sparity)
3056 {
3057         refcount_inc(&sparity->refs);
3058 }
3059
3060 static void scrub_parity_put(struct scrub_parity *sparity)
3061 {
3062         if (!refcount_dec_and_test(&sparity->refs))
3063                 return;
3064
3065         scrub_parity_check_and_repair(sparity);
3066 }
3067
3068 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
3069                                                   struct map_lookup *map,
3070                                                   struct btrfs_device *sdev,
3071                                                   struct btrfs_path *path,
3072                                                   u64 logic_start,
3073                                                   u64 logic_end)
3074 {
3075         struct btrfs_fs_info *fs_info = sctx->fs_info;
3076         struct btrfs_root *root = fs_info->extent_root;
3077         struct btrfs_root *csum_root = fs_info->csum_root;
3078         struct btrfs_extent_item *extent;
3079         struct btrfs_bio *bbio = NULL;
3080         u64 flags;
3081         int ret;
3082         int slot;
3083         struct extent_buffer *l;
3084         struct btrfs_key key;
3085         u64 generation;
3086         u64 extent_logical;
3087         u64 extent_physical;
3088         u64 extent_len;
3089         u64 mapped_length;
3090         struct btrfs_device *extent_dev;
3091         struct scrub_parity *sparity;
3092         int nsectors;
3093         int bitmap_len;
3094         int extent_mirror_num;
3095         int stop_loop = 0;
3096
3097         nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
3098         bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
3099         sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
3100                           GFP_NOFS);
3101         if (!sparity) {
3102                 spin_lock(&sctx->stat_lock);
3103                 sctx->stat.malloc_errors++;
3104                 spin_unlock(&sctx->stat_lock);
3105                 return -ENOMEM;
3106         }
3107
3108         sparity->stripe_len = map->stripe_len;
3109         sparity->nsectors = nsectors;
3110         sparity->sctx = sctx;
3111         sparity->scrub_dev = sdev;
3112         sparity->logic_start = logic_start;
3113         sparity->logic_end = logic_end;
3114         refcount_set(&sparity->refs, 1);
3115         INIT_LIST_HEAD(&sparity->spages);
3116         sparity->dbitmap = sparity->bitmap;
3117         sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
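        /*
         * dbitmap and ebitmap share the single trailing allocation made
         * above: the first bitmap_len bytes track data sectors to check,
         * the second bitmap_len bytes track sectors with errors.
         */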
3118
3119         ret = 0;
3120         while (logic_start < logic_end) {
3121                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3122                         key.type = BTRFS_METADATA_ITEM_KEY;
3123                 else
3124                         key.type = BTRFS_EXTENT_ITEM_KEY;
3125                 key.objectid = logic_start;
3126                 key.offset = (u64)-1;
3127
3128                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3129                 if (ret < 0)
3130                         goto out;
3131
3132                 if (ret > 0) {
3133                         ret = btrfs_previous_extent_item(root, path, 0);
3134                         if (ret < 0)
3135                                 goto out;
3136                         if (ret > 0) {
3137                                 btrfs_release_path(path);
3138                                 ret = btrfs_search_slot(NULL, root, &key,
3139                                                         path, 0, 0);
3140                                 if (ret < 0)
3141                                         goto out;
3142                         }
3143                 }
3144
3145                 stop_loop = 0;
3146                 while (1) {
3147                         u64 bytes;
3148
3149                         l = path->nodes[0];
3150                         slot = path->slots[0];
3151                         if (slot >= btrfs_header_nritems(l)) {
3152                                 ret = btrfs_next_leaf(root, path);
3153                                 if (ret == 0)
3154                                         continue;
3155                                 if (ret < 0)
3156                                         goto out;
3157
3158                                 stop_loop = 1;
3159                                 break;
3160                         }
3161                         btrfs_item_key_to_cpu(l, &key, slot);
3162
3163                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3164                             key.type != BTRFS_METADATA_ITEM_KEY)
3165                                 goto next;
3166
3167                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3168                                 bytes = fs_info->nodesize;
3169                         else
3170                                 bytes = key.offset;
3171
3172                         if (key.objectid + bytes <= logic_start)
3173                                 goto next;
3174
3175                         if (key.objectid >= logic_end) {
3176                                 stop_loop = 1;
3177                                 break;
3178                         }
3179
3180                         while (key.objectid >= logic_start + map->stripe_len)
3181                                 logic_start += map->stripe_len;
3182
3183                         extent = btrfs_item_ptr(l, slot,
3184                                                 struct btrfs_extent_item);
3185                         flags = btrfs_extent_flags(l, extent);
3186                         generation = btrfs_extent_generation(l, extent);
3187
3188                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3189                             (key.objectid < logic_start ||
3190                              key.objectid + bytes >
3191                              logic_start + map->stripe_len)) {
3192                                 btrfs_err(fs_info,
3193                                           "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3194                                           key.objectid, logic_start);
3195                                 spin_lock(&sctx->stat_lock);
3196                                 sctx->stat.uncorrectable_errors++;
3197                                 spin_unlock(&sctx->stat_lock);
3198                                 goto next;
3199                         }
3200 again:
3201                         extent_logical = key.objectid;
3202                         extent_len = bytes;
3203
3204                         if (extent_logical < logic_start) {
3205                                 extent_len -= logic_start - extent_logical;
3206                                 extent_logical = logic_start;
3207                         }
3208
3209                         if (extent_logical + extent_len >
3210                             logic_start + map->stripe_len)
3211                                 extent_len = logic_start + map->stripe_len -
3212                                              extent_logical;
3213
3214                         scrub_parity_mark_sectors_data(sparity, extent_logical,
3215                                                        extent_len);
3216
3217                         mapped_length = extent_len;
3218                         bbio = NULL;
3219                         ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
3220                                         extent_logical, &mapped_length, &bbio,
3221                                         0);
3222                         if (!ret) {
3223                                 if (!bbio || mapped_length < extent_len)
3224                                         ret = -EIO;
3225                         }
3226                         if (ret) {
3227                                 btrfs_put_bbio(bbio);
3228                                 goto out;
3229                         }
3230                         extent_physical = bbio->stripes[0].physical;
3231                         extent_mirror_num = bbio->mirror_num;
3232                         extent_dev = bbio->stripes[0].dev;
3233                         btrfs_put_bbio(bbio);
3234
3235                         ret = btrfs_lookup_csums_range(csum_root,
3236                                                 extent_logical,
3237                                                 extent_logical + extent_len - 1,
3238                                                 &sctx->csum_list, 1);
3239                         if (ret)
3240                                 goto out;
3241
3242                         ret = scrub_extent_for_parity(sparity, extent_logical,
3243                                                       extent_len,
3244                                                       extent_physical,
3245                                                       extent_dev, flags,
3246                                                       generation,
3247                                                       extent_mirror_num);
3248
3249                         scrub_free_csums(sctx);
3250
3251                         if (ret)
3252                                 goto out;
3253
3254                         if (extent_logical + extent_len <
3255                             key.objectid + bytes) {
3256                                 logic_start += map->stripe_len;
3257
3258                                 if (logic_start >= logic_end) {
3259                                         stop_loop = 1;
3260                                         break;
3261                                 }
3262
3263                                 if (logic_start < key.objectid + bytes) {
3264                                         cond_resched();
3265                                         goto again;
3266                                 }
3267                         }
3268 next:
3269                         path->slots[0]++;
3270                 }
3271
3272                 btrfs_release_path(path);
3273
3274                 if (stop_loop)
3275                         break;
3276
3277                 logic_start += map->stripe_len;
3278         }
3279 out:
3280         if (ret < 0)
3281                 scrub_parity_mark_sectors_error(sparity, logic_start,
3282                                                 logic_end - logic_start);
3283         scrub_parity_put(sparity);
3284         scrub_submit(sctx);
3285         mutex_lock(&sctx->wr_lock);
3286         scrub_wr_submit(sctx);
3287         mutex_unlock(&sctx->wr_lock);
3288
3289         btrfs_release_path(path);
3290         return ret < 0 ? ret : 0;
3291 }
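/*
 * Note on the exit path above: if the extent walk fails, the not yet
 * processed part of [logic_start, logic_end) is marked as error sectors.
 * The scrub_parity_put() drops the initial reference; once the per-page
 * references taken in scrub_pages_for_parity() are gone as well,
 * scrub_parity_check_and_repair() runs for this stripe.
 */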
3292
3293 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3294                                            struct map_lookup *map,
3295                                            struct btrfs_device *scrub_dev,
3296                                            int num, u64 base, u64 length,
3297                                            int is_dev_replace)
3298 {
3299         struct btrfs_path *path, *ppath;
3300         struct btrfs_fs_info *fs_info = sctx->fs_info;
3301         struct btrfs_root *root = fs_info->extent_root;
3302         struct btrfs_root *csum_root = fs_info->csum_root;
3303         struct btrfs_extent_item *extent;
3304         struct blk_plug plug;
3305         u64 flags;
3306         int ret;
3307         int slot;
3308         u64 nstripes;
3309         struct extent_buffer *l;
3310         u64 physical;
3311         u64 logical;
3312         u64 logic_end;
3313         u64 physical_end;
3314         u64 generation;
3315         int mirror_num;
3316         struct reada_control *reada1;
3317         struct reada_control *reada2;
3318         struct btrfs_key key;
3319         struct btrfs_key key_end;
3320         u64 increment = map->stripe_len;
3321         u64 offset;
3322         u64 extent_logical;
3323         u64 extent_physical;
3324         u64 extent_len;
3325         u64 stripe_logical;
3326         u64 stripe_end;
3327         struct btrfs_device *extent_dev;
3328         int extent_mirror_num;
3329         int stop_loop = 0;
3330
3331         physical = map->stripes[num].physical;
3332         offset = 0;
3333         nstripes = div64_u64(length, map->stripe_len);
3334         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3335                 offset = map->stripe_len * num;
3336                 increment = map->stripe_len * map->num_stripes;
3337                 mirror_num = 1;
3338         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3339                 int factor = map->num_stripes / map->sub_stripes;
3340                 offset = map->stripe_len * (num / map->sub_stripes);
3341                 increment = map->stripe_len * factor;
3342                 mirror_num = num % map->sub_stripes + 1;
3343         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3344                 increment = map->stripe_len;
3345                 mirror_num = num % map->num_stripes + 1;
3346         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3347                 increment = map->stripe_len;
3348                 mirror_num = num % map->num_stripes + 1;
3349         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3350                 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3351                 increment = map->stripe_len * nr_data_stripes(map);
3352                 mirror_num = 1;
3353         } else {
3354                 increment = map->stripe_len;
3355                 mirror_num = 1;
3356         }
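        /*
         * Examples for the profile setup above (hypothetical layouts):
         * RAID0 with num_stripes = 4 and num = 3 gives
         * offset = 3 * stripe_len, increment = 4 * stripe_len and
         * mirror_num = 1; RAID10 with num_stripes = 4, sub_stripes = 2 and
         * num = 3 gives factor = 2, offset = stripe_len,
         * increment = 2 * stripe_len and mirror_num = 2.
         */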
3357
3358         path = btrfs_alloc_path();
3359         if (!path)
3360                 return -ENOMEM;
3361
3362         ppath = btrfs_alloc_path();
3363         if (!ppath) {
3364                 btrfs_free_path(path);
3365                 return -ENOMEM;
3366         }
3367
3368         /*
3369          * work on the commit root. The related disk blocks are static as
3370          * long as COW is applied. This means it is safe to rewrite
3371          * them to repair disk errors without any race conditions.
3372          */
3373         path->search_commit_root = 1;
3374         path->skip_locking = 1;
3375
3376         ppath->search_commit_root = 1;
3377         ppath->skip_locking = 1;
3378         /*
3379          * trigger the readahead for the extent tree and the csum tree and
3380          * wait for completion. During readahead, the scrub is officially
3381          * paused so as not to hold off transaction commits.
3382          */
3383         logical = base + offset;
3384         physical_end = physical + nstripes * map->stripe_len;
3385         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3386                 get_raid56_logic_offset(physical_end, num,
3387                                         map, &logic_end, NULL);
3388                 logic_end += base;
3389         } else {
3390                 logic_end = logical + increment * nstripes;
3391         }
3392         wait_event(sctx->list_wait,
3393                    atomic_read(&sctx->bios_in_flight) == 0);
3394         scrub_blocked_if_needed(fs_info);
3395
3396         /* FIXME it might be better to start readahead at commit root */
3397         key.objectid = logical;
3398         key.type = BTRFS_EXTENT_ITEM_KEY;
3399         key.offset = (u64)0;
3400         key_end.objectid = logic_end;
3401         key_end.type = BTRFS_METADATA_ITEM_KEY;
3402         key_end.offset = (u64)-1;
3403         reada1 = btrfs_reada_add(root, &key, &key_end);
3404
3405         key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3406         key.type = BTRFS_EXTENT_CSUM_KEY;
3407         key.offset = logical;
3408         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3409         key_end.type = BTRFS_EXTENT_CSUM_KEY;
3410         key_end.offset = logic_end;
3411         reada2 = btrfs_reada_add(csum_root, &key, &key_end);
3412
3413         if (!IS_ERR(reada1))
3414                 btrfs_reada_wait(reada1);
3415         if (!IS_ERR(reada2))
3416                 btrfs_reada_wait(reada2);
3417
3418
3419         /*
3420          * collect all data csums for the stripe to avoid seeking during
3421          * the scrub. With crc32 this can currently end up being about 1MB.
3422          */
3423         blk_start_plug(&plug);
3424
3425         /*
3426          * now find all extents for each stripe and scrub them
3427          */
3428         ret = 0;
3429         while (physical < physical_end) {
3430                 /*
3431                  * canceled?
3432                  */
3433                 if (atomic_read(&fs_info->scrub_cancel_req) ||
3434                     atomic_read(&sctx->cancel_req)) {
3435                         ret = -ECANCELED;
3436                         goto out;
3437                 }
3438                 /*
3439                  * check to see if we have to pause
3440                  */
3441                 if (atomic_read(&fs_info->scrub_pause_req)) {
3442                         /* push queued extents */
3443                         atomic_set(&sctx->flush_all_writes, 1);
3444                         scrub_submit(sctx);
3445                         mutex_lock(&sctx->wr_lock);
3446                         scrub_wr_submit(sctx);
3447                         mutex_unlock(&sctx->wr_lock);
3448                         wait_event(sctx->list_wait,
3449                                    atomic_read(&sctx->bios_in_flight) == 0);
3450                         atomic_set(&sctx->flush_all_writes, 0);
3451                         scrub_blocked_if_needed(fs_info);
3452                 }
3453
3454                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3455                         ret = get_raid56_logic_offset(physical, num, map,
3456                                                       &logical,
3457                                                       &stripe_logical);
3458                         logical += base;
3459                         if (ret) {
3460                                 /* it is a parity stripe */
3461                                 stripe_logical += base;
3462                                 stripe_end = stripe_logical + increment;
3463                                 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3464                                                           ppath, stripe_logical,
3465                                                           stripe_end);
3466                                 if (ret)
3467                                         goto out;
3468                                 goto skip;
3469                         }
3470                 }
3471
3472                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3473                         key.type = BTRFS_METADATA_ITEM_KEY;
3474                 else
3475                         key.type = BTRFS_EXTENT_ITEM_KEY;
3476                 key.objectid = logical;
3477                 key.offset = (u64)-1;
3478
3479                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3480                 if (ret < 0)
3481                         goto out;
3482
3483                 if (ret > 0) {
3484                         ret = btrfs_previous_extent_item(root, path, 0);
3485                         if (ret < 0)
3486                                 goto out;
3487                         if (ret > 0) {
3488                                 /* there's no smaller item, so stick with the
3489                                  * larger one */
3490                                 btrfs_release_path(path);
3491                                 ret = btrfs_search_slot(NULL, root, &key,
3492                                                         path, 0, 0);
3493                                 if (ret < 0)
3494                                         goto out;
3495                         }
3496                 }
3497
3498                 stop_loop = 0;
3499                 while (1) {
3500                         u64 bytes;
3501
3502                         l = path->nodes[0];
3503                         slot = path->slots[0];
3504                         if (slot >= btrfs_header_nritems(l)) {
3505                                 ret = btrfs_next_leaf(root, path);
3506                                 if (ret == 0)
3507                                         continue;
3508                                 if (ret < 0)
3509                                         goto out;
3510
3511                                 stop_loop = 1;
3512                                 break;
3513                         }
3514                         btrfs_item_key_to_cpu(l, &key, slot);
3515
3516                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3517                             key.type != BTRFS_METADATA_ITEM_KEY)
3518                                 goto next;
3519
3520                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3521                                 bytes = fs_info->nodesize;
3522                         else
3523                                 bytes = key.offset;
3524
3525                         if (key.objectid + bytes <= logical)
3526                                 goto next;
3527
3528                         if (key.objectid >= logical + map->stripe_len) {
3529                                 /* out of this device extent */
3530                                 if (key.objectid >= logic_end)
3531                                         stop_loop = 1;
3532                                 break;
3533                         }
3534
3535                         extent = btrfs_item_ptr(l, slot,
3536                                                 struct btrfs_extent_item);
3537                         flags = btrfs_extent_flags(l, extent);
3538                         generation = btrfs_extent_generation(l, extent);
3539
3540                         if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3541                             (key.objectid < logical ||
3542                              key.objectid + bytes >
3543                              logical + map->stripe_len)) {
3544                                 btrfs_err(fs_info,
3545                                            "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3546                                        key.objectid, logical);
3547                                 spin_lock(&sctx->stat_lock);
3548                                 sctx->stat.uncorrectable_errors++;
3549                                 spin_unlock(&sctx->stat_lock);
3550                                 goto next;
3551                         }
3552
3553 again:
3554                         extent_logical = key.objectid;
3555                         extent_len = bytes;
3556
3557                         /*
3558                          * trim extent to this stripe
3559                          */
3560                         if (extent_logical < logical) {
3561                                 extent_len -= logical - extent_logical;
3562                                 extent_logical = logical;
3563                         }
3564                         if (extent_logical + extent_len >
3565                             logical + map->stripe_len) {
3566                                 extent_len = logical + map->stripe_len -
3567                                              extent_logical;
3568                         }
3569
3570                         extent_physical = extent_logical - logical + physical;
3571                         extent_dev = scrub_dev;
3572                         extent_mirror_num = mirror_num;
3573                         if (is_dev_replace)
3574                                 scrub_remap_extent(fs_info, extent_logical,
3575                                                    extent_len, &extent_physical,
3576                                                    &extent_dev,
3577                                                    &extent_mirror_num);
3578
3579                         ret = btrfs_lookup_csums_range(csum_root,
3580                                                        extent_logical,
3581                                                        extent_logical +
3582                                                        extent_len - 1,
3583                                                        &sctx->csum_list, 1);
3584                         if (ret)
3585                                 goto out;
3586
3587                         ret = scrub_extent(sctx, extent_logical, extent_len,
3588                                            extent_physical, extent_dev, flags,
3589                                            generation, extent_mirror_num,
3590                                            extent_logical - logical + physical);
3591
3592                         scrub_free_csums(sctx);
3593
3594                         if (ret)
3595                                 goto out;
3596
3597                         if (extent_logical + extent_len <
3598                             key.objectid + bytes) {
3599                                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3600                                         /*
3601                                          * loop until we find next data stripe
3602                                          * or we have finished all stripes.
3603                                          */
3604 loop:
3605                                         physical += map->stripe_len;
3606                                         ret = get_raid56_logic_offset(physical,
3607                                                         num, map, &logical,
3608                                                         &stripe_logical);
3609                                         logical += base;
3610
3611                                         if (ret && physical < physical_end) {
3612                                                 stripe_logical += base;
3613                                                 stripe_end = stripe_logical +
3614                                                                 increment;
3615                                                 ret = scrub_raid56_parity(sctx,
3616                                                         map, scrub_dev, ppath,
3617                                                         stripe_logical,
3618                                                         stripe_end);
3619                                                 if (ret)
3620                                                         goto out;
3621                                                 goto loop;
3622                                         }
3623                                 } else {
3624                                         physical += map->stripe_len;
3625                                         logical += increment;
3626                                 }
3627                                 if (logical < key.objectid + bytes) {
3628                                         cond_resched();
3629                                         goto again;
3630                                 }
3631
3632                                 if (physical >= physical_end) {
3633                                         stop_loop = 1;
3634                                         break;
3635                                 }
3636                         }
3637 next:
3638                         path->slots[0]++;
3639                 }
3640                 btrfs_release_path(path);
3641 skip:
3642                 logical += increment;
3643                 physical += map->stripe_len;
3644                 spin_lock(&sctx->stat_lock);
3645                 if (stop_loop)
3646                         sctx->stat.last_physical = map->stripes[num].physical +
3647                                                    length;
3648                 else
3649                         sctx->stat.last_physical = physical;
3650                 spin_unlock(&sctx->stat_lock);
3651                 if (stop_loop)
3652                         break;
3653         }
3654 out:
3655         /* push queued extents */
3656         scrub_submit(sctx);
3657         mutex_lock(&sctx->wr_lock);
3658         scrub_wr_submit(sctx);
3659         mutex_unlock(&sctx->wr_lock);
3660
3661         blk_finish_plug(&plug);
3662         btrfs_free_path(path);
3663         btrfs_free_path(ppath);
3664         return ret < 0 ? ret : 0;
3665 }
3666
3667 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3668                                           struct btrfs_device *scrub_dev,
3669                                           u64 chunk_offset, u64 length,
3670                                           u64 dev_offset,
3671                                           struct btrfs_block_group_cache *cache,
3672                                           int is_dev_replace)
3673 {
3674         struct btrfs_fs_info *fs_info = sctx->fs_info;
3675         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
3676         struct map_lookup *map;
3677         struct extent_map *em;
3678         int i;
3679         int ret = 0;
3680
3681         read_lock(&map_tree->map_tree.lock);
3682         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3683         read_unlock(&map_tree->map_tree.lock);
3684
3685         if (!em) {
3686                 /*
3687                  * Might have been an unused block group deleted by the cleaner
3688                  * kthread or relocation.
3689                  */
3690                 spin_lock(&cache->lock);
3691                 if (!cache->removed)
3692                         ret = -EINVAL;
3693                 spin_unlock(&cache->lock);
3694
3695                 return ret;
3696         }
3697
3698         map = em->map_lookup;
3699         if (em->start != chunk_offset)
3700                 goto out;
3701
3702         if (em->len < length)
3703                 goto out;
3704
3705         for (i = 0; i < map->num_stripes; ++i) {
3706                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3707                     map->stripes[i].physical == dev_offset) {
3708                         ret = scrub_stripe(sctx, map, scrub_dev, i,
3709                                            chunk_offset, length,
3710                                            is_dev_replace);
3711                         if (ret)
3712                                 goto out;
3713                 }
3714         }
3715 out:
3716         free_extent_map(em);
3717
3718         return ret;
3719 }
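/*
 * Note on the stripe match above: a chunk may place more than one stripe on
 * the same device (DUP for example); each such stripe has its own dev
 * extent, so scrub_chunk() is called once per dev extent and the
 * physical == dev_offset check picks exactly the matching stripe.
 */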
3720
3721 static noinline_for_stack
3722 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3723                            struct btrfs_device *scrub_dev, u64 start, u64 end,
3724                            int is_dev_replace)
3725 {
3726         struct btrfs_dev_extent *dev_extent = NULL;
3727         struct btrfs_path *path;
3728         struct btrfs_fs_info *fs_info = sctx->fs_info;
3729         struct btrfs_root *root = fs_info->dev_root;
3730         u64 length;
3731         u64 chunk_offset;
3732         int ret = 0;
3733         int ro_set;
3734         int slot;
3735         struct extent_buffer *l;
3736         struct btrfs_key key;
3737         struct btrfs_key found_key;
3738         struct btrfs_block_group_cache *cache;
3739         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3740
3741         path = btrfs_alloc_path();
3742         if (!path)
3743                 return -ENOMEM;
3744
3745         path->reada = READA_FORWARD;
3746         path->search_commit_root = 1;
3747         path->skip_locking = 1;
3748
3749         key.objectid = scrub_dev->devid;
3750         key.offset = 0ull;
3751         key.type = BTRFS_DEV_EXTENT_KEY;
3752
3753         while (1) {
3754                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3755                 if (ret < 0)
3756                         break;
3757                 if (ret > 0) {
3758                         if (path->slots[0] >=
3759                             btrfs_header_nritems(path->nodes[0])) {
3760                                 ret = btrfs_next_leaf(root, path);
3761                                 if (ret < 0)
3762                                         break;
3763                                 if (ret > 0) {
3764                                         ret = 0;
3765                                         break;
3766                                 }
3767                         } else {
3768                                 ret = 0;
3769                         }
3770                 }
3771
3772                 l = path->nodes[0];
3773                 slot = path->slots[0];
3774
3775                 btrfs_item_key_to_cpu(l, &found_key, slot);
3776
3777                 if (found_key.objectid != scrub_dev->devid)
3778                         break;
3779
3780                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3781                         break;
3782
3783                 if (found_key.offset >= end)
3784                         break;
3785
3786                 if (found_key.offset < key.offset)
3787                         break;
3788
3789                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3790                 length = btrfs_dev_extent_length(l, dev_extent);
3791
3792                 if (found_key.offset + length <= start)
3793                         goto skip;
3794
3795                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3796
3797                 /*
3798                  * get a reference on the corresponding block group to prevent
3799                  * the chunk from going away while we scrub it
3800                  */
3801                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3802
3803                 /* some chunks are removed but not committed to disk yet,
3804                  * continue scrubbing */
3805                 if (!cache)
3806                         goto skip;
3807
3808                 /*
3809                  * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
3810                  * to avoid deadlock caused by:
3811                  * btrfs_inc_block_group_ro()
3812                  * -> btrfs_wait_for_commit()
3813                  * -> btrfs_commit_transaction()
3814                  * -> btrfs_scrub_pause()
3815                  */
3816                 scrub_pause_on(fs_info);
3817                 ret = btrfs_inc_block_group_ro(fs_info, cache);
3818                 if (!ret && is_dev_replace) {
3819                         /*
3820                          * If we are doing a device replace wait for any tasks
3821                          * that started delalloc right before we set the block
3822                          * group to RO mode, as they might have just allocated
3823                          * an extent from it or decided they could do a nocow
3824                          * write. And if any such tasks did that, wait for their
3825                          * ordered extents to complete and then commit the
3826                          * current transaction, so that we can later see the new
3827                          * extent items in the extent tree - the ordered extents
3828                          * create delayed data references (for cow writes) when
3829                          * they complete, which will be run and insert the
3830                          * corresponding extent items into the extent tree when
3831                          * we commit the transaction they used when running
3832                          * inode.c:btrfs_finish_ordered_io(). We later use
3833                          * the commit root of the extent tree to find extents
3834                          * to copy from the srcdev into the tgtdev, and we don't
3835                          * want to miss any new extents.
3836                          */
3837                         btrfs_wait_block_group_reservations(cache);
3838                         btrfs_wait_nocow_writers(cache);
3839                         ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
3840                                                        cache->key.objectid,
3841                                                        cache->key.offset);
3842                         if (ret > 0) {
3843                                 struct btrfs_trans_handle *trans;
3844
3845                                 trans = btrfs_join_transaction(root);
3846                                 if (IS_ERR(trans))
3847                                         ret = PTR_ERR(trans);
3848                                 else
3849                                         ret = btrfs_commit_transaction(trans);
3850                                 if (ret) {
3851                                         scrub_pause_off(fs_info);
3852                                         btrfs_put_block_group(cache);
3853                                         break;
3854                                 }
3855                         }
3856                 }
3857                 scrub_pause_off(fs_info);
3858
3859                 if (ret == 0) {
3860                         ro_set = 1;
3861                 } else if (ret == -ENOSPC) {
3862                         /*
3863                          * btrfs_inc_block_group_ro returns -ENOSPC when it
3864                          * fails to create a new chunk for metadata.
3865                          * That is not a problem for scrub/replace, because
3866                          * metadata is always COWed, and our scrub has paused
3867                          * transaction commits.
3868                          */
3869                         ro_set = 0;
3870                 } else {
3871                         btrfs_warn(fs_info,
3872                                    "failed setting block group ro, ret=%d",
3873                                    ret);
3874                         btrfs_put_block_group(cache);
3875                         break;
3876                 }
3877
3878                 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3879                 dev_replace->cursor_right = found_key.offset + length;
3880                 dev_replace->cursor_left = found_key.offset;
3881                 dev_replace->item_needs_writeback = 1;
3882                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3883                 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3884                                   found_key.offset, cache, is_dev_replace);
3885
3886                 /*
3887                  * flush, submit all pending read and write bios, afterwards
3888                  * wait for them.
3889                  * Note that in the dev replace case, a read request causes
3890                  * write requests that are submitted in the read completion
3891                  * worker. Therefore in the current situation, it is required
3892                  * that all write requests are flushed, so that all read and
3893                  * write requests are really completed when bios_in_flight
3894                  * changes to 0.
3895                  */
3896                 atomic_set(&sctx->flush_all_writes, 1);
3897                 scrub_submit(sctx);
3898                 mutex_lock(&sctx->wr_lock);
3899                 scrub_wr_submit(sctx);
3900                 mutex_unlock(&sctx->wr_lock);
3901
3902                 wait_event(sctx->list_wait,
3903                            atomic_read(&sctx->bios_in_flight) == 0);
3904
3905                 scrub_pause_on(fs_info);
3906
3907                 /*
3908                  * must be called before we decrease @scrub_paused.
3909                  * make sure we don't block transaction commit while
3910                  * we are waiting for pending workers to finish.
3911                  */
3912                 wait_event(sctx->list_wait,
3913                            atomic_read(&sctx->workers_pending) == 0);
3914                 atomic_set(&sctx->flush_all_writes, 0);
3915
3916                 scrub_pause_off(fs_info);
3917
3918                 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3919                 dev_replace->cursor_left = dev_replace->cursor_right;
3920                 dev_replace->item_needs_writeback = 1;
3921                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3922
3923                 if (ro_set)
3924                         btrfs_dec_block_group_ro(cache);
3925
3926                 /*
3927                  * We might have prevented the cleaner kthread from deleting
3928                  * this block group if it was already unused because we raced
3929                  * and set it to RO mode first. So add it back to the unused
3930                  * list, otherwise it might not ever be deleted unless a manual
3931                  * balance is triggered or it becomes used and unused again.
3932                  */
3933                 spin_lock(&cache->lock);
3934                 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3935                     btrfs_block_group_used(&cache->item) == 0) {
3936                         spin_unlock(&cache->lock);
3937                         spin_lock(&fs_info->unused_bgs_lock);
3938                         if (list_empty(&cache->bg_list)) {
3939                                 btrfs_get_block_group(cache);
3940                                 list_add_tail(&cache->bg_list,
3941                                               &fs_info->unused_bgs);
3942                         }
3943                         spin_unlock(&fs_info->unused_bgs_lock);
3944                 } else {
3945                         spin_unlock(&cache->lock);
3946                 }
3947
3948                 btrfs_put_block_group(cache);
3949                 if (ret)
3950                         break;
3951                 if (is_dev_replace &&
3952                     atomic64_read(&dev_replace->num_write_errors) > 0) {
3953                         ret = -EIO;
3954                         break;
3955                 }
3956                 if (sctx->stat.malloc_errors > 0) {
3957                         ret = -ENOMEM;
3958                         break;
3959                 }
3960 skip:
3961                 key.offset = found_key.offset + length;
3962                 btrfs_release_path(path);
3963         }
3964
3965         btrfs_free_path(path);
3966
3967         return ret;
3968 }
3969
3970 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3971                                            struct btrfs_device *scrub_dev)
3972 {
3973         int     i;
3974         u64     bytenr;
3975         u64     gen;
3976         int     ret;
3977         struct btrfs_fs_info *fs_info = sctx->fs_info;
3978
3979         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3980                 return -EIO;
3981
3982         /* Seed devices of a new filesystem have their own generation. */
3983         if (scrub_dev->fs_devices != fs_info->fs_devices)
3984                 gen = scrub_dev->generation;
3985         else
3986                 gen = fs_info->last_trans_committed;
3987
3988         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3989                 bytenr = btrfs_sb_offset(i);
3990                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3991                     scrub_dev->commit_total_bytes)
3992                         break;
3993
3994                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3995                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3996                                   NULL, 1, bytenr);
3997                 if (ret)
3998                         return ret;
3999         }
4000         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4001
4002         return 0;
4003 }
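/*
 * Superblock scrubbing note: the copies returned by btrfs_sb_offset() sit at
 * 64KiB, 64MiB and 256GiB; any copy that would extend past
 * commit_total_bytes is skipped, so small devices scrub fewer than
 * BTRFS_SUPER_MIRROR_MAX copies. Seed devices are verified against their own
 * generation rather than last_trans_committed.
 */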
4004
4005 /*
4006  * get a reference count on fs_info->scrub_workers. Start the workers if necessary.
4007  */
4008 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
4009                                                 int is_dev_replace)
4010 {
4011         unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
4012         int max_active = fs_info->thread_pool_size;
4013
4014         if (fs_info->scrub_workers_refcnt == 0) {
4015                 if (is_dev_replace)
4016                         fs_info->scrub_workers =
4017                                 btrfs_alloc_workqueue(fs_info, "scrub", flags,
4018                                                       1, 4);
4019                 else
4020                         fs_info->scrub_workers =
4021                                 btrfs_alloc_workqueue(fs_info, "scrub", flags,
4022                                                       max_active, 4);
4023                 if (!fs_info->scrub_workers)
4024                         goto fail_scrub_workers;
4025
4026                 fs_info->scrub_wr_completion_workers =
4027                         btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
4028                                               max_active, 2);
4029                 if (!fs_info->scrub_wr_completion_workers)
4030                         goto fail_scrub_wr_completion_workers;
4031
4032                 fs_info->scrub_nocow_workers =
4033                         btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
4034                 if (!fs_info->scrub_nocow_workers)
4035                         goto fail_scrub_nocow_workers;
4036                 fs_info->scrub_parity_workers =
4037                         btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
4038                                               max_active, 2);
4039                 if (!fs_info->scrub_parity_workers)
4040                         goto fail_scrub_parity_workers;
4041         }
4042         ++fs_info->scrub_workers_refcnt;
4043         return 0;
4044
4045 fail_scrub_parity_workers:
4046         btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
4047 fail_scrub_nocow_workers:
4048         btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
4049 fail_scrub_wr_completion_workers:
4050         btrfs_destroy_workqueue(fs_info->scrub_workers);
4051 fail_scrub_workers:
4052         return -ENOMEM;
4053 }
4054
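     /*
      * Drop one reference on the scrub workqueues.  The last put destroys
      * everything that scrub_workers_get() allocated.
      */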
4055 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
4056 {
4057         if (--fs_info->scrub_workers_refcnt == 0) {
4058                 btrfs_destroy_workqueue(fs_info->scrub_workers);
4059                 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
4060                 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
4061                 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
4062         }
4063         WARN_ON(fs_info->scrub_workers_refcnt < 0);
4064 }
4065
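     /*
      * Scrub (or, for dev-replace, copy) the device given by @devid in the
      * range [@start, @end].  The size assumptions scrub relies on are
      * checked first, missing or non-writable devices are rejected as
      * appropriate, then the workqueues and a scrub context are set up,
      * the super blocks are scrubbed (except for dev-replace) and the
      * chunks are walked via scrub_enumerate_chunks().  Final statistics
      * are copied to @progress before the context is torn down.
      */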
4066 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4067                     u64 end, struct btrfs_scrub_progress *progress,
4068                     int readonly, int is_dev_replace)
4069 {
4070         struct scrub_ctx *sctx;
4071         int ret;
4072         struct btrfs_device *dev;
4073         struct rcu_string *name;
4074
4075         if (btrfs_fs_closing(fs_info))
4076                 return -EINVAL;
4077
4078         if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
4079                 /*
4080                  * With the way scrub is implemented, it is unable to
4081                  * calculate the checksum in this case.  Do not handle
4082                  * this situation at all because it won't ever happen.
4083                  */
4084                 btrfs_err(fs_info,
4085                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
4086                        fs_info->nodesize,
4087                        BTRFS_STRIPE_LEN);
4088                 return -EINVAL;
4089         }
4090
4091         if (fs_info->sectorsize != PAGE_SIZE) {
4092                 /* not supported for data w/o checksums */
4093                 btrfs_err_rl(fs_info,
4094                            "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
4095                        fs_info->sectorsize, PAGE_SIZE);
4096                 return -EINVAL;
4097         }
4098
4099         if (fs_info->nodesize >
4100             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
4101             fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
4102                 /*
4103                  * This would exhaust the array bounds of the pagev member
4104                  * in struct scrub_block.
4105                  */
4106                 btrfs_err(fs_info,
4107                           "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
4108                        fs_info->nodesize,
4109                        SCRUB_MAX_PAGES_PER_BLOCK,
4110                        fs_info->sectorsize,
4111                        SCRUB_MAX_PAGES_PER_BLOCK);
4112                 return -EINVAL;
4113         }
4114
4115
4116         mutex_lock(&fs_info->fs_devices->device_list_mutex);
4117         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
4118         if (!dev || (dev->missing && !is_dev_replace)) {
4119                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4120                 return -ENODEV;
4121         }
4122
4123         if (!is_dev_replace && !readonly && !dev->writeable) {
4124                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4125                 rcu_read_lock();
4126                 name = rcu_dereference(dev->name);
4127                 btrfs_err(fs_info, "scrub: device %s is not writable",
4128                           name->str);
4129                 rcu_read_unlock();
4130                 return -EROFS;
4131         }
4132
4133         mutex_lock(&fs_info->scrub_lock);
4134         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
4135                 mutex_unlock(&fs_info->scrub_lock);
4136                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4137                 return -EIO;
4138         }
4139
4140         btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
4141         if (dev->scrub_device ||
4142             (!is_dev_replace &&
4143              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
4144                 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
4145                 mutex_unlock(&fs_info->scrub_lock);
4146                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4147                 return -EINPROGRESS;
4148         }
4149         btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
4150
4151         ret = scrub_workers_get(fs_info, is_dev_replace);
4152         if (ret) {
4153                 mutex_unlock(&fs_info->scrub_lock);
4154                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4155                 return ret;
4156         }
4157
4158         sctx = scrub_setup_ctx(dev, is_dev_replace);
4159         if (IS_ERR(sctx)) {
4160                 mutex_unlock(&fs_info->scrub_lock);
4161                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4162                 scrub_workers_put(fs_info);
4163                 return PTR_ERR(sctx);
4164         }
4165         sctx->readonly = readonly;
4166         dev->scrub_device = sctx;
4167         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4168
4169         /*
4170          * By checking @scrub_pause_req here, we can avoid a
4171          * race between committing a transaction and scrubbing.
4172          */
4173         __scrub_blocked_if_needed(fs_info);
4174         atomic_inc(&fs_info->scrubs_running);
4175         mutex_unlock(&fs_info->scrub_lock);
4176
4177         if (!is_dev_replace) {
4178                 /*
4179                  * Holding the device list mutex keeps us from racing
4180                  * with the super block writes kicked off by a log tree
4181                  * sync.
4182                  */
4182                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4183                 ret = scrub_supers(sctx, dev);
4184                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4185         }
4186
4187         if (!ret)
4188                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
4189                                              is_dev_replace);
4190
4191         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4192         atomic_dec(&fs_info->scrubs_running);
4193         wake_up(&fs_info->scrub_pause_wait);
4194
4195         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
4196
4197         if (progress)
4198                 memcpy(progress, &sctx->stat, sizeof(*progress));
4199
4200         mutex_lock(&fs_info->scrub_lock);
4201         dev->scrub_device = NULL;
4202         scrub_workers_put(fs_info);
4203         mutex_unlock(&fs_info->scrub_lock);
4204
4205         scrub_put_ctx(sctx);
4206
4207         return ret;
4208 }
4209
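     /*
      * Pause all running scrubs and wait until every one of them has
      * actually reached the paused state.  The pause request is dropped
      * again by btrfs_scrub_continue(); a typical caller brackets work
      * that must not race with scrub, roughly:
      *
      *	btrfs_scrub_pause(fs_info);
      *	... do the work ...
      *	btrfs_scrub_continue(fs_info);
      */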
4210 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
4211 {
4212         mutex_lock(&fs_info->scrub_lock);
4213         atomic_inc(&fs_info->scrub_pause_req);
4214         while (atomic_read(&fs_info->scrubs_paused) !=
4215                atomic_read(&fs_info->scrubs_running)) {
4216                 mutex_unlock(&fs_info->scrub_lock);
4217                 wait_event(fs_info->scrub_pause_wait,
4218                            atomic_read(&fs_info->scrubs_paused) ==
4219                            atomic_read(&fs_info->scrubs_running));
4220                 mutex_lock(&fs_info->scrub_lock);
4221         }
4222         mutex_unlock(&fs_info->scrub_lock);
4223 }
4224
4225 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
4226 {
4227         atomic_dec(&fs_info->scrub_pause_req);
4228         wake_up(&fs_info->scrub_pause_wait);
4229 }
4230
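     /*
      * Request cancellation of all running scrubs and wait until they have
      * finished.  Returns -ENOTCONN if no scrub was running.
      */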
4231 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
4232 {
4233         mutex_lock(&fs_info->scrub_lock);
4234         if (!atomic_read(&fs_info->scrubs_running)) {
4235                 mutex_unlock(&fs_info->scrub_lock);
4236                 return -ENOTCONN;
4237         }
4238
4239         atomic_inc(&fs_info->scrub_cancel_req);
4240         while (atomic_read(&fs_info->scrubs_running)) {
4241                 mutex_unlock(&fs_info->scrub_lock);
4242                 wait_event(fs_info->scrub_pause_wait,
4243                            atomic_read(&fs_info->scrubs_running) == 0);
4244                 mutex_lock(&fs_info->scrub_lock);
4245         }
4246         atomic_dec(&fs_info->scrub_cancel_req);
4247         mutex_unlock(&fs_info->scrub_lock);
4248
4249         return 0;
4250 }
4251
4252 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
4253                            struct btrfs_device *dev)
4254 {
4255         struct scrub_ctx *sctx;
4256
4257         mutex_lock(&fs_info->scrub_lock);
4258         sctx = dev->scrub_device;
4259         if (!sctx) {
4260                 mutex_unlock(&fs_info->scrub_lock);
4261                 return -ENOTCONN;
4262         }
4263         atomic_inc(&sctx->cancel_req);
4264         while (dev->scrub_device) {
4265                 mutex_unlock(&fs_info->scrub_lock);
4266                 wait_event(fs_info->scrub_pause_wait,
4267                            dev->scrub_device == NULL);
4268                 mutex_lock(&fs_info->scrub_lock);
4269         }
4270         mutex_unlock(&fs_info->scrub_lock);
4271
4272         return 0;
4273 }
4274
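     /*
      * Copy the current scrub statistics for @devid into @progress.
      * Returns -ENODEV if the device cannot be found and -ENOTCONN if no
      * scrub context is attached to it.
      */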
4275 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
4276                          struct btrfs_scrub_progress *progress)
4277 {
4278         struct btrfs_device *dev;
4279         struct scrub_ctx *sctx = NULL;
4280
4281         mutex_lock(&fs_info->fs_devices->device_list_mutex);
4282         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
4283         if (dev)
4284                 sctx = dev->scrub_device;
4285         if (sctx)
4286                 memcpy(progress, &sctx->stat, sizeof(*progress));
4287         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4288
4289         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4290 }
4291
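     /*
      * Map @extent_logical to the physical address, device and mirror of
      * its first stripe for the dev-replace write path.  If the mapping
      * fails or does not cover the full length, the out parameters are
      * left untouched.
      */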
4292 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4293                                u64 extent_logical, u64 extent_len,
4294                                u64 *extent_physical,
4295                                struct btrfs_device **extent_dev,
4296                                int *extent_mirror_num)
4297 {
4298         u64 mapped_length;
4299         struct btrfs_bio *bbio = NULL;
4300         int ret;
4301
4302         mapped_length = extent_len;
4303         ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4304                               &mapped_length, &bbio, 0);
4305         if (ret || !bbio || mapped_length < extent_len ||
4306             !bbio->stripes[0].dev->bdev) {
4307                 btrfs_put_bbio(bbio);
4308                 return;
4309         }
4310
4311         *extent_physical = bbio->stripes[0].physical;
4312         *extent_mirror_num = bbio->mirror_num;
4313         *extent_dev = bbio->stripes[0].dev;
4314         btrfs_put_bbio(bbio);
4315 }
4316
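     /*
      * Queue a worker that copies the data at @logical through the page
      * cache to the replace target at @physical_for_dev_replace.  This
      * path is used for data that scrub cannot verify by checksum (NOCOW
      * extents).
      */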
4317 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
4318                             int mirror_num, u64 physical_for_dev_replace)
4319 {
4320         struct scrub_copy_nocow_ctx *nocow_ctx;
4321         struct btrfs_fs_info *fs_info = sctx->fs_info;
4322
4323         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
4324         if (!nocow_ctx) {
4325                 spin_lock(&sctx->stat_lock);
4326                 sctx->stat.malloc_errors++;
4327                 spin_unlock(&sctx->stat_lock);
4328                 return -ENOMEM;
4329         }
4330
4331         scrub_pending_trans_workers_inc(sctx);
4332
4333         nocow_ctx->sctx = sctx;
4334         nocow_ctx->logical = logical;
4335         nocow_ctx->len = len;
4336         nocow_ctx->mirror_num = mirror_num;
4337         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
4338         btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
4339                         copy_nocow_pages_worker, NULL, NULL);
4340         INIT_LIST_HEAD(&nocow_ctx->inodes);
4341         btrfs_queue_work(fs_info->scrub_nocow_workers,
4342                          &nocow_ctx->work);
4343
4344         return 0;
4345 }
4346
4347 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
4348 {
4349         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
4350         struct scrub_nocow_inode *nocow_inode;
4351
4352         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
4353         if (!nocow_inode)
4354                 return -ENOMEM;
4355         nocow_inode->inum = inum;
4356         nocow_inode->offset = offset;
4357         nocow_inode->root = root;
4358         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
4359         return 0;
4360 }
4361
4362 #define COPY_COMPLETE 1
4363
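     /*
      * Worker side of copy_nocow_pages(): resolve which inodes reference
      * the logical extent via the backref walker, then copy the pages of
      * each inode in turn.  COPY_COMPLETE from copy_nocow_pages_for_inode()
      * means the whole range has been written and the remaining inodes can
      * be skipped.
      */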
4364 static void copy_nocow_pages_worker(struct btrfs_work *work)
4365 {
4366         struct scrub_copy_nocow_ctx *nocow_ctx =
4367                 container_of(work, struct scrub_copy_nocow_ctx, work);
4368         struct scrub_ctx *sctx = nocow_ctx->sctx;
4369         struct btrfs_fs_info *fs_info = sctx->fs_info;
4370         struct btrfs_root *root = fs_info->extent_root;
4371         u64 logical = nocow_ctx->logical;
4372         u64 len = nocow_ctx->len;
4373         int mirror_num = nocow_ctx->mirror_num;
4374         u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4375         int ret;
4376         struct btrfs_trans_handle *trans = NULL;
4377         struct btrfs_path *path;
4378         int not_written = 0;
4379
4380         path = btrfs_alloc_path();
4381         if (!path) {
4382                 spin_lock(&sctx->stat_lock);
4383                 sctx->stat.malloc_errors++;
4384                 spin_unlock(&sctx->stat_lock);
4385                 not_written = 1;
4386                 goto out;
4387         }
4388
4389         trans = btrfs_join_transaction(root);
4390         if (IS_ERR(trans)) {
4391                 not_written = 1;
4392                 goto out;
4393         }
4394
4395         ret = iterate_inodes_from_logical(logical, fs_info, path,
4396                                           record_inode_for_nocow, nocow_ctx);
4397         if (ret != 0 && ret != -ENOENT) {
4398                 btrfs_warn(fs_info,
4399                            "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
4400                            logical, physical_for_dev_replace, len, mirror_num,
4401                            ret);
4402                 not_written = 1;
4403                 goto out;
4404         }
4405
4406         btrfs_end_transaction(trans);
4407         trans = NULL;
4408         while (!list_empty(&nocow_ctx->inodes)) {
4409                 struct scrub_nocow_inode *entry;
4410                 entry = list_first_entry(&nocow_ctx->inodes,
4411                                          struct scrub_nocow_inode,
4412                                          list);
4413                 list_del_init(&entry->list);
4414                 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4415                                                  entry->root, nocow_ctx);
4416                 kfree(entry);
4417                 if (ret == COPY_COMPLETE) {
4418                         ret = 0;
4419                         break;
4420                 } else if (ret) {
4421                         break;
4422                 }
4423         }
4424 out:
4425         while (!list_empty(&nocow_ctx->inodes)) {
4426                 struct scrub_nocow_inode *entry;
4427                 entry = list_first_entry(&nocow_ctx->inodes,
4428                                          struct scrub_nocow_inode,
4429                                          list);
4430                 list_del_init(&entry->list);
4431                 kfree(entry);
4432         }
4433         if (trans && !IS_ERR(trans))
4434                 btrfs_end_transaction(trans);
4435         if (not_written)
4436                 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4437                                             num_uncorrectable_read_errors);
4438
4439         btrfs_free_path(path);
4440         kfree(nocow_ctx);
4441
4442         scrub_pending_trans_workers_dec(sctx);
4443 }
4444
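     /*
      * Check whether the file extent at @start/@len still maps to @logical.
      * Returns 1 if there is a pending ordered extent or the extent map no
      * longer covers the logical range (the caller should skip this inode),
      * 0 if the copy can proceed, or a negative error.
      */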
4445 static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
4446                                  u64 logical)
4447 {
4448         struct extent_state *cached_state = NULL;
4449         struct btrfs_ordered_extent *ordered;
4450         struct extent_io_tree *io_tree;
4451         struct extent_map *em;
4452         u64 lockstart = start, lockend = start + len - 1;
4453         int ret = 0;
4454
4455         io_tree = &inode->io_tree;
4456
4457         lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
4458         ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4459         if (ordered) {
4460                 btrfs_put_ordered_extent(ordered);
4461                 ret = 1;
4462                 goto out_unlock;
4463         }
4464
4465         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4466         if (IS_ERR(em)) {
4467                 ret = PTR_ERR(em);
4468                 goto out_unlock;
4469         }
4470
4471         /*
4472          * This extent does not actually cover the logical extent anymore;
4473          * move on to the next inode.
4474          */
4475         if (em->block_start > logical ||
4476             em->block_start + em->block_len < logical + len) {
4477                 free_extent_map(em);
4478                 ret = 1;
4479                 goto out_unlock;
4480         }
4481         free_extent_map(em);
4482
4483 out_unlock:
4484         unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4485                              GFP_NOFS);
4486         return ret;
4487 }
4488
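     /*
      * Copy @len bytes starting at @offset of the given inode to the
      * dev-replace target, page by page: find or read each page in the
      * page cache, re-check that the extent still matches the logical
      * block, and write it out with write_page_nocow().  Returns
      * COPY_COMPLETE once the whole range has been written.
      */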
4489 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4490                                       struct scrub_copy_nocow_ctx *nocow_ctx)
4491 {
4492         struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
4493         struct btrfs_key key;
4494         struct inode *inode;
4495         struct page *page;
4496         struct btrfs_root *local_root;
4497         struct extent_io_tree *io_tree;
4498         u64 physical_for_dev_replace;
4499         u64 nocow_ctx_logical;
4500         u64 len = nocow_ctx->len;
4501         unsigned long index;
4502         int srcu_index;
4503         int ret = 0;
4504         int err = 0;
4505
4506         key.objectid = root;
4507         key.type = BTRFS_ROOT_ITEM_KEY;
4508         key.offset = (u64)-1;
4509
4510         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4511
4512         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
4513         if (IS_ERR(local_root)) {
4514                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4515                 return PTR_ERR(local_root);
4516         }
4517
4518         key.type = BTRFS_INODE_ITEM_KEY;
4519         key.objectid = inum;
4520         key.offset = 0;
4521         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
4522         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4523         if (IS_ERR(inode))
4524                 return PTR_ERR(inode);
4525
4526         /* Avoid truncate/dio/punch hole.. */
4527         inode_lock(inode);
4528         inode_dio_wait(inode);
4529
4530         physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4531         io_tree = &BTRFS_I(inode)->io_tree;
4532         nocow_ctx_logical = nocow_ctx->logical;
4533
4534         ret = check_extent_to_block(BTRFS_I(inode), offset, len,
4535                         nocow_ctx_logical);
4536         if (ret) {
4537                 ret = ret > 0 ? 0 : ret;
4538                 goto out;
4539         }
4540
4541         while (len >= PAGE_SIZE) {
4542                 index = offset >> PAGE_SHIFT;
4543 again:
4544                 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4545                 if (!page) {
4546                         btrfs_err(fs_info, "find_or_create_page() failed");
4547                         ret = -ENOMEM;
4548                         goto out;
4549                 }
4550
4551                 if (PageUptodate(page)) {
4552                         if (PageDirty(page))
4553                                 goto next_page;
4554                 } else {
4555                         ClearPageError(page);
4556                         err = extent_read_full_page(io_tree, page,
4557                                                            btrfs_get_extent,
4558                                                            nocow_ctx->mirror_num);
4559                         if (err) {
4560                                 ret = err;
4561                                 goto next_page;
4562                         }
4563
4564                         lock_page(page);
4565                         /*
4566                          * If the page has been removed from the page cache,
4567                          * the data on it is meaningless, because it may be
4568                          * the old one; the new data may have been written
4569                          * into a new page in the page cache.
4570                          */
4571                         if (page->mapping != inode->i_mapping) {
4572                                 unlock_page(page);
4573                                 put_page(page);
4574                                 goto again;
4575                         }
4576                         if (!PageUptodate(page)) {
4577                                 ret = -EIO;
4578                                 goto next_page;
4579                         }
4580                 }
4581
4582                 ret = check_extent_to_block(BTRFS_I(inode), offset, len,
4583                                             nocow_ctx_logical);
4584                 if (ret) {
4585                         ret = ret > 0 ? 0 : ret;
4586                         goto next_page;
4587                 }
4588
4589                 err = write_page_nocow(nocow_ctx->sctx,
4590                                        physical_for_dev_replace, page);
4591                 if (err)
4592                         ret = err;
4593 next_page:
4594                 unlock_page(page);
4595                 put_page(page);
4596
4597                 if (ret)
4598                         break;
4599
4600                 offset += PAGE_SIZE;
4601                 physical_for_dev_replace += PAGE_SIZE;
4602                 nocow_ctx_logical += PAGE_SIZE;
4603                 len -= PAGE_SIZE;
4604         }
4605         ret = COPY_COMPLETE;
4606 out:
4607         inode_unlock(inode);
4608         iput(inode);
4609         return ret;
4610 }
4611
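     /*
      * Synchronously write one page to the dev-replace target device at
      * @physical_for_dev_replace.  A failed submission is accounted as a
      * write error on the target and -EIO is returned.
      */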
4612 static int write_page_nocow(struct scrub_ctx *sctx,
4613                             u64 physical_for_dev_replace, struct page *page)
4614 {
4615         struct bio *bio;
4616         struct btrfs_device *dev;
4617         int ret;
4618
4619         dev = sctx->wr_tgtdev;
4620         if (!dev)
4621                 return -EIO;
4622         if (!dev->bdev) {
4623                 btrfs_warn_rl(dev->fs_info,
4624                         "scrub write_page_nocow(bdev == NULL) is unexpected");
4625                 return -EIO;
4626         }
4627         bio = btrfs_io_bio_alloc(1);
4628         bio->bi_iter.bi_size = 0;
4629         bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4630         bio->bi_bdev = dev->bdev;
4631         bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
4632         ret = bio_add_page(bio, page, PAGE_SIZE, 0);
4633         if (ret != PAGE_SIZE) {
4634 leave_with_eio:
4635                 bio_put(bio);
4636                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4637                 return -EIO;
4638         }
4639
4640         if (btrfsic_submit_bio_wait(bio))
4641                 goto leave_with_eio;
4642
4643         bio_put(bio);
4644         return 0;
4645 }