// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
        [BTRFS_RAID_RAID10] = {
                .sub_stripes    = 2,
                .dev_stripes    = 1,
                .devs_max       = 0,    /* 0 == as many as possible */
                .devs_min       = 4,
                .tolerated_failures = 1,
                .devs_increment = 2,
                .ncopies        = 2,
                .nparity        = 0,
                .raid_name      = "raid10",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID10,
                .mindev_error   = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID1] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 2,
                .devs_min       = 2,
                .tolerated_failures = 1,
                .devs_increment = 2,
                .ncopies        = 2,
                .nparity        = 0,
                .raid_name      = "raid1",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID1,
                .mindev_error   = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
        },
        [BTRFS_RAID_DUP] = {
                .sub_stripes    = 1,
                .dev_stripes    = 2,
                .devs_max       = 1,
                .devs_min       = 1,
                .tolerated_failures = 0,
                .devs_increment = 1,
                .ncopies        = 2,
                .nparity        = 0,
                .raid_name      = "dup",
                .bg_flag        = BTRFS_BLOCK_GROUP_DUP,
                .mindev_error   = 0,
        },
        [BTRFS_RAID_RAID0] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 0,
                .devs_min       = 2,
                .tolerated_failures = 0,
                .devs_increment = 1,
                .ncopies        = 1,
                .nparity        = 0,
                .raid_name      = "raid0",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID0,
                .mindev_error   = 0,
        },
        [BTRFS_RAID_SINGLE] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 1,
                .devs_min       = 1,
                .tolerated_failures = 0,
                .devs_increment = 1,
                .ncopies        = 1,
                .nparity        = 0,
                .raid_name      = "single",
                .bg_flag        = 0,
                .mindev_error   = 0,
        },
        [BTRFS_RAID_RAID5] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 0,
                .devs_min       = 2,
                .tolerated_failures = 1,
                .devs_increment = 1,
                .ncopies        = 1,
                .nparity        = 1,
                .raid_name      = "raid5",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID5,
                .mindev_error   = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID6] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 0,
                .devs_min       = 3,
                .tolerated_failures = 2,
                .devs_increment = 1,
                .ncopies        = 1,
                .nparity        = 2,
                .raid_name      = "raid6",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID6,
                .mindev_error   = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
        },
};
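
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how the table above is typically consumed.  Given a block group flag,
 * map it back to its btrfs_raid_types index and read the attributes; the
 * helper name bg_flag_to_raid_index() here is hypothetical.
 *
 *	static int bg_flag_to_raid_index(u64 flag)
 *	{
 *		int i;
 *
 *		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
 *			if (btrfs_raid_array[i].bg_flag == flag)
 *				return i;
 *		return BTRFS_RAID_SINGLE;
 *	}
 *
 * For example, btrfs_raid_array[BTRFS_RAID_RAID6].tolerated_failures == 2,
 * so a raid6 block group survives the loss of any two devices.
 */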

const char *get_raid_name(enum btrfs_raid_types type)
{
        if (type >= BTRFS_NR_RAID_TYPES)
                return NULL;

        return btrfs_raid_array[type].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
        int i;
        int ret;
        char *bp = buf;
        u64 flags = bg_flags;
        u32 size_bp = size_buf;

        if (!flags) {
                strcpy(bp, "NONE");
                return;
        }

#define DESCRIBE_FLAG(flag, desc)                                               \
        do {                                                            \
                if (flags & (flag)) {                                   \
                        ret = snprintf(bp, size_bp, "%s|", (desc));     \
                        if (ret < 0 || ret >= size_bp)                  \
                                goto out_overflow;                      \
                        size_bp -= ret;                                 \
                        bp += ret;                                      \
                        flags &= ~(flag);                               \
                }                                                       \
        } while (0)

        DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
        DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
        DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

        DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
                DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
                              btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

        if (flags) {
                ret = snprintf(bp, size_bp, "0x%llx|", flags);
                size_bp -= ret;
        }

        if (size_bp < size_buf)
                buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

        /*
         * The text is trimmed; it's up to the caller to provide a
         * sufficiently large buffer.
         */
out_overflow:;
}
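
/*
 * Example (illustrative, editor's addition): given the flag handling above,
 * bg_flags == (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1) renders as
 * "data|raid1" (the trailing '|' is stripped), and bg_flags == 0 renders as
 * "NONE".
 */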

static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *fs_info);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
                             enum btrfs_map_op op,
                             u64 logical, u64 *length,
                             struct btrfs_bio **bbio_ret,
                             int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list!
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   volume_mutex
 *     device_list_mutex
 *       chunk_mutex
 *     balance_mutex
 *
 *
 * Exclusive operations, BTRFS_FS_EXCL_OP
 * ======================================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
 * completed.
 */
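
/*
 * Illustrative sketch (editor's addition, mirroring the nesting documented
 * above; not code from this file): an operation that touches both the
 * per-fs device list and the chunks would take the locks outer-to-inner
 * and release them in reverse order:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	... manipulate devices and chunks ...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */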

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
        return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:               if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:      if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
                                                 const u8 *metadata_fsid)
{
        struct btrfs_fs_devices *fs_devs;

        fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
        if (!fs_devs)
                return ERR_PTR(-ENOMEM);

        mutex_init(&fs_devs->device_list_mutex);

        INIT_LIST_HEAD(&fs_devs->devices);
        INIT_LIST_HEAD(&fs_devs->resized_devices);
        INIT_LIST_HEAD(&fs_devs->alloc_list);
        INIT_LIST_HEAD(&fs_devs->fs_list);
        if (fsid)
                memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

        if (metadata_fsid)
                memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
        else if (fsid)
                memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

        return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
        rcu_string_free(device->name);
        bio_put(device->flush_bio);
        kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;
        WARN_ON(fs_devices->opened);
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                btrfs_free_device(device);
        }
        kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
                                 enum kobject_action action)
{
        int ret;

        ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
        if (ret)
                pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
                        action,
                        kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
                        &disk_to_dev(bdev->bd_disk)->kobj);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next,
                                        struct btrfs_fs_devices, fs_list);
                list_del(&fs_devices->fs_list);
                free_fs_devices(fs_devices);
        }
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(void)
{
        struct btrfs_device *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        /*
         * Preallocate a bio that's always going to be used for flushing device
         * barriers and matches the device lifespan
         */
        dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
        if (!dev->flush_bio) {
                kfree(dev);
                return ERR_PTR(-ENOMEM);
        }

        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_alloc_list);
        INIT_LIST_HEAD(&dev->resized_list);

        spin_lock_init(&dev->io_lock);

        atomic_set(&dev->reada_in_flight, 0);
        atomic_set(&dev->dev_stats_ccnt, 0);
        btrfs_device_data_ordered_init(dev);
        INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
        INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

        return dev;
}

static noinline struct btrfs_fs_devices *find_fsid(
                const u8 *fsid, const u8 *metadata_fsid)
{
        struct btrfs_fs_devices *fs_devices;

        ASSERT(fsid);

        if (metadata_fsid) {
                /*
                 * Handle scanned device having completed its fsid change but
                 * belonging to a fs_devices that was created by first scanning
                 * a device which didn't have its fsid/metadata_uuid changed
                 * at all and the CHANGING_FSID_V2 flag set.
                 */
                list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                        if (fs_devices->fsid_change &&
                            memcmp(metadata_fsid, fs_devices->fsid,
                                   BTRFS_FSID_SIZE) == 0 &&
                            memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
                                   BTRFS_FSID_SIZE) == 0) {
                                return fs_devices;
                        }
                }
                /*
                 * Handle scanned device having completed its fsid change but
                 * belonging to a fs_devices that was created by a device that
                 * has an outdated pair of fsid/metadata_uuid and
                 * CHANGING_FSID_V2 flag set.
                 */
                list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                        if (fs_devices->fsid_change &&
                            memcmp(fs_devices->metadata_uuid,
                                   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
                            memcmp(metadata_fsid, fs_devices->metadata_uuid,
                                   BTRFS_FSID_SIZE) == 0) {
                                return fs_devices;
                        }
                }
        }

        /* Handle non-split brain cases */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (metadata_fsid) {
                        if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
                            && memcmp(metadata_fsid, fs_devices->metadata_uuid,
                                      BTRFS_FSID_SIZE) == 0)
                                return fs_devices;
                } else {
                        if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                                return fs_devices;
                }
        }
        return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
                      int flush, struct block_device **bdev,
                      struct buffer_head **bh)
{
        int ret;

        *bdev = blkdev_get_by_path(device_path, flags, holder);

        if (IS_ERR(*bdev)) {
                ret = PTR_ERR(*bdev);
                goto error;
        }

        if (flush)
                filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
        ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
        if (ret) {
                blkdev_put(*bdev, flags);
                goto error;
        }
        invalidate_bdev(*bdev);
        *bh = btrfs_read_dev_super(*bdev);
        if (IS_ERR(*bh)) {
                ret = PTR_ERR(*bh);
                blkdev_put(*bdev, flags);
                goto error;
        }

        return 0;

error:
        *bdev = NULL;
        *bh = NULL;
        return ret;
}

static void requeue_list(struct btrfs_pending_bios *pending_bios,
                        struct bio *head, struct bio *tail)
{
        struct bio *old_head;

        old_head = pending_bios->head;
        pending_bios->head = head;
        if (pending_bios->tail)
                tail->bi_next = old_head;
        else
                pending_bios->tail = tail;
}

/*
 * We try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
        struct btrfs_fs_info *fs_info = device->fs_info;
        struct bio *pending;
        struct backing_dev_info *bdi;
        struct btrfs_pending_bios *pending_bios;
        struct bio *tail;
        struct bio *cur;
        int again = 0;
        unsigned long num_run;
        unsigned long batch_run = 0;
        unsigned long last_waited = 0;
        int force_reg = 0;
        int sync_pending = 0;
        struct blk_plug plug;

        /*
         * this function runs all the bios we've collected for
         * a particular device.  We don't want to wander off to
         * another device without first sending all of these down.
         * So, setup a plug here and finish it off before we return
         */
        blk_start_plug(&plug);

        bdi = device->bdev->bd_bdi;

loop:
        spin_lock(&device->io_lock);

loop_lock:
        num_run = 0;

        /* take all the bios off the list at once and process them
         * later on (without the lock held).  But, remember the
         * tail and other pointers so the bios can be properly reinserted
         * into the list if we hit congestion
         */
        if (!force_reg && device->pending_sync_bios.head) {
                pending_bios = &device->pending_sync_bios;
                force_reg = 1;
        } else {
                pending_bios = &device->pending_bios;
                force_reg = 0;
        }

        pending = pending_bios->head;
        tail = pending_bios->tail;
        WARN_ON(pending && !tail);

        /*
         * if pending was null this time around, no bios need processing
         * at all and we can stop.  Otherwise it'll loop back up again
         * and do an additional check so no bios are missed.
         *
         * device->running_pending is used to synchronize with the
         * schedule_bio code.
         */
        if (device->pending_sync_bios.head == NULL &&
            device->pending_bios.head == NULL) {
                again = 0;
                device->running_pending = 0;
        } else {
                again = 1;
                device->running_pending = 1;
        }

        pending_bios->head = NULL;
        pending_bios->tail = NULL;

        spin_unlock(&device->io_lock);

        while (pending) {

                rmb();
                /* we want to work on both lists, but do more bios on the
                 * sync list than the regular list
                 */
                if ((num_run > 32 &&
                    pending_bios != &device->pending_sync_bios &&
                    device->pending_sync_bios.head) ||
                   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
                    device->pending_bios.head)) {
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        goto loop_lock;
                }

                cur = pending;
                pending = pending->bi_next;
                cur->bi_next = NULL;

                BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

                /*
                 * if we're doing the sync list, record that our
                 * plug has some sync requests on it
                 *
                 * If we're doing the regular list and there are
                 * sync requests sitting around, unplug before
                 * we add more
                 */
                if (pending_bios == &device->pending_sync_bios) {
                        sync_pending = 1;
                } else if (sync_pending) {
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                        sync_pending = 0;
                }

                btrfsic_submit_bio(cur);
                num_run++;
                batch_run++;

                cond_resched();

                /*
                 * we made progress, there is more work to do and the bdi
                 * is now congested.  Back off and let other work structs
                 * run instead
                 */
                if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
                    fs_info->fs_devices->open_devices > 1) {
                        struct io_context *ioc;

                        ioc = current->io_context;

                        /*
                         * the main goal here is that we don't want to
                         * block if we're going to be able to submit
                         * more requests without blocking.
                         *
                         * This code does two great things, it pokes into
                         * the elevator code from a filesystem _and_
                         * it makes assumptions about how batching works.
                         */
                        if (ioc && ioc->nr_batch_requests > 0 &&
                            time_before(jiffies, ioc->last_waited + HZ/50UL) &&
                            (last_waited == 0 ||
                             ioc->last_waited == last_waited)) {
                                /*
                                 * we want to go through our batch of
                                 * requests and stop.  So, we copy out
                                 * the ioc->last_waited time and test
                                 * against it before looping
                                 */
                                last_waited = ioc->last_waited;
                                cond_resched();
                                continue;
                        }
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        device->running_pending = 1;

                        spin_unlock(&device->io_lock);
                        btrfs_queue_work(fs_info->submit_workers,
                                         &device->work);
                        goto done;
                }
        }

        cond_resched();
        if (again)
                goto loop;

        spin_lock(&device->io_lock);
        if (device->pending_bios.head || device->pending_sync_bios.head)
                goto loop_lock;
        spin_unlock(&device->io_lock);

done:
        blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, work);
        run_scheduled_bios(device);
}

static bool device_path_matched(const char *path, struct btrfs_device *device)
{
        int found;

        rcu_read_lock();
        found = strcmp(rcu_str_deref(device->name), path);
        rcu_read_unlock();

        return found == 0;
}

/*
 *  Search and remove all stale devices (devices which are not mounted).
 *  When both inputs are NULL, it will search and release all stale devices.
 *  path:       Optional. When provided, it will release all unmounted devices
 *              matching this path only.
 *  skip_dev:   Optional. Will skip this device when searching for the stale
 *              devices.
 *  Return:     0 for success or if @path is NULL.
 *              -EBUSY if @path is a mounted device.
 *              -ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
                                     struct btrfs_device *skip_device)
{
        struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
        struct btrfs_device *device, *tmp_device;
        int ret = 0;

        if (path)
                ret = -ENOENT;

        list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

                mutex_lock(&fs_devices->device_list_mutex);
                list_for_each_entry_safe(device, tmp_device,
                                         &fs_devices->devices, dev_list) {
                        if (skip_device && skip_device == device)
                                continue;
                        if (path && !device->name)
                                continue;
                        if (path && !device_path_matched(path, device))
                                continue;
                        if (fs_devices->opened) {
                                /* for an already deleted device return 0 */
                                if (path && ret != 0)
                                        ret = -EBUSY;
                                break;
                        }

                        /* delete the stale device */
                        fs_devices->num_devices--;
                        list_del(&device->dev_list);
                        btrfs_free_device(device);

                        ret = 0;
                        if (fs_devices->num_devices == 0)
                                break;
                }
                mutex_unlock(&fs_devices->device_list_mutex);

                if (fs_devices->num_devices == 0) {
                        btrfs_sysfs_remove_fsid(fs_devices);
                        list_del(&fs_devices->fs_list);
                        free_fs_devices(fs_devices);
                }
        }

        return ret;
}

static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
                        struct btrfs_device *device, fmode_t flags,
                        void *holder)
{
        struct request_queue *q;
        struct block_device *bdev;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
        u64 devid;
        int ret;

        if (device->bdev)
                return -EINVAL;
        if (!device->name)
                return -EINVAL;

        ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
                                    &bdev, &bh);
        if (ret)
                return ret;

        disk_super = (struct btrfs_super_block *)bh->b_data;
        devid = btrfs_stack_device_id(&disk_super->dev_item);
        if (devid != device->devid)
                goto error_brelse;

        if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
                goto error_brelse;

        device->generation = btrfs_super_generation(disk_super);

        if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
                if (btrfs_super_incompat_flags(disk_super) &
                    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
                        pr_err(
                "BTRFS: Invalid seeding and uuid-changed device detected\n");
                        goto error_brelse;
                }

                clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
                fs_devices->seeding = 1;
        } else {
                if (bdev_read_only(bdev))
                        clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
                else
                        set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
        }

        q = bdev_get_queue(bdev);
        if (!blk_queue_nonrot(q))
                fs_devices->rotating = 1;

        device->bdev = bdev;
        clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
        device->mode = flags;

        fs_devices->open_devices++;
        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
            device->devid != BTRFS_DEV_REPLACE_DEVID) {
                fs_devices->rw_devices++;
                list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
        }
        brelse(bh);

        return 0;

error_brelse:
        brelse(bh);
        blkdev_put(bdev, flags);

        return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
                                        struct btrfs_super_block *disk_super)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
                           BTRFS_FSID_SIZE) != 0 &&
                    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
                           BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
                        return fs_devices;
                }
        }

        return NULL;
}

static struct btrfs_fs_devices *find_fsid_changed(
                                        struct btrfs_super_block *disk_super)
{
        struct btrfs_fs_devices *fs_devices;

        /*
         * Handles the case where the scanned device is part of an fs that had
         * multiple successful changes of FSID but currently the device didn't
         * observe it, meaning our fsid will be different from theirs.
         */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
                           BTRFS_FSID_SIZE) != 0 &&
                    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
                           BTRFS_FSID_SIZE) == 0 &&
                    memcmp(fs_devices->fsid, disk_super->fsid,
                           BTRFS_FSID_SIZE) != 0) {
                        return fs_devices;
                }
        }

        return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           bool *new_device_added)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices = NULL;
        struct rcu_string *name;
        u64 found_transid = btrfs_super_generation(disk_super);
        u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
        bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
                BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
        bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
                                        BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

        if (fsid_change_in_progress) {
                if (!has_metadata_uuid) {
                        /*
                         * When we have an image which has CHANGING_FSID_V2 set
                         * it might belong either to a filesystem which has
                         * disks with a completed fsid change or to a filesystem
                         * with no UUID changes in effect; handle both.
                         */
                        fs_devices = find_fsid_inprogress(disk_super);
                        if (!fs_devices)
                                fs_devices = find_fsid(disk_super->fsid, NULL);
                } else {
                        fs_devices = find_fsid_changed(disk_super);
                }
        } else if (has_metadata_uuid) {
                fs_devices = find_fsid(disk_super->fsid,
                                       disk_super->metadata_uuid);
        } else {
                fs_devices = find_fsid(disk_super->fsid, NULL);
        }

        if (!fs_devices) {
                if (has_metadata_uuid)
                        fs_devices = alloc_fs_devices(disk_super->fsid,
                                                      disk_super->metadata_uuid);
                else
                        fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

                if (IS_ERR(fs_devices))
                        return ERR_CAST(fs_devices);

                fs_devices->fsid_change = fsid_change_in_progress;

                mutex_lock(&fs_devices->device_list_mutex);
                list_add(&fs_devices->fs_list, &fs_uuids);

                device = NULL;
        } else {
                mutex_lock(&fs_devices->device_list_mutex);
                device = btrfs_find_device(fs_devices, devid,
                                disk_super->dev_item.uuid, NULL, false);

                /*
                 * If this disk has been pulled into an fs devices created by
                 * a device which had the CHANGING_FSID_V2 flag then replace the
                 * metadata_uuid/fsid values of the fs_devices.
                 */
                if (has_metadata_uuid && fs_devices->fsid_change &&
                    found_transid > fs_devices->latest_generation) {
                        memcpy(fs_devices->fsid, disk_super->fsid,
                                        BTRFS_FSID_SIZE);
                        memcpy(fs_devices->metadata_uuid,
                                        disk_super->metadata_uuid, BTRFS_FSID_SIZE);

                        fs_devices->fsid_change = false;
                }
        }

        if (!device) {
                if (fs_devices->opened) {
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-EBUSY);
                }

                device = btrfs_alloc_device(NULL, &devid,
                                            disk_super->dev_item.uuid);
                if (IS_ERR(device)) {
                        mutex_unlock(&fs_devices->device_list_mutex);
                        /* we can safely leave the fs_devices entry around */
                        return device;
                }

                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
                        btrfs_free_device(device);
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-ENOMEM);
                }
                rcu_assign_pointer(device->name, name);

                list_add_rcu(&device->dev_list, &fs_devices->devices);
                fs_devices->num_devices++;

                device->fs_devices = fs_devices;
                *new_device_added = true;

                if (disk_super->label[0])
                        pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
                                disk_super->label, devid, found_transid, path);
                else
                        pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
                                disk_super->fsid, devid, found_transid, path);

        } else if (!device->name || strcmp(device->name->str, path)) {
                /*
                 * When FS is already mounted.
                 * 1. If you are here and if the device->name is NULL that
                 *    means this device was missing at the time of FS mount.
                 * 2. If you are here and if the device->name is different
                 *    from 'path' that means either
                 *      a. The same device disappeared and reappeared with a
                 *         different name, or
                 *      b. The missing disk which was replaced has
                 *         reappeared now.
                 *
                 * We must allow 1 and 2a above. But 2b would be spurious
                 * and unintentional.
                 *
                 * Further, in case of 1 and 2a above, the disk at 'path'
                 * would have missed some transactions when it was away and
                 * in case of 2a the stale bdev has to be updated as well.
                 * 2b must not be allowed at any time.
                 */

                /*
                 * For now, we do allow update to btrfs_fs_device through the
                 * btrfs dev scan cli after FS has been mounted.  We're still
                 * tracking a problem where systems fail mount by subvolume id
                 * when we reject replacement on a mounted FS.
                 */
                if (!fs_devices->opened && found_transid < device->generation) {
                        /*
                         * That is, if the FS is _not_ mounted and if you are
                         * here, it means there is more than one disk with the
                         * same uuid and devid. We keep the one with the larger
                         * generation number or the last-in if generations are
                         * equal.
                         */
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-EEXIST);
                }

                /*
                 * We are going to replace the device path for a given devid,
                 * make sure it's the same device if the device is mounted
                 */
                if (device->bdev) {
                        struct block_device *path_bdev;

                        path_bdev = lookup_bdev(path);
                        if (IS_ERR(path_bdev)) {
                                mutex_unlock(&fs_devices->device_list_mutex);
                                return ERR_CAST(path_bdev);
                        }

                        if (device->bdev != path_bdev) {
                                bdput(path_bdev);
                                mutex_unlock(&fs_devices->device_list_mutex);
                                btrfs_warn_in_rcu(device->fs_info,
                        "duplicate device fsid:devid for %pU:%llu old:%s new:%s",
                                        disk_super->fsid, devid,
                                        rcu_str_deref(device->name), path);
                                return ERR_PTR(-EEXIST);
                        }
                        bdput(path_bdev);
                        btrfs_info_in_rcu(device->fs_info,
                                "device fsid %pU devid %llu moved old:%s new:%s",
                                disk_super->fsid, devid,
                                rcu_str_deref(device->name), path);
                }

                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-ENOMEM);
                }
                rcu_string_free(device->name);
                rcu_assign_pointer(device->name, name);
                if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
                        fs_devices->missing_devices--;
                        clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
                }
        }

        /*
         * Unmount does not free the btrfs_device struct but would zero
         * generation along with most of the other members. So just update
         * it back. We need it to pick the disk with largest generation
         * (as above).
         */
        if (!fs_devices->opened) {
                device->generation = found_transid;
                fs_devices->latest_generation = max_t(u64, found_transid,
                                                fs_devices->latest_generation);
        }

        fs_devices->total_devices = btrfs_super_num_devices(disk_super);

        mutex_unlock(&fs_devices->device_list_mutex);
        return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_device *device;
        struct btrfs_device *orig_dev;

        fs_devices = alloc_fs_devices(orig->fsid, NULL);
        if (IS_ERR(fs_devices))
                return fs_devices;

        mutex_lock(&orig->device_list_mutex);
        fs_devices->total_devices = orig->total_devices;

        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                struct rcu_string *name;

                device = btrfs_alloc_device(NULL, &orig_dev->devid,
                                            orig_dev->uuid);
                if (IS_ERR(device))
                        goto error;

                /*
                 * This is ok to do without rcu read locked because we hold the
                 * uuid mutex so nothing we touch in here is going to disappear.
                 */
                if (orig_dev->name) {
                        name = rcu_string_strdup(orig_dev->name->str,
                                        GFP_KERNEL);
                        if (!name) {
                                btrfs_free_device(device);
                                goto error;
                        }
                        rcu_assign_pointer(device->name, name);
                }

                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
        mutex_unlock(&orig->device_list_mutex);
        return fs_devices;
error:
        mutex_unlock(&orig->device_list_mutex);
        free_fs_devices(fs_devices);
        return ERR_PTR(-ENOMEM);
}

/*
 * After we have read the system tree and know the devids belonging to this
 * filesystem, remove any device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
        struct btrfs_device *device, *next;
        struct btrfs_device *latest_dev = NULL;

        mutex_lock(&uuid_mutex);
again:
        /* This is the initialized path, it is safe to release the devices. */
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
                                                        &device->dev_state)) {
                        if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
                             &device->dev_state) &&
                             (!latest_dev ||
                              device->generation > latest_dev->generation)) {
                                latest_dev = device;
                        }
                        continue;
                }

                if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
                        /*
                         * In the first step, keep the device which has
                         * the correct fsid and the devid that is used
                         * for the dev_replace procedure.
                         * In the second step, the dev_replace state is
                         * read from the device tree and it is known
                         * whether the procedure is really active or
                         * not, which means whether this device is
                         * used or whether it should be removed.
                         */
                        if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
                                                  &device->dev_state)) {
                                continue;
                        }
                }
                if (device->bdev) {
                        blkdev_put(device->bdev, device->mode);
                        device->bdev = NULL;
                        fs_devices->open_devices--;
                }
                if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                        list_del_init(&device->dev_alloc_list);
                        clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
                        if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
                                      &device->dev_state))
                                fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                btrfs_free_device(device);
        }

        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
                goto again;
        }

        fs_devices->latest_bdev = latest_dev->bdev;

        mutex_unlock(&uuid_mutex);
}

static void free_device_rcu(struct rcu_head *head)
{
        struct btrfs_device *device;

        device = container_of(head, struct btrfs_device, rcu);
        btrfs_free_device(device);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
        if (!device->bdev)
                return;

        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                sync_blockdev(device->bdev);
                invalidate_bdev(device->bdev);
        }

        blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
        struct btrfs_fs_devices *fs_devices = device->fs_devices;
        struct btrfs_device *new_device;
        struct rcu_string *name;

        if (device->bdev)
                fs_devices->open_devices--;

        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
            device->devid != BTRFS_DEV_REPLACE_DEVID) {
                list_del_init(&device->dev_alloc_list);
                fs_devices->rw_devices--;
        }

        if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
                fs_devices->missing_devices--;

        btrfs_close_bdev(device);

        new_device = btrfs_alloc_device(NULL, &device->devid,
                                        device->uuid);
        BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

        /* Safe because we are under uuid_mutex */
        if (device->name) {
                name = rcu_string_strdup(device->name->str, GFP_NOFS);
                BUG_ON(!name); /* -ENOMEM */
                rcu_assign_pointer(new_device->name, name);
        }

        list_replace_rcu(&device->dev_list, &new_device->dev_list);
        new_device->fs_devices = device->fs_devices;

        call_rcu(&device->rcu, free_device_rcu);
}

static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device, *tmp;

        if (--fs_devices->opened > 0)
                return 0;

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
                btrfs_close_one_device(device);
        }
        mutex_unlock(&fs_devices->device_list_mutex);

        WARN_ON(fs_devices->open_devices);
        WARN_ON(fs_devices->rw_devices);
        fs_devices->opened = 0;
        fs_devices->seeding = 0;

        return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices = NULL;
        int ret;

        mutex_lock(&uuid_mutex);
        ret = close_fs_devices(fs_devices);
        if (!fs_devices->opened) {
                seed_devices = fs_devices->seed;
                fs_devices->seed = NULL;
        }
        mutex_unlock(&uuid_mutex);

        while (seed_devices) {
                fs_devices = seed_devices;
                seed_devices = fs_devices->seed;
                close_fs_devices(fs_devices);
                free_fs_devices(fs_devices);
        }
        return ret;
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
                                fmode_t flags, void *holder)
{
        struct btrfs_device *device;
        struct btrfs_device *latest_dev = NULL;
        int ret = 0;

        flags |= FMODE_EXCL;

        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                /* Just open everything we can; ignore failures here */
                if (btrfs_open_one_device(fs_devices, device, flags, holder))
                        continue;

                if (!latest_dev ||
                    device->generation > latest_dev->generation)
                        latest_dev = device;
        }
        if (fs_devices->open_devices == 0) {
                ret = -EINVAL;
                goto out;
        }
        fs_devices->opened = 1;
        fs_devices->latest_bdev = latest_dev->bdev;
        fs_devices->total_rw_bytes = 0;
out:
        return ret;
}

static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct btrfs_device *dev1, *dev2;

        dev1 = list_entry(a, struct btrfs_device, dev_list);
        dev2 = list_entry(b, struct btrfs_device, dev_list);

        if (dev1->devid < dev2->devid)
                return -1;
        else if (dev1->devid > dev2->devid)
                return 1;
        return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder)
{
        int ret;

        lockdep_assert_held(&uuid_mutex);

        mutex_lock(&fs_devices->device_list_mutex);
        if (fs_devices->opened) {
                fs_devices->opened++;
                ret = 0;
        } else {
                list_sort(NULL, &fs_devices->devices, devid_cmp);
                ret = open_fs_devices(fs_devices, flags, holder);
        }
        mutex_unlock(&fs_devices->device_list_mutex);

        return ret;
}

1397 static void btrfs_release_disk_super(struct page *page)
1398 {
1399         kunmap(page);
1400         put_page(page);
1401 }
1402
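/*
 * Read the superblock at @bytenr of @bdev through the page cache and do
 * basic validity checks (it fits on the device and within one page, and
 * the bytenr and magic match). On success, return 0 with the page kmapped
 * and *disk_super pointing into it; the caller must release it with
 * btrfs_release_disk_super().
 */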
1403 static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
1404                                  struct page **page,
1405                                  struct btrfs_super_block **disk_super)
1406 {
1407         void *p;
1408         pgoff_t index;
1409
1410         /* make sure our super fits in the device */
1411         if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1412                 return 1;
1413
1414         /* make sure our super fits in the page */
1415         if (sizeof(**disk_super) > PAGE_SIZE)
1416                 return 1;
1417
1418         /* make sure our super doesn't straddle pages on disk */
1419         index = bytenr >> PAGE_SHIFT;
1420         if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
1421                 return 1;
1422
1423         /* pull in the page with our super */
1424         *page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
1425                                    index, GFP_KERNEL);
1426
1427         if (IS_ERR_OR_NULL(*page))
1428                 return 1;
1429
1430         p = kmap(*page);
1431
1432         /* align our pointer to the offset of the super block */
1433         *disk_super = p + offset_in_page(bytenr);
1434
1435         if (btrfs_super_bytenr(*disk_super) != bytenr ||
1436             btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
1437                 btrfs_release_disk_super(*page);
1438                 return 1;
1439         }
1440
1441         if ((*disk_super)->label[0] &&
1442                 (*disk_super)->label[BTRFS_LABEL_SIZE - 1])
1443                 (*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';
1444
1445         return 0;
1446 }
1447
1448 int btrfs_forget_devices(const char *path)
1449 {
1450         int ret;
1451
1452         mutex_lock(&uuid_mutex);
1453         ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
1454         mutex_unlock(&uuid_mutex);
1455
1456         return ret;
1457 }
1458
1459 /*
1460  * Look for a btrfs signature on a device. This may be called out of the
1461  * mount path and we are not allowed to call set_blocksize during the scan.
1462  * The superblock is read via the pagecache.
1463  */
1464 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1465                                            void *holder)
1466 {
1467         struct btrfs_super_block *disk_super;
1468         bool new_device_added = false;
1469         struct btrfs_device *device = NULL;
1470         struct block_device *bdev;
1471         struct page *page;
1472         u64 bytenr;
1473
1474         lockdep_assert_held(&uuid_mutex);
1475
1476         /*
1477          * we would like to check all the supers, but that would make
1478          * a btrfs mount succeed after a mkfs from a different FS.
1479          * So, we need to add a special mount option to scan for
1480          * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
1481          */
1482         bytenr = btrfs_sb_offset(0);
1483         flags |= FMODE_EXCL;
1484
1485         bdev = blkdev_get_by_path(path, flags, holder);
1486         if (IS_ERR(bdev))
1487                 return ERR_CAST(bdev);
1488
1489         if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
1490                 device = ERR_PTR(-EINVAL);
1491                 goto error_bdev_put;
1492         }
1493
1494         device = device_list_add(path, disk_super, &new_device_added);
1495         if (!IS_ERR(device)) {
1496                 if (new_device_added)
1497                         btrfs_free_stale_devices(path, device);
1498         }
1499
1500         btrfs_release_disk_super(page);
1501
1502 error_bdev_put:
1503         blkdev_put(bdev, flags);
1504
1505         return device;
1506 }
1507
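/*
 * Check whether the device hole starting at *start with length @len
 * overlaps any pending or pinned chunk. If so, advance *start past the
 * conflicting stripes and return 1 so the caller can retry the search.
 */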
1508 static int contains_pending_extent(struct btrfs_transaction *transaction,
1509                                    struct btrfs_device *device,
1510                                    u64 *start, u64 len)
1511 {
1512         struct btrfs_fs_info *fs_info = device->fs_info;
1513         struct extent_map *em;
1514         struct list_head *search_list = &fs_info->pinned_chunks;
1515         int ret = 0;
1516         u64 physical_start = *start;
1517
1518         if (transaction)
1519                 search_list = &transaction->pending_chunks;
1520 again:
1521         list_for_each_entry(em, search_list, list) {
1522                 struct map_lookup *map;
1523                 int i;
1524
1525                 map = em->map_lookup;
1526                 for (i = 0; i < map->num_stripes; i++) {
1527                         u64 end;
1528
1529                         if (map->stripes[i].dev != device)
1530                                 continue;
1531                         if (map->stripes[i].physical >= physical_start + len ||
1532                             map->stripes[i].physical + em->orig_block_len <=
1533                             physical_start)
1534                                 continue;
1535                         /*
1536                          * Make sure that while processing the pinned list we do
1537                          * not override our *start with a lower value, because
1538                          * we can have pinned chunks that fall within this
1539                          * device hole and that have lower physical addresses
1540                          * than the pending chunks we processed before. If we
1541                          * do not take this special care we can end up getting
1542                          * 2 pending chunks that start at the same physical
1543                          * device offsets because the end offset of a pinned
1544                          * chunk can be equal to the start offset of some
1545                          * pending chunk.
1546                          */
1547                         end = map->stripes[i].physical + em->orig_block_len;
1548                         if (end > *start) {
1549                                 *start = end;
1550                                 ret = 1;
1551                         }
1552                 }
1553         }
1554         if (search_list != &fs_info->pinned_chunks) {
1555                 search_list = &fs_info->pinned_chunks;
1556                 goto again;
1557         }
1558
1559         return ret;
1560 }
1561
1563 /*
1564  * find_free_dev_extent_start - find free space in the specified device
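 * @transaction:  the running transaction, or NULL; when set, chunks pending
 *                in that transaction are also treated as allocated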
1565  * @device:       the device which we search the free space in
1566  * @num_bytes:    the size of the free space that we need
1567  * @search_start: the position from which to begin the search
1568  * @start:        store the start of the free space.
1569  * @len:          the size of the free space that we find, or the size
1570  *                of the max free space if we don't find suitable free space
1571  *
1572  * This uses a pretty simple search; the expectation is that it is
1573  * called very infrequently and that a given device has a small number
1574  * of extents.
1575  *
1576  * @start is used to store the start of the free space if we find it. But if we
1577  * don't find suitable free space, it will be used to store the start position
1578  * of the max free space.
1579  *
1580  * @len is used to store the size of the free space that we find.
1581  * But if we don't find suitable free space, it is used to store the size of
1582  * the max free space.
1583  */
1584 int find_free_dev_extent_start(struct btrfs_transaction *transaction,
1585                                struct btrfs_device *device, u64 num_bytes,
1586                                u64 search_start, u64 *start, u64 *len)
1587 {
1588         struct btrfs_fs_info *fs_info = device->fs_info;
1589         struct btrfs_root *root = fs_info->dev_root;
1590         struct btrfs_key key;
1591         struct btrfs_dev_extent *dev_extent;
1592         struct btrfs_path *path;
1593         u64 hole_size;
1594         u64 max_hole_start;
1595         u64 max_hole_size;
1596         u64 extent_end;
1597         u64 search_end = device->total_bytes;
1598         int ret;
1599         int slot;
1600         struct extent_buffer *l;
1601
1602         /*
1603          * We don't want to overwrite the superblock on the drive nor any area
1604          * used by the boot loader (grub for example), so we make sure to start
1605          * at an offset of at least 1MB.
1606          */
1607         search_start = max_t(u64, search_start, SZ_1M);
1608
1609         path = btrfs_alloc_path();
1610         if (!path)
1611                 return -ENOMEM;
1612
1613         max_hole_start = search_start;
1614         max_hole_size = 0;
1615
1616 again:
1617         if (search_start >= search_end ||
1618                 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1619                 ret = -ENOSPC;
1620                 goto out;
1621         }
1622
1623         path->reada = READA_FORWARD;
1624         path->search_commit_root = 1;
1625         path->skip_locking = 1;
1626
1627         key.objectid = device->devid;
1628         key.offset = search_start;
1629         key.type = BTRFS_DEV_EXTENT_KEY;
1630
1631         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1632         if (ret < 0)
1633                 goto out;
1634         if (ret > 0) {
1635                 ret = btrfs_previous_item(root, path, key.objectid, key.type);
1636                 if (ret < 0)
1637                         goto out;
1638         }
1639
1640         while (1) {
1641                 l = path->nodes[0];
1642                 slot = path->slots[0];
1643                 if (slot >= btrfs_header_nritems(l)) {
1644                         ret = btrfs_next_leaf(root, path);
1645                         if (ret == 0)
1646                                 continue;
1647                         if (ret < 0)
1648                                 goto out;
1649
1650                         break;
1651                 }
1652                 btrfs_item_key_to_cpu(l, &key, slot);
1653
1654                 if (key.objectid < device->devid)
1655                         goto next;
1656
1657                 if (key.objectid > device->devid)
1658                         break;
1659
1660                 if (key.type != BTRFS_DEV_EXTENT_KEY)
1661                         goto next;
1662
1663                 if (key.offset > search_start) {
1664                         hole_size = key.offset - search_start;
1665
1666                         /*
1667                          * Have to check before we set max_hole_start, otherwise
1668                          * we could end up sending back this offset anyway.
1669                          */
1670                         if (contains_pending_extent(transaction, device,
1671                                                     &search_start,
1672                                                     hole_size)) {
1673                                 if (key.offset >= search_start) {
1674                                         hole_size = key.offset - search_start;
1675                                 } else {
1676                                         WARN_ON_ONCE(1);
1677                                         hole_size = 0;
1678                                 }
1679                         }
1680
1681                         if (hole_size > max_hole_size) {
1682                                 max_hole_start = search_start;
1683                                 max_hole_size = hole_size;
1684                         }
1685
1686                         /*
1687                          * If this free space is greater than what we need,
1688                          * it must be the max free space that we have found
1689                          * until now, so max_hole_start must point to the start
1690                          * of this free space and the length of this free space
1691                          * is stored in max_hole_size. Thus, we return
1692                          * max_hole_start and max_hole_size and go back to the
1693                          * caller.
1694                          */
1695                         if (hole_size >= num_bytes) {
1696                                 ret = 0;
1697                                 goto out;
1698                         }
1699                 }
1700
1701                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1702                 extent_end = key.offset + btrfs_dev_extent_length(l,
1703                                                                   dev_extent);
1704                 if (extent_end > search_start)
1705                         search_start = extent_end;
1706 next:
1707                 path->slots[0]++;
1708                 cond_resched();
1709         }
1710
1711         /*
1712          * At this point, search_start should be the end of
1713          * allocated dev extents, and when shrinking the device,
1714          * search_end may be smaller than search_start.
1715          */
1716         if (search_end > search_start) {
1717                 hole_size = search_end - search_start;
1718
1719                 if (contains_pending_extent(transaction, device, &search_start,
1720                                             hole_size)) {
1721                         btrfs_release_path(path);
1722                         goto again;
1723                 }
1724
1725                 if (hole_size > max_hole_size) {
1726                         max_hole_start = search_start;
1727                         max_hole_size = hole_size;
1728                 }
1729         }
1730
1731         /* See above. */
1732         if (max_hole_size < num_bytes)
1733                 ret = -ENOSPC;
1734         else
1735                 ret = 0;
1736
1737 out:
1738         btrfs_free_path(path);
1739         *start = max_hole_start;
1740         if (len)
1741                 *len = max_hole_size;
1742         return ret;
1743 }
1744
1745 int find_free_dev_extent(struct btrfs_trans_handle *trans,
1746                          struct btrfs_device *device, u64 num_bytes,
1747                          u64 *start, u64 *len)
1748 {
1749         /* FIXME use last free of some kind */
1750         return find_free_dev_extent_start(trans->transaction, device,
1751                                           num_bytes, 0, start, len);
1752 }
1753
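/*
 * Delete the dev extent item covering @start on @device and report the
 * extent's length back through @dev_extent_len.
 */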
1754 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1755                           struct btrfs_device *device,
1756                           u64 start, u64 *dev_extent_len)
1757 {
1758         struct btrfs_fs_info *fs_info = device->fs_info;
1759         struct btrfs_root *root = fs_info->dev_root;
1760         int ret;
1761         struct btrfs_path *path;
1762         struct btrfs_key key;
1763         struct btrfs_key found_key;
1764         struct extent_buffer *leaf = NULL;
1765         struct btrfs_dev_extent *extent = NULL;
1766
1767         path = btrfs_alloc_path();
1768         if (!path)
1769                 return -ENOMEM;
1770
1771         key.objectid = device->devid;
1772         key.offset = start;
1773         key.type = BTRFS_DEV_EXTENT_KEY;
1774 again:
1775         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1776         if (ret > 0) {
1777                 ret = btrfs_previous_item(root, path, key.objectid,
1778                                           BTRFS_DEV_EXTENT_KEY);
1779                 if (ret)
1780                         goto out;
1781                 leaf = path->nodes[0];
1782                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1783                 extent = btrfs_item_ptr(leaf, path->slots[0],
1784                                         struct btrfs_dev_extent);
1785                 BUG_ON(found_key.offset > start || found_key.offset +
1786                        btrfs_dev_extent_length(leaf, extent) < start);
1787                 key = found_key;
1788                 btrfs_release_path(path);
1789                 goto again;
1790         } else if (ret == 0) {
1791                 leaf = path->nodes[0];
1792                 extent = btrfs_item_ptr(leaf, path->slots[0],
1793                                         struct btrfs_dev_extent);
1794         } else {
1795                 btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
1796                 goto out;
1797         }
1798
1799         *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1800
1801         ret = btrfs_del_item(trans, root, path);
1802         if (ret) {
1803                 btrfs_handle_fs_error(fs_info, ret,
1804                                       "Failed to remove dev extent item");
1805         } else {
1806                 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1807         }
1808 out:
1809         btrfs_free_path(path);
1810         return ret;
1811 }
1812
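/*
 * Insert a dev extent item recording that [start, start + num_bytes) of
 * @device backs the chunk at @chunk_offset in the chunk tree.
 */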
1813 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1814                                   struct btrfs_device *device,
1815                                   u64 chunk_offset, u64 start, u64 num_bytes)
1816 {
1817         int ret;
1818         struct btrfs_path *path;
1819         struct btrfs_fs_info *fs_info = device->fs_info;
1820         struct btrfs_root *root = fs_info->dev_root;
1821         struct btrfs_dev_extent *extent;
1822         struct extent_buffer *leaf;
1823         struct btrfs_key key;
1824
1825         WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
1826         WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1827         path = btrfs_alloc_path();
1828         if (!path)
1829                 return -ENOMEM;
1830
1831         key.objectid = device->devid;
1832         key.offset = start;
1833         key.type = BTRFS_DEV_EXTENT_KEY;
1834         ret = btrfs_insert_empty_item(trans, root, path, &key,
1835                                       sizeof(*extent));
1836         if (ret)
1837                 goto out;
1838
1839         leaf = path->nodes[0];
1840         extent = btrfs_item_ptr(leaf, path->slots[0],
1841                                 struct btrfs_dev_extent);
1842         btrfs_set_dev_extent_chunk_tree(leaf, extent,
1843                                         BTRFS_CHUNK_TREE_OBJECTID);
1844         btrfs_set_dev_extent_chunk_objectid(leaf, extent,
1845                                             BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1846         btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1847
1848         btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1849         btrfs_mark_buffer_dirty(leaf);
1850 out:
1851         btrfs_free_path(path);
1852         return ret;
1853 }
1854
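/*
 * Return the logical offset right past the last mapped chunk, i.e. the
 * start offset to use for the next chunk allocation.
 */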
1855 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1856 {
1857         struct extent_map_tree *em_tree;
1858         struct extent_map *em;
1859         struct rb_node *n;
1860         u64 ret = 0;
1861
1862         em_tree = &fs_info->mapping_tree.map_tree;
1863         read_lock(&em_tree->lock);
1864         n = rb_last(&em_tree->map.rb_root);
1865         if (n) {
1866                 em = rb_entry(n, struct extent_map, rb_node);
1867                 ret = em->start + em->len;
1868         }
1869         read_unlock(&em_tree->lock);
1870
1871         return ret;
1872 }
1873
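/*
 * Find the highest devid currently stored in the chunk tree and return
 * the next free one via @devid_ret (1 if there are no dev items yet).
 */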
1874 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1875                                     u64 *devid_ret)
1876 {
1877         int ret;
1878         struct btrfs_key key;
1879         struct btrfs_key found_key;
1880         struct btrfs_path *path;
1881
1882         path = btrfs_alloc_path();
1883         if (!path)
1884                 return -ENOMEM;
1885
1886         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1887         key.type = BTRFS_DEV_ITEM_KEY;
1888         key.offset = (u64)-1;
1889
1890         ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1891         if (ret < 0)
1892                 goto error;
1893
1894         BUG_ON(ret == 0); /* Corruption */
1895
1896         ret = btrfs_previous_item(fs_info->chunk_root, path,
1897                                   BTRFS_DEV_ITEMS_OBJECTID,
1898                                   BTRFS_DEV_ITEM_KEY);
1899         if (ret) {
1900                 *devid_ret = 1;
1901         } else {
1902                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1903                                       path->slots[0]);
1904                 *devid_ret = found_key.offset + 1;
1905         }
1906         ret = 0;
1907 error:
1908         btrfs_free_path(path);
1909         return ret;
1910 }
1911
1912 /*
1913  * The device information is stored in the chunk root.
1914  * The btrfs_device struct should be fully filled in.
1915  */
1916 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1917                             struct btrfs_device *device)
1918 {
1919         int ret;
1920         struct btrfs_path *path;
1921         struct btrfs_dev_item *dev_item;
1922         struct extent_buffer *leaf;
1923         struct btrfs_key key;
1924         unsigned long ptr;
1925
1926         path = btrfs_alloc_path();
1927         if (!path)
1928                 return -ENOMEM;
1929
1930         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1931         key.type = BTRFS_DEV_ITEM_KEY;
1932         key.offset = device->devid;
1933
1934         ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1935                                       &key, sizeof(*dev_item));
1936         if (ret)
1937                 goto out;
1938
1939         leaf = path->nodes[0];
1940         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1941
1942         btrfs_set_device_id(leaf, dev_item, device->devid);
1943         btrfs_set_device_generation(leaf, dev_item, 0);
1944         btrfs_set_device_type(leaf, dev_item, device->type);
1945         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1946         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1947         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1948         btrfs_set_device_total_bytes(leaf, dev_item,
1949                                      btrfs_device_get_disk_total_bytes(device));
1950         btrfs_set_device_bytes_used(leaf, dev_item,
1951                                     btrfs_device_get_bytes_used(device));
1952         btrfs_set_device_group(leaf, dev_item, 0);
1953         btrfs_set_device_seek_speed(leaf, dev_item, 0);
1954         btrfs_set_device_bandwidth(leaf, dev_item, 0);
1955         btrfs_set_device_start_offset(leaf, dev_item, 0);
1956
1957         ptr = btrfs_device_uuid(dev_item);
1958         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1959         ptr = btrfs_device_fsid(dev_item);
1960         write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1961                             ptr, BTRFS_FSID_SIZE);
1962         btrfs_mark_buffer_dirty(leaf);
1963
1964         ret = 0;
1965 out:
1966         btrfs_free_path(path);
1967         return ret;
1968 }
1969
1970 /*
1971  * Function to update ctime/mtime for a given device path.
1972  * Mainly used for ctime/mtime based probes like libblkid.
1973  */
1974 static void update_dev_time(const char *path_name)
1975 {
1976         struct file *filp;
1977
1978         filp = filp_open(path_name, O_RDWR, 0);
1979         if (IS_ERR(filp))
1980                 return;
1981         file_update_time(filp);
1982         filp_close(filp, NULL);
1983 }
1984
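/*
 * Delete the dev item of @device from the chunk tree, in its own
 * transaction that is committed on success.
 */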
1985 static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
1986                              struct btrfs_device *device)
1987 {
1988         struct btrfs_root *root = fs_info->chunk_root;
1989         int ret;
1990         struct btrfs_path *path;
1991         struct btrfs_key key;
1992         struct btrfs_trans_handle *trans;
1993
1994         path = btrfs_alloc_path();
1995         if (!path)
1996                 return -ENOMEM;
1997
1998         trans = btrfs_start_transaction(root, 0);
1999         if (IS_ERR(trans)) {
2000                 btrfs_free_path(path);
2001                 return PTR_ERR(trans);
2002         }
2003         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2004         key.type = BTRFS_DEV_ITEM_KEY;
2005         key.offset = device->devid;
2006
2007         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2008         if (ret) {
2009                 if (ret > 0)
2010                         ret = -ENOENT;
2011                 btrfs_abort_transaction(trans, ret);
2012                 btrfs_end_transaction(trans);
2013                 goto out;
2014         }
2015
2016         ret = btrfs_del_item(trans, root, path);
2017         if (ret) {
2018                 btrfs_abort_transaction(trans, ret);
2019                 btrfs_end_transaction(trans);
2020         }
2021
2022 out:
2023         btrfs_free_path(path);
2024         if (!ret)
2025                 ret = btrfs_commit_transaction(trans);
2026         return ret;
2027 }
2028
2029 /*
2030  * Verify that @num_devices satisfies the RAID profile constraints in the whole
2031  * filesystem. It's up to the caller to adjust that number regarding e.g. device
2032  * replace.
2033  */
2034 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
2035                 u64 num_devices)
2036 {
2037         u64 all_avail;
2038         unsigned seq;
2039         int i;
2040
2041         do {
2042                 seq = read_seqbegin(&fs_info->profiles_lock);
2043
2044                 all_avail = fs_info->avail_data_alloc_bits |
2045                             fs_info->avail_system_alloc_bits |
2046                             fs_info->avail_metadata_alloc_bits;
2047         } while (read_seqretry(&fs_info->profiles_lock, seq));
2048
2049         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2050                 if (!(all_avail & btrfs_raid_array[i].bg_flag))
2051                         continue;
2052
2053                 if (num_devices < btrfs_raid_array[i].devs_min) {
2054                         int ret = btrfs_raid_array[i].mindev_error;
2055
2056                         if (ret)
2057                                 return ret;
2058                 }
2059         }
2060
2061         return 0;
2062 }
2063
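/*
 * Return any device on the list other than @device that is not missing
 * and has its bdev open, or NULL if there is none.
 */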
2064 static struct btrfs_device *btrfs_find_next_active_device(
2065                 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2066 {
2067         struct btrfs_device *next_device;
2068
2069         list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2070                 if (next_device != device &&
2071                     !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2072                     && next_device->bdev)
2073                         return next_device;
2074         }
2075
2076         return NULL;
2077 }
2078
2079 /*
2080  * Helper function to check if the given device is part of s_bdev / latest_bdev
2081  * and replace it with the provided or the next active device. In the context
2082  * where this function is called, there should always be another device (or
2083  * this_dev) which is active.
2084  */
2085 void btrfs_assign_next_active_device(struct btrfs_device *device,
2086                                      struct btrfs_device *this_dev)
2087 {
2088         struct btrfs_fs_info *fs_info = device->fs_info;
2089         struct btrfs_device *next_device;
2090
2091         if (this_dev)
2092                 next_device = this_dev;
2093         else
2094                 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2095                                                                 device);
2096         ASSERT(next_device);
2097
2098         if (fs_info->sb->s_bdev && fs_info->sb->s_bdev == device->bdev)
2100                 fs_info->sb->s_bdev = next_device->bdev;
2101
2102         if (fs_info->fs_devices->latest_bdev == device->bdev)
2103                 fs_info->fs_devices->latest_bdev = next_device->bdev;
2104 }
2105
2106 /*
2107  * Return btrfs_fs_devices::num_devices excluding the device that's being
2108  * currently replaced.
2109  */
2110 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2111 {
2112         u64 num_devices = fs_info->fs_devices->num_devices;
2113
2114         down_read(&fs_info->dev_replace.rwsem);
2115         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2116                 ASSERT(num_devices > 1);
2117                 num_devices--;
2118         }
2119         up_read(&fs_info->dev_replace.rwsem);
2120
2121         return num_devices;
2122 }
2123
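/*
 * Remove a device from a mounted filesystem: shrink it to zero bytes,
 * delete its dev item, unhook it from the device lists and, if it was
 * writeable, wipe its superblocks so it is no longer recognized as btrfs.
 */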
2124 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2125                 u64 devid)
2126 {
2127         struct btrfs_device *device;
2128         struct btrfs_fs_devices *cur_devices;
2129         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2130         u64 num_devices;
2131         int ret = 0;
2132
2133         mutex_lock(&uuid_mutex);
2134
2135         num_devices = btrfs_num_devices(fs_info);
2136
2137         ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2138         if (ret)
2139                 goto out;
2140
2141         device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2142
2143         if (IS_ERR(device)) {
2144                 if (PTR_ERR(device) == -ENOENT &&
2145                     strcmp(device_path, "missing") == 0)
2146                         ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2147                 else
2148                         ret = PTR_ERR(device);
2149                 goto out;
2150         }
2151
2152         if (btrfs_pinned_by_swapfile(fs_info, device)) {
2153                 btrfs_warn_in_rcu(fs_info,
2154                   "cannot remove device %s (devid %llu) due to active swapfile",
2155                                   rcu_str_deref(device->name), device->devid);
2156                 ret = -ETXTBSY;
2157                 goto out;
2158         }
2159
2160         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2161                 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2162                 goto out;
2163         }
2164
2165         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2166             fs_info->fs_devices->rw_devices == 1) {
2167                 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2168                 goto out;
2169         }
2170
2171         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2172                 mutex_lock(&fs_info->chunk_mutex);
2173                 list_del_init(&device->dev_alloc_list);
2174                 device->fs_devices->rw_devices--;
2175                 mutex_unlock(&fs_info->chunk_mutex);
2176         }
2177
2178         mutex_unlock(&uuid_mutex);
2179         ret = btrfs_shrink_device(device, 0);
2180         mutex_lock(&uuid_mutex);
2181         if (ret)
2182                 goto error_undo;
2183
2184         /*
2185          * TODO: the superblock still includes this device in its num_devices
2186          * counter although write_all_supers() is not locked out. This
2187          * could give a filesystem state which requires a degraded mount.
2188          */
2189         ret = btrfs_rm_dev_item(fs_info, device);
2190         if (ret)
2191                 goto error_undo;
2192
2193         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2194         btrfs_scrub_cancel_dev(fs_info, device);
2195
2196         /*
2197          * the device list mutex makes sure that we don't change
2198          * the device list while someone else is writing out all
2199  * the device supers. Whoever is writing all supers should
2200          * lock the device list mutex before getting the number of
2201          * devices in the super block (super_copy). Conversely,
2202          * whoever updates the number of devices in the super block
2203          * (super_copy) should hold the device list mutex.
2204          */
2205
2206         /*
2207  * In normal cases cur_devices == fs_devices. But when deleting
2208  * a seed device, cur_devices should point to its own fs_devices,
2209  * listed under fs_devices->seed.
2210          */
2211         cur_devices = device->fs_devices;
2212         mutex_lock(&fs_devices->device_list_mutex);
2213         list_del_rcu(&device->dev_list);
2214
2215         cur_devices->num_devices--;
2216         cur_devices->total_devices--;
2217         /* Update total_devices of the parent fs_devices if it's seed */
2218         if (cur_devices != fs_devices)
2219                 fs_devices->total_devices--;
2220
2221         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2222                 cur_devices->missing_devices--;
2223
2224         btrfs_assign_next_active_device(device, NULL);
2225
2226         if (device->bdev) {
2227                 cur_devices->open_devices--;
2228                 /* remove sysfs entry */
2229                 btrfs_sysfs_rm_device_link(fs_devices, device);
2230         }
2231
2232         num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2233         btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2234         mutex_unlock(&fs_devices->device_list_mutex);
2235
2236         /*
2237          * at this point, the device is zero sized and detached from
2238          * the devices list.  All that's left is to zero out the old
2239          * supers and free the device.
2240          */
2241         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2242                 btrfs_scratch_superblocks(device->bdev, device->name->str);
2243
2244         btrfs_close_bdev(device);
2245         call_rcu(&device->rcu, free_device_rcu);
2246
2247         if (cur_devices->open_devices == 0) {
2248                 while (fs_devices) {
2249                         if (fs_devices->seed == cur_devices) {
2250                                 fs_devices->seed = cur_devices->seed;
2251                                 break;
2252                         }
2253                         fs_devices = fs_devices->seed;
2254                 }
2255                 cur_devices->seed = NULL;
2256                 close_fs_devices(cur_devices);
2257                 free_fs_devices(cur_devices);
2258         }
2259
2260 out:
2261         mutex_unlock(&uuid_mutex);
2262         return ret;
2263
2264 error_undo:
2265         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2266                 mutex_lock(&fs_info->chunk_mutex);
2267                 list_add(&device->dev_alloc_list,
2268                          &fs_devices->alloc_list);
2269                 device->fs_devices->rw_devices++;
2270                 mutex_unlock(&fs_info->chunk_mutex);
2271         }
2272         goto out;
2273 }
2274
2275 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2276 {
2277         struct btrfs_fs_devices *fs_devices;
2278
2279         lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2280
2281         /*
2282  * In case of an fs with no seed, srcdev->fs_devices will point
2283  * to the fs_devices of fs_info. However, when the dev being replaced
2284  * is a seed dev, it will point to the seed's local fs_devices. In
2285  * short, srcdev will have its correct fs_devices in both cases.
2286          */
2287         fs_devices = srcdev->fs_devices;
2288
2289         list_del_rcu(&srcdev->dev_list);
2290         list_del(&srcdev->dev_alloc_list);
2291         fs_devices->num_devices--;
2292         if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2293                 fs_devices->missing_devices--;
2294
2295         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2296                 fs_devices->rw_devices--;
2297
2298         if (srcdev->bdev)
2299                 fs_devices->open_devices--;
2300 }
2301
2302 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
2303                                       struct btrfs_device *srcdev)
2304 {
2305         struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2306
2307         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
2308                 /* zero out the old super if it is writable */
2309                 btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
2310         }
2311
2312         btrfs_close_bdev(srcdev);
2313         call_rcu(&srcdev->rcu, free_device_rcu);
2314
2315         /* if there are no devs left we'd rather delete the fs_devices */
2316         if (!fs_devices->num_devices) {
2317                 struct btrfs_fs_devices *tmp_fs_devices;
2318
2319                 /*
2320                  * On a mounted FS, num_devices can't be zero unless it's a
2321  * seed. In case of a seed device being replaced, the replace
2322  * target is added to the sprout FS, so there will be no
2323  * devices left under the seed FS.
2324                  */
2325                 ASSERT(fs_devices->seeding);
2326
2327                 tmp_fs_devices = fs_info->fs_devices;
2328                 while (tmp_fs_devices) {
2329                         if (tmp_fs_devices->seed == fs_devices) {
2330                                 tmp_fs_devices->seed = fs_devices->seed;
2331                                 break;
2332                         }
2333                         tmp_fs_devices = tmp_fs_devices->seed;
2334                 }
2335                 fs_devices->seed = NULL;
2336                 close_fs_devices(fs_devices);
2337                 free_fs_devices(fs_devices);
2338         }
2339 }
2340
2341 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2342 {
2343         struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2344
2346         mutex_lock(&fs_devices->device_list_mutex);
2347
2348         btrfs_sysfs_rm_device_link(fs_devices, tgtdev);
2349
2350         if (tgtdev->bdev)
2351                 fs_devices->open_devices--;
2352
2353         fs_devices->num_devices--;
2354
2355         btrfs_assign_next_active_device(tgtdev, NULL);
2356
2357         list_del_rcu(&tgtdev->dev_list);
2358
2359         mutex_unlock(&fs_devices->device_list_mutex);
2360
2361         /*
2362  * The update_dev_time() within btrfs_scratch_superblocks()
2363  * may lead to a call to btrfs_show_devname() which will try
2364  * to hold device_list_mutex. Here this device is already out
2365  * of the device list, so we don't have to hold the
2366  * device_list_mutex lock.
2367          */
2368         btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2369
2370         btrfs_close_bdev(tgtdev);
2371         call_rcu(&tgtdev->rcu, free_device_rcu);
2372 }
2373
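/*
 * Read the superblock from @device_path and look up the corresponding
 * btrfs_device in the given filesystem's device list.
 */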
2374 static struct btrfs_device *btrfs_find_device_by_path(
2375                 struct btrfs_fs_info *fs_info, const char *device_path)
2376 {
2377         int ret = 0;
2378         struct btrfs_super_block *disk_super;
2379         u64 devid;
2380         u8 *dev_uuid;
2381         struct block_device *bdev;
2382         struct buffer_head *bh;
2383         struct btrfs_device *device;
2384
2385         ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2386                                     fs_info->bdev_holder, 0, &bdev, &bh);
2387         if (ret)
2388                 return ERR_PTR(ret);
2389         disk_super = (struct btrfs_super_block *)bh->b_data;
2390         devid = btrfs_stack_device_id(&disk_super->dev_item);
2391         dev_uuid = disk_super->dev_item.uuid;
2392         if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2393                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2394                                            disk_super->metadata_uuid, true);
2395         else
2396                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2397                                            disk_super->fsid, true);
2398
2399         brelse(bh);
2400         if (!device)
2401                 device = ERR_PTR(-ENOENT);
2402         blkdev_put(bdev, FMODE_READ);
2403         return device;
2404 }
2405
2406 /*
2407  * Lookup a device given by device id, or the path if the id is 0.
2408  */
2409 struct btrfs_device *btrfs_find_device_by_devspec(
2410                 struct btrfs_fs_info *fs_info, u64 devid,
2411                 const char *device_path)
2412 {
2413         struct btrfs_device *device;
2414
2415         if (devid) {
2416                 device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2417                                            NULL, true);
2418                 if (!device)
2419                         return ERR_PTR(-ENOENT);
2420                 return device;
2421         }
2422
2423         if (!device_path || !device_path[0])
2424                 return ERR_PTR(-EINVAL);
2425
2426         if (strcmp(device_path, "missing") == 0) {
2427                 /* Find first missing device */
2428                 list_for_each_entry(device, &fs_info->fs_devices->devices,
2429                                     dev_list) {
2430                         if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2431                                      &device->dev_state) && !device->bdev)
2432                                 return device;
2433                 }
2434                 return ERR_PTR(-ENOENT);
2435         }
2436
2437         return btrfs_find_device_by_path(fs_info, device_path);
2438 }
2439
2440 /*
2441  * Does all the dirty work required for changing the filesystem's UUID.
2442  */
2443 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2444 {
2445         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2446         struct btrfs_fs_devices *old_devices;
2447         struct btrfs_fs_devices *seed_devices;
2448         struct btrfs_super_block *disk_super = fs_info->super_copy;
2449         struct btrfs_device *device;
2450         u64 super_flags;
2451
2452         lockdep_assert_held(&uuid_mutex);
2453         if (!fs_devices->seeding)
2454                 return -EINVAL;
2455
2456         seed_devices = alloc_fs_devices(NULL, NULL);
2457         if (IS_ERR(seed_devices))
2458                 return PTR_ERR(seed_devices);
2459
2460         old_devices = clone_fs_devices(fs_devices);
2461         if (IS_ERR(old_devices)) {
2462                 kfree(seed_devices);
2463                 return PTR_ERR(old_devices);
2464         }
2465
2466         list_add(&old_devices->fs_list, &fs_uuids);
2467
2468         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2469         seed_devices->opened = 1;
2470         INIT_LIST_HEAD(&seed_devices->devices);
2471         INIT_LIST_HEAD(&seed_devices->alloc_list);
2472         mutex_init(&seed_devices->device_list_mutex);
2473
2474         mutex_lock(&fs_devices->device_list_mutex);
2475         list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2476                               synchronize_rcu);
2477         list_for_each_entry(device, &seed_devices->devices, dev_list)
2478                 device->fs_devices = seed_devices;
2479
2480         mutex_lock(&fs_info->chunk_mutex);
2481         list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2482         mutex_unlock(&fs_info->chunk_mutex);
2483
2484         fs_devices->seeding = 0;
2485         fs_devices->num_devices = 0;
2486         fs_devices->open_devices = 0;
2487         fs_devices->missing_devices = 0;
2488         fs_devices->rotating = 0;
2489         fs_devices->seed = seed_devices;
2490
2491         generate_random_uuid(fs_devices->fsid);
2492         memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2493         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2494         mutex_unlock(&fs_devices->device_list_mutex);
2495
2496         super_flags = btrfs_super_flags(disk_super) &
2497                       ~BTRFS_SUPER_FLAG_SEEDING;
2498         btrfs_set_super_flags(disk_super, super_flags);
2499
2500         return 0;
2501 }
2502
2503 /*
2504  * Store the expected generation for seed devices in device items.
2505  */
2506 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2507                                struct btrfs_fs_info *fs_info)
2508 {
2509         struct btrfs_root *root = fs_info->chunk_root;
2510         struct btrfs_path *path;
2511         struct extent_buffer *leaf;
2512         struct btrfs_dev_item *dev_item;
2513         struct btrfs_device *device;
2514         struct btrfs_key key;
2515         u8 fs_uuid[BTRFS_FSID_SIZE];
2516         u8 dev_uuid[BTRFS_UUID_SIZE];
2517         u64 devid;
2518         int ret;
2519
2520         path = btrfs_alloc_path();
2521         if (!path)
2522                 return -ENOMEM;
2523
2524         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2525         key.offset = 0;
2526         key.type = BTRFS_DEV_ITEM_KEY;
2527
2528         while (1) {
2529                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2530                 if (ret < 0)
2531                         goto error;
2532
2533                 leaf = path->nodes[0];
2534 next_slot:
2535                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2536                         ret = btrfs_next_leaf(root, path);
2537                         if (ret > 0)
2538                                 break;
2539                         if (ret < 0)
2540                                 goto error;
2541                         leaf = path->nodes[0];
2542                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2543                         btrfs_release_path(path);
2544                         continue;
2545                 }
2546
2547                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2548                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2549                     key.type != BTRFS_DEV_ITEM_KEY)
2550                         break;
2551
2552                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2553                                           struct btrfs_dev_item);
2554                 devid = btrfs_device_id(leaf, dev_item);
2555                 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2556                                    BTRFS_UUID_SIZE);
2557                 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2558                                    BTRFS_FSID_SIZE);
2559                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2560                                            fs_uuid, true);
2561                 BUG_ON(!device); /* Logic error */
2562
2563                 if (device->fs_devices->seeding) {
2564                         btrfs_set_device_generation(leaf, dev_item,
2565                                                     device->generation);
2566                         btrfs_mark_buffer_dirty(leaf);
2567                 }
2568
2569                 path->slots[0]++;
2570                 goto next_slot;
2571         }
2572         ret = 0;
2573 error:
2574         btrfs_free_path(path);
2575         return ret;
2576 }
2577
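/*
 * Add a new device to a mounted filesystem. If the fs is a seed
 * filesystem, this also sprouts it: the fs gets a fresh fsid and the
 * existing devices become its read-only seed.
 */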
2578 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2579 {
2580         struct btrfs_root *root = fs_info->dev_root;
2581         struct request_queue *q;
2582         struct btrfs_trans_handle *trans;
2583         struct btrfs_device *device;
2584         struct block_device *bdev;
2585         struct super_block *sb = fs_info->sb;
2586         struct rcu_string *name;
2587         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2588         u64 orig_super_total_bytes;
2589         u64 orig_super_num_devices;
2590         int seeding_dev = 0;
2591         int ret = 0;
2592         bool unlocked = false;
2593
2594         if (sb_rdonly(sb) && !fs_devices->seeding)
2595                 return -EROFS;
2596
2597         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2598                                   fs_info->bdev_holder);
2599         if (IS_ERR(bdev))
2600                 return PTR_ERR(bdev);
2601
2602         if (fs_devices->seeding) {
2603                 seeding_dev = 1;
2604                 down_write(&sb->s_umount);
2605                 mutex_lock(&uuid_mutex);
2606         }
2607
2608         filemap_write_and_wait(bdev->bd_inode->i_mapping);
2609
2610         mutex_lock(&fs_devices->device_list_mutex);
2611         list_for_each_entry(device, &fs_devices->devices, dev_list) {
2612                 if (device->bdev == bdev) {
2613                         ret = -EEXIST;
2614                         mutex_unlock(&fs_devices->device_list_mutex);
2616                         goto error;
2617                 }
2618         }
2619         mutex_unlock(&fs_devices->device_list_mutex);
2620
2621         device = btrfs_alloc_device(fs_info, NULL, NULL);
2622         if (IS_ERR(device)) {
2623                 /* we can safely leave the fs_devices entry around */
2624                 ret = PTR_ERR(device);
2625                 goto error;
2626         }
2627
2628         name = rcu_string_strdup(device_path, GFP_KERNEL);
2629         if (!name) {
2630                 ret = -ENOMEM;
2631                 goto error_free_device;
2632         }
2633         rcu_assign_pointer(device->name, name);
2634
2635         trans = btrfs_start_transaction(root, 0);
2636         if (IS_ERR(trans)) {
2637                 ret = PTR_ERR(trans);
2638                 goto error_free_device;
2639         }
2640
2641         q = bdev_get_queue(bdev);
2642         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2643         device->generation = trans->transid;
2644         device->io_width = fs_info->sectorsize;
2645         device->io_align = fs_info->sectorsize;
2646         device->sector_size = fs_info->sectorsize;
2647         device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2648                                          fs_info->sectorsize);
2649         device->disk_total_bytes = device->total_bytes;
2650         device->commit_total_bytes = device->total_bytes;
2651         device->fs_info = fs_info;
2652         device->bdev = bdev;
2653         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2654         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2655         device->mode = FMODE_EXCL;
2656         device->dev_stats_valid = 1;
2657         set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2658
2659         if (seeding_dev) {
2660                 sb->s_flags &= ~SB_RDONLY;
2661                 ret = btrfs_prepare_sprout(fs_info);
2662                 if (ret) {
2663                         btrfs_abort_transaction(trans, ret);
2664                         goto error_trans;
2665                 }
2666         }
2667
2668         device->fs_devices = fs_devices;
2669
2670         mutex_lock(&fs_devices->device_list_mutex);
2671         mutex_lock(&fs_info->chunk_mutex);
2672         list_add_rcu(&device->dev_list, &fs_devices->devices);
2673         list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2674         fs_devices->num_devices++;
2675         fs_devices->open_devices++;
2676         fs_devices->rw_devices++;
2677         fs_devices->total_devices++;
2678         fs_devices->total_rw_bytes += device->total_bytes;
2679
2680         atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2681
2682         if (!blk_queue_nonrot(q))
2683                 fs_devices->rotating = 1;
2684
2685         orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2686         btrfs_set_super_total_bytes(fs_info->super_copy,
2687                 round_down(orig_super_total_bytes + device->total_bytes,
2688                            fs_info->sectorsize));
2689
2690         orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2691         btrfs_set_super_num_devices(fs_info->super_copy,
2692                                     orig_super_num_devices + 1);
2693
2694         /* add sysfs device entry */
2695         btrfs_sysfs_add_device_link(fs_devices, device);
2696
2697         /*
2698          * we've got more storage, clear any full flags on the space
2699          * infos
2700          */
2701         btrfs_clear_space_info_full(fs_info);
2702
2703         mutex_unlock(&fs_info->chunk_mutex);
2704         mutex_unlock(&fs_devices->device_list_mutex);
2705
2706         if (seeding_dev) {
2707                 mutex_lock(&fs_info->chunk_mutex);
2708                 ret = init_first_rw_device(trans, fs_info);
2709                 mutex_unlock(&fs_info->chunk_mutex);
2710                 if (ret) {
2711                         btrfs_abort_transaction(trans, ret);
2712                         goto error_sysfs;
2713                 }
2714         }
2715
2716         ret = btrfs_add_dev_item(trans, device);
2717         if (ret) {
2718                 btrfs_abort_transaction(trans, ret);
2719                 goto error_sysfs;
2720         }
2721
2722         if (seeding_dev) {
2723                 char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2724
2725                 ret = btrfs_finish_sprout(trans, fs_info);
2726                 if (ret) {
2727                         btrfs_abort_transaction(trans, ret);
2728                         goto error_sysfs;
2729                 }
2730
2731                 /* Sprouting would change the fsid of the mounted root,
2732                  * so rename the fsid in sysfs.
2733                  */
2734                 snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2735                                                 fs_info->fs_devices->fsid);
2736                 if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
2737                         btrfs_warn(fs_info,
2738                                    "sysfs: failed to create fsid for sprout");
2739         }
2740
2741         ret = btrfs_commit_transaction(trans);
2742
2743         if (seeding_dev) {
2744                 mutex_unlock(&uuid_mutex);
2745                 up_write(&sb->s_umount);
2746                 unlocked = true;
2747
2748                 if (ret) /* transaction commit */
2749                         return ret;
2750
2751                 ret = btrfs_relocate_sys_chunks(fs_info);
2752                 if (ret < 0)
2753                         btrfs_handle_fs_error(fs_info, ret,
2754                                     "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2755                 trans = btrfs_attach_transaction(root);
2756                 if (IS_ERR(trans)) {
2757                         if (PTR_ERR(trans) == -ENOENT)
2758                                 return 0;
2759                         ret = PTR_ERR(trans);
2760                         trans = NULL;
2761                         goto error_sysfs;
2762                 }
2763                 ret = btrfs_commit_transaction(trans);
2764         }
2765
2766         /* Update ctime/mtime for libblkid */
2767         update_dev_time(device_path);
2768         return ret;
2769
2770 error_sysfs:
2771         btrfs_sysfs_rm_device_link(fs_devices, device);
2772         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2773         mutex_lock(&fs_info->chunk_mutex);
2774         list_del_rcu(&device->dev_list);
2775         list_del(&device->dev_alloc_list);
2776         fs_info->fs_devices->num_devices--;
2777         fs_info->fs_devices->open_devices--;
2778         fs_info->fs_devices->rw_devices--;
2779         fs_info->fs_devices->total_devices--;
2780         fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2781         atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2782         btrfs_set_super_total_bytes(fs_info->super_copy,
2783                                     orig_super_total_bytes);
2784         btrfs_set_super_num_devices(fs_info->super_copy,
2785                                     orig_super_num_devices);
2786         mutex_unlock(&fs_info->chunk_mutex);
2787         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2788 error_trans:
2789         if (seeding_dev)
2790                 sb->s_flags |= SB_RDONLY;
2791         if (trans)
2792                 btrfs_end_transaction(trans);
2793 error_free_device:
2794         btrfs_free_device(device);
2795 error:
2796         blkdev_put(bdev, FMODE_EXCL);
2797         if (seeding_dev && !unlocked) {
2798                 mutex_unlock(&uuid_mutex);
2799                 up_write(&sb->s_umount);
2800         }
2801         return ret;
2802 }
2803
2804 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2805                                         struct btrfs_device *device)
2806 {
2807         int ret;
2808         struct btrfs_path *path;
2809         struct btrfs_root *root = device->fs_info->chunk_root;
2810         struct btrfs_dev_item *dev_item;
2811         struct extent_buffer *leaf;
2812         struct btrfs_key key;
2813
2814         path = btrfs_alloc_path();
2815         if (!path)
2816                 return -ENOMEM;
2817
2818         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2819         key.type = BTRFS_DEV_ITEM_KEY;
2820         key.offset = device->devid;
2821
2822         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2823         if (ret < 0)
2824                 goto out;
2825
2826         if (ret > 0) {
2827                 ret = -ENOENT;
2828                 goto out;
2829         }
2830
2831         leaf = path->nodes[0];
2832         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2833
2834         btrfs_set_device_id(leaf, dev_item, device->devid);
2835         btrfs_set_device_type(leaf, dev_item, device->type);
2836         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2837         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2838         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2839         btrfs_set_device_total_bytes(leaf, dev_item,
2840                                      btrfs_device_get_disk_total_bytes(device));
2841         btrfs_set_device_bytes_used(leaf, dev_item,
2842                                     btrfs_device_get_bytes_used(device));
2843         btrfs_mark_buffer_dirty(leaf);
2844
2845 out:
2846         btrfs_free_path(path);
2847         return ret;
2848 }
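
/*
 * A note on the search in btrfs_update_device() above (illustrative):
 * btrfs_search_slot() is called with ins_len == 0 and cow == 1, i.e. no
 * room is reserved for an insertion, but the path is COWed so the found
 * leaf can be modified in place:
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 *	// ret == 0: key found, item writable at path->slots[0]
 *	// ret >  0: key absent (mapped to -ENOENT above)
 *	// ret <  0: I/O or allocation error
 */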
2849
2850 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2851                       struct btrfs_device *device, u64 new_size)
2852 {
2853         struct btrfs_fs_info *fs_info = device->fs_info;
2854         struct btrfs_super_block *super_copy = fs_info->super_copy;
2855         struct btrfs_fs_devices *fs_devices;
2856         u64 old_total;
2857         u64 diff;
2858
2859         if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2860                 return -EACCES;
2861
2862         new_size = round_down(new_size, fs_info->sectorsize);
2863
2864         mutex_lock(&fs_info->chunk_mutex);
2865         old_total = btrfs_super_total_bytes(super_copy);
2866         diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2867
2868         if (new_size <= device->total_bytes ||
2869             test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2870                 mutex_unlock(&fs_info->chunk_mutex);
2871                 return -EINVAL;
2872         }
2873
2874         fs_devices = fs_info->fs_devices;
2875
2876         btrfs_set_super_total_bytes(super_copy,
2877                         round_down(old_total + diff, fs_info->sectorsize));
2878         device->fs_devices->total_rw_bytes += diff;
2879
2880         btrfs_device_set_total_bytes(device, new_size);
2881         btrfs_device_set_disk_total_bytes(device, new_size);
2882         btrfs_clear_space_info_full(device->fs_info);
2883         if (list_empty(&device->resized_list))
2884                 list_add_tail(&device->resized_list,
2885                               &fs_devices->resized_devices);
2886         mutex_unlock(&fs_info->chunk_mutex);
2887
2888         return btrfs_update_device(trans, device);
2889 }
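
/*
 * A worked example of the rounding in btrfs_grow_device() above (values
 * assumed, for illustration). With sectorsize == 4096 and a device at
 * total_bytes == 8 GiB, a request of new_size == 10 GiB + 1000 bytes is
 * first rounded down to 10 GiB (a sector multiple); diff then becomes
 * round_down(10 GiB - 8 GiB, 4096) == 2 GiB, which is added to both the
 * superblock total and fs_devices->total_rw_bytes.
 */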
2890
2891 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2892 {
2893         struct btrfs_fs_info *fs_info = trans->fs_info;
2894         struct btrfs_root *root = fs_info->chunk_root;
2895         int ret;
2896         struct btrfs_path *path;
2897         struct btrfs_key key;
2898
2899         path = btrfs_alloc_path();
2900         if (!path)
2901                 return -ENOMEM;
2902
2903         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2904         key.offset = chunk_offset;
2905         key.type = BTRFS_CHUNK_ITEM_KEY;
2906
2907         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2908         if (ret < 0)
2909                 goto out;
2910         else if (ret > 0) { /* Logic error or corruption */
2911                 btrfs_handle_fs_error(fs_info, -ENOENT,
2912                                       "Failed lookup while freeing chunk.");
2913                 ret = -ENOENT;
2914                 goto out;
2915         }
2916
2917         ret = btrfs_del_item(trans, root, path);
2918         if (ret < 0)
2919                 btrfs_handle_fs_error(fs_info, ret,
2920                                       "Failed to delete chunk item.");
2921 out:
2922         btrfs_free_path(path);
2923         return ret;
2924 }
2925
2926 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2927 {
2928         struct btrfs_super_block *super_copy = fs_info->super_copy;
2929         struct btrfs_disk_key *disk_key;
2930         struct btrfs_chunk *chunk;
2931         u8 *ptr;
2932         int ret = 0;
2933         u32 num_stripes;
2934         u32 array_size;
2935         u32 len = 0;
2936         u32 cur;
2937         struct btrfs_key key;
2938
2939         mutex_lock(&fs_info->chunk_mutex);
2940         array_size = btrfs_super_sys_array_size(super_copy);
2941
2942         ptr = super_copy->sys_chunk_array;
2943         cur = 0;
2944
2945         while (cur < array_size) {
2946                 disk_key = (struct btrfs_disk_key *)ptr;
2947                 btrfs_disk_key_to_cpu(&key, disk_key);
2948
2949                 len = sizeof(*disk_key);
2950
2951                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2952                         chunk = (struct btrfs_chunk *)(ptr + len);
2953                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2954                         len += btrfs_chunk_item_size(num_stripes);
2955                 } else {
2956                         ret = -EIO;
2957                         break;
2958                 }
2959                 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2960                     key.offset == chunk_offset) {
2961                         memmove(ptr, ptr + len, array_size - (cur + len));
2962                         array_size -= len;
2963                         btrfs_set_super_sys_array_size(super_copy, array_size);
2964                 } else {
2965                         ptr += len;
2966                         cur += len;
2967                 }
2968         }
2969         mutex_unlock(&fs_info->chunk_mutex);
2970         return ret;
2971 }
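
/*
 * Layout sketch (illustrative) of the sys_chunk_array parsed above: a
 * packed sequence of key/chunk pairs, sys_array_size bytes long:
 *
 *	[btrfs_disk_key][btrfs_chunk + N stripes][btrfs_disk_key][...]
 *	<- sizeof(key) -><- btrfs_chunk_item_size(N) ->
 *
 * Deleting an entry is a memmove() of the tail over the entry followed
 * by shrinking sys_array_size, exactly as done in the loop above.
 */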
2972
2973 /*
2974  * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2975  * @fs_info: Filesystem whose chunk mapping tree is searched.
2976  * @logical: Logical block offset in bytes.
2977  * @length: Length of the extent in bytes.
2978  * Return: The chunk mapping on success, or ERR_PTR(-EINVAL) if none is found.
2979  */
2980 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2981                                        u64 logical, u64 length)
2982 {
2983         struct extent_map_tree *em_tree;
2984         struct extent_map *em;
2985
2986         em_tree = &fs_info->mapping_tree.map_tree;
2987         read_lock(&em_tree->lock);
2988         em = lookup_extent_mapping(em_tree, logical, length);
2989         read_unlock(&em_tree->lock);
2990
2991         if (!em) {
2992                 btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2993                            logical, length);
2994                 return ERR_PTR(-EINVAL);
2995         }
2996
2997         if (em->start > logical || em->start + em->len < logical) {
2998                 btrfs_crit(fs_info,
2999                            "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
3000                            logical, length, em->start, em->start + em->len);
3001                 free_extent_map(em);
3002                 return ERR_PTR(-EINVAL);
3003         }
3004
3005         /* callers are responsible for dropping em's ref. */
3006         return em;
3007 }
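
/*
 * A minimal caller sketch for btrfs_get_chunk_map() (illustrative;
 * mirrors btrfs_remove_chunk() below). The lookup reference must be
 * dropped by the caller:
 *
 *	em = btrfs_get_chunk_map(fs_info, logical, len);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;	// per-device stripe layout
 *	// ... use map ...
 *	free_extent_map(em);	// release the lookup reference
 */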
3008
3009 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3010 {
3011         struct btrfs_fs_info *fs_info = trans->fs_info;
3012         struct extent_map *em;
3013         struct map_lookup *map;
3014         u64 dev_extent_len = 0;
3015         int i, ret = 0;
3016         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3017
3018         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3019         if (IS_ERR(em)) {
3020                 /*
3021                  * This is a logic error, but we don't want to just rely on the
3022                  * user having built with ASSERT enabled, so if ASSERT doesn't
3023                  * do anything we still error out.
3024                  */
3025                 ASSERT(0);
3026                 return PTR_ERR(em);
3027         }
3028         map = em->map_lookup;
3029         mutex_lock(&fs_info->chunk_mutex);
3030         check_system_chunk(trans, map->type);
3031         mutex_unlock(&fs_info->chunk_mutex);
3032
3033         /*
3034          * Take the device list mutex to prevent races with the final phase of
3035          * a device replace operation that replaces the device object associated
3036          * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
3037          */
3038         mutex_lock(&fs_devices->device_list_mutex);
3039         for (i = 0; i < map->num_stripes; i++) {
3040                 struct btrfs_device *device = map->stripes[i].dev;
3041                 ret = btrfs_free_dev_extent(trans, device,
3042                                             map->stripes[i].physical,
3043                                             &dev_extent_len);
3044                 if (ret) {
3045                         mutex_unlock(&fs_devices->device_list_mutex);
3046                         btrfs_abort_transaction(trans, ret);
3047                         goto out;
3048                 }
3049
3050                 if (device->bytes_used > 0) {
3051                         mutex_lock(&fs_info->chunk_mutex);
3052                         btrfs_device_set_bytes_used(device,
3053                                         device->bytes_used - dev_extent_len);
3054                         atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3055                         btrfs_clear_space_info_full(fs_info);
3056                         mutex_unlock(&fs_info->chunk_mutex);
3057                 }
3058
3059                 ret = btrfs_update_device(trans, device);
3060                 if (ret) {
3061                         mutex_unlock(&fs_devices->device_list_mutex);
3062                         btrfs_abort_transaction(trans, ret);
3063                         goto out;
3064                 }
3065         }
3066         mutex_unlock(&fs_devices->device_list_mutex);
3067
3068         ret = btrfs_free_chunk(trans, chunk_offset);
3069         if (ret) {
3070                 btrfs_abort_transaction(trans, ret);
3071                 goto out;
3072         }
3073
3074         trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3075
3076         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3077                 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3078                 if (ret) {
3079                         btrfs_abort_transaction(trans, ret);
3080                         goto out;
3081                 }
3082         }
3083
3084         ret = btrfs_remove_block_group(trans, chunk_offset, em);
3085         if (ret) {
3086                 btrfs_abort_transaction(trans, ret);
3087                 goto out;
3088         }
3089
3090 out:
3091         /* once for us */
3092         free_extent_map(em);
3093         return ret;
3094 }
3095
3096 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3097 {
3098         struct btrfs_root *root = fs_info->chunk_root;
3099         struct btrfs_trans_handle *trans;
3100         int ret;
3101
3102         /*
3103          * Prevent races with automatic removal of unused block groups.
3104          * After we relocate and before we remove the chunk with offset
3105          * chunk_offset, automatic removal of the block group can kick in,
3106          * resulting in a failure when calling btrfs_remove_chunk() below.
3107          *
3108          * Make sure to acquire this mutex before doing a tree search (dev
3109          * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3110          * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3111          * we release the path used to search the chunk/dev tree and before
3112          * the current task acquires this mutex and calls us.
3113          */
3114         lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
3115
3116         ret = btrfs_can_relocate(fs_info, chunk_offset);
3117         if (ret)
3118                 return -ENOSPC;
3119
3120         /* step one, relocate all the extents inside this chunk */
3121         btrfs_scrub_pause(fs_info);
3122         ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3123         btrfs_scrub_continue(fs_info);
3124         if (ret)
3125                 return ret;
3126
3127         /*
3128          * We add the kobjects here (and after forcing data chunk creation)
3129          * since relocation is the only place we'll create chunks of a new
3130          * type at runtime.  The only place where we'll remove the last
3131          * chunk of a type is the call immediately below this one.  Even
3132          * so, we're protected against races with the cleaner thread since
3133          * we're covered by the delete_unused_bgs_mutex.
3134          */
3135         btrfs_add_raid_kobjects(fs_info);
3136
3137         trans = btrfs_start_trans_remove_block_group(root->fs_info,
3138                                                      chunk_offset);
3139         if (IS_ERR(trans)) {
3140                 ret = PTR_ERR(trans);
3141                 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3142                 return ret;
3143         }
3144
3145         /*
3146          * step two, delete the device extents and the
3147          * chunk tree entries
3148          */
3149         ret = btrfs_remove_chunk(trans, chunk_offset);
3150         btrfs_end_transaction(trans);
3151         return ret;
3152 }
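
/*
 * Sketch of the locking pattern expected of callers (illustrative;
 * taken from btrfs_relocate_sys_chunks() and __btrfs_balance() below):
 *
 *	mutex_lock(&fs_info->delete_unused_bgs_mutex);
 *	// search the chunk tree for a candidate chunk_offset
 *	ret = btrfs_relocate_chunk(fs_info, chunk_offset);
 *	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 *
 * Holding the mutex across both the tree search and the relocation is
 * what keeps the cleaner thread from removing the block group first.
 */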
3153
3154 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3155 {
3156         struct btrfs_root *chunk_root = fs_info->chunk_root;
3157         struct btrfs_path *path;
3158         struct extent_buffer *leaf;
3159         struct btrfs_chunk *chunk;
3160         struct btrfs_key key;
3161         struct btrfs_key found_key;
3162         u64 chunk_type;
3163         bool retried = false;
3164         int failed = 0;
3165         int ret;
3166
3167         path = btrfs_alloc_path();
3168         if (!path)
3169                 return -ENOMEM;
3170
3171 again:
3172         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3173         key.offset = (u64)-1;
3174         key.type = BTRFS_CHUNK_ITEM_KEY;
3175
3176         while (1) {
3177                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3178                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3179                 if (ret < 0) {
3180                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3181                         goto error;
3182                 }
3183                 BUG_ON(ret == 0); /* Corruption */
3184
3185                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3186                                           key.type);
3187                 if (ret)
3188                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3189                 if (ret < 0)
3190                         goto error;
3191                 if (ret > 0)
3192                         break;
3193
3194                 leaf = path->nodes[0];
3195                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3196
3197                 chunk = btrfs_item_ptr(leaf, path->slots[0],
3198                                        struct btrfs_chunk);
3199                 chunk_type = btrfs_chunk_type(leaf, chunk);
3200                 btrfs_release_path(path);
3201
3202                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3203                         ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3204                         if (ret == -ENOSPC)
3205                                 failed++;
3206                         else
3207                                 BUG_ON(ret);
3208                 }
3209                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3210
3211                 if (found_key.offset == 0)
3212                         break;
3213                 key.offset = found_key.offset - 1;
3214         }
3215         ret = 0;
3216         if (failed && !retried) {
3217                 failed = 0;
3218                 retried = true;
3219                 goto again;
3220         } else if (WARN_ON(failed && retried)) {
3221                 ret = -ENOSPC;
3222         }
3223 error:
3224         btrfs_free_path(path);
3225         return ret;
3226 }
3227
3228 /*
3229  * return 1 : a data chunk was allocated successfully,
3230  * return <0: error while allocating a data chunk,
3231  * return 0 : no need to allocate a data chunk.
3232  */
3233 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3234                                       u64 chunk_offset)
3235 {
3236         struct btrfs_block_group_cache *cache;
3237         u64 bytes_used;
3238         u64 chunk_type;
3239
3240         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3241         ASSERT(cache);
3242         chunk_type = cache->flags;
3243         btrfs_put_block_group(cache);
3244
3245         if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
3246                 spin_lock(&fs_info->data_sinfo->lock);
3247                 bytes_used = fs_info->data_sinfo->bytes_used;
3248                 spin_unlock(&fs_info->data_sinfo->lock);
3249
3250                 if (!bytes_used) {
3251                         struct btrfs_trans_handle *trans;
3252                         int ret;
3253
3254                         trans = btrfs_join_transaction(fs_info->tree_root);
3255                         if (IS_ERR(trans))
3256                                 return PTR_ERR(trans);
3257
3258                         ret = btrfs_force_chunk_alloc(trans,
3259                                                       BTRFS_BLOCK_GROUP_DATA);
3260                         btrfs_end_transaction(trans);
3261                         if (ret < 0)
3262                                 return ret;
3263
3264                         btrfs_add_raid_kobjects(fs_info);
3265
3266                         return 1;
3267                 }
3268         }
3269         return 0;
3270 }
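
/*
 * How the tri-state return is consumed (illustrative; mirrors the use
 * in __btrfs_balance() below):
 *
 *	ret = btrfs_may_alloc_data_chunk(fs_info, found_key.offset);
 *	if (ret < 0)
 *		goto error;		// transaction or allocation failure
 *	else if (ret == 1)
 *		chunk_reserved = 1;	// empty data chunk pre-allocated
 *	// ret == 0: nothing needed to be allocated
 */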
3271
3272 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3273                                struct btrfs_balance_control *bctl)
3274 {
3275         struct btrfs_root *root = fs_info->tree_root;
3276         struct btrfs_trans_handle *trans;
3277         struct btrfs_balance_item *item;
3278         struct btrfs_disk_balance_args disk_bargs;
3279         struct btrfs_path *path;
3280         struct extent_buffer *leaf;
3281         struct btrfs_key key;
3282         int ret, err;
3283
3284         path = btrfs_alloc_path();
3285         if (!path)
3286                 return -ENOMEM;
3287
3288         trans = btrfs_start_transaction(root, 0);
3289         if (IS_ERR(trans)) {
3290                 btrfs_free_path(path);
3291                 return PTR_ERR(trans);
3292         }
3293
3294         key.objectid = BTRFS_BALANCE_OBJECTID;
3295         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3296         key.offset = 0;
3297
3298         ret = btrfs_insert_empty_item(trans, root, path, &key,
3299                                       sizeof(*item));
3300         if (ret)
3301                 goto out;
3302
3303         leaf = path->nodes[0];
3304         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3305
3306         memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3307
3308         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3309         btrfs_set_balance_data(leaf, item, &disk_bargs);
3310         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3311         btrfs_set_balance_meta(leaf, item, &disk_bargs);
3312         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3313         btrfs_set_balance_sys(leaf, item, &disk_bargs);
3314
3315         btrfs_set_balance_flags(leaf, item, bctl->flags);
3316
3317         btrfs_mark_buffer_dirty(leaf);
3318 out:
3319         btrfs_free_path(path);
3320         err = btrfs_commit_transaction(trans);
3321         if (err && !ret)
3322                 ret = err;
3323         return ret;
3324 }
3325
3326 static int del_balance_item(struct btrfs_fs_info *fs_info)
3327 {
3328         struct btrfs_root *root = fs_info->tree_root;
3329         struct btrfs_trans_handle *trans;
3330         struct btrfs_path *path;
3331         struct btrfs_key key;
3332         int ret, err;
3333
3334         path = btrfs_alloc_path();
3335         if (!path)
3336                 return -ENOMEM;
3337
3338         trans = btrfs_start_transaction(root, 0);
3339         if (IS_ERR(trans)) {
3340                 btrfs_free_path(path);
3341                 return PTR_ERR(trans);
3342         }
3343
3344         key.objectid = BTRFS_BALANCE_OBJECTID;
3345         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3346         key.offset = 0;
3347
3348         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3349         if (ret < 0)
3350                 goto out;
3351         if (ret > 0) {
3352                 ret = -ENOENT;
3353                 goto out;
3354         }
3355
3356         ret = btrfs_del_item(trans, root, path);
3357 out:
3358         btrfs_free_path(path);
3359         err = btrfs_commit_transaction(trans);
3360         if (err && !ret)
3361                 ret = err;
3362         return ret;
3363 }
3364
3365 /*
3366  * This is a heuristic used to reduce the number of chunks balanced on
3367  * resume after balance was interrupted.
3368  */
3369 static void update_balance_args(struct btrfs_balance_control *bctl)
3370 {
3371         /*
3372          * Turn on soft mode for chunk types that were being converted.
3373          */
3374         if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3375                 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3376         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3377                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3378         if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3379                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3380
3381         /*
3382          * Turn on the usage filter if it is not already in use.  The idea is
3383          * that chunks that we have already balanced should be
3384          * reasonably full.  Don't do it for chunks that are being
3385          * converted - that will keep us from relocating unconverted
3386          * (albeit full) chunks.
3387          */
3388         if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3389             !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3390             !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3391                 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3392                 bctl->data.usage = 90;
3393         }
3394         if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3395             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3396             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3397                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3398                 bctl->sys.usage = 90;
3399         }
3400         if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3401             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3402             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3403                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3404                 bctl->meta.usage = 90;
3405         }
3406 }
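
/*
 * A worked example of the resume heuristic above (assumed arguments).
 * A balance started as "-dconvert=raid1" that was interrupted resumes
 * as if it were "-dconvert=raid1,soft", skipping already-converted
 * chunks; a plain "-d" balance resumes with an implied "usage=90"
 * filter, skipping chunks it already compacted.
 */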
3407
3408 /*
3409  * Clear the balance status in fs_info and delete the balance item from disk.
3410  */
3411 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3412 {
3413         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3414         int ret;
3415
3416         BUG_ON(!fs_info->balance_ctl);
3417
3418         spin_lock(&fs_info->balance_lock);
3419         fs_info->balance_ctl = NULL;
3420         spin_unlock(&fs_info->balance_lock);
3421
3422         kfree(bctl);
3423         ret = del_balance_item(fs_info);
3424         if (ret)
3425                 btrfs_handle_fs_error(fs_info, ret, NULL);
3426 }
3427
3428 /*
3429  * Balance filters.  Return 1 if chunk should be filtered out
3430  * (should not be balanced).
3431  */
3432 static int chunk_profiles_filter(u64 chunk_type,
3433                                  struct btrfs_balance_args *bargs)
3434 {
3435         chunk_type = chunk_to_extended(chunk_type) &
3436                                 BTRFS_EXTENDED_PROFILE_MASK;
3437
3438         if (bargs->profiles & chunk_type)
3439                 return 0;
3440
3441         return 1;
3442 }
3443
3444 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3445                               struct btrfs_balance_args *bargs)
3446 {
3447         struct btrfs_block_group_cache *cache;
3448         u64 chunk_used;
3449         u64 user_thresh_min;
3450         u64 user_thresh_max;
3451         int ret = 1;
3452
3453         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3454         chunk_used = btrfs_block_group_used(&cache->item);
3455
3456         if (bargs->usage_min == 0)
3457                 user_thresh_min = 0;
3458         else
3459                 user_thresh_min = div_factor_fine(cache->key.offset,
3460                                         bargs->usage_min);
3461
3462         if (bargs->usage_max == 0)
3463                 user_thresh_max = 1;
3464         else if (bargs->usage_max > 100)
3465                 user_thresh_max = cache->key.offset;
3466         else
3467                 user_thresh_max = div_factor_fine(cache->key.offset,
3468                                         bargs->usage_max);
3469
3470         if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3471                 ret = 0;
3472
3473         btrfs_put_block_group(cache);
3474         return ret;
3475 }
3476
3477 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3478                 u64 chunk_offset, struct btrfs_balance_args *bargs)
3479 {
3480         struct btrfs_block_group_cache *cache;
3481         u64 chunk_used, user_thresh;
3482         int ret = 1;
3483
3484         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3485         chunk_used = btrfs_block_group_used(&cache->item);
3486
3487         if (bargs->usage_min == 0)
3488                 user_thresh = 1;
3489         else if (bargs->usage > 100)
3490                 user_thresh = cache->key.offset;
3491         else
3492                 user_thresh = div_factor_fine(cache->key.offset,
3493                                               bargs->usage);
3494
3495         if (chunk_used < user_thresh)
3496                 ret = 0;
3497
3498         btrfs_put_block_group(cache);
3499         return ret;
3500 }
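
/*
 * Worked example for the threshold math above (assumed values).
 * div_factor_fine(size, n) computes size * n / 100, so for a 1 GiB
 * chunk (cache->key.offset) and usage=50 the threshold is 512 MiB: a
 * chunk with fewer used bytes than that returns 0 (not filtered out)
 * and gets balanced, while fuller chunks are skipped.
 */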
3501
3502 static int chunk_devid_filter(struct extent_buffer *leaf,
3503                               struct btrfs_chunk *chunk,
3504                               struct btrfs_balance_args *bargs)
3505 {
3506         struct btrfs_stripe *stripe;
3507         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3508         int i;
3509
3510         for (i = 0; i < num_stripes; i++) {
3511                 stripe = btrfs_stripe_nr(chunk, i);
3512                 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3513                         return 0;
3514         }
3515
3516         return 1;
3517 }
3518
3519 /* [pstart, pend) */
3520 static int chunk_drange_filter(struct extent_buffer *leaf,
3521                                struct btrfs_chunk *chunk,
3522                                struct btrfs_balance_args *bargs)
3523 {
3524         struct btrfs_stripe *stripe;
3525         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3526         u64 stripe_offset;
3527         u64 stripe_length;
3528         int factor;
3529         int i;
3530
3531         if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3532                 return 0;
3533
3534         if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3535              BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3536                 factor = num_stripes / 2;
3537         } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3538                 factor = num_stripes - 1;
3539         } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3540                 factor = num_stripes - 2;
3541         } else {
3542                 factor = num_stripes;
3543         }
3544
3545         for (i = 0; i < num_stripes; i++) {
3546                 stripe = btrfs_stripe_nr(chunk, i);
3547                 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3548                         continue;
3549
3550                 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3551                 stripe_length = btrfs_chunk_length(leaf, chunk);
3552                 stripe_length = div_u64(stripe_length, factor);
3553
3554                 if (stripe_offset < bargs->pend &&
3555                     stripe_offset + stripe_length > bargs->pstart)
3556                         return 0;
3557         }
3558
3559         return 1;
3560 }
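
/*
 * Worked example of the per-device stripe length above (assumed
 * values). For a RAID0 chunk of length 2 GiB with 2 stripes, factor ==
 * num_stripes == 2, so each device holds a 1 GiB stripe; the extent
 * [stripe_offset, stripe_offset + 1 GiB) on the matching devid is then
 * tested for overlap with [pstart, pend).
 */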
3561
3562 /* [vstart, vend) */
3563 static int chunk_vrange_filter(struct extent_buffer *leaf,
3564                                struct btrfs_chunk *chunk,
3565                                u64 chunk_offset,
3566                                struct btrfs_balance_args *bargs)
3567 {
3568         if (chunk_offset < bargs->vend &&
3569             chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3570                 /* at least part of the chunk is inside this vrange */
3571                 return 0;
3572
3573         return 1;
3574 }
3575
3576 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3577                                struct btrfs_chunk *chunk,
3578                                struct btrfs_balance_args *bargs)
3579 {
3580         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3581
3582         if (bargs->stripes_min <= num_stripes
3583                         && num_stripes <= bargs->stripes_max)
3584                 return 0;
3585
3586         return 1;
3587 }
3588
3589 static int chunk_soft_convert_filter(u64 chunk_type,
3590                                      struct btrfs_balance_args *bargs)
3591 {
3592         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3593                 return 0;
3594
3595         chunk_type = chunk_to_extended(chunk_type) &
3596                                 BTRFS_EXTENDED_PROFILE_MASK;
3597
3598         if (bargs->target == chunk_type)
3599                 return 1;
3600
3601         return 0;
3602 }
3603
3604 static int should_balance_chunk(struct btrfs_fs_info *fs_info,
3605                                 struct extent_buffer *leaf,
3606                                 struct btrfs_chunk *chunk, u64 chunk_offset)
3607 {
3608         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3609         struct btrfs_balance_args *bargs = NULL;
3610         u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3611
3612         /* type filter */
3613         if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3614               (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3615                 return 0;
3616         }
3617
3618         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3619                 bargs = &bctl->data;
3620         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3621                 bargs = &bctl->sys;
3622         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3623                 bargs = &bctl->meta;
3624
3625         /* profiles filter */
3626         if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3627             chunk_profiles_filter(chunk_type, bargs)) {
3628                 return 0;
3629         }
3630
3631         /* usage filter */
3632         if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3633             chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3634                 return 0;
3635         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3636             chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3637                 return 0;
3638         }
3639
3640         /* devid filter */
3641         if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3642             chunk_devid_filter(leaf, chunk, bargs)) {
3643                 return 0;
3644         }
3645
3646         /* drange filter, makes sense only with devid filter */
3647         if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3648             chunk_drange_filter(leaf, chunk, bargs)) {
3649                 return 0;
3650         }
3651
3652         /* vrange filter */
3653         if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3654             chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3655                 return 0;
3656         }
3657
3658         /* stripes filter */
3659         if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3660             chunk_stripes_range_filter(leaf, chunk, bargs)) {
3661                 return 0;
3662         }
3663
3664         /* soft profile changing mode */
3665         if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3666             chunk_soft_convert_filter(chunk_type, bargs)) {
3667                 return 0;
3668         }
3669
3670         /*
3671          * limited by count, must be the last filter
3672          */
3673         if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3674                 if (bargs->limit == 0)
3675                         return 0;
3676                 else
3677                         bargs->limit--;
3678         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3679                 /*
3680                  * Same logic as the 'limit' filter; the minimum cannot be
3681                  * determined here because we do not have the global information
3682                  * about the count of all chunks that satisfy the filters.
3683                  */
3684                 if (bargs->limit_max == 0)
3685                         return 0;
3686                 else
3687                         bargs->limit_max--;
3688         }
3689
3690         return 1;
3691 }
3692
3693 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3694 {
3695         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3696         struct btrfs_root *chunk_root = fs_info->chunk_root;
3697         u64 chunk_type;
3698         struct btrfs_chunk *chunk;
3699         struct btrfs_path *path = NULL;
3700         struct btrfs_key key;
3701         struct btrfs_key found_key;
3702         struct extent_buffer *leaf;
3703         int slot;
3704         int ret;
3705         int enospc_errors = 0;
3706         bool counting = true;
3707         /* The single value limit and min/max limits share the same bytes (a union); save them to restore later */
3708         u64 limit_data = bctl->data.limit;
3709         u64 limit_meta = bctl->meta.limit;
3710         u64 limit_sys = bctl->sys.limit;
3711         u32 count_data = 0;
3712         u32 count_meta = 0;
3713         u32 count_sys = 0;
3714         int chunk_reserved = 0;
3715
3716         path = btrfs_alloc_path();
3717         if (!path) {
3718                 ret = -ENOMEM;
3719                 goto error;
3720         }
3721
3722         /* zero out stat counters */
3723         spin_lock(&fs_info->balance_lock);
3724         memset(&bctl->stat, 0, sizeof(bctl->stat));
3725         spin_unlock(&fs_info->balance_lock);
3726 again:
3727         if (!counting) {
3728                 /*
3729                  * The single value limit and min/max limits share the same
3730                  * bytes (a union), so restore the saved values here.
3731                  */
3732                 bctl->data.limit = limit_data;
3733                 bctl->meta.limit = limit_meta;
3734                 bctl->sys.limit = limit_sys;
3735         }
3736         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3737         key.offset = (u64)-1;
3738         key.type = BTRFS_CHUNK_ITEM_KEY;
3739
3740         while (1) {
3741                 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3742                     atomic_read(&fs_info->balance_cancel_req)) {
3743                         ret = -ECANCELED;
3744                         goto error;
3745                 }
3746
3747                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3748                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3749                 if (ret < 0) {
3750                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3751                         goto error;
3752                 }
3753
3754                 /*
3755                  * This shouldn't happen; it would mean the last
3756                  * relocation failed.
3757                  */
3758                 if (ret == 0)
3759                         BUG(); /* FIXME break ? */
3760
3761                 ret = btrfs_previous_item(chunk_root, path, 0,
3762                                           BTRFS_CHUNK_ITEM_KEY);
3763                 if (ret) {
3764                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3765                         ret = 0;
3766                         break;
3767                 }
3768
3769                 leaf = path->nodes[0];
3770                 slot = path->slots[0];
3771                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3772
3773                 if (found_key.objectid != key.objectid) {
3774                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3775                         break;
3776                 }
3777
3778                 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3779                 chunk_type = btrfs_chunk_type(leaf, chunk);
3780
3781                 if (!counting) {
3782                         spin_lock(&fs_info->balance_lock);
3783                         bctl->stat.considered++;
3784                         spin_unlock(&fs_info->balance_lock);
3785                 }
3786
3787                 ret = should_balance_chunk(fs_info, leaf, chunk,
3788                                            found_key.offset);
3789
3790                 btrfs_release_path(path);
3791                 if (!ret) {
3792                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3793                         goto loop;
3794                 }
3795
3796                 if (counting) {
3797                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3798                         spin_lock(&fs_info->balance_lock);
3799                         bctl->stat.expected++;
3800                         spin_unlock(&fs_info->balance_lock);
3801
3802                         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3803                                 count_data++;
3804                         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3805                                 count_sys++;
3806                         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3807                                 count_meta++;
3808
3809                         goto loop;
3810                 }
3811
3812                 /*
3813                  * Apply the limit_min filter; no need to check whether the
3814                  * LIMITS filter is used, since limit_min is 0 by default.
3815                  */
3816                 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3817                                         count_data < bctl->data.limit_min)
3818                                 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3819                                         count_meta < bctl->meta.limit_min)
3820                                 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3821                                         count_sys < bctl->sys.limit_min)) {
3822                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3823                         goto loop;
3824                 }
3825
3826                 if (!chunk_reserved) {
3827                         /*
3828                          * We may be relocating the only data chunk we have,
3829                          * which could potentially end up losing the data
3830                          * raid profile, so let's allocate an empty one in
3831                          * advance.
3832                          */
3833                         ret = btrfs_may_alloc_data_chunk(fs_info,
3834                                                          found_key.offset);
3835                         if (ret < 0) {
3836                                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3837                                 goto error;
3838                         } else if (ret == 1) {
3839                                 chunk_reserved = 1;
3840                         }
3841                 }
3842
3843                 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3844                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3845                 if (ret == -ENOSPC) {
3846                         enospc_errors++;
3847                 } else if (ret == -ETXTBSY) {
3848                         btrfs_info(fs_info,
3849            "skipping relocation of block group %llu due to active swapfile",
3850                                    found_key.offset);
3851                         ret = 0;
3852                 } else if (ret) {
3853                         goto error;
3854                 } else {
3855                         spin_lock(&fs_info->balance_lock);
3856                         bctl->stat.completed++;
3857                         spin_unlock(&fs_info->balance_lock);
3858                 }
3859 loop:
3860                 if (found_key.offset == 0)
3861                         break;
3862                 key.offset = found_key.offset - 1;
3863         }
3864
3865         if (counting) {
3866                 btrfs_release_path(path);
3867                 counting = false;
3868                 goto again;
3869         }
3870 error:
3871         btrfs_free_path(path);
3872         if (enospc_errors) {
3873                 btrfs_info(fs_info, "%d enospc errors during balance",
3874                            enospc_errors);
3875                 if (!ret)
3876                         ret = -ENOSPC;
3877         }
3878
3879         return ret;
3880 }
3881
3882 /**
3883  * alloc_profile_is_valid - see if a given profile is valid and reduced
3884  * @flags: profile to validate
3885  * @extended: if true @flags is treated as an extended profile
3886  */
3887 static int alloc_profile_is_valid(u64 flags, int extended)
3888 {
3889         u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3890                                BTRFS_BLOCK_GROUP_PROFILE_MASK);
3891
3892         flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3893
3894         /* 1) check that all other bits are zeroed */
3895         if (flags & ~mask)
3896                 return 0;
3897
3898         /* 2) see if profile is reduced */
3899         if (flags == 0)
3900                 return !extended; /* "0" is valid for usual profiles */
3901
3902         /* true if exactly one bit set */
3903         return is_power_of_2(flags);
3904 }
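
/*
 * Examples for the check above (illustrative). RAID1 alone is valid
 * (exactly one profile bit set); RAID1 | RAID10 is not reduced (two
 * bits) and is rejected; flags == 0 is valid only for non-extended
 * profiles, where it denotes "single".
 */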
3905
3906 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3907 {
3908         /* cancel requested || normal exit path */
3909         return atomic_read(&fs_info->balance_cancel_req) ||
3910                 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3911                  atomic_read(&fs_info->balance_cancel_req) == 0);
3912 }
3913
3914 /* Non-zero return value signifies invalidity */
3915 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3916                 u64 allowed)
3917 {
3918         return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3919                 (!alloc_profile_is_valid(bctl_arg->target, 1) ||
3920                  (bctl_arg->target & ~allowed)));
3921 }
3922
3923 /*
3924  * Fill @buf with textual description of balance filter flags @bargs, up to
3925  * @size_buf including the terminating null. The output may be trimmed if it
3926  * does not fit into the provided buffer.
3927  */
3928 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
3929                                  u32 size_buf)
3930 {
3931         int ret;
3932         u32 size_bp = size_buf;
3933         char *bp = buf;
3934         u64 flags = bargs->flags;
3935         char tmp_buf[128] = {'\0'};
3936
3937         if (!flags)
3938                 return;
3939
3940 #define CHECK_APPEND_NOARG(a)                                           \
3941         do {                                                            \
3942                 ret = snprintf(bp, size_bp, (a));                       \
3943                 if (ret < 0 || ret >= size_bp)                          \
3944                         goto out_overflow;                              \
3945                 size_bp -= ret;                                         \
3946                 bp += ret;                                              \
3947         } while (0)
3948
3949 #define CHECK_APPEND_1ARG(a, v1)                                        \
3950         do {                                                            \
3951                 ret = snprintf(bp, size_bp, (a), (v1));                 \
3952                 if (ret < 0 || ret >= size_bp)                          \
3953                         goto out_overflow;                              \
3954                 size_bp -= ret;                                         \
3955                 bp += ret;                                              \
3956         } while (0)
3957
3958 #define CHECK_APPEND_2ARG(a, v1, v2)                                    \
3959         do {                                                            \
3960                 ret = snprintf(bp, size_bp, (a), (v1), (v2));           \
3961                 if (ret < 0 || ret >= size_bp)                          \
3962                         goto out_overflow;                              \
3963                 size_bp -= ret;                                         \
3964                 bp += ret;                                              \
3965         } while (0)
3966
3967         if (flags & BTRFS_BALANCE_ARGS_CONVERT) {
3968                 int index = btrfs_bg_flags_to_raid_index(bargs->target);
3969
3970                 CHECK_APPEND_1ARG("convert=%s,", get_raid_name(index));
3971         }
3972
3973         if (flags & BTRFS_BALANCE_ARGS_SOFT)
3974                 CHECK_APPEND_NOARG("soft,");
3975
3976         if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
3977                 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
3978                                             sizeof(tmp_buf));
3979                 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
3980         }
3981
3982         if (flags & BTRFS_BALANCE_ARGS_USAGE)
3983                 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
3984
3985         if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
3986                 CHECK_APPEND_2ARG("usage=%u..%u,",
3987                                   bargs->usage_min, bargs->usage_max);
3988
3989         if (flags & BTRFS_BALANCE_ARGS_DEVID)
3990                 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
3991
3992         if (flags & BTRFS_BALANCE_ARGS_DRANGE)
3993                 CHECK_APPEND_2ARG("drange=%llu..%llu,",
3994                                   bargs->pstart, bargs->pend);
3995
3996         if (flags & BTRFS_BALANCE_ARGS_VRANGE)
3997                 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
3998                                   bargs->vstart, bargs->vend);
3999
4000         if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4001                 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4002
4003         if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4004                 CHECK_APPEND_2ARG("limit=%u..%u,",
4005                                 bargs->limit_min, bargs->limit_max);
4006
4007         if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4008                 CHECK_APPEND_2ARG("stripes=%u..%u,",
4009                                   bargs->stripes_min, bargs->stripes_max);
4010
4011 #undef CHECK_APPEND_2ARG
4012 #undef CHECK_APPEND_1ARG
4013 #undef CHECK_APPEND_NOARG
4014
4015 out_overflow:
4016
4017         if (size_bp < size_buf)
4018                 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4019         else
4020                 buf[0] = '\0';
4021 }
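
/*
 * An assumed example of the resulting format: for args with CONVERT,
 * SOFT and USAGE set, @buf would read "convert=raid1,soft,usage=90".
 * Each clause appends a trailing comma, and the final comma is
 * stripped at out_overflow above.
 */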
4022
4023 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4024 {
4025         u32 size_buf = 1024;
4026         char tmp_buf[192] = {'\0'};
4027         char *buf;
4028         char *bp;
4029         u32 size_bp = size_buf;
4030         int ret;
4031         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4032
4033         buf = kzalloc(size_buf, GFP_KERNEL);
4034         if (!buf)
4035                 return;
4036
4037         bp = buf;
4038
4039 #define CHECK_APPEND_1ARG(a, v1)                                        \
4040         do {                                                            \
4041                 ret = snprintf(bp, size_bp, (a), (v1));                 \
4042                 if (ret < 0 || ret >= size_bp)                          \
4043                         goto out_overflow;                              \
4044                 size_bp -= ret;                                         \
4045                 bp += ret;                                              \
4046         } while (0)
4047
4048         if (bctl->flags & BTRFS_BALANCE_FORCE)
4049                 CHECK_APPEND_1ARG("%s", "-f ");
4050
4051         if (bctl->flags & BTRFS_BALANCE_DATA) {
4052                 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4053                 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4054         }
4055
4056         if (bctl->flags & BTRFS_BALANCE_METADATA) {
4057                 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4058                 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4059         }
4060
4061         if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4062                 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4063                 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4064         }
4065
4066 #undef CHECK_APPEND_1ARG
4067
4068 out_overflow:
4069
4070         if (size_bp < size_buf)
4071                 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4072         btrfs_info(fs_info, "balance: %s %s",
4073                    (bctl->flags & BTRFS_BALANCE_RESUME) ?
4074                    "resume" : "start", buf);
4075
4076         kfree(buf);
4077 }
4078
4079 /*
4080  * Should be called with the balance mutex held.
4081  */
4082 int btrfs_balance(struct btrfs_fs_info *fs_info,
4083                   struct btrfs_balance_control *bctl,
4084                   struct btrfs_ioctl_balance_args *bargs)
4085 {
4086         u64 meta_target, data_target;
4087         u64 allowed;
4088         int mixed = 0;
4089         int ret;
4090         u64 num_devices;
4091         unsigned seq;
4092         bool reducing_integrity;
4093
4094         if (btrfs_fs_closing(fs_info) ||
4095             atomic_read(&fs_info->balance_pause_req) ||
4096             atomic_read(&fs_info->balance_cancel_req)) {
4097                 ret = -EINVAL;
4098                 goto out;
4099         }
4100
4101         allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4102         if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4103                 mixed = 1;
4104
4105         /*
4106          * In case of mixed groups both data and meta should be picked,
4107          * and identical options should be given for both of them.
4108          */
4109         allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4110         if (mixed && (bctl->flags & allowed)) {
4111                 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4112                     !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4113                     memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4114                         btrfs_err(fs_info,
4115           "balance: mixed groups data and metadata options must be the same");
4116                         ret = -EINVAL;
4117                         goto out;
4118                 }
4119         }
4120
4121         num_devices = btrfs_num_devices(fs_info);
4122
4123         allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
4124         if (num_devices > 1)
4125                 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
4126         if (num_devices > 2)
4127                 allowed |= BTRFS_BLOCK_GROUP_RAID5;
4128         if (num_devices > 3)
4129                 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
4130                             BTRFS_BLOCK_GROUP_RAID6);
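        /*
         * Worked example (assumed device count): with num_devices == 3,
         * "allowed" ends up as single|dup|raid0|raid1|raid5, so a
         * convert target of raid10 or raid6 is rejected below.
         */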
4131         if (validate_convert_profile(&bctl->data, allowed)) {
4132                 int index = btrfs_bg_flags_to_raid_index(bctl->data.target);
4133
4134                 btrfs_err(fs_info,
4135                           "balance: invalid convert data profile %s",
4136                           get_raid_name(index));
4137                 ret = -EINVAL;
4138                 goto out;
4139         }
4140         if (validate_convert_profile(&bctl->meta, allowed)) {
4141                 int index = btrfs_bg_flags_to_raid_index(bctl->meta.target);
4142
4143                 btrfs_err(fs_info,
4144                           "balance: invalid convert metadata profile %s",
4145                           get_raid_name(index));
4146                 ret = -EINVAL;
4147                 goto out;
4148         }
4149         if (validate_convert_profile(&bctl->sys, allowed)) {
4150                 int index = btrfs_bg_flags_to_raid_index(bctl->sys.target);
4151
4152                 btrfs_err(fs_info,
4153                           "balance: invalid convert system profile %s",
4154                           get_raid_name(index));
4155                 ret = -EINVAL;
4156                 goto out;
4157         }
4158
4159         /* Allow reducing metadata or system integrity only if force is set */
4160         allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
4161                         BTRFS_BLOCK_GROUP_RAID10 |
4162                         BTRFS_BLOCK_GROUP_RAID5 |
4163                         BTRFS_BLOCK_GROUP_RAID6;
4164         do {
4165                 seq = read_seqbegin(&fs_info->profiles_lock);
4166
4167                 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4168                      (fs_info->avail_system_alloc_bits & allowed) &&
4169                      !(bctl->sys.target & allowed)) ||
4170                     ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4171                      (fs_info->avail_metadata_alloc_bits & allowed) &&
4172                      !(bctl->meta.target & allowed)))
4173                         reducing_integrity = true;
4174                 else
4175                         reducing_integrity = false;
4176
4177                 /* if we're not converting, the target field is uninitialized */
4178                 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4179                         bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4180                 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4181                         bctl->data.target : fs_info->avail_data_alloc_bits;
4182         } while (read_seqretry(&fs_info->profiles_lock, seq));
4183
4184         if (reducing_integrity) {
4185                 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4186                         btrfs_info(fs_info,
4187                                    "balance: force reducing metadata integrity");
4188                 } else {
4189                         btrfs_err(fs_info,
4190           "balance: reduces metadata integrity, use --force if you want this");
4191                         ret = -EINVAL;
4192                         goto out;
4193                 }
4194         }
4195
4196         if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4197                 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4198                 int meta_index = btrfs_bg_flags_to_raid_index(meta_target);
4199                 int data_index = btrfs_bg_flags_to_raid_index(data_target);
4200
4201                 btrfs_warn(fs_info,
4202         "balance: metadata profile %s has lower redundancy than data profile %s",
4203                            get_raid_name(meta_index), get_raid_name(data_index));
4204         }
4205
4206         ret = insert_balance_item(fs_info, bctl);
4207         if (ret && ret != -EEXIST)
4208                 goto out;
4209
4210         if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4211                 BUG_ON(ret == -EEXIST);
4212                 BUG_ON(fs_info->balance_ctl);
4213                 spin_lock(&fs_info->balance_lock);
4214                 fs_info->balance_ctl = bctl;
4215                 spin_unlock(&fs_info->balance_lock);
4216         } else {
4217                 BUG_ON(ret != -EEXIST);
4218                 spin_lock(&fs_info->balance_lock);
4219                 update_balance_args(bctl);
4220                 spin_unlock(&fs_info->balance_lock);
4221         }
4222
4223         ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4224         set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4225         describe_balance_start_or_resume(fs_info);
4226         mutex_unlock(&fs_info->balance_mutex);
4227
4228         ret = __btrfs_balance(fs_info);
4229
4230         mutex_lock(&fs_info->balance_mutex);
4231         if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4232                 btrfs_info(fs_info, "balance: paused");
4233         else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
4234                 btrfs_info(fs_info, "balance: canceled");
4235         else
4236                 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4237
4238         clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4239
4240         if (bargs) {
4241                 memset(bargs, 0, sizeof(*bargs));
4242                 btrfs_update_ioctl_balance_args(fs_info, bargs);
4243         }
4244
4245         if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4246             balance_need_close(fs_info)) {
4247                 reset_balance_state(fs_info);
4248                 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4249         }
4250
4251         wake_up(&fs_info->balance_wait_q);
4252
4253         return ret;
4254 out:
4255         if (bctl->flags & BTRFS_BALANCE_RESUME)
4256                 reset_balance_state(fs_info);
4257         else
4258                 kfree(bctl);
4259         clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4260
4261         return ret;
4262 }
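
/*
 * A minimal sketch (not part of the original file) of the seqlock read
 * pattern used by btrfs_balance() above: snapshot the avail_*_alloc_bits
 * without blocking writers, retrying whenever a writer raced with us.
 * The helper name is hypothetical.
 */
static inline u64 example_snapshot_metadata_profiles(struct btrfs_fs_info *fs_info)
{
        unsigned int seq;
        u64 profiles;

        do {
                /* begin a lockless read-side critical section */
                seq = read_seqbegin(&fs_info->profiles_lock);
                profiles = fs_info->avail_metadata_alloc_bits;
                /* retry if a writer updated the profiles meanwhile */
        } while (read_seqretry(&fs_info->profiles_lock, seq));

        return profiles;
}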
4263
4264 static int balance_kthread(void *data)
4265 {
4266         struct btrfs_fs_info *fs_info = data;
4267         int ret = 0;
4268
4269         mutex_lock(&fs_info->balance_mutex);
4270         if (fs_info->balance_ctl)
4271                 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4272         mutex_unlock(&fs_info->balance_mutex);
4273
4274         return ret;
4275 }
4276
4277 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4278 {
4279         struct task_struct *tsk;
4280
4281         mutex_lock(&fs_info->balance_mutex);
4282         if (!fs_info->balance_ctl) {
4283                 mutex_unlock(&fs_info->balance_mutex);
4284                 return 0;
4285         }
4286         mutex_unlock(&fs_info->balance_mutex);
4287
4288         if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4289                 btrfs_info(fs_info, "balance: resume skipped");
4290                 return 0;
4291         }
4292
4293         /*
4294          * A ro->rw remount sequence should continue with the paused balance
4295          * regardless of who paused it (the system or, as of now, the
4296          * user), so set the resume flag.
4297          */
4298         spin_lock(&fs_info->balance_lock);
4299         fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4300         spin_unlock(&fs_info->balance_lock);
4301
4302         tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4303         return PTR_ERR_OR_ZERO(tsk);
4304 }
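
/*
 * Note on the return value above: kthread_run() never returns NULL; on
 * failure it returns an errno wrapped in ERR_PTR() (e.g.
 * ERR_PTR(-ENOMEM)), so PTR_ERR_OR_ZERO() folds the result into 0 on
 * success or a negative errno on failure.
 */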
4305
4306 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4307 {
4308         struct btrfs_balance_control *bctl;
4309         struct btrfs_balance_item *item;
4310         struct btrfs_disk_balance_args disk_bargs;
4311         struct btrfs_path *path;
4312         struct extent_buffer *leaf;
4313         struct btrfs_key key;
4314         int ret;
4315
4316         path = btrfs_alloc_path();
4317         if (!path)
4318                 return -ENOMEM;
4319
4320         key.objectid = BTRFS_BALANCE_OBJECTID;
4321         key.type = BTRFS_TEMPORARY_ITEM_KEY;
4322         key.offset = 0;
4323
4324         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4325         if (ret < 0)
4326                 goto out;
4327         if (ret > 0) { /* ret = -ENOENT; */
4328                 ret = 0;
4329                 goto out;
4330         }
4331
4332         bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4333         if (!bctl) {
4334                 ret = -ENOMEM;
4335                 goto out;
4336         }
4337
4338         leaf = path->nodes[0];
4339         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4340
4341         bctl->flags = btrfs_balance_flags(leaf, item);
4342         bctl->flags |= BTRFS_BALANCE_RESUME;
4343
4344         btrfs_balance_data(leaf, item, &disk_bargs);
4345         btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4346         btrfs_balance_meta(leaf, item, &disk_bargs);
4347         btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4348         btrfs_balance_sys(leaf, item, &disk_bargs);
4349         btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4350
4351         /*
4352          * This should never happen, as the paused balance state is recovered
4353          * during mount without any chance for other exclusive ops to collide.
4354          *
4355          * This gives the exclusive op status to balance and keeps it in the
4356          * paused state until user intervention (cancel or umount). If the
4357          * ownership cannot be assigned, show a message but do not fail. The
4358          * balance is in a paused state and must have fs_info::balance_ctl
4359          * properly set up.
4360          */
4361         if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
4362                 btrfs_warn(fs_info,
4363         "balance: cannot set exclusive op status, resume manually");
4364
4365         mutex_lock(&fs_info->balance_mutex);
4366         BUG_ON(fs_info->balance_ctl);
4367         spin_lock(&fs_info->balance_lock);
4368         fs_info->balance_ctl = bctl;
4369         spin_unlock(&fs_info->balance_lock);
4370         mutex_unlock(&fs_info->balance_mutex);
4371 out:
4372         btrfs_free_path(path);
4373         return ret;
4374 }
4375
4376 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4377 {
4378         int ret = 0;
4379
4380         mutex_lock(&fs_info->balance_mutex);
4381         if (!fs_info->balance_ctl) {
4382                 mutex_unlock(&fs_info->balance_mutex);
4383                 return -ENOTCONN;
4384         }
4385
4386         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4387                 atomic_inc(&fs_info->balance_pause_req);
4388                 mutex_unlock(&fs_info->balance_mutex);
4389
4390                 wait_event(fs_info->balance_wait_q,
4391                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4392
4393                 mutex_lock(&fs_info->balance_mutex);
4394                 /* we are fine with balance_ctl being ripped out from under us */
4395                 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4396                 atomic_dec(&fs_info->balance_pause_req);
4397         } else {
4398                 ret = -ENOTCONN;
4399         }
4400
4401         mutex_unlock(&fs_info->balance_mutex);
4402         return ret;
4403 }
4404
4405 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4406 {
4407         mutex_lock(&fs_info->balance_mutex);
4408         if (!fs_info->balance_ctl) {
4409                 mutex_unlock(&fs_info->balance_mutex);
4410                 return -ENOTCONN;
4411         }
4412
4413         /*
4414          * A paused balance with the item stored on disk can be resumed at
4415          * mount time if the mount is read-write. Otherwise it's still paused
4416          * and we must not allow cancelling as it deletes the item.
4417          */
4418         if (sb_rdonly(fs_info->sb)) {
4419                 mutex_unlock(&fs_info->balance_mutex);
4420                 return -EROFS;
4421         }
4422
4423         atomic_inc(&fs_info->balance_cancel_req);
4424         /*
4425          * If a balance is running, just wait for it to finish and return;
4426          * the balance item is deleted in btrfs_balance() in that case.
4427          */
4428         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4429                 mutex_unlock(&fs_info->balance_mutex);
4430                 wait_event(fs_info->balance_wait_q,
4431                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4432                 mutex_lock(&fs_info->balance_mutex);
4433         } else {
4434                 mutex_unlock(&fs_info->balance_mutex);
4435                 /*
4436                  * The lock was released to allow other waiters to continue;
4437                  * reexamine the status once we retake it.
4438                  */
4439                 mutex_lock(&fs_info->balance_mutex);
4440
4441                 if (fs_info->balance_ctl) {
4442                         reset_balance_state(fs_info);
4443                         clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4444                         btrfs_info(fs_info, "balance: canceled");
4445                 }
4446         }
4447
4448         BUG_ON(fs_info->balance_ctl ||
4449                 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4450         atomic_dec(&fs_info->balance_cancel_req);
4451         mutex_unlock(&fs_info->balance_mutex);
4452         return 0;
4453 }
4454
4455 static int btrfs_uuid_scan_kthread(void *data)
4456 {
4457         struct btrfs_fs_info *fs_info = data;
4458         struct btrfs_root *root = fs_info->tree_root;
4459         struct btrfs_key key;
4460         struct btrfs_path *path = NULL;
4461         int ret = 0;
4462         struct extent_buffer *eb;
4463         int slot;
4464         struct btrfs_root_item root_item;
4465         u32 item_size;
4466         struct btrfs_trans_handle *trans = NULL;
4467
4468         path = btrfs_alloc_path();
4469         if (!path) {
4470                 ret = -ENOMEM;
4471                 goto out;
4472         }
4473
4474         key.objectid = 0;
4475         key.type = BTRFS_ROOT_ITEM_KEY;
4476         key.offset = 0;
4477
4478         while (1) {
4479                 ret = btrfs_search_forward(root, &key, path,
4480                                 BTRFS_OLDEST_GENERATION);
4481                 if (ret) {
4482                         if (ret > 0)
4483                                 ret = 0;
4484                         break;
4485                 }
4486
4487                 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4488                     (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4489                      key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4490                     key.objectid > BTRFS_LAST_FREE_OBJECTID)
4491                         goto skip;
4492
4493                 eb = path->nodes[0];
4494                 slot = path->slots[0];
4495                 item_size = btrfs_item_size_nr(eb, slot);
4496                 if (item_size < sizeof(root_item))
4497                         goto skip;
4498
4499                 read_extent_buffer(eb, &root_item,
4500                                    btrfs_item_ptr_offset(eb, slot),
4501                                    (int)sizeof(root_item));
4502                 if (btrfs_root_refs(&root_item) == 0)
4503                         goto skip;
4504
4505                 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4506                     !btrfs_is_empty_uuid(root_item.received_uuid)) {
4507                         if (trans)
4508                                 goto update_tree;
4509
4510                         btrfs_release_path(path);
4511                         /*
4512                          * 1 - subvol uuid item
4513                          * 1 - received_subvol uuid item
4514                          */
4515                         trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4516                         if (IS_ERR(trans)) {
4517                                 ret = PTR_ERR(trans);
4518                                 break;
4519                         }
4520                         continue;
4521                 } else {
4522                         goto skip;
4523                 }
4524 update_tree:
4525                 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4526                         ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4527                                                   BTRFS_UUID_KEY_SUBVOL,
4528                                                   key.objectid);
4529                         if (ret < 0) {
4530                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4531                                         ret);
4532                                 break;
4533                         }
4534                 }
4535
4536                 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4537                         ret = btrfs_uuid_tree_add(trans,
4538                                                   root_item.received_uuid,
4539                                                  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4540                                                   key.objectid);
4541                         if (ret < 0) {
4542                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4543                                         ret);
4544                                 break;
4545                         }
4546                 }
4547
4548 skip:
4549                 if (trans) {
4550                         ret = btrfs_end_transaction(trans);
4551                         trans = NULL;
4552                         if (ret)
4553                                 break;
4554                 }
4555
4556                 btrfs_release_path(path);
4557                 if (key.offset < (u64)-1) {
4558                         key.offset++;
4559                 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4560                         key.offset = 0;
4561                         key.type = BTRFS_ROOT_ITEM_KEY;
4562                 } else if (key.objectid < (u64)-1) {
4563                         key.offset = 0;
4564                         key.type = BTRFS_ROOT_ITEM_KEY;
4565                         key.objectid++;
4566                 } else {
4567                         break;
4568                 }
4569                 cond_resched();
4570         }
4571
4572 out:
4573         btrfs_free_path(path);
4574         if (trans && !IS_ERR(trans))
4575                 btrfs_end_transaction(trans);
4576         if (ret)
4577                 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4578         else
4579                 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4580         up(&fs_info->uuid_tree_rescan_sem);
4581         return 0;
4582 }
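
/*
 * A sketch (hypothetical helper, not kernel API) isolating the cursor
 * advance at the bottom of the scan loop above: the (objectid, type,
 * offset) triple is treated as one big ordered key and incremented
 * lexicographically, so btrfs_search_forward() resumes strictly after
 * the last visited item.
 */
static bool example_advance_key_cursor(struct btrfs_key *key)
{
        if (key->offset < (u64)-1) {
                key->offset++;
        } else if (key->type < BTRFS_ROOT_ITEM_KEY) {
                key->offset = 0;
                key->type = BTRFS_ROOT_ITEM_KEY;
        } else if (key->objectid < (u64)-1) {
                key->offset = 0;
                key->type = BTRFS_ROOT_ITEM_KEY;
                key->objectid++;
        } else {
                return false;   /* key space exhausted, stop scanning */
        }
        return true;
}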
4583
4584 /*
4585  * Callback for btrfs_uuid_tree_iterate().
4586  * Returns:
4587  * 0    if the check succeeded and the entry is not outdated.
4588  * < 0  if an error occurred.
4589  * > 0  if the check failed, which means the caller shall remove the entry.
4590  */
4591 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4592                                        u8 *uuid, u8 type, u64 subid)
4593 {
4594         struct btrfs_key key;
4595         int ret = 0;
4596         struct btrfs_root *subvol_root;
4597
4598         if (type != BTRFS_UUID_KEY_SUBVOL &&
4599             type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4600                 goto out;
4601
4602         key.objectid = subid;
4603         key.type = BTRFS_ROOT_ITEM_KEY;
4604         key.offset = (u64)-1;
4605         subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4606         if (IS_ERR(subvol_root)) {
4607                 ret = PTR_ERR(subvol_root);
4608                 if (ret == -ENOENT)
4609                         ret = 1;
4610                 goto out;
4611         }
4612
4613         switch (type) {
4614         case BTRFS_UUID_KEY_SUBVOL:
4615                 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4616                         ret = 1;
4617                 break;
4618         case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4619                 if (memcmp(uuid, subvol_root->root_item.received_uuid,
4620                            BTRFS_UUID_SIZE))
4621                         ret = 1;
4622                 break;
4623         }
4624
4625 out:
4626         return ret;
4627 }
4628
4629 static int btrfs_uuid_rescan_kthread(void *data)
4630 {
4631         struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4632         int ret;
4633
4634         /*
4635          * The first step is to iterate through the existing UUID tree and
4636          * delete all entries that contain outdated data.
4637          * The second step is to add all missing entries to the UUID tree.
4638          */
4639         ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4640         if (ret < 0) {
4641                 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4642                 up(&fs_info->uuid_tree_rescan_sem);
4643                 return ret;
4644         }
4645         return btrfs_uuid_scan_kthread(data);
4646 }
4647
4648 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4649 {
4650         struct btrfs_trans_handle *trans;
4651         struct btrfs_root *tree_root = fs_info->tree_root;
4652         struct btrfs_root *uuid_root;
4653         struct task_struct *task;
4654         int ret;
4655
4656         /*
4657          * 1 - root node
4658          * 1 - root item
4659          */
4660         trans = btrfs_start_transaction(tree_root, 2);
4661         if (IS_ERR(trans))
4662                 return PTR_ERR(trans);
4663
4664         uuid_root = btrfs_create_tree(trans, fs_info,
4665                                       BTRFS_UUID_TREE_OBJECTID);
4666         if (IS_ERR(uuid_root)) {
4667                 ret = PTR_ERR(uuid_root);
4668                 btrfs_abort_transaction(trans, ret);
4669                 btrfs_end_transaction(trans);
4670                 return ret;
4671         }
4672
4673         fs_info->uuid_root = uuid_root;
4674
4675         ret = btrfs_commit_transaction(trans);
4676         if (ret)
4677                 return ret;
4678
4679         down(&fs_info->uuid_tree_rescan_sem);
4680         task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4681         if (IS_ERR(task)) {
4682                 /* BTRFS_FS_UPDATE_UUID_TREE_GEN stays cleared in all error cases */
4683                 btrfs_warn(fs_info, "failed to start uuid_scan task");
4684                 up(&fs_info->uuid_tree_rescan_sem);
4685                 return PTR_ERR(task);
4686         }
4687
4688         return 0;
4689 }
4690
4691 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4692 {
4693         struct task_struct *task;
4694
4695         down(&fs_info->uuid_tree_rescan_sem);
4696         task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4697         if (IS_ERR(task)) {
4698                 /* BTRFS_FS_UPDATE_UUID_TREE_GEN stays cleared in all error cases */
4699                 btrfs_warn(fs_info, "failed to start uuid_rescan task");
4700                 up(&fs_info->uuid_tree_rescan_sem);
4701                 return PTR_ERR(task);
4702         }
4703
4704         return 0;
4705 }
4706
4707 /*
4708  * Shrinking a device means finding all of the device extents past
4709  * the new size, and then following the back refs to the chunks.
4710  * The chunk relocation code actually frees the device extents.
4711  */
4712 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4713 {
4714         struct btrfs_fs_info *fs_info = device->fs_info;
4715         struct btrfs_root *root = fs_info->dev_root;
4716         struct btrfs_trans_handle *trans;
4717         struct btrfs_dev_extent *dev_extent = NULL;
4718         struct btrfs_path *path;
4719         u64 length;
4720         u64 chunk_offset;
4721         int ret;
4722         int slot;
4723         int failed = 0;
4724         bool retried = false;
4725         bool checked_pending_chunks = false;
4726         struct extent_buffer *l;
4727         struct btrfs_key key;
4728         struct btrfs_super_block *super_copy = fs_info->super_copy;
4729         u64 old_total = btrfs_super_total_bytes(super_copy);
4730         u64 old_size = btrfs_device_get_total_bytes(device);
4731         u64 diff;
4732
4733         new_size = round_down(new_size, fs_info->sectorsize);
4734         diff = round_down(old_size - new_size, fs_info->sectorsize);
4735
4736         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4737                 return -EINVAL;
4738
4739         path = btrfs_alloc_path();
4740         if (!path)
4741                 return -ENOMEM;
4742
4743         path->reada = READA_BACK;
4744
4745         mutex_lock(&fs_info->chunk_mutex);
4746
4747         btrfs_device_set_total_bytes(device, new_size);
4748         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4749                 device->fs_devices->total_rw_bytes -= diff;
4750                 atomic64_sub(diff, &fs_info->free_chunk_space);
4751         }
4752         mutex_unlock(&fs_info->chunk_mutex);
4753
4754 again:
4755         key.objectid = device->devid;
4756         key.offset = (u64)-1;
4757         key.type = BTRFS_DEV_EXTENT_KEY;
4758
4759         do {
4760                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
4761                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4762                 if (ret < 0) {
4763                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4764                         goto done;
4765                 }
4766
4767                 ret = btrfs_previous_item(root, path, 0, key.type);
4768                 if (ret)
4769                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4770                 if (ret < 0)
4771                         goto done;
4772                 if (ret) {
4773                         ret = 0;
4774                         btrfs_release_path(path);
4775                         break;
4776                 }
4777
4778                 l = path->nodes[0];
4779                 slot = path->slots[0];
4780                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4781
4782                 if (key.objectid != device->devid) {
4783                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4784                         btrfs_release_path(path);
4785                         break;
4786                 }
4787
4788                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4789                 length = btrfs_dev_extent_length(l, dev_extent);
4790
4791                 if (key.offset + length <= new_size) {
4792                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4793                         btrfs_release_path(path);
4794                         break;
4795                 }
4796
4797                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4798                 btrfs_release_path(path);
4799
4800                 /*
4801                  * We may be relocating the only data chunk we have,
4802                  * which could potentially end up losing the data's
4803                  * raid profile, so let's allocate an empty one in
4804                  * advance.
4805                  */
4806                 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4807                 if (ret < 0) {
4808                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4809                         goto done;
4810                 }
4811
4812                 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4813                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4814                 if (ret == -ENOSPC) {
4815                         failed++;
4816                 } else if (ret) {
4817                         if (ret == -ETXTBSY) {
4818                                 btrfs_warn(fs_info,
4819                    "could not shrink block group %llu due to active swapfile",
4820                                            chunk_offset);
4821                         }
4822                         goto done;
4823                 }
4824         } while (key.offset-- > 0);
4825
4826         if (failed && !retried) {
4827                 failed = 0;
4828                 retried = true;
4829                 goto again;
4830         } else if (failed && retried) {
4831                 ret = -ENOSPC;
4832                 goto done;
4833         }
4834
4835         /* Shrinking succeeded, else we would be at "done". */
4836         trans = btrfs_start_transaction(root, 0);
4837         if (IS_ERR(trans)) {
4838                 ret = PTR_ERR(trans);
4839                 goto done;
4840         }
4841
4842         mutex_lock(&fs_info->chunk_mutex);
4843
4844         /*
4845          * We checked in the above loop all device extents that were already in
4846          * the device tree. However, before we have updated the device's
4847          * total_bytes to the new size, we might have had chunk allocations that
4848          * have not completed yet (new block groups attached to transaction
4849          * handles), and therefore their device extents were not yet in the
4850          * device tree and we missed them in the loop above. So if we have any
4851          * pending chunk using a device extent that overlaps the device range
4852          * that we cannot use anymore, commit the current transaction and
4853          * repeat the search on the device tree - this way we guarantee we will
4854          * not have chunks using device extents that end beyond 'new_size'.
4855          */
4856         if (!checked_pending_chunks) {
4857                 u64 start = new_size;
4858                 u64 len = old_size - new_size;
4859
4860                 if (contains_pending_extent(trans->transaction, device,
4861                                             &start, len)) {
4862                         mutex_unlock(&fs_info->chunk_mutex);
4863                         checked_pending_chunks = true;
4864                         failed = 0;
4865                         retried = false;
4866                         ret = btrfs_commit_transaction(trans);
4867                         if (ret)
4868                                 goto done;
4869                         goto again;
4870                 }
4871         }
4872
4873         btrfs_device_set_disk_total_bytes(device, new_size);
4874         if (list_empty(&device->resized_list))
4875                 list_add_tail(&device->resized_list,
4876                               &fs_info->fs_devices->resized_devices);
4877
4878         WARN_ON(diff > old_total);
4879         btrfs_set_super_total_bytes(super_copy,
4880                         round_down(old_total - diff, fs_info->sectorsize));
4881         mutex_unlock(&fs_info->chunk_mutex);
4882
4883         /* Now btrfs_update_device() will change the on-disk size. */
4884         ret = btrfs_update_device(trans, device);
4885         if (ret < 0) {
4886                 btrfs_abort_transaction(trans, ret);
4887                 btrfs_end_transaction(trans);
4888         } else {
4889                 ret = btrfs_commit_transaction(trans);
4890         }
4891 done:
4892         btrfs_free_path(path);
4893         if (ret) {
4894                 mutex_lock(&fs_info->chunk_mutex);
4895                 btrfs_device_set_total_bytes(device, old_size);
4896                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4897                         device->fs_devices->total_rw_bytes += diff;
4898                 atomic64_add(diff, &fs_info->free_chunk_space);
4899                 mutex_unlock(&fs_info->chunk_mutex);
4900         }
4901         return ret;
4902 }
4903
4904 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4905                            struct btrfs_key *key,
4906                            struct btrfs_chunk *chunk, int item_size)
4907 {
4908         struct btrfs_super_block *super_copy = fs_info->super_copy;
4909         struct btrfs_disk_key disk_key;
4910         u32 array_size;
4911         u8 *ptr;
4912
4913         mutex_lock(&fs_info->chunk_mutex);
4914         array_size = btrfs_super_sys_array_size(super_copy);
4915         if (array_size + item_size + sizeof(disk_key)
4916                         > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4917                 mutex_unlock(&fs_info->chunk_mutex);
4918                 return -EFBIG;
4919         }
4920
4921         ptr = super_copy->sys_chunk_array + array_size;
4922         btrfs_cpu_key_to_disk(&disk_key, key);
4923         memcpy(ptr, &disk_key, sizeof(disk_key));
4924         ptr += sizeof(disk_key);
4925         memcpy(ptr, chunk, item_size);
4926         item_size += sizeof(disk_key);
4927         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4928         mutex_unlock(&fs_info->chunk_mutex);
4929
4930         return 0;
4931 }
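
/*
 * The sys_chunk_array appended to above is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk incl. its stripes) pairs.
 * A minimal, validation-free sketch of walking it; the real parser with
 * full sanity checks is btrfs_read_sys_array().
 */
static void example_walk_sys_chunk_array(struct btrfs_super_block *sb)
{
        u8 *ptr = sb->sys_chunk_array;
        u8 *end = ptr + btrfs_super_sys_array_size(sb);

        while (ptr < end) {
                struct btrfs_chunk *chunk;
                u32 num_stripes;

                /* each entry starts with the key of the chunk item */
                ptr += sizeof(struct btrfs_disk_key);
                chunk = (struct btrfs_chunk *)ptr;
                num_stripes = btrfs_stack_chunk_num_stripes(chunk);
                /* the item size accounts for all stripes of the chunk */
                ptr += btrfs_chunk_item_size(num_stripes);
        }
}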
4932
4933 /*
4934  * sort the devices in descending order by max_avail, total_avail
4935  */
4936 static int btrfs_cmp_device_info(const void *a, const void *b)
4937 {
4938         const struct btrfs_device_info *di_a = a;
4939         const struct btrfs_device_info *di_b = b;
4940
4941         if (di_a->max_avail > di_b->max_avail)
4942                 return -1;
4943         if (di_a->max_avail < di_b->max_avail)
4944                 return 1;
4945         if (di_a->total_avail > di_b->total_avail)
4946                 return -1;
4947         if (di_a->total_avail < di_b->total_avail)
4948                 return 1;
4949         return 0;
4950 }
4951
4952 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4953 {
4954         if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4955                 return;
4956
4957         btrfs_set_fs_incompat(info, RAID56);
4958 }
4959
4960 #define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)        \
4961                         - sizeof(struct btrfs_chunk))           \
4962                         / sizeof(struct btrfs_stripe) + 1)
4963
4964 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE        \
4965                                 - 2 * sizeof(struct btrfs_disk_key)     \
4966                                 - 2 * sizeof(struct btrfs_chunk))       \
4967                                 / sizeof(struct btrfs_stripe) + 1)
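
/*
 * Worked example for BTRFS_MAX_DEVS_SYS_CHUNK, using the on-disk sizes
 * at the time of writing (numbers are illustrative, not normative):
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE = 2048, sizeof(struct btrfs_disk_key)
 * = 17, sizeof(struct btrfs_chunk) = 80 (one embedded stripe) and
 * sizeof(struct btrfs_stripe) = 32 give:
 *
 *      (2048 - 2 * 17 - 2 * 80) / 32 + 1 = 1854 / 32 + 1 = 58
 *
 * so at most 58 stripes fit; the "+ 1" accounts for the stripe that is
 * embedded in struct btrfs_chunk itself.
 */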
4968
4969 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4970                                u64 start, u64 type)
4971 {
4972         struct btrfs_fs_info *info = trans->fs_info;
4973         struct btrfs_fs_devices *fs_devices = info->fs_devices;
4974         struct btrfs_device *device;
4975         struct map_lookup *map = NULL;
4976         struct extent_map_tree *em_tree;
4977         struct extent_map *em;
4978         struct btrfs_device_info *devices_info = NULL;
4979         u64 total_avail;
4980         int num_stripes;        /* total number of stripes to allocate */
4981         int data_stripes;       /* number of stripes that count for
4982                                    block group size */
4983         int sub_stripes;        /* sub_stripes info for map */
4984         int dev_stripes;        /* stripes per dev */
4985         int devs_max;           /* max devs to use */
4986         int devs_min;           /* min devs needed */
4987         int devs_increment;     /* ndevs has to be a multiple of this */
4988         int ncopies;            /* how many copies of the data we have */
4989         int nparity;            /* number of stripes worth of bytes to
4990                                    store parity information */
4991         int ret;
4992         u64 max_stripe_size;
4993         u64 max_chunk_size;
4994         u64 stripe_size;
4995         u64 chunk_size;
4996         int ndevs;
4997         int i;
4998         int j;
4999         int index;
5000
5001         BUG_ON(!alloc_profile_is_valid(type, 0));
5002
5003         if (list_empty(&fs_devices->alloc_list)) {
5004                 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5005                         btrfs_debug(info, "%s: no writable device", __func__);
5006                 return -ENOSPC;
5007         }
5008
5009         index = btrfs_bg_flags_to_raid_index(type);
5010
5011         sub_stripes = btrfs_raid_array[index].sub_stripes;
5012         dev_stripes = btrfs_raid_array[index].dev_stripes;
5013         devs_max = btrfs_raid_array[index].devs_max;
5014         devs_min = btrfs_raid_array[index].devs_min;
5015         devs_increment = btrfs_raid_array[index].devs_increment;
5016         ncopies = btrfs_raid_array[index].ncopies;
5017         nparity = btrfs_raid_array[index].nparity;
5018
5019         if (type & BTRFS_BLOCK_GROUP_DATA) {
5020                 max_stripe_size = SZ_1G;
5021                 max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
5022                 if (!devs_max)
5023                         devs_max = BTRFS_MAX_DEVS(info);
5024         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5025                 /* for larger filesystems, use larger metadata chunks */
5026                 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
5027                         max_stripe_size = SZ_1G;
5028                 else
5029                         max_stripe_size = SZ_256M;
5030                 max_chunk_size = max_stripe_size;
5031                 if (!devs_max)
5032                         devs_max = BTRFS_MAX_DEVS(info);
5033         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5034                 max_stripe_size = SZ_32M;
5035                 max_chunk_size = 2 * max_stripe_size;
5036                 if (!devs_max)
5037                         devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
5038         } else {
5039                 btrfs_err(info, "invalid chunk type 0x%llx requested",
5040                        type);
5041                 BUG_ON(1);
5042         }
5043
5044         /* We don't want a chunk larger than 10% of writable space */
5045         max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
5046                              max_chunk_size);
5047
5048         devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5049                                GFP_NOFS);
5050         if (!devices_info)
5051                 return -ENOMEM;
5052
5053         /*
5054          * in the first pass through the devices list, we gather information
5055          * about the available holes on each device.
5056          */
5057         ndevs = 0;
5058         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5059                 u64 max_avail;
5060                 u64 dev_offset;
5061
5062                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5063                         WARN(1, KERN_ERR
5064                                "BTRFS: read-only device in alloc_list\n");
5065                         continue;
5066                 }
5067
5068                 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5069                                         &device->dev_state) ||
5070                     test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5071                         continue;
5072
5073                 if (device->total_bytes > device->bytes_used)
5074                         total_avail = device->total_bytes - device->bytes_used;
5075                 else
5076                         total_avail = 0;
5077
5078                 /* If there is no space on this device, skip it. */
5079                 if (total_avail == 0)
5080                         continue;
5081
5082                 ret = find_free_dev_extent(trans, device,
5083                                            max_stripe_size * dev_stripes,
5084                                            &dev_offset, &max_avail);
5085                 if (ret && ret != -ENOSPC)
5086                         goto error;
5087
5088                 if (ret == 0)
5089                         max_avail = max_stripe_size * dev_stripes;
5090
5091                 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) {
5092                         if (btrfs_test_opt(info, ENOSPC_DEBUG))
5093                                 btrfs_debug(info,
5094                         "%s: devid %llu has no free space, have=%llu want=%u",
5095                                             __func__, device->devid, max_avail,
5096                                             BTRFS_STRIPE_LEN * dev_stripes);
5097                         continue;
5098                 }
5099
5100                 if (ndevs == fs_devices->rw_devices) {
5101                         WARN(1, "%s: found more than %llu devices\n",
5102                              __func__, fs_devices->rw_devices);
5103                         break;
5104                 }
5105                 devices_info[ndevs].dev_offset = dev_offset;
5106                 devices_info[ndevs].max_avail = max_avail;
5107                 devices_info[ndevs].total_avail = total_avail;
5108                 devices_info[ndevs].dev = device;
5109                 ++ndevs;
5110         }
5111
5112         /*
5113          * now sort the devices by hole size / available space
5114          */
5115         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5116              btrfs_cmp_device_info, NULL);
5117
5118         /* round down to number of usable stripes */
5119         ndevs = round_down(ndevs, devs_increment);
5120
5121         if (ndevs < devs_min) {
5122                 ret = -ENOSPC;
5123                 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5124                         btrfs_debug(info,
5125         "%s: not enough devices with free space: have=%d minimum required=%d",
5126                                     __func__, ndevs, devs_min);
5127                 }
5128                 goto error;
5129         }
5130
5131         ndevs = min(ndevs, devs_max);
5132
5133         /*
5134          * The primary goal is to maximize the number of stripes, so use as
5135          * many devices as possible, even if the stripes are not maximum sized.
5136          *
5137          * The DUP profile stores more than one stripe per device; the
5138          * max_avail is the total size, so we have to adjust.
5139          */
5140         stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
5141         num_stripes = ndevs * dev_stripes;
5142
5143         /*
5144          * this will have to be fixed for RAID1 and RAID10 over
5145          * more drives
5146          */
5147         data_stripes = (num_stripes - nparity) / ncopies;
5148
5149         /*
5150          * Use the number of data stripes to figure out how big this chunk
5151          * is really going to be in terms of logical address space,
5152          * and compare that answer with the max chunk size. If it's higher,
5153          * we try to reduce stripe_size.
5154          */
5155         if (stripe_size * data_stripes > max_chunk_size) {
5156                 /*
5157                  * Reduce stripe_size, round it up to a 16MB boundary again and
5158                  * then use it, unless it ends up being even bigger than the
5159                  * previous value we had already.
5160                  */
5161                 stripe_size = min(round_up(div_u64(max_chunk_size,
5162                                                    data_stripes), SZ_16M),
5163                                   stripe_size);
5164         }
5165
5166         /* align to BTRFS_STRIPE_LEN */
5167         stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);
5168
5169         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
5170         if (!map) {
5171                 ret = -ENOMEM;
5172                 goto error;
5173         }
5174         map->num_stripes = num_stripes;
5175
5176         for (i = 0; i < ndevs; ++i) {
5177                 for (j = 0; j < dev_stripes; ++j) {
5178                         int s = i * dev_stripes + j;
5179                         map->stripes[s].dev = devices_info[i].dev;
5180                         map->stripes[s].physical = devices_info[i].dev_offset +
5181                                                    j * stripe_size;
5182                 }
5183         }
5184         map->stripe_len = BTRFS_STRIPE_LEN;
5185         map->io_align = BTRFS_STRIPE_LEN;
5186         map->io_width = BTRFS_STRIPE_LEN;
5187         map->type = type;
5188         map->sub_stripes = sub_stripes;
5189
5190         chunk_size = stripe_size * data_stripes;
5191
5192         trace_btrfs_chunk_alloc(info, map, start, chunk_size);
5193
5194         em = alloc_extent_map();
5195         if (!em) {
5196                 kfree(map);
5197                 ret = -ENOMEM;
5198                 goto error;
5199         }
5200         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5201         em->map_lookup = map;
5202         em->start = start;
5203         em->len = chunk_size;
5204         em->block_start = 0;
5205         em->block_len = em->len;
5206         em->orig_block_len = stripe_size;
5207
5208         em_tree = &info->mapping_tree.map_tree;
5209         write_lock(&em_tree->lock);
5210         ret = add_extent_mapping(em_tree, em, 0);
5211         if (ret) {
5212                 write_unlock(&em_tree->lock);
5213                 free_extent_map(em);
5214                 goto error;
5215         }
5216
5217         list_add_tail(&em->list, &trans->transaction->pending_chunks);
5218         refcount_inc(&em->refs);
5219         write_unlock(&em_tree->lock);
5220
5221         ret = btrfs_make_block_group(trans, 0, type, start, chunk_size);
5222         if (ret)
5223                 goto error_del_extent;
5224
5225         for (i = 0; i < map->num_stripes; i++)
5226                 btrfs_device_set_bytes_used(map->stripes[i].dev,
5227                                 map->stripes[i].dev->bytes_used + stripe_size);
5228
5229         atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);
5230
5231         free_extent_map(em);
5232         check_raid56_incompat_flag(info, type);
5233
5234         kfree(devices_info);
5235         return 0;
5236
5237 error_del_extent:
5238         write_lock(&em_tree->lock);
5239         remove_extent_mapping(em_tree, em);
5240         write_unlock(&em_tree->lock);
5241
5242         /* One for our allocation */
5243         free_extent_map(em);
5244         /* One for the tree reference */
5245         free_extent_map(em);
5246         /* One for the pending_chunks list reference */
5247         free_extent_map(em);
5248 error:
5249         kfree(devices_info);
5250         return ret;
5251 }
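
/*
 * Worked example for the stripe math above, under assumed inputs: a
 * RAID6 chunk over ndevs = 6 devices with dev_stripes = 1, ncopies = 1
 * and nparity = 2 yields num_stripes = 6 and data_stripes =
 * (6 - 2) / 1 = 4, so the chunk spans 4 * stripe_size of logical
 * address space while consuming 6 * stripe_size of raw device space.
 */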
5252
5253 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
5254                              u64 chunk_offset, u64 chunk_size)
5255 {
5256         struct btrfs_fs_info *fs_info = trans->fs_info;
5257         struct btrfs_root *extent_root = fs_info->extent_root;
5258         struct btrfs_root *chunk_root = fs_info->chunk_root;
5259         struct btrfs_key key;
5260         struct btrfs_device *device;
5261         struct btrfs_chunk *chunk;
5262         struct btrfs_stripe *stripe;
5263         struct extent_map *em;
5264         struct map_lookup *map;
5265         size_t item_size;
5266         u64 dev_offset;
5267         u64 stripe_size;
5268         int i = 0;
5269         int ret = 0;
5270
5271         em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
5272         if (IS_ERR(em))
5273                 return PTR_ERR(em);
5274
5275         map = em->map_lookup;
5276         item_size = btrfs_chunk_item_size(map->num_stripes);
5277         stripe_size = em->orig_block_len;
5278
5279         chunk = kzalloc(item_size, GFP_NOFS);
5280         if (!chunk) {
5281                 ret = -ENOMEM;
5282                 goto out;
5283         }
5284
5285         /*
5286          * Take the device list mutex to prevent races with the final phase of
5287          * a device replace operation that replaces the device object associated
5288          * with the map's stripes, because the device object's id can change
5289          * at any time during that final phase of the device replace operation
5290          * (dev-replace.c:btrfs_dev_replace_finishing()).
5291          */
5292         mutex_lock(&fs_info->fs_devices->device_list_mutex);
5293         for (i = 0; i < map->num_stripes; i++) {
5294                 device = map->stripes[i].dev;
5295                 dev_offset = map->stripes[i].physical;
5296
5297                 ret = btrfs_update_device(trans, device);
5298                 if (ret)
5299                         break;
5300                 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
5301                                              dev_offset, stripe_size);
5302                 if (ret)
5303                         break;
5304         }
5305         if (ret) {
5306                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5307                 goto out;
5308         }
5309
5310         stripe = &chunk->stripe;
5311         for (i = 0; i < map->num_stripes; i++) {
5312                 device = map->stripes[i].dev;
5313                 dev_offset = map->stripes[i].physical;
5314
5315                 btrfs_set_stack_stripe_devid(stripe, device->devid);
5316                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5317                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5318                 stripe++;
5319         }
5320         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5321
5322         btrfs_set_stack_chunk_length(chunk, chunk_size);
5323         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5324         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5325         btrfs_set_stack_chunk_type(chunk, map->type);
5326         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5327         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5328         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5329         btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5330         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5331
5332         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5333         key.type = BTRFS_CHUNK_ITEM_KEY;
5334         key.offset = chunk_offset;
5335
5336         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5337         if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5338                 /*
5339                  * TODO: Cleanup of inserted chunk root in case of
5340                  * failure.
5341                  */
5342                 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5343         }
5344
5345 out:
5346         kfree(chunk);
5347         free_extent_map(em);
5348         return ret;
5349 }
5350
5351 /*
5352  * Chunk allocation falls into two parts. The first part does work
5353  * that makes the new allocated chunk usable, but does not do any operation
5354  * that modifies the chunk tree. The second part does the work that
5355  * requires modifying the chunk tree. This division is important for the
5356  * bootstrap process of adding storage to a seed btrfs.
5357  */
5358 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5359 {
5360         u64 chunk_offset;
5361
5362         lockdep_assert_held(&trans->fs_info->chunk_mutex);
5363         chunk_offset = find_next_chunk(trans->fs_info);
5364         return __btrfs_alloc_chunk(trans, chunk_offset, type);
5365 }
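
/*
 * Minimal sketch of a phase-one caller for the protocol described
 * above, with the surrounding transaction logic elided; the function
 * name is hypothetical.  Phase two, btrfs_finish_chunk_alloc(), runs
 * later, when the new block groups are made persistent.
 */
static int example_alloc_data_chunk(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int ret;

        /* btrfs_alloc_chunk() asserts that chunk_mutex is held */
        mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_alloc_chunk(trans, btrfs_data_alloc_profile(fs_info));
        mutex_unlock(&fs_info->chunk_mutex);

        return ret;
}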
5366
5367 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
5368                                          struct btrfs_fs_info *fs_info)
5369 {
5370         u64 chunk_offset;
5371         u64 sys_chunk_offset;
5372         u64 alloc_profile;
5373         int ret;
5374
5375         chunk_offset = find_next_chunk(fs_info);
5376         alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5377         ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
5378         if (ret)
5379                 return ret;
5380
5381         sys_chunk_offset = find_next_chunk(fs_info);
5382         alloc_profile = btrfs_system_alloc_profile(fs_info);
5383         ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
5384         return ret;
5385 }
5386
5387 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5388 {
5389         int max_errors;
5390
5391         if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5392                          BTRFS_BLOCK_GROUP_RAID10 |
5393                          BTRFS_BLOCK_GROUP_RAID5 |
5394                          BTRFS_BLOCK_GROUP_DUP)) {
5395                 max_errors = 1;
5396         } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
5397                 max_errors = 2;
5398         } else {
5399                 max_errors = 0;
5400         }
5401
5402         return max_errors;
5403 }
5404
5405 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5406 {
5407         struct extent_map *em;
5408         struct map_lookup *map;
5409         int readonly = 0;
5410         int miss_ndevs = 0;
5411         int i;
5412
5413         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5414         if (IS_ERR(em))
5415                 return 1;
5416
5417         map = em->map_lookup;
5418         for (i = 0; i < map->num_stripes; i++) {
5419                 if (test_bit(BTRFS_DEV_STATE_MISSING,
5420                                         &map->stripes[i].dev->dev_state)) {
5421                         miss_ndevs++;
5422                         continue;
5423                 }
5424                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5425                                         &map->stripes[i].dev->dev_state)) {
5426                         readonly = 1;
5427                         goto end;
5428                 }
5429         }
5430
5431         /*
5432          * If the number of missing devices is larger than max errors,
5433          * we cannot write the data into that chunk successfully, so
5434          * mark it readonly.
5435          */
5436         if (miss_ndevs > btrfs_chunk_max_errors(map))
5437                 readonly = 1;
5438 end:
5439         free_extent_map(em);
5440         return readonly;
5441 }
5442
5443 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
5444 {
5445         extent_map_tree_init(&tree->map_tree);
5446 }
5447
5448 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
5449 {
5450         struct extent_map *em;
5451
5452         while (1) {
5453                 write_lock(&tree->map_tree.lock);
5454                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
5455                 if (em)
5456                         remove_extent_mapping(&tree->map_tree, em);
5457                 write_unlock(&tree->map_tree.lock);
5458                 if (!em)
5459                         break;
5460                 /* once for us */
5461                 free_extent_map(em);
5462                 /* once for the tree */
5463                 free_extent_map(em);
5464         }
5465 }
5466
5467 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5468 {
5469         struct extent_map *em;
5470         struct map_lookup *map;
5471         int ret;
5472
5473         em = btrfs_get_chunk_map(fs_info, logical, len);
5474         if (IS_ERR(em))
5475                 /*
5476                  * We could return errors for these cases, but that could get
5477                  * ugly and we'd probably end up doing the same thing anyway: do
5478                  * nothing else and exit. So return 1 so the callers don't try
5479                  * to use other copies.
5480                  */
5481                 return 1;
5482
5483         map = em->map_lookup;
5484         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
5485                 ret = map->num_stripes;
5486         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5487                 ret = map->sub_stripes;
5488         else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5489                 ret = 2;
5490         else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5491                 /*
5492                  * There could be two corrupted data stripes; we need
5493                  * to retry in a loop in order to rebuild the correct data.
5494                  *
5495                  * Fail a stripe at a time on every retry except the
5496                  * stripe under reconstruction.
5497                  */
5498                 ret = map->num_stripes;
5499         else
5500                 ret = 1;
5501         free_extent_map(em);
5502
5503         down_read(&fs_info->dev_replace.rwsem);
5504         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5505             fs_info->dev_replace.tgtdev)
5506                 ret++;
5507         up_read(&fs_info->dev_replace.rwsem);
5508
5509         return ret;
5510 }
5511
5512 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5513                                     u64 logical)
5514 {
5515         struct extent_map *em;
5516         struct map_lookup *map;
5517         unsigned long len = fs_info->sectorsize;
5518
5519         em = btrfs_get_chunk_map(fs_info, logical, len);
5520
5521         if (!WARN_ON(IS_ERR(em))) {
5522                 map = em->map_lookup;
5523                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5524                         len = map->stripe_len * nr_data_stripes(map);
5525                 free_extent_map(em);
5526         }
5527         return len;
5528 }
5529
5530 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5531 {
5532         struct extent_map *em;
5533         struct map_lookup *map;
5534         int ret = 0;
5535
5536         em = btrfs_get_chunk_map(fs_info, logical, len);
5537
5538         if (!WARN_ON(IS_ERR(em))) {
5539                 map = em->map_lookup;
5540                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5541                         ret = 1;
5542                 free_extent_map(em);
5543         }
5544         return ret;
5545 }
5546
5547 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5548                             struct map_lookup *map, int first,
5549                             int dev_replace_is_ongoing)
5550 {
5551         int i;
5552         int num_stripes;
5553         int preferred_mirror;
5554         int tolerance;
5555         struct btrfs_device *srcdev;
5556
5557         ASSERT((map->type &
5558                  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)));
5559
5560         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5561                 num_stripes = map->sub_stripes;
5562         else
5563                 num_stripes = map->num_stripes;
5564
5565         preferred_mirror = first + current->pid % num_stripes;
5566
5567         if (dev_replace_is_ongoing &&
5568             fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5569              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5570                 srcdev = fs_info->dev_replace.srcdev;
5571         else
5572                 srcdev = NULL;
5573
5574         /*
5575          * Try to avoid the drive that is the source drive for a
5576          * dev-replace procedure; only choose it if no other non-missing
5577          * mirror is available.
5578          */
5579         for (tolerance = 0; tolerance < 2; tolerance++) {
5580                 if (map->stripes[preferred_mirror].dev->bdev &&
5581                     (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5582                         return preferred_mirror;
5583                 for (i = first; i < first + num_stripes; i++) {
5584                         if (map->stripes[i].dev->bdev &&
5585                             (tolerance || map->stripes[i].dev != srcdev))
5586                                 return i;
5587                 }
5588         }
5589
5590         /* We couldn't find one that doesn't fail.  Just return something
5591          * and the I/O error handling code will clean up eventually.
5592          */
5593         return preferred_mirror;
5594 }
5595
5596 static inline int parity_smaller(u64 a, u64 b)
5597 {
5598         return a > b;
5599 }
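/*
 * Note: sorting ascending by raid_map pushes the parity stripes to the
 * end because RAID5_P_STRIPE and RAID6_Q_STRIPE are defined as (u64)-2
 * and (u64)-1, larger than any real logical address, so P and then Q
 * sink past all the data stripes.
 */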
5600
5601 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5602 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5603 {
5604         struct btrfs_bio_stripe s;
5605         int i;
5606         u64 l;
5607         int again = 1;
5608
5609         while (again) {
5610                 again = 0;
5611                 for (i = 0; i < num_stripes - 1; i++) {
5612                         if (parity_smaller(bbio->raid_map[i],
5613                                            bbio->raid_map[i+1])) {
5614                                 s = bbio->stripes[i];
5615                                 l = bbio->raid_map[i];
5616                                 bbio->stripes[i] = bbio->stripes[i+1];
5617                                 bbio->raid_map[i] = bbio->raid_map[i+1];
5618                                 bbio->stripes[i+1] = s;
5619                                 bbio->raid_map[i+1] = l;
5620
5621                                 again = 1;
5622                         }
5623                 }
5624         }
5625 }
5626
5627 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5628 {
5629         struct btrfs_bio *bbio = kzalloc(
5630                  /* the size of the btrfs_bio */
5631                 sizeof(struct btrfs_bio) +
5632                 /* plus the variable array for the stripes */
5633                 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5634                 /* plus the variable array for the tgt dev */
5635                 sizeof(int) * (real_stripes) +
5636                 /*
5637                  * plus the raid_map, which includes both the tgt dev
5638                  * and the stripes
5639                  */
5640                 sizeof(u64) * (total_stripes),
5641                 GFP_NOFS|__GFP_NOFAIL);
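        /*
         * A worked example of the single allocation above (sizes are
         * illustrative and arch-dependent): alloc_btrfs_bio(6, 4) reserves
         * room for the struct itself, then 6 btrfs_bio_stripes, then 4
         * tgtdev_map ints, then 6 raid_map u64s, all contiguous, so the
         * one kfree() in btrfs_put_bbio() releases everything.
         */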
5642
5643         atomic_set(&bbio->error, 0);
5644         refcount_set(&bbio->refs, 1);
5645
5646         return bbio;
5647 }
5648
5649 void btrfs_get_bbio(struct btrfs_bio *bbio)
5650 {
5651         WARN_ON(!refcount_read(&bbio->refs));
5652         refcount_inc(&bbio->refs);
5653 }
5654
5655 void btrfs_put_bbio(struct btrfs_bio *bbio)
5656 {
5657         if (!bbio)
5658                 return;
5659         if (refcount_dec_and_test(&bbio->refs))
5660                 kfree(bbio);
5661 }
5662
5663 /*
5664  * Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE?
5665  * Note that discard won't be sent to the target device of a device
5666  * replace.
5667  */
5668 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5669                                          u64 logical, u64 length,
5670                                          struct btrfs_bio **bbio_ret)
5671 {
5672         struct extent_map *em;
5673         struct map_lookup *map;
5674         struct btrfs_bio *bbio;
5675         u64 offset;
5676         u64 stripe_nr;
5677         u64 stripe_nr_end;
5678         u64 stripe_end_offset;
5679         u64 stripe_cnt;
5680         u64 stripe_len;
5681         u64 stripe_offset;
5682         u64 num_stripes;
5683         u32 stripe_index;
5684         u32 factor = 0;
5685         u32 sub_stripes = 0;
5686         u64 stripes_per_dev = 0;
5687         u32 remaining_stripes = 0;
5688         u32 last_stripe = 0;
5689         int ret = 0;
5690         int i;
5691
5692         /* Discard always returns a bbio */
5693         ASSERT(bbio_ret);
5694
5695         em = btrfs_get_chunk_map(fs_info, logical, length);
5696         if (IS_ERR(em))
5697                 return PTR_ERR(em);
5698
5699         map = em->map_lookup;
5700         /* we don't discard raid56 yet */
5701         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5702                 ret = -EOPNOTSUPP;
5703                 goto out;
5704         }
5705
5706         offset = logical - em->start;
5707         length = min_t(u64, em->len - offset, length);
5708
5709         stripe_len = map->stripe_len;
5710         /*
5711          * stripe_nr counts the total number of stripes we have to stride
5712          * to get to this block
5713          */
5714         stripe_nr = div64_u64(offset, stripe_len);
5715
5716         /* stripe_offset is the offset of this block in its stripe */
5717         stripe_offset = offset - stripe_nr * stripe_len;
5718
5719         stripe_nr_end = round_up(offset + length, map->stripe_len);
5720         stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5721         stripe_cnt = stripe_nr_end - stripe_nr;
5722         stripe_end_offset = stripe_nr_end * map->stripe_len -
5723                             (offset + length);
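        /*
         * Worked example (hypothetical numbers): stripe_len = 64K,
         * offset = 96K, length = 150K.  Then stripe_nr = 1,
         * stripe_offset = 32K, stripe_nr_end = round_up(246K, 64K) / 64K
         * = 4, stripe_cnt = 3, and stripe_end_offset = 256K - 246K = 10K.
         */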
5724         /*
5725          * after this, stripe_nr is the number of stripes on this
5726          * device we have to walk to find the data, and stripe_index is
5727          * the number of our device in the stripe array
5728          */
5729         num_stripes = 1;
5730         stripe_index = 0;
5731         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5732                          BTRFS_BLOCK_GROUP_RAID10)) {
5733                 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5734                         sub_stripes = 1;
5735                 else
5736                         sub_stripes = map->sub_stripes;
5737
5738                 factor = map->num_stripes / sub_stripes;
5739                 num_stripes = min_t(u64, map->num_stripes,
5740                                     sub_stripes * stripe_cnt);
5741                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5742                 stripe_index *= sub_stripes;
5743                 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5744                                               &remaining_stripes);
5745                 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5746                 last_stripe *= sub_stripes;
5747         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5748                                 BTRFS_BLOCK_GROUP_DUP)) {
5749                 num_stripes = map->num_stripes;
5750         } else {
5751                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5752                                         &stripe_index);
5753         }
5754
5755         bbio = alloc_btrfs_bio(num_stripes, 0);
5756         if (!bbio) {
5757                 ret = -ENOMEM;
5758                 goto out;
5759         }
5760
5761         for (i = 0; i < num_stripes; i++) {
5762                 bbio->stripes[i].physical =
5763                         map->stripes[stripe_index].physical +
5764                         stripe_offset + stripe_nr * map->stripe_len;
5765                 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5766
5767                 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5768                                  BTRFS_BLOCK_GROUP_RAID10)) {
5769                         bbio->stripes[i].length = stripes_per_dev *
5770                                 map->stripe_len;
5771
5772                         if (i / sub_stripes < remaining_stripes)
5773                                 bbio->stripes[i].length +=
5774                                         map->stripe_len;
5775
5776                         /*
5777                          * Special for the first stripe and
5778                          * the last stripe:
5779                          *
5780                          * |-------|...|-------|
5781                          *     |----------|
5782                          *    off     end_off
5783                          */
5784                         if (i < sub_stripes)
5785                                 bbio->stripes[i].length -=
5786                                         stripe_offset;
5787
5788                         if (stripe_index >= last_stripe &&
5789                             stripe_index <= (last_stripe +
5790                                              sub_stripes - 1))
5791                                 bbio->stripes[i].length -=
5792                                         stripe_end_offset;
5793
5794                         if (i == sub_stripes - 1)
5795                                 stripe_offset = 0;
5796                 } else {
5797                         bbio->stripes[i].length = length;
5798                 }
5799
5800                 stripe_index++;
5801                 if (stripe_index == map->num_stripes) {
5802                         stripe_index = 0;
5803                         stripe_nr++;
5804                 }
5805         }
5806
5807         *bbio_ret = bbio;
5808         bbio->map_type = map->type;
5809         bbio->num_stripes = num_stripes;
5810 out:
5811         free_extent_map(em);
5812         return ret;
5813 }
5814
5815 /*
5816  * In dev-replace case, for repair case (that's the only case where the mirror
5817  * is selected explicitly when calling btrfs_map_block), blocks left of the
5818  * left cursor can also be read from the target drive.
5819  *
5820  * For BTRFS_MAP_GET_READ_MIRRORS, the target drive is added as the last
5821  * one to the array of stripes.
5822  * For READ, it also needs to be supported using the same mirror number.
5823  *
5824  * If the requested block is not left of the left cursor, EIO is returned. This
5825  * can happen because btrfs_num_copies() returns one more in the dev-replace
5826  * case.
5827  */
5828 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5829                                          u64 logical, u64 length,
5830                                          u64 srcdev_devid, int *mirror_num,
5831                                          u64 *physical)
5832 {
5833         struct btrfs_bio *bbio = NULL;
5834         int num_stripes;
5835         int index_srcdev = 0;
5836         int found = 0;
5837         u64 physical_of_found = 0;
5838         int i;
5839         int ret = 0;
5840
5841         ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5842                                 logical, &length, &bbio, 0, 0);
5843         if (ret) {
5844                 ASSERT(bbio == NULL);
5845                 return ret;
5846         }
5847
5848         num_stripes = bbio->num_stripes;
5849         if (*mirror_num > num_stripes) {
5850                 /*
5851                  * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5852                  * that means that the requested area is not left of the left
5853                  * cursor
5854                  */
5855                 btrfs_put_bbio(bbio);
5856                 return -EIO;
5857         }
5858
5859         /*
5860          * Process the rest of the function using the mirror_num of the
5861          * source drive; therefore look it up first.  At the end, patch the
5862          * device pointer to that of the target drive.
5863          */
5864         for (i = 0; i < num_stripes; i++) {
5865                 if (bbio->stripes[i].dev->devid != srcdev_devid)
5866                         continue;
5867
5868                 /*
5869                  * In case of DUP, in order to keep it simple, only add the
5870                  * mirror with the lowest physical address
5871                  */
5872                 if (found &&
5873                     physical_of_found <= bbio->stripes[i].physical)
5874                         continue;
5875
5876                 index_srcdev = i;
5877                 found = 1;
5878                 physical_of_found = bbio->stripes[i].physical;
5879         }
5880
5881         btrfs_put_bbio(bbio);
5882
5883         ASSERT(found);
5884         if (!found)
5885                 return -EIO;
5886
5887         *mirror_num = index_srcdev + 1;
5888         *physical = physical_of_found;
5889         return ret;
5890 }
5891
5892 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5893                                       struct btrfs_bio **bbio_ret,
5894                                       struct btrfs_dev_replace *dev_replace,
5895                                       int *num_stripes_ret, int *max_errors_ret)
5896 {
5897         struct btrfs_bio *bbio = *bbio_ret;
5898         u64 srcdev_devid = dev_replace->srcdev->devid;
5899         int tgtdev_indexes = 0;
5900         int num_stripes = *num_stripes_ret;
5901         int max_errors = *max_errors_ret;
5902         int i;
5903
5904         if (op == BTRFS_MAP_WRITE) {
5905                 int index_where_to_add;
5906
5907                 /*
5908                  * duplicate the write operations while the dev replace
5909                  * procedure is running. Since the copying of the old disk to
5910                  * the new disk takes place at run time while the filesystem is
5911                  * mounted writable, the regular write operations to the old
5912                  * disk have to be duplicated to go to the new disk as well.
5913                  *
5914                  * Note that device->missing is handled by the caller, and that
5915                  * the write to the old disk is already set up in the stripes
5916                  * array.
5917                  */
5918                 index_where_to_add = num_stripes;
5919                 for (i = 0; i < num_stripes; i++) {
5920                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5921                                 /* write to new disk, too */
5922                                 struct btrfs_bio_stripe *new =
5923                                         bbio->stripes + index_where_to_add;
5924                                 struct btrfs_bio_stripe *old =
5925                                         bbio->stripes + i;
5926
5927                                 new->physical = old->physical;
5928                                 new->length = old->length;
5929                                 new->dev = dev_replace->tgtdev;
5930                                 bbio->tgtdev_map[i] = index_where_to_add;
5931                                 index_where_to_add++;
5932                                 max_errors++;
5933                                 tgtdev_indexes++;
5934                         }
5935                 }
5936                 num_stripes = index_where_to_add;
5937         } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5938                 int index_srcdev = 0;
5939                 int found = 0;
5940                 u64 physical_of_found = 0;
5941
5942                 /*
5943                  * During the dev-replace procedure, the target drive can also
5944                  * be used to read data in case it is needed to repair a corrupt
5945                  * block elsewhere. This is possible if the requested area is
5946                  * left of the left cursor. In this area, the target drive is a
5947                  * full copy of the source drive.
5948                  */
5949                 for (i = 0; i < num_stripes; i++) {
5950                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5951                                 /*
5952                                  * In case of DUP, in order to keep it simple,
5953                                  * only add the mirror with the lowest physical
5954                                  * address
5955                                  */
5956                                 if (found &&
5957                                     physical_of_found <=
5958                                      bbio->stripes[i].physical)
5959                                         continue;
5960                                 index_srcdev = i;
5961                                 found = 1;
5962                                 physical_of_found = bbio->stripes[i].physical;
5963                         }
5964                 }
5965                 if (found) {
5966                         struct btrfs_bio_stripe *tgtdev_stripe =
5967                                 bbio->stripes + num_stripes;
5968
5969                         tgtdev_stripe->physical = physical_of_found;
5970                         tgtdev_stripe->length =
5971                                 bbio->stripes[index_srcdev].length;
5972                         tgtdev_stripe->dev = dev_replace->tgtdev;
5973                         bbio->tgtdev_map[index_srcdev] = num_stripes;
5974
5975                         tgtdev_indexes++;
5976                         num_stripes++;
5977                 }
5978         }
5979
5980         *num_stripes_ret = num_stripes;
5981         *max_errors_ret = max_errors;
5982         bbio->num_tgtdevs = tgtdev_indexes;
5983         *bbio_ret = bbio;
5984 }
5985
5986 static bool need_full_stripe(enum btrfs_map_op op)
5987 {
5988         return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5989 }
5990
5991 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
5992                              enum btrfs_map_op op,
5993                              u64 logical, u64 *length,
5994                              struct btrfs_bio **bbio_ret,
5995                              int mirror_num, int need_raid_map)
5996 {
5997         struct extent_map *em;
5998         struct map_lookup *map;
5999         u64 offset;
6000         u64 stripe_offset;
6001         u64 stripe_nr;
6002         u64 stripe_len;
6003         u32 stripe_index;
6004         int i;
6005         int ret = 0;
6006         int num_stripes;
6007         int max_errors = 0;
6008         int tgtdev_indexes = 0;
6009         struct btrfs_bio *bbio = NULL;
6010         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6011         int dev_replace_is_ongoing = 0;
6012         int num_alloc_stripes;
6013         int patch_the_first_stripe_for_dev_replace = 0;
6014         u64 physical_to_patch_in_first_stripe = 0;
6015         u64 raid56_full_stripe_start = (u64)-1;
6016
6017         if (op == BTRFS_MAP_DISCARD)
6018                 return __btrfs_map_block_for_discard(fs_info, logical,
6019                                                      *length, bbio_ret);
6020
6021         em = btrfs_get_chunk_map(fs_info, logical, *length);
6022         if (IS_ERR(em))
6023                 return PTR_ERR(em);
6024
6025         map = em->map_lookup;
6026         offset = logical - em->start;
6027
6028         stripe_len = map->stripe_len;
6029         stripe_nr = offset;
6030         /*
6031          * stripe_nr counts the total number of stripes we have to stride
6032          * to get to this block
6033          */
6034         stripe_nr = div64_u64(stripe_nr, stripe_len);
6035
6036         stripe_offset = stripe_nr * stripe_len;
6037         if (offset < stripe_offset) {
6038                 btrfs_crit(fs_info,
6039                            "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
6040                            stripe_offset, offset, em->start, logical,
6041                            stripe_len);
6042                 free_extent_map(em);
6043                 return -EINVAL;
6044         }
6045
6046         /* stripe_offset is the offset of this block in its stripe */
6047         stripe_offset = offset - stripe_offset;
6048
6049         /* If we're here for RAID56, we need to know the stripe-aligned start */
6050         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6051                 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
6052                 raid56_full_stripe_start = offset;
6053
6054                 /* Allow a write of a full stripe, but make sure we
6055                  * don't allow straddling of stripes.
6056                  */
6057                 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6058                                 full_stripe_len);
6059                 raid56_full_stripe_start *= full_stripe_len;
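                /*
                 * E.g. (hypothetical numbers): stripe_len = 64K with 3
                 * data stripes gives full_stripe_len = 192K; an offset
                 * of 200K rounds down to raid56_full_stripe_start = 192K.
                 */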
6060         }
6061
6062         if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6063                 u64 max_len;
6064                 /* For writes to RAID[56], allow a full stripeset across
6065                  * all disks.  For other RAID types and for RAID[56] reads,
6066                  * just allow a single stripe (on a single disk). */
6067                 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6068                     (op == BTRFS_MAP_WRITE)) {
6069                         max_len = stripe_len * nr_data_stripes(map) -
6070                                 (offset - raid56_full_stripe_start);
6071                 } else {
6072                         /* we limit the length of each bio to what fits in a stripe */
6073                         max_len = stripe_len - stripe_offset;
6074                 }
6075                 *length = min_t(u64, em->len - offset, max_len);
6076         } else {
6077                 *length = em->len - offset;
6078         }
6079
6080         /*
6081          * This is for when we're called from btrfs_bio_fits_in_stripe and all
6082          * it cares about is the length
6083          */
6084         if (!bbio_ret)
6085                 goto out;
6086
6087         down_read(&dev_replace->rwsem);
6088         dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6089         /*
6090          * Hold the semaphore for read during the whole operation, write is
6091          * requested at commit time but must wait.
6092          */
6093         if (!dev_replace_is_ongoing)
6094                 up_read(&dev_replace->rwsem);
6095
6096         if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6097             !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6098                 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6099                                                     dev_replace->srcdev->devid,
6100                                                     &mirror_num,
6101                                             &physical_to_patch_in_first_stripe);
6102                 if (ret)
6103                         goto out;
6104                 else
6105                         patch_the_first_stripe_for_dev_replace = 1;
6106         } else if (mirror_num > map->num_stripes) {
6107                 mirror_num = 0;
6108         }
6109
6110         num_stripes = 1;
6111         stripe_index = 0;
6112         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6113                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6114                                 &stripe_index);
6115                 if (!need_full_stripe(op))
6116                         mirror_num = 1;
6117         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
6118                 if (need_full_stripe(op))
6119                         num_stripes = map->num_stripes;
6120                 else if (mirror_num)
6121                         stripe_index = mirror_num - 1;
6122                 else {
6123                         stripe_index = find_live_mirror(fs_info, map, 0,
6124                                             dev_replace_is_ongoing);
6125                         mirror_num = stripe_index + 1;
6126                 }
6127
6128         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6129                 if (need_full_stripe(op)) {
6130                         num_stripes = map->num_stripes;
6131                 } else if (mirror_num) {
6132                         stripe_index = mirror_num - 1;
6133                 } else {
6134                         mirror_num = 1;
6135                 }
6136
6137         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6138                 u32 factor = map->num_stripes / map->sub_stripes;
6139
6140                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6141                 stripe_index *= map->sub_stripes;
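                /*
                 * E.g. (hypothetical): 4 devices with sub_stripes = 2 give
                 * factor = 2; stripe_nr = 5 becomes stripe_nr = 2 with
                 * stripe_index = (5 % 2) * 2 = 2, the second mirrored pair.
                 */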
6142
6143                 if (need_full_stripe(op))
6144                         num_stripes = map->sub_stripes;
6145                 else if (mirror_num)
6146                         stripe_index += mirror_num - 1;
6147                 else {
6148                         int old_stripe_index = stripe_index;
6149                         stripe_index = find_live_mirror(fs_info, map,
6150                                               stripe_index,
6151                                               dev_replace_is_ongoing);
6152                         mirror_num = stripe_index - old_stripe_index + 1;
6153                 }
6154
6155         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6156                 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6157                         /* push stripe_nr back to the start of the full stripe */
6158                         stripe_nr = div64_u64(raid56_full_stripe_start,
6159                                         stripe_len * nr_data_stripes(map));
6160
6161                         /* RAID[56] write or recovery. Return all stripes */
6162                         num_stripes = map->num_stripes;
6163                         max_errors = nr_parity_stripes(map);
6164
6165                         *length = map->stripe_len;
6166                         stripe_index = 0;
6167                         stripe_offset = 0;
6168                 } else {
6169                         /*
6170                          * Mirror #0 or #1 means the original data block.
6171                          * Mirror #2 is RAID5 parity block.
6172                          * Mirror #3 is RAID6 Q block.
6173                          */
6174                         stripe_nr = div_u64_rem(stripe_nr,
6175                                         nr_data_stripes(map), &stripe_index);
6176                         if (mirror_num > 1)
6177                                 stripe_index = nr_data_stripes(map) +
6178                                                 mirror_num - 2;
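                        /*
                         * E.g. (hypothetical) on a 6-device RAID6 chunk,
                         * nr_data_stripes = 4, so mirror_num = 3 selects
                         * stripe_index = 4 + 3 - 2 = 5, the Q stripe,
                         * before the rotation below is applied.
                         */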
6179
6180                         /* We distribute the parity blocks across stripes */
6181                         div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6182                                         &stripe_index);
6183                         if (!need_full_stripe(op) && mirror_num <= 1)
6184                                 mirror_num = 1;
6185                 }
6186         } else {
6187                 /*
6188                  * after this, stripe_nr is the number of stripes on this
6189                  * device we have to walk to find the data, and stripe_index is
6190                  * the number of our device in the stripe array
6191                  */
6192                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6193                                 &stripe_index);
6194                 mirror_num = stripe_index + 1;
6195         }
6196         if (stripe_index >= map->num_stripes) {
6197                 btrfs_crit(fs_info,
6198                            "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6199                            stripe_index, map->num_stripes);
6200                 ret = -EINVAL;
6201                 goto out;
6202         }
6203
6204         num_alloc_stripes = num_stripes;
6205         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6206                 if (op == BTRFS_MAP_WRITE)
6207                         num_alloc_stripes <<= 1;
6208                 if (op == BTRFS_MAP_GET_READ_MIRRORS)
6209                         num_alloc_stripes++;
6210                 tgtdev_indexes = num_stripes;
6211         }
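        /*
         * E.g. during a replace, a WRITE that maps to 2 stripes allocates
         * room for 4, so handle_ops_on_dev_replace() can duplicate each
         * write that hits the source device onto the target device.
         */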
6212
6213         bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6214         if (!bbio) {
6215                 ret = -ENOMEM;
6216                 goto out;
6217         }
6218         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
6219                 bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
6220
6221         /* build raid_map */
6222         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6223             (need_full_stripe(op) || mirror_num > 1)) {
6224                 u64 tmp;
6225                 unsigned rot;
6226
6227                 bbio->raid_map = (u64 *)((void *)bbio->stripes +
6228                                  sizeof(struct btrfs_bio_stripe) *
6229                                  num_alloc_stripes +
6230                                  sizeof(int) * tgtdev_indexes);
6231
6232                 /* Work out the disk rotation on this stripe-set */
6233                 div_u64_rem(stripe_nr, num_stripes, &rot);
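                /*
                 * Worked example (hypothetical): RAID5 with num_stripes = 3
                 * (2 data + P).  For stripe_nr = 4, rot = 4 % 3 = 1, so the
                 * data stripes fill raid_map slots 1 and 2 below and the P
                 * stripe wraps around into slot 0.
                 */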
6234
6235                 /* Fill in the logical address of each stripe */
6236                 tmp = stripe_nr * nr_data_stripes(map);
6237                 for (i = 0; i < nr_data_stripes(map); i++)
6238                         bbio->raid_map[(i+rot) % num_stripes] =
6239                                 em->start + (tmp + i) * map->stripe_len;
6240
6241                 bbio->raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
6242                 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6243                         bbio->raid_map[(i+rot+1) % num_stripes] =
6244                                 RAID6_Q_STRIPE;
6245         }
6246
6248         for (i = 0; i < num_stripes; i++) {
6249                 bbio->stripes[i].physical =
6250                         map->stripes[stripe_index].physical +
6251                         stripe_offset +
6252                         stripe_nr * map->stripe_len;
6253                 bbio->stripes[i].dev =
6254                         map->stripes[stripe_index].dev;
6255                 stripe_index++;
6256         }
6257
6258         if (need_full_stripe(op))
6259                 max_errors = btrfs_chunk_max_errors(map);
6260
6261         if (bbio->raid_map)
6262                 sort_parity_stripes(bbio, num_stripes);
6263
6264         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6265             need_full_stripe(op)) {
6266                 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
6267                                           &max_errors);
6268         }
6269
6270         *bbio_ret = bbio;
6271         bbio->map_type = map->type;
6272         bbio->num_stripes = num_stripes;
6273         bbio->max_errors = max_errors;
6274         bbio->mirror_num = mirror_num;
6275
6276         /*
6277          * This is the case that a READ && dev_replace_is_ongoing &&
6278          * mirror_num == num_stripes + 1 && the dev_replace target drive
6279          * is available as a mirror.
6280          */
6281         if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6282                 WARN_ON(num_stripes > 1);
6283                 bbio->stripes[0].dev = dev_replace->tgtdev;
6284                 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6285                 bbio->mirror_num = map->num_stripes + 1;
6286         }
6287 out:
6288         if (dev_replace_is_ongoing) {
6289                 lockdep_assert_held(&dev_replace->rwsem);
6290                 /* Unlock and let waiting writers proceed */
6291                 up_read(&dev_replace->rwsem);
6292         }
6293         free_extent_map(em);
6294         return ret;
6295 }
6296
6297 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6298                       u64 logical, u64 *length,
6299                       struct btrfs_bio **bbio_ret, int mirror_num)
6300 {
6301         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6302                                  mirror_num, 0);
6303 }
6304
6305 /* For Scrub/replace */
6306 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6307                      u64 logical, u64 *length,
6308                      struct btrfs_bio **bbio_ret)
6309 {
6310         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6311 }
6312
6313 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
6314                      u64 physical, u64 **logical, int *naddrs, int *stripe_len)
6315 {
6316         struct extent_map *em;
6317         struct map_lookup *map;
6318         u64 *buf;
6319         u64 bytenr;
6320         u64 length;
6321         u64 stripe_nr;
6322         u64 rmap_len;
6323         int i, j, nr = 0;
6324
6325         em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
6326         if (IS_ERR(em))
6327                 return -EIO;
6328
6329         map = em->map_lookup;
6330         length = em->len;
6331         rmap_len = map->stripe_len;
6332
6333         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
6334                 length = div_u64(length, map->num_stripes / map->sub_stripes);
6335         else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6336                 length = div_u64(length, map->num_stripes);
6337         else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6338                 length = div_u64(length, nr_data_stripes(map));
6339                 rmap_len = map->stripe_len * nr_data_stripes(map);
6340         }
6341
6342         buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
6343         if (!buf) {
6344                 free_extent_map(em);
6345                 return -ENOMEM;
6346         }
6344
6345         for (i = 0; i < map->num_stripes; i++) {
6346                 if (map->stripes[i].physical > physical ||
6347                     map->stripes[i].physical + length <= physical)
6348                         continue;
6349
6350                 stripe_nr = physical - map->stripes[i].physical;
6351                 stripe_nr = div64_u64(stripe_nr, map->stripe_len);
6352
6353                 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6354                         stripe_nr = stripe_nr * map->num_stripes + i;
6355                         stripe_nr = div_u64(stripe_nr, map->sub_stripes);
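                        /*
                         * E.g. (hypothetical): RAID10, num_stripes = 4,
                         * sub_stripes = 2.  A hit on device i = 3 at
                         * per-device stripe_nr = 5 maps to logical stripe
                         * (5 * 4 + 3) / 2 = 11 within the chunk.
                         */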
6356                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6357                         stripe_nr = stripe_nr * map->num_stripes + i;
6358                 } /* else if RAID[56], multiply by nr_data_stripes().
6359                    * Alternatively, just use rmap_len below instead of
6360                    * map->stripe_len */
6361
6362                 bytenr = chunk_start + stripe_nr * rmap_len;
6363                 WARN_ON(nr >= map->num_stripes);
6364                 for (j = 0; j < nr; j++) {
6365                         if (buf[j] == bytenr)
6366                                 break;
6367                 }
6368                 if (j == nr) {
6369                         WARN_ON(nr >= map->num_stripes);
6370                         buf[nr++] = bytenr;
6371                 }
6372         }
6373
6374         *logical = buf;
6375         *naddrs = nr;
6376         *stripe_len = rmap_len;
6377
6378         free_extent_map(em);
6379         return 0;
6380 }
6381
6382 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6383 {
6384         bio->bi_private = bbio->private;
6385         bio->bi_end_io = bbio->end_io;
6386         bio_endio(bio);
6387
6388         btrfs_put_bbio(bbio);
6389 }
6390
6391 static void btrfs_end_bio(struct bio *bio)
6392 {
6393         struct btrfs_bio *bbio = bio->bi_private;
6394         int is_orig_bio = 0;
6395
6396         if (bio->bi_status) {
6397                 atomic_inc(&bbio->error);
6398                 if (bio->bi_status == BLK_STS_IOERR ||
6399                     bio->bi_status == BLK_STS_TARGET) {
6400                         unsigned int stripe_index =
6401                                 btrfs_io_bio(bio)->stripe_index;
6402                         struct btrfs_device *dev;
6403
6404                         BUG_ON(stripe_index >= bbio->num_stripes);
6405                         dev = bbio->stripes[stripe_index].dev;
6406                         if (dev->bdev) {
6407                                 if (bio_op(bio) == REQ_OP_WRITE)
6408                                         btrfs_dev_stat_inc_and_print(dev,
6409                                                 BTRFS_DEV_STAT_WRITE_ERRS);
6410                                 else if (!(bio->bi_opf & REQ_RAHEAD))
6411                                         btrfs_dev_stat_inc_and_print(dev,
6412                                                 BTRFS_DEV_STAT_READ_ERRS);
6413                                 if (bio->bi_opf & REQ_PREFLUSH)
6414                                         btrfs_dev_stat_inc_and_print(dev,
6415                                                 BTRFS_DEV_STAT_FLUSH_ERRS);
6416                         }
6417                 }
6418         }
6419
6420         if (bio == bbio->orig_bio)
6421                 is_orig_bio = 1;
6422
6423         btrfs_bio_counter_dec(bbio->fs_info);
6424
6425         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6426                 if (!is_orig_bio) {
6427                         bio_put(bio);
6428                         bio = bbio->orig_bio;
6429                 }
6430
6431                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6432                 /* Only send an error to the higher layers if it is
6433                  * beyond the tolerance of the btrfs bio.
6434                  */
6435                 if (atomic_read(&bbio->error) > bbio->max_errors) {
6436                         bio->bi_status = BLK_STS_IOERR;
6437                 } else {
6438                         /*
6439                          * this bio is actually up to date, we didn't
6440                          * go over the max number of errors
6441                          */
6442                         bio->bi_status = BLK_STS_OK;
6443                 }
6444
6445                 btrfs_end_bbio(bbio, bio);
6446         } else if (!is_orig_bio) {
6447                 bio_put(bio);
6448         }
6449 }
6450
6451 /*
6452  * see run_scheduled_bios for a description of why bios are collected for
6453  * async submit.
6454  *
6455  * This will add one bio to the pending list for a device and make sure
6456  * the work struct is scheduled.
6457  */
6458 static noinline void btrfs_schedule_bio(struct btrfs_device *device,
6459                                         struct bio *bio)
6460 {
6461         struct btrfs_fs_info *fs_info = device->fs_info;
6462         int should_queue = 1;
6463         struct btrfs_pending_bios *pending_bios;
6464
6465         /* don't bother with additional async steps for reads, right now */
6466         if (bio_op(bio) == REQ_OP_READ) {
6467                 btrfsic_submit_bio(bio);
6468                 return;
6469         }
6470
6471         WARN_ON(bio->bi_next);
6472         bio->bi_next = NULL;
6473
6474         spin_lock(&device->io_lock);
6475         if (op_is_sync(bio->bi_opf))
6476                 pending_bios = &device->pending_sync_bios;
6477         else
6478                 pending_bios = &device->pending_bios;
6479
6480         if (pending_bios->tail)
6481                 pending_bios->tail->bi_next = bio;
6482
6483         pending_bios->tail = bio;
6484         if (!pending_bios->head)
6485                 pending_bios->head = bio;
6486         if (device->running_pending)
6487                 should_queue = 0;
6488
6489         spin_unlock(&device->io_lock);
6490
6491         if (should_queue)
6492                 btrfs_queue_work(fs_info->submit_workers, &device->work);
6493 }
6494
6495 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6496                               u64 physical, int dev_nr, int async)
6497 {
6498         struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
6499         struct btrfs_fs_info *fs_info = bbio->fs_info;
6500
6501         bio->bi_private = bbio;
6502         btrfs_io_bio(bio)->stripe_index = dev_nr;
6503         bio->bi_end_io = btrfs_end_bio;
6504         bio->bi_iter.bi_sector = physical >> 9;
6505         btrfs_debug_in_rcu(fs_info,
6506         "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6507                 bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6508                 (u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
6509                 bio->bi_iter.bi_size);
6510         bio_set_dev(bio, dev->bdev);
6511
6512         btrfs_bio_counter_inc_noblocked(fs_info);
6513
6514         if (async)
6515                 btrfs_schedule_bio(dev, bio);
6516         else
6517                 btrfsic_submit_bio(bio);
6518 }
6519
6520 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6521 {
6522         atomic_inc(&bbio->error);
6523         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6524                 /* Should be the original bio. */
6525                 WARN_ON(bio != bbio->orig_bio);
6526
6527                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6528                 bio->bi_iter.bi_sector = logical >> 9;
6529                 if (atomic_read(&bbio->error) > bbio->max_errors)
6530                         bio->bi_status = BLK_STS_IOERR;
6531                 else
6532                         bio->bi_status = BLK_STS_OK;
6533                 btrfs_end_bbio(bbio, bio);
6534         }
6535 }
6536
6537 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6538                            int mirror_num, int async_submit)
6539 {
6540         struct btrfs_device *dev;
6541         struct bio *first_bio = bio;
6542         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6543         u64 length = 0;
6544         u64 map_length;
6545         int ret;
6546         int dev_nr;
6547         int total_devs;
6548         struct btrfs_bio *bbio = NULL;
6549
6550         length = bio->bi_iter.bi_size;
6551         map_length = length;
6552
6553         btrfs_bio_counter_inc_blocked(fs_info);
6554         ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6555                                 &map_length, &bbio, mirror_num, 1);
6556         if (ret) {
6557                 btrfs_bio_counter_dec(fs_info);
6558                 return errno_to_blk_status(ret);
6559         }
6560
6561         total_devs = bbio->num_stripes;
6562         bbio->orig_bio = first_bio;
6563         bbio->private = first_bio->bi_private;
6564         bbio->end_io = first_bio->bi_end_io;
6565         bbio->fs_info = fs_info;
6566         atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6567
6568         if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6569             ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6570                 /* In this case, map_length has been set to the length of
6571                  * a single stripe, not the whole write. */
6572                 if (bio_op(bio) == REQ_OP_WRITE) {
6573                         ret = raid56_parity_write(fs_info, bio, bbio,
6574                                                   map_length);
6575                 } else {
6576                         ret = raid56_parity_recover(fs_info, bio, bbio,
6577                                                     map_length, mirror_num, 1);
6578                 }
6579
6580                 btrfs_bio_counter_dec(fs_info);
6581                 return errno_to_blk_status(ret);
6582         }
6583
6584         if (map_length < length) {
6585                 btrfs_crit(fs_info,
6586                            "mapping failed logical %llu bio len %llu len %llu",
6587                            logical, length, map_length);
6588                 BUG();
6589         }
6590
6591         for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6592                 dev = bbio->stripes[dev_nr].dev;
6593                 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6594                                                    &dev->dev_state) ||
6595                     (bio_op(first_bio) == REQ_OP_WRITE &&
6596                     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6597                         bbio_error(bbio, first_bio, logical);
6598                         continue;
6599                 }
6600
6601                 if (dev_nr < total_devs - 1)
6602                         bio = btrfs_bio_clone(first_bio);
6603                 else
6604                         bio = first_bio;
6605
6606                 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
6607                                   dev_nr, async_submit);
6608         }
6609         btrfs_bio_counter_dec(fs_info);
6610         return BLK_STS_OK;
6611 }
6612
6613 /*
6614  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6615  * return NULL.
6616  *
6617  * If devid and uuid are both specified, the match must be exact, otherwise
6618  * only devid is used.
6619  *
6620  * If @seed is true, traverse through the seed devices.
6621  */
6622 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6623                                        u64 devid, u8 *uuid, u8 *fsid,
6624                                        bool seed)
6625 {
6626         struct btrfs_device *device;
6627
6628         while (fs_devices) {
6629                 if (!fsid ||
6630                     !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6631                         list_for_each_entry(device, &fs_devices->devices,
6632                                             dev_list) {
6633                                 if (device->devid == devid &&
6634                                     (!uuid || memcmp(device->uuid, uuid,
6635                                                      BTRFS_UUID_SIZE) == 0))
6636                                         return device;
6637                         }
6638                 }
6639                 if (seed)
6640                         fs_devices = fs_devices->seed;
6641                 else
6642                         return NULL;
6643         }
6644         return NULL;
6645 }
6646
6647 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6648                                             u64 devid, u8 *dev_uuid)
6649 {
6650         struct btrfs_device *device;
6651
6652         device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6653         if (IS_ERR(device))
6654                 return device;
6655
6656         list_add(&device->dev_list, &fs_devices->devices);
6657         device->fs_devices = fs_devices;
6658         fs_devices->num_devices++;
6659
6660         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6661         fs_devices->missing_devices++;
6662
6663         return device;
6664 }
6665
6666 /**
6667  * btrfs_alloc_device - allocate struct btrfs_device
6668  * @fs_info:    used only for generating a new devid, can be NULL if
6669  *              devid is provided (i.e. @devid != NULL).
6670  * @devid:      a pointer to devid for this device.  If NULL a new devid
6671  *              is generated.
6672  * @uuid:       a pointer to UUID for this device.  If NULL a new UUID
6673  *              is generated.
6674  *
6675  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6676  * on error.  Returned struct is not linked onto any lists and must be
6677  * destroyed with btrfs_free_device.
6678  */
6679 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6680                                         const u64 *devid,
6681                                         const u8 *uuid)
6682 {
6683         struct btrfs_device *dev;
6684         u64 tmp;
6685
6686         if (WARN_ON(!devid && !fs_info))
6687                 return ERR_PTR(-EINVAL);
6688
6689         dev = __alloc_device();
6690         if (IS_ERR(dev))
6691                 return dev;
6692
6693         if (devid)
6694                 tmp = *devid;
6695         else {
6696                 int ret;
6697
6698                 ret = find_next_devid(fs_info, &tmp);
6699                 if (ret) {
6700                         btrfs_free_device(dev);
6701                         return ERR_PTR(ret);
6702                 }
6703         }
6704         dev->devid = tmp;
6705
6706         if (uuid)
6707                 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6708         else
6709                 generate_random_uuid(dev->uuid);
6710
6711         btrfs_init_work(&dev->work, btrfs_submit_helper,
6712                         pending_bios_fn, NULL, NULL);
6713
6714         return dev;
6715 }
6716
6717 /* Return -EIO on any error, otherwise return 0. */
6718 static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
6719                                    struct extent_buffer *leaf,
6720                                    struct btrfs_chunk *chunk, u64 logical)
6721 {
6722         u64 length;
6723         u64 stripe_len;
6724         u16 num_stripes;
6725         u16 sub_stripes;
6726         u64 type;
6727         u64 features;
6728         bool mixed = false;
6729
6730         length = btrfs_chunk_length(leaf, chunk);
6731         stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6732         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6733         sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6734         type = btrfs_chunk_type(leaf, chunk);
6735
6736         if (!num_stripes) {
6737                 btrfs_err(fs_info, "invalid chunk num_stripes: %u",
6738                           num_stripes);
6739                 return -EIO;
6740         }
6741         if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
6742                 btrfs_err(fs_info, "invalid chunk logical %llu", logical);
6743                 return -EIO;
6744         }
6745         if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
6746                 btrfs_err(fs_info, "invalid chunk sectorsize %u",
6747                           btrfs_chunk_sector_size(leaf, chunk));
6748                 return -EIO;
6749         }
6750         if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
6751                 btrfs_err(fs_info, "invalid chunk length %llu", length);
6752                 return -EIO;
6753         }
6754         if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
6755                 btrfs_err(fs_info, "invalid chunk stripe length: %llu",
6756                           stripe_len);
6757                 return -EIO;
6758         }
6759         if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6760             type) {
6761                 btrfs_err(fs_info, "unrecognized chunk type: %llu",
6762                           ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
6763                             BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6764                           btrfs_chunk_type(leaf, chunk));
6765                 return -EIO;
6766         }
6767
6768         if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
6769                 btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type);
6770                 return -EIO;
6771         }
6772
6773         if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
6774             (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
6775                 btrfs_err(fs_info,
6776                         "system chunk with data or metadata type: 0x%llx", type);
6777                 return -EIO;
6778         }
6779
6780         features = btrfs_super_incompat_flags(fs_info->super_copy);
6781         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
6782                 mixed = true;
6783
6784         if (!mixed) {
6785                 if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
6786                     (type & BTRFS_BLOCK_GROUP_DATA)) {
6787                         btrfs_err(fs_info,
6788                         "mixed chunk type in non-mixed mode: 0x%llx", type);
6789                         return -EIO;
6790                 }
6791         }
6792
6793         if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
6794             (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
6795             (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
6796             (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
6797             (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
6798             ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
6799              num_stripes != 1)) {
6800                 btrfs_err(fs_info,
6801                         "invalid num_stripes:sub_stripes %u:%u for profile %llu",
6802                         num_stripes, sub_stripes,
6803                         type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
6804                 return -EIO;
6805         }
6806
6807         return 0;
6808 }
6809
6810 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6811                                         u64 devid, u8 *uuid, bool error)
6812 {
6813         if (error)
6814                 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6815                               devid, uuid);
6816         else
6817                 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6818                               devid, uuid);
6819 }
6820
6821 static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
6822                           struct extent_buffer *leaf,
6823                           struct btrfs_chunk *chunk)
6824 {
6825         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
6826         struct map_lookup *map;
6827         struct extent_map *em;
6828         u64 logical;
6829         u64 length;
6830         u64 devid;
6831         u8 uuid[BTRFS_UUID_SIZE];
6832         int num_stripes;
6833         int ret;
6834         int i;
6835
6836         logical = key->offset;
6837         length = btrfs_chunk_length(leaf, chunk);
6838         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6839
6840         ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
6841         if (ret)
6842                 return ret;
6843
6844         read_lock(&map_tree->map_tree.lock);
6845         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6846         read_unlock(&map_tree->map_tree.lock);
6847
6848         /* already mapped? */
6849         if (em && em->start <= logical && em->start + em->len > logical) {
6850                 free_extent_map(em);
6851                 return 0;
6852         } else if (em) {
6853                 free_extent_map(em);
6854         }
6855
6856         em = alloc_extent_map();
6857         if (!em)
6858                 return -ENOMEM;
6859         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6860         if (!map) {
6861                 free_extent_map(em);
6862                 return -ENOMEM;
6863         }
6864
6865         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6866         em->map_lookup = map;
6867         em->start = logical;
6868         em->len = length;
6869         em->orig_start = 0;
6870         em->block_start = 0;
6871         em->block_len = em->len;
6872
6873         map->num_stripes = num_stripes;
6874         map->io_width = btrfs_chunk_io_width(leaf, chunk);
6875         map->io_align = btrfs_chunk_io_align(leaf, chunk);
6876         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6877         map->type = btrfs_chunk_type(leaf, chunk);
6878         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6879         map->verified_stripes = 0;
6880         for (i = 0; i < num_stripes; i++) {
6881                 map->stripes[i].physical =
6882                         btrfs_stripe_offset_nr(leaf, chunk, i);
6883                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6884                 read_extent_buffer(leaf, uuid, (unsigned long)
6885                                    btrfs_stripe_dev_uuid_nr(chunk, i),
6886                                    BTRFS_UUID_SIZE);
6887                 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6888                                                         devid, uuid, NULL, true);
6889                 if (!map->stripes[i].dev &&
6890                     !btrfs_test_opt(fs_info, DEGRADED)) {
6891                         free_extent_map(em);
6892                         btrfs_report_missing_device(fs_info, devid, uuid, true);
6893                         return -ENOENT;
6894                 }
6895                 if (!map->stripes[i].dev) {
6896                         map->stripes[i].dev =
6897                                 add_missing_dev(fs_info->fs_devices, devid,
6898                                                 uuid);
6899                         if (IS_ERR(map->stripes[i].dev)) {
6900                                 free_extent_map(em);
6901                                 btrfs_err(fs_info,
6902                                         "failed to init missing dev %llu: %ld",
6903                                         devid, PTR_ERR(map->stripes[i].dev));
6904                                 return PTR_ERR(map->stripes[i].dev);
6905                         }
6906                         btrfs_report_missing_device(fs_info, devid, uuid, false);
6907                 }
6908                 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6909                                 &(map->stripes[i].dev->dev_state));
6910
6911         }
6912
6913         write_lock(&map_tree->map_tree.lock);
6914         ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6915         write_unlock(&map_tree->map_tree.lock);
6916         if (ret < 0) {
6917                 btrfs_err(fs_info,
6918                           "failed to add chunk map, start=%llu len=%llu: %d",
6919                           em->start, em->len, ret);
6920         }
6921         free_extent_map(em);
6922
6923         return ret;
6924 }
6925
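/* Copy the on-disk fields of @dev_item from @leaf into @device. */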
6926 static void fill_device_from_item(struct extent_buffer *leaf,
6927                                  struct btrfs_dev_item *dev_item,
6928                                  struct btrfs_device *device)
6929 {
6930         unsigned long ptr;
6931
6932         device->devid = btrfs_device_id(leaf, dev_item);
6933         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6934         device->total_bytes = device->disk_total_bytes;
6935         device->commit_total_bytes = device->disk_total_bytes;
6936         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6937         device->commit_bytes_used = device->bytes_used;
6938         device->type = btrfs_device_type(leaf, dev_item);
6939         device->io_align = btrfs_device_io_align(leaf, dev_item);
6940         device->io_width = btrfs_device_io_width(leaf, dev_item);
6941         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6942         WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6943         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6944
6945         ptr = btrfs_device_uuid(dev_item);
6946         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6947 }
6948
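/*
 * Find the seed fs_devices matching @fsid and hook it into the sprout's
 * fs_info->fs_devices->seed list if it is not already there.  When
 * mounting degraded, an empty placeholder may be allocated instead of
 * opening the seed devices.
 */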
6949 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6950                                                   u8 *fsid)
6951 {
6952         struct btrfs_fs_devices *fs_devices;
6953         int ret;
6954
6955         lockdep_assert_held(&uuid_mutex);
6956         ASSERT(fsid);
6957
6958         fs_devices = fs_info->fs_devices->seed;
6959         while (fs_devices) {
6960                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6961                         return fs_devices;
6962
6963                 fs_devices = fs_devices->seed;
6964         }
6965
6966         fs_devices = find_fsid(fsid, NULL);
6967         if (!fs_devices) {
6968                 if (!btrfs_test_opt(fs_info, DEGRADED))
6969                         return ERR_PTR(-ENOENT);
6970
6971                 fs_devices = alloc_fs_devices(fsid, NULL);
6972                 if (IS_ERR(fs_devices))
6973                         return fs_devices;
6974
6975                 fs_devices->seeding = 1;
6976                 fs_devices->opened = 1;
6977                 return fs_devices;
6978         }
6979
6980         fs_devices = clone_fs_devices(fs_devices);
6981         if (IS_ERR(fs_devices))
6982                 return fs_devices;
6983
6984         ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6985         if (ret) {
6986                 free_fs_devices(fs_devices);
6987                 fs_devices = ERR_PTR(ret);
6988                 goto out;
6989         }
6990
6991         if (!fs_devices->seeding) {
6992                 close_fs_devices(fs_devices);
6993                 free_fs_devices(fs_devices);
6994                 fs_devices = ERR_PTR(-EINVAL);
6995                 goto out;
6996         }
6997
6998         fs_devices->seed = fs_info->fs_devices->seed;
6999         fs_info->fs_devices->seed = fs_devices;
7000 out:
7001         return fs_devices;
7002 }
7003
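/*
 * Read one device item from the chunk tree and update the matching
 * in-memory btrfs_device from it.  Items with a foreign fsid belong to
 * a seed filesystem and are resolved via open_seed_devices(); an
 * unknown devid is fatal unless mounting degraded.
 */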
7004 static int read_one_dev(struct btrfs_fs_info *fs_info,
7005                         struct extent_buffer *leaf,
7006                         struct btrfs_dev_item *dev_item)
7007 {
7008         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7009         struct btrfs_device *device;
7010         u64 devid;
7011         int ret;
7012         u8 fs_uuid[BTRFS_FSID_SIZE];
7013         u8 dev_uuid[BTRFS_UUID_SIZE];
7014
7015         devid = btrfs_device_id(leaf, dev_item);
7016         read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7017                            BTRFS_UUID_SIZE);
7018         read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7019                            BTRFS_FSID_SIZE);
7020
7021         if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7022                 fs_devices = open_seed_devices(fs_info, fs_uuid);
7023                 if (IS_ERR(fs_devices))
7024                         return PTR_ERR(fs_devices);
7025         }
7026
7027         device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
7028                                    fs_uuid, true);
7029         if (!device) {
7030                 if (!btrfs_test_opt(fs_info, DEGRADED)) {
7031                         btrfs_report_missing_device(fs_info, devid,
7032                                                         dev_uuid, true);
7033                         return -ENOENT;
7034                 }
7035
7036                 device = add_missing_dev(fs_devices, devid, dev_uuid);
7037                 if (IS_ERR(device)) {
7038                         btrfs_err(fs_info,
7039                                 "failed to add missing dev %llu: %ld",
7040                                 devid, PTR_ERR(device));
7041                         return PTR_ERR(device);
7042                 }
7043                 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7044         } else {
7045                 if (!device->bdev) {
7046                         if (!btrfs_test_opt(fs_info, DEGRADED)) {
7047                                 btrfs_report_missing_device(fs_info,
7048                                                 devid, dev_uuid, true);
7049                                 return -ENOENT;
7050                         }
7051                         btrfs_report_missing_device(fs_info, devid,
7052                                                         dev_uuid, false);
7053                 }
7054
7055                 if (!device->bdev &&
7056                     !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7057                         /*
7058                          * This happens when a device that was properly set up
7059                          * in the device info lists suddenly goes bad.
7060                          * device->bdev is NULL, so we have to set the
7061                          * BTRFS_DEV_STATE_MISSING bit here.
7062                          */
7063                         device->fs_devices->missing_devices++;
7064                         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7065                 }
7066
7067                 /* Move the device to its own fs_devices */
7068                 if (device->fs_devices != fs_devices) {
7069                         ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7070                                                         &device->dev_state));
7071
7072                         list_move(&device->dev_list, &fs_devices->devices);
7073                         device->fs_devices->num_devices--;
7074                         fs_devices->num_devices++;
7075
7076                         device->fs_devices->missing_devices--;
7077                         fs_devices->missing_devices++;
7078
7079                         device->fs_devices = fs_devices;
7080                 }
7081         }
7082
7083         if (device->fs_devices != fs_info->fs_devices) {
7084                 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7085                 if (device->generation !=
7086                     btrfs_device_generation(leaf, dev_item))
7087                         return -EINVAL;
7088         }
7089
7090         fill_device_from_item(leaf, dev_item, device);
7091         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7092         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7093            !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7094                 device->fs_devices->total_rw_bytes += device->total_bytes;
7095                 atomic64_add(device->total_bytes - device->bytes_used,
7096                                 &fs_info->free_chunk_space);
7097         }
7098         ret = 0;
7099         return ret;
7100 }
7101
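/*
 * Walk the sys_chunk_array embedded in the superblock and add the
 * system chunk mappings it describes.  This bootstraps the mapping
 * tree so that the chunk tree itself can be read afterwards.
 */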
7102 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7103 {
7104         struct btrfs_root *root = fs_info->tree_root;
7105         struct btrfs_super_block *super_copy = fs_info->super_copy;
7106         struct extent_buffer *sb;
7107         struct btrfs_disk_key *disk_key;
7108         struct btrfs_chunk *chunk;
7109         u8 *array_ptr;
7110         unsigned long sb_array_offset;
7111         int ret = 0;
7112         u32 num_stripes;
7113         u32 array_size;
7114         u32 len = 0;
7115         u32 cur_offset;
7116         u64 type;
7117         struct btrfs_key key;
7118
7119         ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7120         /*
7121          * This will create an extent buffer of nodesize; the superblock size is
7122          * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7123          * overallocate, but we can keep it as-is since only the first page is used.
7124          */
7125         sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
7126         if (IS_ERR(sb))
7127                 return PTR_ERR(sb);
7128         set_extent_buffer_uptodate(sb);
7129         btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
7130         /*
7131          * The sb extent buffer is artificial and just used to read the system array.
7132          * The set_extent_buffer_uptodate() call does not properly mark all its
7133          * pages up-to-date when the page is larger: the extent does not cover the
7134          * whole page, so check_page_uptodate() does not find all the page's
7135          * extents up-to-date (the hole beyond sb), and write_extent_buffer()
7136          * then triggers a WARN_ON.
7137          *
7138          * Regular short extents go through the mark_extent_buffer_dirty/writeback
7139          * cycle, but sb spans only this function. Add an explicit SetPageUptodate()
7140          * call to silence the warning, e.g. on PowerPC 64.
7141          */
7142         if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
7143                 SetPageUptodate(sb->pages[0]);
7144
7145         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7146         array_size = btrfs_super_sys_array_size(super_copy);
7147
7148         array_ptr = super_copy->sys_chunk_array;
7149         sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7150         cur_offset = 0;
7151
7152         while (cur_offset < array_size) {
7153                 disk_key = (struct btrfs_disk_key *)array_ptr;
7154                 len = sizeof(*disk_key);
7155                 if (cur_offset + len > array_size)
7156                         goto out_short_read;
7157
7158                 btrfs_disk_key_to_cpu(&key, disk_key);
7159
7160                 array_ptr += len;
7161                 sb_array_offset += len;
7162                 cur_offset += len;
7163
7164                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
7165                         chunk = (struct btrfs_chunk *)sb_array_offset;
7166                         /*
7167                          * At least one btrfs_chunk with one stripe must be
7168                           * present; the exact stripe count check comes afterwards.
7169                          */
7170                         len = btrfs_chunk_item_size(1);
7171                         if (cur_offset + len > array_size)
7172                                 goto out_short_read;
7173
7174                         num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7175                         if (!num_stripes) {
7176                                 btrfs_err(fs_info,
7177                                         "invalid number of stripes %u in sys_array at offset %u",
7178                                         num_stripes, cur_offset);
7179                                 ret = -EIO;
7180                                 break;
7181                         }
7182
7183                         type = btrfs_chunk_type(sb, chunk);
7184                         if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7185                                 btrfs_err(fs_info,
7186                             "invalid chunk type %llu in sys_array at offset %u",
7187                                         type, cur_offset);
7188                                 ret = -EIO;
7189                                 break;
7190                         }
7191
7192                         len = btrfs_chunk_item_size(num_stripes);
7193                         if (cur_offset + len > array_size)
7194                                 goto out_short_read;
7195
7196                         ret = read_one_chunk(fs_info, &key, sb, chunk);
7197                         if (ret)
7198                                 break;
7199                 } else {
7200                         btrfs_err(fs_info,
7201                             "unexpected item type %u in sys_array at offset %u",
7202                                   (u32)key.type, cur_offset);
7203                         ret = -EIO;
7204                         break;
7205                 }
7206                 array_ptr += len;
7207                 sb_array_offset += len;
7208                 cur_offset += len;
7209         }
7210         clear_extent_buffer_uptodate(sb);
7211         free_extent_buffer_stale(sb);
7212         return ret;
7213
7214 out_short_read:
7215         btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7216                         len, cur_offset);
7217         clear_extent_buffer_uptodate(sb);
7218         free_extent_buffer_stale(sb);
7219         return -EIO;
7220 }
7221
7222 /*
7223  * Check if all chunks in the fs are OK for read-write degraded mount
7224  *
7225  * If the @failing_dev is specified, it's accounted as missing.
7226  *
7227  * Return true if all chunks meet the minimal RW mount requirements.
7228  * Return false if any chunk doesn't meet the minimal RW mount requirements.
7229  */
7230 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7231                                         struct btrfs_device *failing_dev)
7232 {
7233         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
7234         struct extent_map *em;
7235         u64 next_start = 0;
7236         bool ret = true;
7237
7238         read_lock(&map_tree->map_tree.lock);
7239         em = lookup_extent_mapping(&map_tree->map_tree, 0, (u64)-1);
7240         read_unlock(&map_tree->map_tree.lock);
7241         /* No chunk at all? Return false anyway */
7242         if (!em) {
7243                 ret = false;
7244                 goto out;
7245         }
7246         while (em) {
7247                 struct map_lookup *map;
7248                 int missing = 0;
7249                 int max_tolerated;
7250                 int i;
7251
7252                 map = em->map_lookup;
7253                 max_tolerated =
7254                         btrfs_get_num_tolerated_disk_barrier_failures(
7255                                         map->type);
7256                 for (i = 0; i < map->num_stripes; i++) {
7257                         struct btrfs_device *dev = map->stripes[i].dev;
7258
7259                         if (!dev || !dev->bdev ||
7260                             test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7261                             dev->last_flush_error)
7262                                 missing++;
7263                         else if (failing_dev && failing_dev == dev)
7264                                 missing++;
7265                 }
7266                 if (missing > max_tolerated) {
7267                         if (!failing_dev)
7268                                 btrfs_warn(fs_info,
7269         "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7270                                    em->start, missing, max_tolerated);
7271                         free_extent_map(em);
7272                         ret = false;
7273                         goto out;
7274                 }
7275                 next_start = extent_map_end(em);
7276                 free_extent_map(em);
7277
7278                 read_lock(&map_tree->map_tree.lock);
7279                 em = lookup_extent_mapping(&map_tree->map_tree, next_start,
7280                                            (u64)(-1) - next_start);
7281                 read_unlock(&map_tree->map_tree.lock);
7282         }
7283 out:
7284         return ret;
7285 }
7286
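/*
 * Read all device items and chunk items from the chunk tree to build
 * the in-memory device list and chunk mappings, then validate the
 * device count and total size against the superblock.
 */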
7287 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7288 {
7289         struct btrfs_root *root = fs_info->chunk_root;
7290         struct btrfs_path *path;
7291         struct extent_buffer *leaf;
7292         struct btrfs_key key;
7293         struct btrfs_key found_key;
7294         int ret;
7295         int slot;
7296         u64 total_dev = 0;
7297
7298         path = btrfs_alloc_path();
7299         if (!path)
7300                 return -ENOMEM;
7301
7302         /*
7303          * Strictly speaking, uuid_mutex is needed only when mounting a sprout
7304          * FS, where reading a device item may modify the seed device list.
7305          */
7306         mutex_lock(&uuid_mutex);
7307         mutex_lock(&fs_info->chunk_mutex);
7308
7309         /*
7310          * Read all device items, and then all the chunk items. All
7311          * device items are found before any chunk item (their object id
7312          * is smaller than the lowest possible object id for a chunk
7313          * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7314          */
7315         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7316         key.offset = 0;
7317         key.type = 0;
7318         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7319         if (ret < 0)
7320                 goto error;
7321         while (1) {
7322                 leaf = path->nodes[0];
7323                 slot = path->slots[0];
7324                 if (slot >= btrfs_header_nritems(leaf)) {
7325                         ret = btrfs_next_leaf(root, path);
7326                         if (ret == 0)
7327                                 continue;
7328                         if (ret < 0)
7329                                 goto error;
7330                         break;
7331                 }
7332                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7333                 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7334                         struct btrfs_dev_item *dev_item;
7335                         dev_item = btrfs_item_ptr(leaf, slot,
7336                                                   struct btrfs_dev_item);
7337                         ret = read_one_dev(fs_info, leaf, dev_item);
7338                         if (ret)
7339                                 goto error;
7340                         total_dev++;
7341                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7342                         struct btrfs_chunk *chunk;
7343                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7344                         ret = read_one_chunk(fs_info, &found_key, leaf, chunk);
7345                         if (ret)
7346                                 goto error;
7347                 }
7348                 path->slots[0]++;
7349         }
7350
7351         /*
7352          * After loading the chunk tree we have all device information, so
7353          * do another round of validation checks.
7354          */
7355         if (total_dev != fs_info->fs_devices->total_devices) {
7356                 btrfs_err(fs_info,
7357            "super_num_devices %llu mismatch with num_devices %llu found here",
7358                           btrfs_super_num_devices(fs_info->super_copy),
7359                           total_dev);
7360                 ret = -EINVAL;
7361                 goto error;
7362         }
7363         if (btrfs_super_total_bytes(fs_info->super_copy) <
7364             fs_info->fs_devices->total_rw_bytes) {
7365                 btrfs_err(fs_info,
7366         "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7367                           btrfs_super_total_bytes(fs_info->super_copy),
7368                           fs_info->fs_devices->total_rw_bytes);
7369                 ret = -EINVAL;
7370                 goto error;
7371         }
7372         ret = 0;
7373 error:
7374         mutex_unlock(&fs_info->chunk_mutex);
7375         mutex_unlock(&uuid_mutex);
7376
7377         btrfs_free_path(path);
7378         return ret;
7379 }
7380
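/* Set the fs_info pointer of every device, including seed devices. */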
7381 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7382 {
7383         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7384         struct btrfs_device *device;
7385
7386         while (fs_devices) {
7387                 mutex_lock(&fs_devices->device_list_mutex);
7388                 list_for_each_entry(device, &fs_devices->devices, dev_list)
7389                         device->fs_info = fs_info;
7390                 mutex_unlock(&fs_devices->device_list_mutex);
7391
7392                 fs_devices = fs_devices->seed;
7393         }
7394 }
7395
7396 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
7397 {
7398         int i;
7399
7400         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7401                 btrfs_dev_stat_reset(dev, i);
7402 }
7403
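/*
 * Load the persistent I/O error statistics of every device from the
 * device tree.  A device without a dev_stats item starts out with
 * zeroed counters.
 */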
7404 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7405 {
7406         struct btrfs_key key;
7407         struct btrfs_key found_key;
7408         struct btrfs_root *dev_root = fs_info->dev_root;
7409         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7410         struct extent_buffer *eb;
7411         int slot;
7412         int ret = 0;
7413         struct btrfs_device *device;
7414         struct btrfs_path *path = NULL;
7415         int i;
7416
7417         path = btrfs_alloc_path();
7418         if (!path) {
7419                 ret = -ENOMEM;
7420                 goto out;
7421         }
7422
7423         mutex_lock(&fs_devices->device_list_mutex);
7424         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7425                 int item_size;
7426                 struct btrfs_dev_stats_item *ptr;
7427
7428                 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7429                 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7430                 key.offset = device->devid;
7431                 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
7432                 if (ret) {
7433                         __btrfs_reset_dev_stats(device);
7434                         device->dev_stats_valid = 1;
7435                         btrfs_release_path(path);
7436                         continue;
7437                 }
7438                 slot = path->slots[0];
7439                 eb = path->nodes[0];
7440                 btrfs_item_key_to_cpu(eb, &found_key, slot);
7441                 item_size = btrfs_item_size_nr(eb, slot);
7442
7443                 ptr = btrfs_item_ptr(eb, slot,
7444                                      struct btrfs_dev_stats_item);
7445
7446                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7447                         if (item_size >= (1 + i) * sizeof(__le64))
7448                                 btrfs_dev_stat_set(device, i,
7449                                         btrfs_dev_stats_value(eb, ptr, i));
7450                         else
7451                                 btrfs_dev_stat_reset(device, i);
7452                 }
7453
7454                 device->dev_stats_valid = 1;
7455                 btrfs_dev_stat_print_on_load(device);
7456                 btrfs_release_path(path);
7457         }
7458         mutex_unlock(&fs_devices->device_list_mutex);
7459
7460 out:
7461         btrfs_free_path(path);
7462         return ret < 0 ? ret : 0;
7463 }
7464
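/*
 * Write the in-memory error counters of @device into its dev_stats
 * item, replacing an existing item that is too small and inserting a
 * new one if none exists.
 */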
7465 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7466                                 struct btrfs_device *device)
7467 {
7468         struct btrfs_fs_info *fs_info = trans->fs_info;
7469         struct btrfs_root *dev_root = fs_info->dev_root;
7470         struct btrfs_path *path;
7471         struct btrfs_key key;
7472         struct extent_buffer *eb;
7473         struct btrfs_dev_stats_item *ptr;
7474         int ret;
7475         int i;
7476
7477         key.objectid = BTRFS_DEV_STATS_OBJECTID;
7478         key.type = BTRFS_PERSISTENT_ITEM_KEY;
7479         key.offset = device->devid;
7480
7481         path = btrfs_alloc_path();
7482         if (!path)
7483                 return -ENOMEM;
7484         ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7485         if (ret < 0) {
7486                 btrfs_warn_in_rcu(fs_info,
7487                         "error %d while searching for dev_stats item for device %s",
7488                               ret, rcu_str_deref(device->name));
7489                 goto out;
7490         }
7491
7492         if (ret == 0 &&
7493             btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7494                 /* need to delete old one and insert a new one */
7495                 ret = btrfs_del_item(trans, dev_root, path);
7496                 if (ret != 0) {
7497                         btrfs_warn_in_rcu(fs_info,
7498                                 "delete too small dev_stats item for device %s failed %d",
7499                                       rcu_str_deref(device->name), ret);
7500                         goto out;
7501                 }
7502                 ret = 1;
7503         }
7504
7505         if (ret == 1) {
7506                 /* need to insert a new item */
7507                 btrfs_release_path(path);
7508                 ret = btrfs_insert_empty_item(trans, dev_root, path,
7509                                               &key, sizeof(*ptr));
7510                 if (ret < 0) {
7511                         btrfs_warn_in_rcu(fs_info,
7512                                 "insert dev_stats item for device %s failed %d",
7513                                 rcu_str_deref(device->name), ret);
7514                         goto out;
7515                 }
7516         }
7517
7518         eb = path->nodes[0];
7519         ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7520         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7521                 btrfs_set_dev_stats_value(eb, ptr, i,
7522                                           btrfs_dev_stat_read(device, i));
7523         btrfs_mark_buffer_dirty(eb);
7524
7525 out:
7526         btrfs_free_path(path);
7527         return ret;
7528 }
7529
7530 /*
7531  * Called from commit_transaction(). Writes all changed device stats to disk.
7532  */
7533 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
7534                         struct btrfs_fs_info *fs_info)
7535 {
7536         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7537         struct btrfs_device *device;
7538         int stats_cnt;
7539         int ret = 0;
7540
7541         mutex_lock(&fs_devices->device_list_mutex);
7542         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7543                 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7544                 if (!device->dev_stats_valid || stats_cnt == 0)
7545                         continue;
7546
7548                 /*
7549                  * There is a LOAD-LOAD control dependency between the value of
7550                  * dev_stats_ccnt and updating the on-disk values which requires
7551                  * reading the in-memory counters. Such control dependencies
7552                  * require explicit read memory barriers.
7553                  *
7554                  * This memory barrier pairs with smp_mb__before_atomic() in
7555                  * btrfs_dev_stat_inc()/btrfs_dev_stat_set() and with the full
7556                  * barrier implied by atomic_xchg() in
7557                  * btrfs_dev_stat_read_and_reset().
7558                  */
7559                 smp_rmb();
7560
7561                 ret = update_dev_stat_item(trans, device);
7562                 if (!ret)
7563                         atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7564         }
7565         mutex_unlock(&fs_devices->device_list_mutex);
7566
7567         return ret;
7568 }
7569
7570 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7571 {
7572         btrfs_dev_stat_inc(dev, index);
7573         btrfs_dev_stat_print_on_error(dev);
7574 }
7575
7576 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7577 {
7578         if (!dev->dev_stats_valid)
7579                 return;
7580         btrfs_err_rl_in_rcu(dev->fs_info,
7581                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7582                            rcu_str_deref(dev->name),
7583                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7584                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7585                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7586                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7587                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7588 }
7589
7590 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7591 {
7592         int i;
7593
7594         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7595                 if (btrfs_dev_stat_read(dev, i) != 0)
7596                         break;
7597         if (i == BTRFS_DEV_STAT_VALUES_MAX)
7598                 return; /* all values == 0, suppress message */
7599
7600         btrfs_info_in_rcu(dev->fs_info,
7601                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7602                rcu_str_deref(dev->name),
7603                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7604                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7605                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7606                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7607                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7608 }
7609
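/*
 * Copy the error counters of the device given by @stats->devid into
 * @stats, optionally resetting them when BTRFS_DEV_STATS_RESET is set.
 * Backend of the BTRFS_IOC_GET_DEV_STATS ioctl.
 */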
7610 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7611                         struct btrfs_ioctl_get_dev_stats *stats)
7612 {
7613         struct btrfs_device *dev;
7614         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7615         int i;
7616
7617         mutex_lock(&fs_devices->device_list_mutex);
7618         dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
7619                                 true);
7620         mutex_unlock(&fs_devices->device_list_mutex);
7621
7622         if (!dev) {
7623                 btrfs_warn(fs_info, "get dev_stats failed, device not found");
7624                 return -ENODEV;
7625         } else if (!dev->dev_stats_valid) {
7626                 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7627                 return -ENODEV;
7628         } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7629                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7630                         if (stats->nr_items > i)
7631                                 stats->values[i] =
7632                                         btrfs_dev_stat_read_and_reset(dev, i);
7633                         else
7634                                 btrfs_dev_stat_reset(dev, i);
7635                 }
7636         } else {
7637                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7638                         if (stats->nr_items > i)
7639                                 stats->values[i] = btrfs_dev_stat_read(dev, i);
7640         }
7641         if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7642                 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7643         return 0;
7644 }
7645
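/*
 * Zero the magic of every superblock copy on @bdev so the device is no
 * longer recognized as btrfs, then notify udev and refresh the device
 * node timestamps for libblkid.
 */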
7646 void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
7647 {
7648         struct buffer_head *bh;
7649         struct btrfs_super_block *disk_super;
7650         int copy_num;
7651
7652         if (!bdev)
7653                 return;
7654
7655         for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
7656                 copy_num++) {
7657
7658                 if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
7659                         continue;
7660
7661                 disk_super = (struct btrfs_super_block *)bh->b_data;
7662
7663                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
7664                 set_buffer_dirty(bh);
7665                 sync_dirty_buffer(bh);
7666                 brelse(bh);
7667         }
7668
7669         /* Notify udev that device has changed */
7670         btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
7671
7672         /* Update ctime/mtime for device path for libblkid */
7673         update_dev_time(device_path);
7674 }
7675
7676 /*
7677  * Update the commit size of each resized device, which is used when
7678  * writing out the super blocks.
7679  */
7680 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
7681 {
7682         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7683         struct btrfs_device *curr, *next;
7684
7685         if (list_empty(&fs_devices->resized_devices))
7686                 return;
7687
7688         mutex_lock(&fs_devices->device_list_mutex);
7689         mutex_lock(&fs_info->chunk_mutex);
7690         list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
7691                                  resized_list) {
7692                 list_del_init(&curr->resized_list);
7693                 curr->commit_total_bytes = curr->disk_total_bytes;
7694         }
7695         mutex_unlock(&fs_info->chunk_mutex);
7696         mutex_unlock(&fs_devices->device_list_mutex);
7697 }
7698
7699 /* Must be invoked during the transaction commit */
7700 void btrfs_update_commit_device_bytes_used(struct btrfs_transaction *trans)
7701 {
7702         struct btrfs_fs_info *fs_info = trans->fs_info;
7703         struct extent_map *em;
7704         struct map_lookup *map;
7705         struct btrfs_device *dev;
7706         int i;
7707
7708         if (list_empty(&trans->pending_chunks))
7709                 return;
7710
7711         /* In order to kick the device replace finish process */
7712         mutex_lock(&fs_info->chunk_mutex);
7713         list_for_each_entry(em, &trans->pending_chunks, list) {
7714                 map = em->map_lookup;
7715
7716                 for (i = 0; i < map->num_stripes; i++) {
7717                         dev = map->stripes[i].dev;
7718                         dev->commit_bytes_used = dev->bytes_used;
7719                 }
7720         }
7721         mutex_unlock(&fs_info->chunk_mutex);
7722 }
7723
7724 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
7725 {
7726         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7727         while (fs_devices) {
7728                 fs_devices->fs_info = fs_info;
7729                 fs_devices = fs_devices->seed;
7730         }
7731 }
7732
7733 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
7734 {
7735         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7736         while (fs_devices) {
7737                 fs_devices->fs_info = NULL;
7738                 fs_devices = fs_devices->seed;
7739         }
7740 }
7741
7742 /*
7743  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7744  */
7745 int btrfs_bg_type_to_factor(u64 flags)
7746 {
7747         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
7748                      BTRFS_BLOCK_GROUP_RAID10))
7749                 return 2;
7750         return 1;
7751 }
7752
7753
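/*
 * Compute the per-device stripe length of a chunk: divide the chunk
 * length by the number of data stripes, since parity stripes (RAID5/6)
 * and extra copies (ncopies > 1) hold no extra logical space.  E.g. a
 * 3GiB RAID5 chunk with num_stripes == 4 has 3 data stripes, so each
 * device holds a 1GiB stripe.
 */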
7754 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
7755 {
7756         int index = btrfs_bg_flags_to_raid_index(type);
7757         int ncopies = btrfs_raid_array[index].ncopies;
7758         int data_stripes;
7759
7760         switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
7761         case BTRFS_BLOCK_GROUP_RAID5:
7762                 data_stripes = num_stripes - 1;
7763                 break;
7764         case BTRFS_BLOCK_GROUP_RAID6:
7765                 data_stripes = num_stripes - 2;
7766                 break;
7767         default:
7768                 data_stripes = num_stripes / ncopies;
7769                 break;
7770         }
7771         return div_u64(chunk_len, data_stripes);
7772 }
7773
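/*
 * Cross-check a single dev extent against the chunk mappings: the
 * chunk at @chunk_offset must exist, contain a stripe at exactly
 * @physical_offset on @devid, match the expected stripe length and
 * stay within the device boundary.
 */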
7774 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7775                                  u64 chunk_offset, u64 devid,
7776                                  u64 physical_offset, u64 physical_len)
7777 {
7778         struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
7779         struct extent_map *em;
7780         struct map_lookup *map;
7781         struct btrfs_device *dev;
7782         u64 stripe_len;
7783         bool found = false;
7784         int ret = 0;
7785         int i;
7786
7787         read_lock(&em_tree->lock);
7788         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7789         read_unlock(&em_tree->lock);
7790
7791         if (!em) {
7792                 btrfs_err(fs_info,
7793 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7794                           physical_offset, devid);
7795                 ret = -EUCLEAN;
7796                 goto out;
7797         }
7798
7799         map = em->map_lookup;
7800         stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7801         if (physical_len != stripe_len) {
7802                 btrfs_err(fs_info,
7803 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7804                           physical_offset, devid, em->start, physical_len,
7805                           stripe_len);
7806                 ret = -EUCLEAN;
7807                 goto out;
7808         }
7809
7810         for (i = 0; i < map->num_stripes; i++) {
7811                 if (map->stripes[i].dev->devid == devid &&
7812                     map->stripes[i].physical == physical_offset) {
7813                         found = true;
7814                         if (map->verified_stripes >= map->num_stripes) {
7815                                 btrfs_err(fs_info,
7816                                 "too many dev extents for chunk %llu found",
7817                                           em->start);
7818                                 ret = -EUCLEAN;
7819                                 goto out;
7820                         }
7821                         map->verified_stripes++;
7822                         break;
7823                 }
7824         }
7825         if (!found) {
7826                 btrfs_err(fs_info,
7827         "dev extent physical offset %llu devid %llu has no corresponding chunk",
7828                         physical_offset, devid);
7829                 ret = -EUCLEAN;
7830         }
7831
7832         /* Make sure no dev extent is beyond device boundary */
7833         dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
7834         if (!dev) {
7835                 btrfs_err(fs_info, "failed to find devid %llu", devid);
7836                 ret = -EUCLEAN;
7837                 goto out;
7838         }
7839
7840         /* It's possible this device is a dummy for a seed device */
7841         if (dev->disk_total_bytes == 0) {
7842                 dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL,
7843                                         NULL, false);
7844                 if (!dev) {
7845                         btrfs_err(fs_info, "failed to find seed devid %llu",
7846                                   devid);
7847                         ret = -EUCLEAN;
7848                         goto out;
7849                 }
7850         }
7851
7852         if (physical_offset + physical_len > dev->disk_total_bytes) {
7853                 btrfs_err(fs_info,
7854 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7855                           devid, physical_offset, physical_len,
7856                           dev->disk_total_bytes);
7857                 ret = -EUCLEAN;
7858                 goto out;
7859         }
7860 out:
7861         free_extent_map(em);
7862         return ret;
7863 }
7864
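/*
 * After verify_one_dev_extent() has run for all dev extents, check
 * that every chunk has as many verified stripes as stripes, i.e. that
 * no chunk lacks a dev extent.
 */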
7865 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7866 {
7867         struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
7868         struct extent_map *em;
7869         struct rb_node *node;
7870         int ret = 0;
7871
7872         read_lock(&em_tree->lock);
7873         for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7874                 em = rb_entry(node, struct extent_map, rb_node);
7875                 if (em->map_lookup->num_stripes !=
7876                     em->map_lookup->verified_stripes) {
7877                         btrfs_err(fs_info,
7878                         "chunk %llu has missing dev extent, have %d expect %d",
7879                                   em->start, em->map_lookup->verified_stripes,
7880                                   em->map_lookup->num_stripes);
7881                         ret = -EUCLEAN;
7882                         goto out;
7883                 }
7884         }
7885 out:
7886         read_unlock(&em_tree->lock);
7887         return ret;
7888 }
7889
7890 /*
7891  * Ensure that all dev extents are mapped to the correct chunk; otherwise
7892  * later chunk allocation/free would cause unexpected behavior.
7893  *
7894  * NOTE: This will iterate through the whole device tree, which should be of
7895  * the same size level as the chunk tree.  This slightly increases mount time.
7896  */
7897 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
7898 {
7899         struct btrfs_path *path;
7900         struct btrfs_root *root = fs_info->dev_root;
7901         struct btrfs_key key;
7902         u64 prev_devid = 0;
7903         u64 prev_dev_ext_end = 0;
7904         int ret = 0;
7905
7906         key.objectid = 1;
7907         key.type = BTRFS_DEV_EXTENT_KEY;
7908         key.offset = 0;
7909
7910         path = btrfs_alloc_path();
7911         if (!path)
7912                 return -ENOMEM;
7913
7914         path->reada = READA_FORWARD;
7915         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7916         if (ret < 0)
7917                 goto out;
7918
7919         if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7920                 ret = btrfs_next_item(root, path);
7921                 if (ret < 0)
7922                         goto out;
7923                 /* No dev extents at all? Not good */
7924                 if (ret > 0) {
7925                         ret = -EUCLEAN;
7926                         goto out;
7927                 }
7928         }
7929         while (1) {
7930                 struct extent_buffer *leaf = path->nodes[0];
7931                 struct btrfs_dev_extent *dext;
7932                 int slot = path->slots[0];
7933                 u64 chunk_offset;
7934                 u64 physical_offset;
7935                 u64 physical_len;
7936                 u64 devid;
7937
7938                 btrfs_item_key_to_cpu(leaf, &key, slot);
7939                 if (key.type != BTRFS_DEV_EXTENT_KEY)
7940                         break;
7941                 devid = key.objectid;
7942                 physical_offset = key.offset;
7943
7944                 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
7945                 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
7946                 physical_len = btrfs_dev_extent_length(leaf, dext);
7947
7948                 /* Check if this dev extent overlaps with the previous one */
7949                 if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
7950                         btrfs_err(fs_info,
7951 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
7952                                   devid, physical_offset, prev_dev_ext_end);
7953                         ret = -EUCLEAN;
7954                         goto out;
7955                 }
7956
7957                 ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
7958                                             physical_offset, physical_len);
7959                 if (ret < 0)
7960                         goto out;
7961                 prev_devid = devid;
7962                 prev_dev_ext_end = physical_offset + physical_len;
7963
7964                 ret = btrfs_next_item(root, path);
7965                 if (ret < 0)
7966                         goto out;
7967                 if (ret > 0) {
7968                         ret = 0;
7969                         break;
7970                 }
7971         }
7972
7973         /* Ensure all chunks have corresponding dev extents */
7974         ret = verify_chunk_dev_extent_mapping(fs_info);
7975 out:
7976         btrfs_free_path(path);
7977         return ret;
7978 }
7979
7980 /*
7981  * Check whether the given block group or device is pinned by any inode being
7982  * used as a swapfile.
7983  */
7984 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
7985 {
7986         struct btrfs_swapfile_pin *sp;
7987         struct rb_node *node;
7988
7989         spin_lock(&fs_info->swapfile_pins_lock);
7990         node = fs_info->swapfile_pins.rb_node;
7991         while (node) {
7992                 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
7993                 if (ptr < sp->ptr)
7994                         node = node->rb_left;
7995                 else if (ptr > sp->ptr)
7996                         node = node->rb_right;
7997                 else
7998                         break;
7999         }
8000         spin_unlock(&fs_info->swapfile_pins_lock);
8001         return node != NULL;
8002 }