fs/btrfs/volumes.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <asm/div64.h>
27 #include "compat.h"
28 #include "ctree.h"
29 #include "extent_map.h"
30 #include "disk-io.h"
31 #include "transaction.h"
32 #include "print-tree.h"
33 #include "volumes.h"
34 #include "async-thread.h"
35
36 struct map_lookup {
37         u64 type;
38         int io_align;
39         int io_width;
40         int stripe_len;
41         int sector_size;
42         int num_stripes;
43         int sub_stripes;
44         struct btrfs_bio_stripe stripes[];
45 };
46
47 static int init_first_rw_device(struct btrfs_trans_handle *trans,
48                                 struct btrfs_root *root,
49                                 struct btrfs_device *device);
50 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
51
52 #define map_lookup_size(n) (sizeof(struct map_lookup) + \
53                             (sizeof(struct btrfs_bio_stripe) * (n)))
54
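/*
 * uuid_mutex protects the global fs_uuids list below and serializes
 * device scanning, opening and closing across filesystems.
 */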
55 static DEFINE_MUTEX(uuid_mutex);
56 static LIST_HEAD(fs_uuids);
57
58 void btrfs_lock_volumes(void)
59 {
60         mutex_lock(&uuid_mutex);
61 }
62
63 void btrfs_unlock_volumes(void)
64 {
65         mutex_unlock(&uuid_mutex);
66 }
67
68 static void lock_chunks(struct btrfs_root *root)
69 {
70         mutex_lock(&root->fs_info->chunk_mutex);
71 }
72
73 static void unlock_chunks(struct btrfs_root *root)
74 {
75         mutex_unlock(&root->fs_info->chunk_mutex);
76 }
77
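/*
 * free every btrfs_device hanging off @fs_devices (including the saved
 * path names) and then the fs_devices structure itself.  The devices
 * must already be closed; the WARN_ON below catches callers that leak
 * an open count.
 */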
78 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
79 {
80         struct btrfs_device *device;
81         WARN_ON(fs_devices->opened);
82         while (!list_empty(&fs_devices->devices)) {
83                 device = list_entry(fs_devices->devices.next,
84                                     struct btrfs_device, dev_list);
85                 list_del(&device->dev_list);
86                 kfree(device->name);
87                 kfree(device);
88         }
89         kfree(fs_devices);
90 }
91
92 int btrfs_cleanup_fs_uuids(void)
93 {
94         struct btrfs_fs_devices *fs_devices;
95
96         while (!list_empty(&fs_uuids)) {
97                 fs_devices = list_entry(fs_uuids.next,
98                                         struct btrfs_fs_devices, list);
99                 list_del(&fs_devices->list);
100                 free_fs_devices(fs_devices);
101         }
102         return 0;
103 }
104
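/*
 * walk @head looking for a device with a matching devid.  If @uuid is
 * non-NULL, the device uuid must match as well.
 */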
105 static noinline struct btrfs_device *__find_device(struct list_head *head,
106                                                    u64 devid, u8 *uuid)
107 {
108         struct btrfs_device *dev;
109
110         list_for_each_entry(dev, head, dev_list) {
111                 if (dev->devid == devid &&
112                     (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
113                         return dev;
114                 }
115         }
116         return NULL;
117 }
118
119 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
120 {
121         struct btrfs_fs_devices *fs_devices;
122
123         list_for_each_entry(fs_devices, &fs_uuids, list) {
124                 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
125                         return fs_devices;
126         }
127         return NULL;
128 }
129
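/*
 * put a chain of bios we pulled off a device back on the front of its
 * pending list so they are retried in order on the next pass.
 */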
130 static void requeue_list(struct btrfs_pending_bios *pending_bios,
131                         struct bio *head, struct bio *tail)
132 {
133
134         struct bio *old_head;
135
136         old_head = pending_bios->head;
137         pending_bios->head = head;
138         if (pending_bios->tail)
139                 tail->bi_next = old_head;
140         else
141                 pending_bios->tail = tail;
142 }
143
144 /*
145  * we try to collect pending bios for a device so we don't get a large
146  * number of procs sending bios down to the same device.  This greatly
147  * improves the scheduler's ability to collect and merge the bios.
148  *
149  * But, it also turns into a long list of bios to process and that is sure
150  * to eventually make the worker thread block.  The solution here is to
151  * make some progress and then put this work struct back at the end of
152  * the list if the block device is congested.  This way, multiple devices
153  * can make progress from a single worker thread.
154  */
155 static noinline int run_scheduled_bios(struct btrfs_device *device)
156 {
157         struct bio *pending;
158         struct backing_dev_info *bdi;
159         struct btrfs_fs_info *fs_info;
160         struct btrfs_pending_bios *pending_bios;
161         struct bio *tail;
162         struct bio *cur;
163         int again = 0;
164         unsigned long num_run;
165         unsigned long batch_run = 0;
166         unsigned long limit;
167         unsigned long last_waited = 0;
168         int force_reg = 0;
169
170         bdi = blk_get_backing_dev_info(device->bdev);
171         fs_info = device->dev_root->fs_info;
172         limit = btrfs_async_submit_limit(fs_info);
173         limit = limit * 2 / 3;
174
175 loop:
176         spin_lock(&device->io_lock);
177
178 loop_lock:
179         num_run = 0;
180
181         /* take all the bios off the list at once and process them
182          * later on (without the lock held).  But, remember the
183          * tail and other pointers so the bios can be properly reinserted
184          * into the list if we hit congestion
185          */
186         if (!force_reg && device->pending_sync_bios.head) {
187                 pending_bios = &device->pending_sync_bios;
188                 force_reg = 1;
189         } else {
190                 pending_bios = &device->pending_bios;
191                 force_reg = 0;
192         }
193
194         pending = pending_bios->head;
195         tail = pending_bios->tail;
196         WARN_ON(pending && !tail);
197
198         /*
199          * if pending was null this time around, no bios need processing
200          * at all and we can stop.  Otherwise it'll loop back up again
201          * and do an additional check so no bios are missed.
202          *
203          * device->running_pending is used to synchronize with the
204          * schedule_bio code.
205          */
206         if (device->pending_sync_bios.head == NULL &&
207             device->pending_bios.head == NULL) {
208                 again = 0;
209                 device->running_pending = 0;
210         } else {
211                 again = 1;
212                 device->running_pending = 1;
213         }
214
215         pending_bios->head = NULL;
216         pending_bios->tail = NULL;
217
218         spin_unlock(&device->io_lock);
219
220         while (pending) {
221
222                 rmb();
223                 /* we want to work on both lists, but do more bios on the
224                  * sync list than the regular list
225                  */
226                 if ((num_run > 32 &&
227                     pending_bios != &device->pending_sync_bios &&
228                     device->pending_sync_bios.head) ||
229                    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
230                     device->pending_bios.head)) {
231                         spin_lock(&device->io_lock);
232                         requeue_list(pending_bios, pending, tail);
233                         goto loop_lock;
234                 }
235
236                 cur = pending;
237                 pending = pending->bi_next;
238                 cur->bi_next = NULL;
239                 atomic_dec(&fs_info->nr_async_bios);
240
241                 if (atomic_read(&fs_info->nr_async_bios) < limit &&
242                     waitqueue_active(&fs_info->async_submit_wait))
243                         wake_up(&fs_info->async_submit_wait);
244
245                 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
246
247                 submit_bio(cur->bi_rw, cur);
248                 num_run++;
249                 batch_run++;
250                 if (need_resched())
251                         cond_resched();
252
253                 /*
254                  * we made progress, there is more work to do and the bdi
255                  * is now congested.  Back off and let other work structs
256                  * run instead
257                  */
258                 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
259                     fs_info->fs_devices->open_devices > 1) {
260                         struct io_context *ioc;
261
262                         ioc = current->io_context;
263
264                         /*
265                          * the main goal here is that we don't want to
266                          * block if we're going to be able to submit
267                          * more requests without blocking.
268                          *
269                          * This code does two great things, it pokes into
270                          * the elevator code from a filesystem _and_
271                          * it makes assumptions about how batching works.
272                          */
273                         if (ioc && ioc->nr_batch_requests > 0 &&
274                             time_before(jiffies, ioc->last_waited + HZ/50UL) &&
275                             (last_waited == 0 ||
276                              ioc->last_waited == last_waited)) {
277                                 /*
278                                  * we want to go through our batch of
279                                  * requests and stop.  So, we copy out
280                                  * the ioc->last_waited time and test
281                                  * against it before looping
282                                  */
283                                 last_waited = ioc->last_waited;
284                                 if (need_resched())
285                                         cond_resched();
286                                 continue;
287                         }
288                         spin_lock(&device->io_lock);
289                         requeue_list(pending_bios, pending, tail);
290                         device->running_pending = 1;
291
292                         spin_unlock(&device->io_lock);
293                         btrfs_requeue_work(&device->work);
294                         goto done;
295                 }
296         }
297
298         cond_resched();
299         if (again)
300                 goto loop;
301
302         spin_lock(&device->io_lock);
303         if (device->pending_bios.head || device->pending_sync_bios.head)
304                 goto loop_lock;
305         spin_unlock(&device->io_lock);
306
307 done:
308         return 0;
309 }
310
311 static void pending_bios_fn(struct btrfs_work *work)
312 {
313         struct btrfs_device *device;
314
315         device = container_of(work, struct btrfs_device, work);
316         run_scheduled_bios(device);
317 }
318
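/*
 * add a scanned device to the in-memory list for its fsid, allocating
 * a new btrfs_fs_devices the first time the fsid is seen.  If the
 * device is already known, the recorded path and the latest
 * devid/generation are refreshed instead.
 */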
319 static noinline int device_list_add(const char *path,
320                            struct btrfs_super_block *disk_super,
321                            u64 devid, struct btrfs_fs_devices **fs_devices_ret)
322 {
323         struct btrfs_device *device;
324         struct btrfs_fs_devices *fs_devices;
325         u64 found_transid = btrfs_super_generation(disk_super);
326         char *name;
327
328         fs_devices = find_fsid(disk_super->fsid);
329         if (!fs_devices) {
330                 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
331                 if (!fs_devices)
332                         return -ENOMEM;
333                 INIT_LIST_HEAD(&fs_devices->devices);
334                 INIT_LIST_HEAD(&fs_devices->alloc_list);
335                 list_add(&fs_devices->list, &fs_uuids);
336                 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
337                 fs_devices->latest_devid = devid;
338                 fs_devices->latest_trans = found_transid;
339                 mutex_init(&fs_devices->device_list_mutex);
340                 device = NULL;
341         } else {
342                 device = __find_device(&fs_devices->devices, devid,
343                                        disk_super->dev_item.uuid);
344         }
345         if (!device) {
346                 if (fs_devices->opened)
347                         return -EBUSY;
348
349                 device = kzalloc(sizeof(*device), GFP_NOFS);
350                 if (!device) {
351                         /* we can safely leave the fs_devices entry around */
352                         return -ENOMEM;
353                 }
354                 device->devid = devid;
355                 device->work.func = pending_bios_fn;
356                 memcpy(device->uuid, disk_super->dev_item.uuid,
357                        BTRFS_UUID_SIZE);
358                 spin_lock_init(&device->io_lock);
359                 device->name = kstrdup(path, GFP_NOFS);
360                 if (!device->name) {
361                         kfree(device);
362                         return -ENOMEM;
363                 }
364                 INIT_LIST_HEAD(&device->dev_alloc_list);
365
366                 mutex_lock(&fs_devices->device_list_mutex);
367                 list_add(&device->dev_list, &fs_devices->devices);
368                 mutex_unlock(&fs_devices->device_list_mutex);
369
370                 device->fs_devices = fs_devices;
371                 fs_devices->num_devices++;
372         } else if (!device->name || strcmp(device->name, path)) {
373                 name = kstrdup(path, GFP_NOFS);
374                 if (!name)
375                         return -ENOMEM;
376                 kfree(device->name);
377                 device->name = name;
378                 if (device->missing) {
379                         fs_devices->missing_devices--;
380                         device->missing = 0;
381                 }
382         }
383
384         if (found_transid > fs_devices->latest_trans) {
385                 fs_devices->latest_devid = devid;
386                 fs_devices->latest_trans = found_transid;
387         }
388         *fs_devices_ret = fs_devices;
389         return 0;
390 }
391
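/*
 * make an in-memory copy of @orig and of every device on its list.
 * The copies carry the name, devid and uuid but no open block device;
 * this is used when sprouting a seed filesystem (see
 * btrfs_prepare_sprout).
 */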
392 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
393 {
394         struct btrfs_fs_devices *fs_devices;
395         struct btrfs_device *device;
396         struct btrfs_device *orig_dev;
397
398         fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
399         if (!fs_devices)
400                 return ERR_PTR(-ENOMEM);
401
402         INIT_LIST_HEAD(&fs_devices->devices);
403         INIT_LIST_HEAD(&fs_devices->alloc_list);
404         INIT_LIST_HEAD(&fs_devices->list);
405         mutex_init(&fs_devices->device_list_mutex);
406         fs_devices->latest_devid = orig->latest_devid;
407         fs_devices->latest_trans = orig->latest_trans;
408         memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
409
410         mutex_lock(&orig->device_list_mutex);
411         list_for_each_entry(orig_dev, &orig->devices, dev_list) {
412                 device = kzalloc(sizeof(*device), GFP_NOFS);
413                 if (!device)
414                         goto error;
415
416                 device->name = kstrdup(orig_dev->name, GFP_NOFS);
417                 if (!device->name) {
418                         kfree(device);
419                         goto error;
420                 }
421
422                 device->devid = orig_dev->devid;
423                 device->work.func = pending_bios_fn;
424                 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
425                 spin_lock_init(&device->io_lock);
426                 INIT_LIST_HEAD(&device->dev_list);
427                 INIT_LIST_HEAD(&device->dev_alloc_list);
428
429                 list_add(&device->dev_list, &fs_devices->devices);
430                 device->fs_devices = fs_devices;
431                 fs_devices->num_devices++;
432         }
433         mutex_unlock(&orig->device_list_mutex);
434         return fs_devices;
435 error:
436         mutex_unlock(&orig->device_list_mutex);
437         free_fs_devices(fs_devices);
438         return ERR_PTR(-ENOMEM);
439 }
440
441 int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
442 {
443         struct btrfs_device *device, *next;
444
445         mutex_lock(&uuid_mutex);
446 again:
447         mutex_lock(&fs_devices->device_list_mutex);
448         list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
449                 if (device->in_fs_metadata)
450                         continue;
451
452                 if (device->bdev) {
453                         blkdev_put(device->bdev, device->mode);
454                         device->bdev = NULL;
455                         fs_devices->open_devices--;
456                 }
457                 if (device->writeable) {
458                         list_del_init(&device->dev_alloc_list);
459                         device->writeable = 0;
460                         fs_devices->rw_devices--;
461                 }
462                 list_del_init(&device->dev_list);
463                 fs_devices->num_devices--;
464                 kfree(device->name);
465                 kfree(device);
466         }
467         mutex_unlock(&fs_devices->device_list_mutex);
468
469         if (fs_devices->seed) {
470                 fs_devices = fs_devices->seed;
471                 goto again;
472         }
473
474         mutex_unlock(&uuid_mutex);
475         return 0;
476 }
477
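/*
 * drop one open reference on @fs_devices.  When the last reference is
 * gone, release every block device and clear the open/writeable state.
 */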
478 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
479 {
480         struct btrfs_device *device;
481
482         if (--fs_devices->opened > 0)
483                 return 0;
484
485         list_for_each_entry(device, &fs_devices->devices, dev_list) {
486                 if (device->bdev) {
487                         blkdev_put(device->bdev, device->mode);
488                         fs_devices->open_devices--;
489                 }
490                 if (device->writeable) {
491                         list_del_init(&device->dev_alloc_list);
492                         fs_devices->rw_devices--;
493                 }
494
495                 device->bdev = NULL;
496                 device->writeable = 0;
497                 device->in_fs_metadata = 0;
498         }
499         WARN_ON(fs_devices->open_devices);
500         WARN_ON(fs_devices->rw_devices);
501         fs_devices->opened = 0;
502         fs_devices->seeding = 0;
503
504         return 0;
505 }
506
507 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
508 {
509         struct btrfs_fs_devices *seed_devices = NULL;
510         int ret;
511
512         mutex_lock(&uuid_mutex);
513         ret = __btrfs_close_devices(fs_devices);
514         if (!fs_devices->opened) {
515                 seed_devices = fs_devices->seed;
516                 fs_devices->seed = NULL;
517         }
518         mutex_unlock(&uuid_mutex);
519
520         while (seed_devices) {
521                 fs_devices = seed_devices;
522                 seed_devices = fs_devices->seed;
523                 __btrfs_close_devices(fs_devices);
524                 free_fs_devices(fs_devices);
525         }
526         return ret;
527 }
528
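/*
 * open every device on the list, verify that its superblock still
 * matches the expected devid and uuid, and remember the device with
 * the highest generation as latest_bdev.  Devices that fail to open
 * are skipped; -EIO is returned only if none could be opened.
 */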
529 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
530                                 fmode_t flags, void *holder)
531 {
532         struct block_device *bdev;
533         struct list_head *head = &fs_devices->devices;
534         struct btrfs_device *device;
535         struct block_device *latest_bdev = NULL;
536         struct buffer_head *bh;
537         struct btrfs_super_block *disk_super;
538         u64 latest_devid = 0;
539         u64 latest_transid = 0;
540         u64 devid;
541         int seeding = 1;
542         int ret = 0;
543
544         flags |= FMODE_EXCL;
545
546         list_for_each_entry(device, head, dev_list) {
547                 if (device->bdev)
548                         continue;
549                 if (!device->name)
550                         continue;
551
552                 bdev = blkdev_get_by_path(device->name, flags, holder);
553                 if (IS_ERR(bdev)) {
554                         printk(KERN_INFO "open %s failed\n", device->name);
555                         goto error;
556                 }
557                 set_blocksize(bdev, 4096);
558
559                 bh = btrfs_read_dev_super(bdev);
560                 if (!bh) {
561                         ret = -EINVAL;
562                         goto error_close;
563                 }
564
565                 disk_super = (struct btrfs_super_block *)bh->b_data;
566                 devid = btrfs_stack_device_id(&disk_super->dev_item);
567                 if (devid != device->devid)
568                         goto error_brelse;
569
570                 if (memcmp(device->uuid, disk_super->dev_item.uuid,
571                            BTRFS_UUID_SIZE))
572                         goto error_brelse;
573
574                 device->generation = btrfs_super_generation(disk_super);
575                 if (!latest_transid || device->generation > latest_transid) {
576                         latest_devid = devid;
577                         latest_transid = device->generation;
578                         latest_bdev = bdev;
579                 }
580
581                 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
582                         device->writeable = 0;
583                 } else {
584                         device->writeable = !bdev_read_only(bdev);
585                         seeding = 0;
586                 }
587
588                 device->bdev = bdev;
589                 device->in_fs_metadata = 0;
590                 device->mode = flags;
591
592                 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
593                         fs_devices->rotating = 1;
594
595                 fs_devices->open_devices++;
596                 if (device->writeable) {
597                         fs_devices->rw_devices++;
598                         list_add(&device->dev_alloc_list,
599                                  &fs_devices->alloc_list);
600                 }
601                 continue;
602
603 error_brelse:
604                 brelse(bh);
605 error_close:
606                 blkdev_put(bdev, flags);
607 error:
608                 continue;
609         }
610         if (fs_devices->open_devices == 0) {
611                 ret = -EIO;
612                 goto out;
613         }
614         fs_devices->seeding = seeding;
615         fs_devices->opened = 1;
616         fs_devices->latest_bdev = latest_bdev;
617         fs_devices->latest_devid = latest_devid;
618         fs_devices->latest_trans = latest_transid;
619         fs_devices->total_rw_bytes = 0;
620 out:
621         return ret;
622 }
623
624 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
625                        fmode_t flags, void *holder)
626 {
627         int ret;
628
629         mutex_lock(&uuid_mutex);
630         if (fs_devices->opened) {
631                 fs_devices->opened++;
632                 ret = 0;
633         } else {
634                 ret = __btrfs_open_devices(fs_devices, flags, holder);
635         }
636         mutex_unlock(&uuid_mutex);
637         return ret;
638 }
639
640 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
641                           struct btrfs_fs_devices **fs_devices_ret)
642 {
643         struct btrfs_super_block *disk_super;
644         struct block_device *bdev;
645         struct buffer_head *bh;
646         int ret;
647         u64 devid;
648         u64 transid;
649
650         mutex_lock(&uuid_mutex);
651
652         flags |= FMODE_EXCL;
653         bdev = blkdev_get_by_path(path, flags, holder);
654
655         if (IS_ERR(bdev)) {
656                 ret = PTR_ERR(bdev);
657                 goto error;
658         }
659
660         ret = set_blocksize(bdev, 4096);
661         if (ret)
662                 goto error_close;
663         bh = btrfs_read_dev_super(bdev);
664         if (!bh) {
665                 ret = -EINVAL;
666                 goto error_close;
667         }
668         disk_super = (struct btrfs_super_block *)bh->b_data;
669         devid = btrfs_stack_device_id(&disk_super->dev_item);
670         transid = btrfs_super_generation(disk_super);
671         if (disk_super->label[0])
672                 printk(KERN_INFO "device label %s ", disk_super->label);
673         else {
674                 /* FIXME, make a real uuid parser */
675                 printk(KERN_INFO "device fsid %llx-%llx ",
676                        *(unsigned long long *)disk_super->fsid,
677                        *(unsigned long long *)(disk_super->fsid + 8));
678         }
679         printk(KERN_CONT "devid %llu transid %llu %s\n",
680                (unsigned long long)devid, (unsigned long long)transid, path);
681         ret = device_list_add(path, disk_super, devid, fs_devices_ret);
682
683         brelse(bh);
684 error_close:
685         blkdev_put(bdev, flags);
686 error:
687         mutex_unlock(&uuid_mutex);
688         return ret;
689 }
690
691 /* helper to account for the device space used in the range [start, end] */
692 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
693                                    u64 end, u64 *length)
694 {
695         struct btrfs_key key;
696         struct btrfs_root *root = device->dev_root;
697         struct btrfs_dev_extent *dev_extent;
698         struct btrfs_path *path;
699         u64 extent_end;
700         int ret;
701         int slot;
702         struct extent_buffer *l;
703
704         *length = 0;
705
706         if (start >= device->total_bytes)
707                 return 0;
708
709         path = btrfs_alloc_path();
710         if (!path)
711                 return -ENOMEM;
712         path->reada = 2;
713
714         key.objectid = device->devid;
715         key.offset = start;
716         key.type = BTRFS_DEV_EXTENT_KEY;
717
718         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
719         if (ret < 0)
720                 goto out;
721         if (ret > 0) {
722                 ret = btrfs_previous_item(root, path, key.objectid, key.type);
723                 if (ret < 0)
724                         goto out;
725         }
726
727         while (1) {
728                 l = path->nodes[0];
729                 slot = path->slots[0];
730                 if (slot >= btrfs_header_nritems(l)) {
731                         ret = btrfs_next_leaf(root, path);
732                         if (ret == 0)
733                                 continue;
734                         if (ret < 0)
735                                 goto out;
736
737                         break;
738                 }
739                 btrfs_item_key_to_cpu(l, &key, slot);
740
741                 if (key.objectid < device->devid)
742                         goto next;
743
744                 if (key.objectid > device->devid)
745                         break;
746
747                 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
748                         goto next;
749
750                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
751                 extent_end = key.offset + btrfs_dev_extent_length(l,
752                                                                   dev_extent);
753                 if (key.offset <= start && extent_end > end) {
754                         *length = end - start + 1;
755                         break;
756                 } else if (key.offset <= start && extent_end > start)
757                         *length += extent_end - start;
758                 else if (key.offset > start && extent_end <= end)
759                         *length += extent_end - key.offset;
760                 else if (key.offset > start && key.offset <= end) {
761                         *length += end - key.offset + 1;
762                         break;
763                 } else if (key.offset > end)
764                         break;
765
766 next:
767                 path->slots[0]++;
768         }
769         ret = 0;
770 out:
771         btrfs_free_path(path);
772         return ret;
773 }
774
775 /*
776  * find_free_dev_extent - find free space in the specified device
777  * @trans:      transaction handle
778  * @device:     the device on which we search for free space
779  * @num_bytes:  the size of the free space that we need
780  * @start:      store the start of the free space.
781  * @len:        the size of the free space that we find, or the size of the max
782  *              free space if we don't find suitable free space
783  *
784  * this uses a pretty simple search, the expectation is that it is
785  * called very infrequently and that a given device has a small number
786  * of extents
787  *
788  * @start is used to store the start of the free space if we find it. But if we
789  * don't find suitable free space, it will be used to store the start position
790  * of the max free space.
791  *
792  * @len is used to store the size of the free space that we find.
793  * But if we don't find suitable free space, it is used to store the size of
794  * the max free space.
795  */
796 int find_free_dev_extent(struct btrfs_trans_handle *trans,
797                          struct btrfs_device *device, u64 num_bytes,
798                          u64 *start, u64 *len)
799 {
800         struct btrfs_key key;
801         struct btrfs_root *root = device->dev_root;
802         struct btrfs_dev_extent *dev_extent;
803         struct btrfs_path *path;
804         u64 hole_size;
805         u64 max_hole_start;
806         u64 max_hole_size;
807         u64 extent_end;
808         u64 search_start;
809         u64 search_end = device->total_bytes;
810         int ret;
811         int slot;
812         struct extent_buffer *l;
813
814         /* FIXME use last free of some kind */
815
816         /* we don't want to overwrite the superblock on the drive,
817          * so we make sure to start at an offset of at least 1MB
818          */
819         search_start = 1024 * 1024;
820
821         if (root->fs_info->alloc_start + num_bytes <= search_end)
822                 search_start = max(root->fs_info->alloc_start, search_start);
823
824         max_hole_start = search_start;
825         max_hole_size = 0;
826
827         if (search_start >= search_end) {
828                 ret = -ENOSPC;
829                 goto error;
830         }
831
832         path = btrfs_alloc_path();
833         if (!path) {
834                 ret = -ENOMEM;
835                 goto error;
836         }
837         path->reada = 2;
838
839         key.objectid = device->devid;
840         key.offset = search_start;
841         key.type = BTRFS_DEV_EXTENT_KEY;
842
843         ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
844         if (ret < 0)
845                 goto out;
846         if (ret > 0) {
847                 ret = btrfs_previous_item(root, path, key.objectid, key.type);
848                 if (ret < 0)
849                         goto out;
850         }
851
852         while (1) {
853                 l = path->nodes[0];
854                 slot = path->slots[0];
855                 if (slot >= btrfs_header_nritems(l)) {
856                         ret = btrfs_next_leaf(root, path);
857                         if (ret == 0)
858                                 continue;
859                         if (ret < 0)
860                                 goto out;
861
862                         break;
863                 }
864                 btrfs_item_key_to_cpu(l, &key, slot);
865
866                 if (key.objectid < device->devid)
867                         goto next;
868
869                 if (key.objectid > device->devid)
870                         break;
871
872                 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
873                         goto next;
874
875                 if (key.offset > search_start) {
876                         hole_size = key.offset - search_start;
877
878                         if (hole_size > max_hole_size) {
879                                 max_hole_start = search_start;
880                                 max_hole_size = hole_size;
881                         }
882
883                         /*
884                          * If this free space is greater than what we need,
885                          * it must be the max free space that we have found
886                          * until now, so max_hole_start must point to the start
887                          * of this free space and the length of this free space
888                          * is stored in max_hole_size. Thus, we return
889                          * max_hole_start and max_hole_size and go back to the
890                          * caller.
891                          */
892                         if (hole_size >= num_bytes) {
893                                 ret = 0;
894                                 goto out;
895                         }
896                 }
897
898                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
899                 extent_end = key.offset + btrfs_dev_extent_length(l,
900                                                                   dev_extent);
901                 if (extent_end > search_start)
902                         search_start = extent_end;
903 next:
904                 path->slots[0]++;
905                 cond_resched();
906         }
907
908         hole_size = search_end - search_start;
909         if (hole_size > max_hole_size) {
910                 max_hole_start = search_start;
911                 max_hole_size = hole_size;
912         }
913
914         /* See above. */
915         if (hole_size < num_bytes)
916                 ret = -ENOSPC;
917         else
918                 ret = 0;
919
920 out:
921         btrfs_free_path(path);
922 error:
923         *start = max_hole_start;
924         if (len)
925                 *len = max_hole_size;
926         return ret;
927 }
928
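/*
 * remove the dev extent item that covers @start on @device and give
 * the extent's length back by reducing device->bytes_used.
 */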
929 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
930                           struct btrfs_device *device,
931                           u64 start)
932 {
933         int ret;
934         struct btrfs_path *path;
935         struct btrfs_root *root = device->dev_root;
936         struct btrfs_key key;
937         struct btrfs_key found_key;
938         struct extent_buffer *leaf = NULL;
939         struct btrfs_dev_extent *extent = NULL;
940
941         path = btrfs_alloc_path();
942         if (!path)
943                 return -ENOMEM;
944
945         key.objectid = device->devid;
946         key.offset = start;
947         key.type = BTRFS_DEV_EXTENT_KEY;
948
949         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
950         if (ret > 0) {
951                 ret = btrfs_previous_item(root, path, key.objectid,
952                                           BTRFS_DEV_EXTENT_KEY);
953                 BUG_ON(ret);
954                 leaf = path->nodes[0];
955                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
956                 extent = btrfs_item_ptr(leaf, path->slots[0],
957                                         struct btrfs_dev_extent);
958                 BUG_ON(found_key.offset > start || found_key.offset +
959                        btrfs_dev_extent_length(leaf, extent) < start);
960                 ret = 0;
961         } else if (ret == 0) {
962                 leaf = path->nodes[0];
963                 extent = btrfs_item_ptr(leaf, path->slots[0],
964                                         struct btrfs_dev_extent);
965         }
966         BUG_ON(ret);
967
968         if (device->bytes_used > 0)
969                 device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
970         ret = btrfs_del_item(trans, root, path);
971         BUG_ON(ret);
972
973         btrfs_free_path(path);
974         return ret;
975 }
976
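/*
 * insert a dev extent item describing the [start, start + num_bytes)
 * range on @device and point it back at the owning chunk.
 */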
977 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
978                            struct btrfs_device *device,
979                            u64 chunk_tree, u64 chunk_objectid,
980                            u64 chunk_offset, u64 start, u64 num_bytes)
981 {
982         int ret;
983         struct btrfs_path *path;
984         struct btrfs_root *root = device->dev_root;
985         struct btrfs_dev_extent *extent;
986         struct extent_buffer *leaf;
987         struct btrfs_key key;
988
989         WARN_ON(!device->in_fs_metadata);
990         path = btrfs_alloc_path();
991         if (!path)
992                 return -ENOMEM;
993
994         key.objectid = device->devid;
995         key.offset = start;
996         key.type = BTRFS_DEV_EXTENT_KEY;
997         ret = btrfs_insert_empty_item(trans, root, path, &key,
998                                       sizeof(*extent));
999         BUG_ON(ret);
1000
1001         leaf = path->nodes[0];
1002         extent = btrfs_item_ptr(leaf, path->slots[0],
1003                                 struct btrfs_dev_extent);
1004         btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1005         btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1006         btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1007
1008         write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1009                     (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1010                     BTRFS_UUID_SIZE);
1011
1012         btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1013         btrfs_mark_buffer_dirty(leaf);
1014         btrfs_free_path(path);
1015         return ret;
1016 }
1017
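/*
 * return the offset just past the end of the last chunk item for
 * @objectid, or 0 if there is none yet.
 */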
1018 static noinline int find_next_chunk(struct btrfs_root *root,
1019                                     u64 objectid, u64 *offset)
1020 {
1021         struct btrfs_path *path;
1022         int ret;
1023         struct btrfs_key key;
1024         struct btrfs_chunk *chunk;
1025         struct btrfs_key found_key;
1026
1027         path = btrfs_alloc_path();
1028         BUG_ON(!path);
1029
1030         key.objectid = objectid;
1031         key.offset = (u64)-1;
1032         key.type = BTRFS_CHUNK_ITEM_KEY;
1033
1034         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1035         if (ret < 0)
1036                 goto error;
1037
1038         BUG_ON(ret == 0);
1039
1040         ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1041         if (ret) {
1042                 *offset = 0;
1043         } else {
1044                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1045                                       path->slots[0]);
1046                 if (found_key.objectid != objectid)
1047                         *offset = 0;
1048                 else {
1049                         chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1050                                                struct btrfs_chunk);
1051                         *offset = found_key.offset +
1052                                 btrfs_chunk_length(path->nodes[0], chunk);
1053                 }
1054         }
1055         ret = 0;
1056 error:
1057         btrfs_free_path(path);
1058         return ret;
1059 }
1060
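/*
 * return the next free device id: one past the highest devid stored
 * in the chunk tree, or 1 if no device items exist yet.
 */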
1061 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1062 {
1063         int ret;
1064         struct btrfs_key key;
1065         struct btrfs_key found_key;
1066         struct btrfs_path *path;
1067
1068         root = root->fs_info->chunk_root;
1069
1070         path = btrfs_alloc_path();
1071         if (!path)
1072                 return -ENOMEM;
1073
1074         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1075         key.type = BTRFS_DEV_ITEM_KEY;
1076         key.offset = (u64)-1;
1077
1078         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1079         if (ret < 0)
1080                 goto error;
1081
1082         BUG_ON(ret == 0);
1083
1084         ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1085                                   BTRFS_DEV_ITEM_KEY);
1086         if (ret) {
1087                 *objectid = 1;
1088         } else {
1089                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1090                                       path->slots[0]);
1091                 *objectid = found_key.offset + 1;
1092         }
1093         ret = 0;
1094 error:
1095         btrfs_free_path(path);
1096         return ret;
1097 }
1098
1099 /*
1100  * the device information is stored in the chunk root;
1101  * the btrfs_device struct should be fully filled in by the caller
1102  */
1103 int btrfs_add_device(struct btrfs_trans_handle *trans,
1104                      struct btrfs_root *root,
1105                      struct btrfs_device *device)
1106 {
1107         int ret;
1108         struct btrfs_path *path;
1109         struct btrfs_dev_item *dev_item;
1110         struct extent_buffer *leaf;
1111         struct btrfs_key key;
1112         unsigned long ptr;
1113
1114         root = root->fs_info->chunk_root;
1115
1116         path = btrfs_alloc_path();
1117         if (!path)
1118                 return -ENOMEM;
1119
1120         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1121         key.type = BTRFS_DEV_ITEM_KEY;
1122         key.offset = device->devid;
1123
1124         ret = btrfs_insert_empty_item(trans, root, path, &key,
1125                                       sizeof(*dev_item));
1126         if (ret)
1127                 goto out;
1128
1129         leaf = path->nodes[0];
1130         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1131
1132         btrfs_set_device_id(leaf, dev_item, device->devid);
1133         btrfs_set_device_generation(leaf, dev_item, 0);
1134         btrfs_set_device_type(leaf, dev_item, device->type);
1135         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1136         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1137         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1138         btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1139         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1140         btrfs_set_device_group(leaf, dev_item, 0);
1141         btrfs_set_device_seek_speed(leaf, dev_item, 0);
1142         btrfs_set_device_bandwidth(leaf, dev_item, 0);
1143         btrfs_set_device_start_offset(leaf, dev_item, 0);
1144
1145         ptr = (unsigned long)btrfs_device_uuid(dev_item);
1146         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1147         ptr = (unsigned long)btrfs_device_fsid(dev_item);
1148         write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1149         btrfs_mark_buffer_dirty(leaf);
1150
1151         ret = 0;
1152 out:
1153         btrfs_free_path(path);
1154         return ret;
1155 }
1156
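/*
 * delete the dev item for @device from the chunk tree, in its own
 * transaction and under the chunk mutex.
 */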
1157 static int btrfs_rm_dev_item(struct btrfs_root *root,
1158                              struct btrfs_device *device)
1159 {
1160         int ret;
1161         struct btrfs_path *path;
1162         struct btrfs_key key;
1163         struct btrfs_trans_handle *trans;
1164
1165         root = root->fs_info->chunk_root;
1166
1167         path = btrfs_alloc_path();
1168         if (!path)
1169                 return -ENOMEM;
1170
1171         trans = btrfs_start_transaction(root, 0);
1172         if (IS_ERR(trans)) {
1173                 btrfs_free_path(path);
1174                 return PTR_ERR(trans);
1175         }
1176         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1177         key.type = BTRFS_DEV_ITEM_KEY;
1178         key.offset = device->devid;
1179         lock_chunks(root);
1180
1181         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1182         if (ret < 0)
1183                 goto out;
1184
1185         if (ret > 0) {
1186                 ret = -ENOENT;
1187                 goto out;
1188         }
1189
1190         ret = btrfs_del_item(trans, root, path);
1191         if (ret)
1192                 goto out;
1193 out:
1194         btrfs_free_path(path);
1195         unlock_chunks(root);
1196         btrfs_commit_transaction(trans, root);
1197         return ret;
1198 }
1199
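/*
 * remove a device from a mounted filesystem.  @device_path may be the
 * literal string "missing" to pick the first device that is referenced
 * by the metadata but has no block device.  The device is shrunk to
 * zero, its dev item is deleted and, when the device was open for
 * writing, the old superblock magic is zeroed so the disk is no longer
 * detected as part of the filesystem.
 */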
1200 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1201 {
1202         struct btrfs_device *device;
1203         struct btrfs_device *next_device;
1204         struct block_device *bdev;
1205         struct buffer_head *bh = NULL;
1206         struct btrfs_super_block *disk_super;
1207         u64 all_avail;
1208         u64 devid;
1209         u64 num_devices;
1210         u8 *dev_uuid;
1211         int ret = 0;
1212
1213         mutex_lock(&uuid_mutex);
1214         mutex_lock(&root->fs_info->volume_mutex);
1215
1216         all_avail = root->fs_info->avail_data_alloc_bits |
1217                 root->fs_info->avail_system_alloc_bits |
1218                 root->fs_info->avail_metadata_alloc_bits;
1219
1220         if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1221             root->fs_info->fs_devices->num_devices <= 4) {
1222                 printk(KERN_ERR "btrfs: unable to go below four devices "
1223                        "on raid10\n");
1224                 ret = -EINVAL;
1225                 goto out;
1226         }
1227
1228         if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1229             root->fs_info->fs_devices->num_devices <= 2) {
1230                 printk(KERN_ERR "btrfs: unable to go below two "
1231                        "devices on raid1\n");
1232                 ret = -EINVAL;
1233                 goto out;
1234         }
1235
1236         if (strcmp(device_path, "missing") == 0) {
1237                 struct list_head *devices;
1238                 struct btrfs_device *tmp;
1239
1240                 device = NULL;
1241                 devices = &root->fs_info->fs_devices->devices;
1242                 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1243                 list_for_each_entry(tmp, devices, dev_list) {
1244                         if (tmp->in_fs_metadata && !tmp->bdev) {
1245                                 device = tmp;
1246                                 break;
1247                         }
1248                 }
1249                 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1250                 bdev = NULL;
1251                 bh = NULL;
1252                 disk_super = NULL;
1253                 if (!device) {
1254                         printk(KERN_ERR "btrfs: no missing devices found to "
1255                                "remove\n");
1256                         goto out;
1257                 }
1258         } else {
1259                 bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
1260                                           root->fs_info->bdev_holder);
1261                 if (IS_ERR(bdev)) {
1262                         ret = PTR_ERR(bdev);
1263                         goto out;
1264                 }
1265
1266                 set_blocksize(bdev, 4096);
1267                 bh = btrfs_read_dev_super(bdev);
1268                 if (!bh) {
1269                         ret = -EINVAL;
1270                         goto error_close;
1271                 }
1272                 disk_super = (struct btrfs_super_block *)bh->b_data;
1273                 devid = btrfs_stack_device_id(&disk_super->dev_item);
1274                 dev_uuid = disk_super->dev_item.uuid;
1275                 device = btrfs_find_device(root, devid, dev_uuid,
1276                                            disk_super->fsid);
1277                 if (!device) {
1278                         ret = -ENOENT;
1279                         goto error_brelse;
1280                 }
1281         }
1282
1283         if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1284                 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1285                        "device\n");
1286                 ret = -EINVAL;
1287                 goto error_brelse;
1288         }
1289
1290         if (device->writeable) {
1291                 list_del_init(&device->dev_alloc_list);
1292                 root->fs_info->fs_devices->rw_devices--;
1293         }
1294
1295         ret = btrfs_shrink_device(device, 0);
1296         if (ret)
1297                 goto error_undo;
1298
1299         ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1300         if (ret)
1301                 goto error_undo;
1302
1303         device->in_fs_metadata = 0;
1304
1305         /*
1306          * the device list mutex makes sure that we don't change
1307          * the device list while someone else is writing out all
1308          * the device supers.
1309          */
1310         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1311         list_del_init(&device->dev_list);
1312         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1313
1314         device->fs_devices->num_devices--;
1315
1316         if (device->missing)
1317                 root->fs_info->fs_devices->missing_devices--;
1318
1319         next_device = list_entry(root->fs_info->fs_devices->devices.next,
1320                                  struct btrfs_device, dev_list);
1321         if (device->bdev == root->fs_info->sb->s_bdev)
1322                 root->fs_info->sb->s_bdev = next_device->bdev;
1323         if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1324                 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1325
1326         if (device->bdev) {
1327                 blkdev_put(device->bdev, device->mode);
1328                 device->bdev = NULL;
1329                 device->fs_devices->open_devices--;
1330         }
1331
1332         num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
1333         btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
1334
1335         if (device->fs_devices->open_devices == 0) {
1336                 struct btrfs_fs_devices *fs_devices;
1337                 fs_devices = root->fs_info->fs_devices;
1338                 while (fs_devices) {
1339                         if (fs_devices->seed == device->fs_devices)
1340                                 break;
1341                         fs_devices = fs_devices->seed;
1342                 }
1343                 fs_devices->seed = device->fs_devices->seed;
1344                 device->fs_devices->seed = NULL;
1345                 __btrfs_close_devices(device->fs_devices);
1346                 free_fs_devices(device->fs_devices);
1347         }
1348
1349         /*
1350          * at this point, the device is zero sized.  We want to
1351          * remove it from the devices list and zero out the old super
1352          */
1353         if (device->writeable) {
1354                 /* make sure this device isn't detected as part of
1355                  * the FS anymore
1356                  */
1357                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1358                 set_buffer_dirty(bh);
1359                 sync_dirty_buffer(bh);
1360         }
1361
1362         kfree(device->name);
1363         kfree(device);
1364         ret = 0;
1365
1366 error_brelse:
1367         brelse(bh);
1368 error_close:
1369         if (bdev)
1370                 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1371 out:
1372         mutex_unlock(&root->fs_info->volume_mutex);
1373         mutex_unlock(&uuid_mutex);
1374         return ret;
1375 error_undo:
1376         if (device->writeable) {
1377                 list_add(&device->dev_alloc_list,
1378                          &root->fs_info->fs_devices->alloc_list);
1379                 root->fs_info->fs_devices->rw_devices++;
1380         }
1381         goto error_brelse;
1382 }
1383
1384 /*
1385  * does all the dirty work required for changing the file system's UUID.
1386  */
1387 static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
1388                                 struct btrfs_root *root)
1389 {
1390         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1391         struct btrfs_fs_devices *old_devices;
1392         struct btrfs_fs_devices *seed_devices;
1393         struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
1394         struct btrfs_device *device;
1395         u64 super_flags;
1396
1397         BUG_ON(!mutex_is_locked(&uuid_mutex));
1398         if (!fs_devices->seeding)
1399                 return -EINVAL;
1400
1401         seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1402         if (!seed_devices)
1403                 return -ENOMEM;
1404
1405         old_devices = clone_fs_devices(fs_devices);
1406         if (IS_ERR(old_devices)) {
1407                 kfree(seed_devices);
1408                 return PTR_ERR(old_devices);
1409         }
1410
1411         list_add(&old_devices->list, &fs_uuids);
1412
1413         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1414         seed_devices->opened = 1;
1415         INIT_LIST_HEAD(&seed_devices->devices);
1416         INIT_LIST_HEAD(&seed_devices->alloc_list);
1417         mutex_init(&seed_devices->device_list_mutex);
1418         list_splice_init(&fs_devices->devices, &seed_devices->devices);
1419         list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1420         list_for_each_entry(device, &seed_devices->devices, dev_list) {
1421                 device->fs_devices = seed_devices;
1422         }
1423
1424         fs_devices->seeding = 0;
1425         fs_devices->num_devices = 0;
1426         fs_devices->open_devices = 0;
1427         fs_devices->seed = seed_devices;
1428
1429         generate_random_uuid(fs_devices->fsid);
1430         memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1431         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1432         super_flags = btrfs_super_flags(disk_super) &
1433                       ~BTRFS_SUPER_FLAG_SEEDING;
1434         btrfs_set_super_flags(disk_super, super_flags);
1435
1436         return 0;
1437 }
1438
1439 /*
1440  * store the expected generation for seed devices in device items.
1441  */
1442 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1443                                struct btrfs_root *root)
1444 {
1445         struct btrfs_path *path;
1446         struct extent_buffer *leaf;
1447         struct btrfs_dev_item *dev_item;
1448         struct btrfs_device *device;
1449         struct btrfs_key key;
1450         u8 fs_uuid[BTRFS_UUID_SIZE];
1451         u8 dev_uuid[BTRFS_UUID_SIZE];
1452         u64 devid;
1453         int ret;
1454
1455         path = btrfs_alloc_path();
1456         if (!path)
1457                 return -ENOMEM;
1458
1459         root = root->fs_info->chunk_root;
1460         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1461         key.offset = 0;
1462         key.type = BTRFS_DEV_ITEM_KEY;
1463
1464         while (1) {
1465                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1466                 if (ret < 0)
1467                         goto error;
1468
1469                 leaf = path->nodes[0];
1470 next_slot:
1471                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1472                         ret = btrfs_next_leaf(root, path);
1473                         if (ret > 0)
1474                                 break;
1475                         if (ret < 0)
1476                                 goto error;
1477                         leaf = path->nodes[0];
1478                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1479                         btrfs_release_path(root, path);
1480                         continue;
1481                 }
1482
1483                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1484                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1485                     key.type != BTRFS_DEV_ITEM_KEY)
1486                         break;
1487
1488                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1489                                           struct btrfs_dev_item);
1490                 devid = btrfs_device_id(leaf, dev_item);
1491                 read_extent_buffer(leaf, dev_uuid,
1492                                    (unsigned long)btrfs_device_uuid(dev_item),
1493                                    BTRFS_UUID_SIZE);
1494                 read_extent_buffer(leaf, fs_uuid,
1495                                    (unsigned long)btrfs_device_fsid(dev_item),
1496                                    BTRFS_UUID_SIZE);
1497                 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1498                 BUG_ON(!device);
1499
1500                 if (device->fs_devices->seeding) {
1501                         btrfs_set_device_generation(leaf, dev_item,
1502                                                     device->generation);
1503                         btrfs_mark_buffer_dirty(leaf);
1504                 }
1505
1506                 path->slots[0]++;
1507                 goto next_slot;
1508         }
1509         ret = 0;
1510 error:
1511         btrfs_free_path(path);
1512         return ret;
1513 }
1514
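/*
 * Add a new device to a mounted filesystem: open the block device,
 * build the in-memory btrfs_device, link it into the fs_devices lists
 * and bump the totals in the superblock copy.  If the filesystem was a
 * seed, sprout a writable filesystem on top of it first and relocate
 * the system chunks afterwards.
 */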
1515 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1516 {
1517         struct btrfs_trans_handle *trans;
1518         struct btrfs_device *device;
1519         struct block_device *bdev;
1520         struct list_head *devices;
1521         struct super_block *sb = root->fs_info->sb;
1522         u64 total_bytes;
1523         int seeding_dev = 0;
1524         int ret = 0;
1525
1526         if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1527                 return -EINVAL;
1528
1529         bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
1530                                   root->fs_info->bdev_holder);
1531         if (IS_ERR(bdev))
1532                 return PTR_ERR(bdev);
1533
1534         if (root->fs_info->fs_devices->seeding) {
1535                 seeding_dev = 1;
1536                 down_write(&sb->s_umount);
1537                 mutex_lock(&uuid_mutex);
1538         }
1539
1540         filemap_write_and_wait(bdev->bd_inode->i_mapping);
1541         mutex_lock(&root->fs_info->volume_mutex);
1542
1543         devices = &root->fs_info->fs_devices->devices;
1544         /*
1545          * we have the volume lock, so we don't need the extra
1546          * device list mutex while reading the list here.
1547          */
1548         list_for_each_entry(device, devices, dev_list) {
1549                 if (device->bdev == bdev) {
1550                         ret = -EEXIST;
1551                         goto error;
1552                 }
1553         }
1554
1555         device = kzalloc(sizeof(*device), GFP_NOFS);
1556         if (!device) {
1557                 /* we can safely leave the fs_devices entry around */
1558                 ret = -ENOMEM;
1559                 goto error;
1560         }
1561
1562         device->name = kstrdup(device_path, GFP_NOFS);
1563         if (!device->name) {
1564                 kfree(device);
1565                 ret = -ENOMEM;
1566                 goto error;
1567         }
1568
1569         ret = find_next_devid(root, &device->devid);
1570         if (ret) {
1571                 kfree(device->name);
1572                 kfree(device);
1573                 goto error;
1574         }
1575
1576         trans = btrfs_start_transaction(root, 0);
1577         if (IS_ERR(trans)) {
1578                 kfree(device->name);
1579                 kfree(device);
1580                 ret = PTR_ERR(trans);
1581                 goto error;
1582         }
1583
1584         lock_chunks(root);
1585
1586         device->writeable = 1;
1587         device->work.func = pending_bios_fn;
1588         generate_random_uuid(device->uuid);
1589         spin_lock_init(&device->io_lock);
1590         device->generation = trans->transid;
1591         device->io_width = root->sectorsize;
1592         device->io_align = root->sectorsize;
1593         device->sector_size = root->sectorsize;
1594         device->total_bytes = i_size_read(bdev->bd_inode);
1595         device->disk_total_bytes = device->total_bytes;
1596         device->dev_root = root->fs_info->dev_root;
1597         device->bdev = bdev;
1598         device->in_fs_metadata = 1;
1599         device->mode = FMODE_EXCL;
1600         set_blocksize(device->bdev, 4096);
1601
1602         if (seeding_dev) {
1603                 sb->s_flags &= ~MS_RDONLY;
1604                 ret = btrfs_prepare_sprout(trans, root);
1605                 BUG_ON(ret);
1606         }
1607
1608         device->fs_devices = root->fs_info->fs_devices;
1609
1610         /*
1611          * we don't want write_supers to jump in here while our device is
1612          * only half set up
1613          */
1614         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1615         list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
1616         list_add(&device->dev_alloc_list,
1617                  &root->fs_info->fs_devices->alloc_list);
1618         root->fs_info->fs_devices->num_devices++;
1619         root->fs_info->fs_devices->open_devices++;
1620         root->fs_info->fs_devices->rw_devices++;
1621         root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1622
1623         if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1624                 root->fs_info->fs_devices->rotating = 1;
1625
1626         total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
1627         btrfs_set_super_total_bytes(&root->fs_info->super_copy,
1628                                     total_bytes + device->total_bytes);
1629
1630         total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
1631         btrfs_set_super_num_devices(&root->fs_info->super_copy,
1632                                     total_bytes + 1);
1633         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1634
1635         if (seeding_dev) {
1636                 ret = init_first_rw_device(trans, root, device);
1637                 BUG_ON(ret);
1638                 ret = btrfs_finish_sprout(trans, root);
1639                 BUG_ON(ret);
1640         } else {
1641                 ret = btrfs_add_device(trans, root, device);
1642         }
1643
1644         /*
1645          * we've got more storage, clear any full flags on the space
1646          * infos
1647          */
1648         btrfs_clear_space_info_full(root->fs_info);
1649
1650         unlock_chunks(root);
1651         btrfs_commit_transaction(trans, root);
1652
1653         if (seeding_dev) {
1654                 mutex_unlock(&uuid_mutex);
1655                 up_write(&sb->s_umount);
1656
1657                 ret = btrfs_relocate_sys_chunks(root);
1658                 BUG_ON(ret);
1659         }
1660 out:
1661         mutex_unlock(&root->fs_info->volume_mutex);
1662         return ret;
1663 error:
1664         blkdev_put(bdev, FMODE_EXCL);
1665         if (seeding_dev) {
1666                 mutex_unlock(&uuid_mutex);
1667                 up_write(&sb->s_umount);
1668         }
1669         goto out;
1670 }
1671
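/*
 * Write the current in-memory state of @device back into its dev item
 * in the chunk tree.
 */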
1672 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1673                                         struct btrfs_device *device)
1674 {
1675         int ret;
1676         struct btrfs_path *path;
1677         struct btrfs_root *root;
1678         struct btrfs_dev_item *dev_item;
1679         struct extent_buffer *leaf;
1680         struct btrfs_key key;
1681
1682         root = device->dev_root->fs_info->chunk_root;
1683
1684         path = btrfs_alloc_path();
1685         if (!path)
1686                 return -ENOMEM;
1687
1688         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1689         key.type = BTRFS_DEV_ITEM_KEY;
1690         key.offset = device->devid;
1691
1692         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1693         if (ret < 0)
1694                 goto out;
1695
1696         if (ret > 0) {
1697                 ret = -ENOENT;
1698                 goto out;
1699         }
1700
1701         leaf = path->nodes[0];
1702         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1703
1704         btrfs_set_device_id(leaf, dev_item, device->devid);
1705         btrfs_set_device_type(leaf, dev_item, device->type);
1706         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1707         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1708         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1709         btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1710         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1711         btrfs_mark_buffer_dirty(leaf);
1712
1713 out:
1714         btrfs_free_path(path);
1715         return ret;
1716 }
1717
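/*
 * Grow @device to @new_size: add the difference to the superblock and
 * fs_devices totals and write out the updated dev item.  The caller
 * must hold the chunk mutex.
 */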
1718 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1719                       struct btrfs_device *device, u64 new_size)
1720 {
1721         struct btrfs_super_block *super_copy =
1722                 &device->dev_root->fs_info->super_copy;
1723         u64 old_total = btrfs_super_total_bytes(super_copy);
1724         u64 diff = new_size - device->total_bytes;
1725
1726         if (!device->writeable)
1727                 return -EACCES;
1728         if (new_size <= device->total_bytes)
1729                 return -EINVAL;
1730
1731         btrfs_set_super_total_bytes(super_copy, old_total + diff);
1732         device->fs_devices->total_rw_bytes += diff;
1733
1734         device->total_bytes = new_size;
1735         device->disk_total_bytes = new_size;
1736         btrfs_clear_space_info_full(device->dev_root->fs_info);
1737
1738         return btrfs_update_device(trans, device);
1739 }
1740
1741 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1742                       struct btrfs_device *device, u64 new_size)
1743 {
1744         int ret;
1745         lock_chunks(device->dev_root);
1746         ret = __btrfs_grow_device(trans, device, new_size);
1747         unlock_chunks(device->dev_root);
1748         return ret;
1749 }
1750
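/* Delete the chunk item for @chunk_offset from the chunk tree. */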
1751 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1752                             struct btrfs_root *root,
1753                             u64 chunk_tree, u64 chunk_objectid,
1754                             u64 chunk_offset)
1755 {
1756         int ret;
1757         struct btrfs_path *path;
1758         struct btrfs_key key;
1759
1760         root = root->fs_info->chunk_root;
1761         path = btrfs_alloc_path();
1762         if (!path)
1763                 return -ENOMEM;
1764
1765         key.objectid = chunk_objectid;
1766         key.offset = chunk_offset;
1767         key.type = BTRFS_CHUNK_ITEM_KEY;
1768
1769         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1770         BUG_ON(ret);
1771
1772         ret = btrfs_del_item(trans, root, path);
1773         BUG_ON(ret);
1774
1775         btrfs_free_path(path);
1776         return 0;
1777 }
1778
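/*
 * Remove the matching entry from the sys_chunk_array in the superblock
 * copy, shifting the remaining entries down and shrinking the array.
 */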
1779 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1780                         chunk_offset)
1781 {
1782         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1783         struct btrfs_disk_key *disk_key;
1784         struct btrfs_chunk *chunk;
1785         u8 *ptr;
1786         int ret = 0;
1787         u32 num_stripes;
1788         u32 array_size;
1789         u32 len = 0;
1790         u32 cur;
1791         struct btrfs_key key;
1792
1793         array_size = btrfs_super_sys_array_size(super_copy);
1794
1795         ptr = super_copy->sys_chunk_array;
1796         cur = 0;
1797
1798         while (cur < array_size) {
1799                 disk_key = (struct btrfs_disk_key *)ptr;
1800                 btrfs_disk_key_to_cpu(&key, disk_key);
1801
1802                 len = sizeof(*disk_key);
1803
1804                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1805                         chunk = (struct btrfs_chunk *)(ptr + len);
1806                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1807                         len += btrfs_chunk_item_size(num_stripes);
1808                 } else {
1809                         ret = -EIO;
1810                         break;
1811                 }
1812                 if (key.objectid == chunk_objectid &&
1813                     key.offset == chunk_offset) {
1814                         memmove(ptr, ptr + len, array_size - (cur + len));
1815                         array_size -= len;
1816                         btrfs_set_super_sys_array_size(super_copy, array_size);
1817                 } else {
1818                         ptr += len;
1819                         cur += len;
1820                 }
1821         }
1822         return ret;
1823 }
1824
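/*
 * Relocate everything stored in the chunk at @chunk_offset and then
 * tear the chunk down: free its device extents, delete the chunk item
 * (plus the sys_chunk_array entry for system chunks), remove the block
 * group and drop the extent mapping.
 */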
1825 static int btrfs_relocate_chunk(struct btrfs_root *root,
1826                          u64 chunk_tree, u64 chunk_objectid,
1827                          u64 chunk_offset)
1828 {
1829         struct extent_map_tree *em_tree;
1830         struct btrfs_root *extent_root;
1831         struct btrfs_trans_handle *trans;
1832         struct extent_map *em;
1833         struct map_lookup *map;
1834         int ret;
1835         int i;
1836
1837         root = root->fs_info->chunk_root;
1838         extent_root = root->fs_info->extent_root;
1839         em_tree = &root->fs_info->mapping_tree.map_tree;
1840
1841         ret = btrfs_can_relocate(extent_root, chunk_offset);
1842         if (ret)
1843                 return -ENOSPC;
1844
1845         /* step one, relocate all the extents inside this chunk */
1846         ret = btrfs_relocate_block_group(extent_root, chunk_offset);
1847         if (ret)
1848                 return ret;
1849
1850         trans = btrfs_start_transaction(root, 0);
1851         BUG_ON(IS_ERR(trans));
1852
1853         lock_chunks(root);
1854
1855         /*
1856          * step two, delete the device extents and the
1857          * chunk tree entries
1858          */
1859         read_lock(&em_tree->lock);
1860         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1861         read_unlock(&em_tree->lock);
1862
1863         BUG_ON(em->start > chunk_offset ||
1864                em->start + em->len < chunk_offset);
1865         map = (struct map_lookup *)em->bdev;
1866
1867         for (i = 0; i < map->num_stripes; i++) {
1868                 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
1869                                             map->stripes[i].physical);
1870                 BUG_ON(ret);
1871
1872                 if (map->stripes[i].dev) {
1873                         ret = btrfs_update_device(trans, map->stripes[i].dev);
1874                         BUG_ON(ret);
1875                 }
1876         }
1877         ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
1878                                chunk_offset);
1879
1880         BUG_ON(ret);
1881
1882         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1883                 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1884                 BUG_ON(ret);
1885         }
1886
1887         ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
1888         BUG_ON(ret);
1889
1890         write_lock(&em_tree->lock);
1891         remove_extent_mapping(em_tree, em);
1892         write_unlock(&em_tree->lock);
1893
1894         kfree(map);
1895         em->bdev = NULL;
1896
1897         /* once for the tree */
1898         free_extent_map(em);
1899         /* once for us */
1900         free_extent_map(em);
1901
1902         unlock_chunks(root);
1903         btrfs_end_transaction(trans, root);
1904         return 0;
1905 }
1906
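/*
 * Walk the chunk tree from the highest offset downwards and relocate
 * every SYSTEM chunk.  Chunks that fail with -ENOSPC are retried once;
 * if any still fail after the retry we return -ENOSPC.
 */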
1907 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
1908 {
1909         struct btrfs_root *chunk_root = root->fs_info->chunk_root;
1910         struct btrfs_path *path;
1911         struct extent_buffer *leaf;
1912         struct btrfs_chunk *chunk;
1913         struct btrfs_key key;
1914         struct btrfs_key found_key;
1915         u64 chunk_tree = chunk_root->root_key.objectid;
1916         u64 chunk_type;
1917         bool retried = false;
1918         int failed = 0;
1919         int ret;
1920
1921         path = btrfs_alloc_path();
1922         if (!path)
1923                 return -ENOMEM;
1924
1925 again:
1926         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1927         key.offset = (u64)-1;
1928         key.type = BTRFS_CHUNK_ITEM_KEY;
1929
1930         while (1) {
1931                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1932                 if (ret < 0)
1933                         goto error;
1934                 BUG_ON(ret == 0);
1935
1936                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
1937                                           key.type);
1938                 if (ret < 0)
1939                         goto error;
1940                 if (ret > 0)
1941                         break;
1942
1943                 leaf = path->nodes[0];
1944                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1945
1946                 chunk = btrfs_item_ptr(leaf, path->slots[0],
1947                                        struct btrfs_chunk);
1948                 chunk_type = btrfs_chunk_type(leaf, chunk);
1949                 btrfs_release_path(chunk_root, path);
1950
1951                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
1952                         ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
1953                                                    found_key.objectid,
1954                                                    found_key.offset);
1955                         if (ret == -ENOSPC)
1956                                 failed++;
1957                         else if (ret)
1958                                 BUG();
1959                 }
1960
1961                 if (found_key.offset == 0)
1962                         break;
1963                 key.offset = found_key.offset - 1;
1964         }
1965         ret = 0;
1966         if (failed && !retried) {
1967                 failed = 0;
1968                 retried = true;
1969                 goto again;
1970         } else if (failed && retried) {
1971                 WARN_ON(1);
1972                 ret = -ENOSPC;
1973         }
1974 error:
1975         btrfs_free_path(path);
1976         return ret;
1977 }
1978
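/* Scale @num by @factor tenths, i.e. return num * factor / 10. */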
1979 static u64 div_factor(u64 num, int factor)
1980 {
1981         if (factor == 10)
1982                 return num;
1983         num *= factor;
1984         do_div(num, 10);
1985         return num;
1986 }
1987
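/*
 * Rebalance the filesystem: first shrink and immediately re-grow each
 * writable device that is nearly full to free up a little space, then
 * relocate every chunk from the highest offset downwards so it gets
 * re-allocated across the current set of devices.
 */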
1988 int btrfs_balance(struct btrfs_root *dev_root)
1989 {
1990         int ret;
1991         struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
1992         struct btrfs_device *device;
1993         u64 old_size;
1994         u64 size_to_free;
1995         struct btrfs_path *path;
1996         struct btrfs_key key;
1997         struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
1998         struct btrfs_trans_handle *trans;
1999         struct btrfs_key found_key;
2000
2001         if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
2002                 return -EROFS;
2003
2004         if (!capable(CAP_SYS_ADMIN))
2005                 return -EPERM;
2006
2007         mutex_lock(&dev_root->fs_info->volume_mutex);
2008         dev_root = dev_root->fs_info->dev_root;
2009
2010         /* step one make some room on all the devices */
2011         list_for_each_entry(device, devices, dev_list) {
2012                 old_size = device->total_bytes;
2013                 size_to_free = div_factor(old_size, 1);
2014                 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2015                 if (!device->writeable ||
2016                     device->total_bytes - device->bytes_used > size_to_free)
2017                         continue;
2018
2019                 ret = btrfs_shrink_device(device, old_size - size_to_free);
2020                 if (ret == -ENOSPC)
2021                         break;
2022                 BUG_ON(ret);
2023
2024                 trans = btrfs_start_transaction(dev_root, 0);
2025                 BUG_ON(IS_ERR(trans));
2026
2027                 ret = btrfs_grow_device(trans, device, old_size);
2028                 BUG_ON(ret);
2029
2030                 btrfs_end_transaction(trans, dev_root);
2031         }
2032
2033         /* step two, relocate all the chunks */
2034         path = btrfs_alloc_path();
2035         BUG_ON(!path);
2036
2037         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2038         key.offset = (u64)-1;
2039         key.type = BTRFS_CHUNK_ITEM_KEY;
2040
2041         while (1) {
2042                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2043                 if (ret < 0)
2044                         goto error;
2045
2046                 /*
2047                  * this shouldn't happen, it means the last relocate
2048                  * failed
2049                  */
2050                 if (ret == 0)
2051                         break;
2052
2053                 ret = btrfs_previous_item(chunk_root, path, 0,
2054                                           BTRFS_CHUNK_ITEM_KEY);
2055                 if (ret)
2056                         break;
2057
2058                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2059                                       path->slots[0]);
2060                 if (found_key.objectid != key.objectid)
2061                         break;
2062
2063                 /* chunk zero is special */
2064                 if (found_key.offset == 0)
2065                         break;
2066
2067                 btrfs_release_path(chunk_root, path);
2068                 ret = btrfs_relocate_chunk(chunk_root,
2069                                            chunk_root->root_key.objectid,
2070                                            found_key.objectid,
2071                                            found_key.offset);
2072                 BUG_ON(ret && ret != -ENOSPC);
2073                 key.offset = found_key.offset - 1;
2074         }
2075         ret = 0;
2076 error:
2077         btrfs_free_path(path);
2078         mutex_unlock(&dev_root->fs_info->volume_mutex);
2079         return ret;
2080 }
2081
2082 /*
2083  * shrinking a device means finding all of the device extents past
2084  * the new size, and then following the back refs to the chunks.
2085  * The chunk relocation code actually frees the device extents.
2086  */
2087 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2088 {
2089         struct btrfs_trans_handle *trans;
2090         struct btrfs_root *root = device->dev_root;
2091         struct btrfs_dev_extent *dev_extent = NULL;
2092         struct btrfs_path *path;
2093         u64 length;
2094         u64 chunk_tree;
2095         u64 chunk_objectid;
2096         u64 chunk_offset;
2097         int ret;
2098         int slot;
2099         int failed = 0;
2100         bool retried = false;
2101         struct extent_buffer *l;
2102         struct btrfs_key key;
2103         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2104         u64 old_total = btrfs_super_total_bytes(super_copy);
2105         u64 old_size = device->total_bytes;
2106         u64 diff = device->total_bytes - new_size;
2107
2108         if (new_size >= device->total_bytes)
2109                 return -EINVAL;
2110
2111         path = btrfs_alloc_path();
2112         if (!path)
2113                 return -ENOMEM;
2114
2115         path->reada = 2;
2116
2117         lock_chunks(root);
2118
2119         device->total_bytes = new_size;
2120         if (device->writeable)
2121                 device->fs_devices->total_rw_bytes -= diff;
2122         unlock_chunks(root);
2123
2124 again:
2125         key.objectid = device->devid;
2126         key.offset = (u64)-1;
2127         key.type = BTRFS_DEV_EXTENT_KEY;
2128
2129         while (1) {
2130                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2131                 if (ret < 0)
2132                         goto done;
2133
2134                 ret = btrfs_previous_item(root, path, 0, key.type);
2135                 if (ret < 0)
2136                         goto done;
2137                 if (ret) {
2138                         ret = 0;
2139                         btrfs_release_path(root, path);
2140                         break;
2141                 }
2142
2143                 l = path->nodes[0];
2144                 slot = path->slots[0];
2145                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
2146
2147                 if (key.objectid != device->devid) {
2148                         btrfs_release_path(root, path);
2149                         break;
2150                 }
2151
2152                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2153                 length = btrfs_dev_extent_length(l, dev_extent);
2154
2155                 if (key.offset + length <= new_size) {
2156                         btrfs_release_path(root, path);
2157                         break;
2158                 }
2159
2160                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2161                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2162                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2163                 btrfs_release_path(root, path);
2164
2165                 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
2166                                            chunk_offset);
2167                 if (ret && ret != -ENOSPC)
2168                         goto done;
2169                 if (ret == -ENOSPC)
2170                         failed++;
2171                 key.offset -= 1;
2172         }
2173
2174         if (failed && !retried) {
2175                 failed = 0;
2176                 retried = true;
2177                 goto again;
2178         } else if (failed && retried) {
2179                 ret = -ENOSPC;
2180                 lock_chunks(root);
2181
2182                 device->total_bytes = old_size;
2183                 if (device->writeable)
2184                         device->fs_devices->total_rw_bytes += diff;
2185                 unlock_chunks(root);
2186                 goto done;
2187         }
2188
2189         /* Shrinking succeeded, else we would be at "done". */
2190         trans = btrfs_start_transaction(root, 0);
2191         if (IS_ERR(trans)) {
2192                 ret = PTR_ERR(trans);
2193                 goto done;
2194         }
2195
2196         lock_chunks(root);
2197
2198         device->disk_total_bytes = new_size;
2199         /* Now btrfs_update_device() will change the on-disk size. */
2200         ret = btrfs_update_device(trans, device);
2201         if (ret) {
2202                 unlock_chunks(root);
2203                 btrfs_end_transaction(trans, root);
2204                 goto done;
2205         }
2206         WARN_ON(diff > old_total);
2207         btrfs_set_super_total_bytes(super_copy, old_total - diff);
2208         unlock_chunks(root);
2209         btrfs_end_transaction(trans, root);
2210 done:
2211         btrfs_free_path(path);
2212         return ret;
2213 }
2214
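/*
 * Append the chunk item, preceded by its disk key, to the
 * sys_chunk_array in the superblock copy.
 */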
2215 static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
2216                            struct btrfs_root *root,
2217                            struct btrfs_key *key,
2218                            struct btrfs_chunk *chunk, int item_size)
2219 {
2220         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2221         struct btrfs_disk_key disk_key;
2222         u32 array_size;
2223         u8 *ptr;
2224
2225         array_size = btrfs_super_sys_array_size(super_copy);
2226         if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
2227                 return -EFBIG;
2228
2229         ptr = super_copy->sys_chunk_array + array_size;
2230         btrfs_cpu_key_to_disk(&disk_key, key);
2231         memcpy(ptr, &disk_key, sizeof(disk_key));
2232         ptr += sizeof(disk_key);
2233         memcpy(ptr, chunk, item_size);
2234         item_size += sizeof(disk_key);
2235         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
2236         return 0;
2237 }
2238
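/*
 * Logical chunk size for a given profile and per-device stripe size:
 * mirrored profiles (RAID1/DUP) expose a single stripe's worth of
 * space, RAID10 exposes num_stripes / sub_stripes of them, and plain
 * striping exposes one per stripe.
 */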
2239 static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
2240                                         int num_stripes, int sub_stripes)
2241 {
2242         if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
2243                 return calc_size;
2244         else if (type & BTRFS_BLOCK_GROUP_RAID10)
2245                 return calc_size * (num_stripes / sub_stripes);
2246         else
2247                 return calc_size * num_stripes;
2248 }
2249
2250 /* Used to sort the devices by max_avail (descending order) */
2251 int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2)
2252 {
2253         if (((struct btrfs_device_info *)dev_info1)->max_avail >
2254             ((struct btrfs_device_info *)dev_info2)->max_avail)
2255                 return -1;
2256         else if (((struct btrfs_device_info *)dev_info1)->max_avail <
2257                  ((struct btrfs_device_info *)dev_info2)->max_avail)
2258                 return 1;
2259         else
2260                 return 0;
2261 }
2262
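/*
 * Work out how many stripes a chunk of the given @type should use, the
 * minimum number of stripes it may be shrunk to, and the RAID10
 * sub-stripe count, based on the number of writable devices.
 */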
2263 static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type,
2264                                  int *num_stripes, int *min_stripes,
2265                                  int *sub_stripes)
2266 {
2267         *num_stripes = 1;
2268         *min_stripes = 1;
2269         *sub_stripes = 0;
2270
2271         if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
2272                 *num_stripes = fs_devices->rw_devices;
2273                 *min_stripes = 2;
2274         }
2275         if (type & (BTRFS_BLOCK_GROUP_DUP)) {
2276                 *num_stripes = 2;
2277                 *min_stripes = 2;
2278         }
2279         if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
2280                 if (fs_devices->rw_devices < 2)
2281                         return -ENOSPC;
2282                 *num_stripes = 2;
2283                 *min_stripes = 2;
2284         }
2285         if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2286                 *num_stripes = fs_devices->rw_devices;
2287                 if (*num_stripes < 4)
2288                         return -ENOSPC;
2289                 *num_stripes &= ~(u32)1;
2290                 *sub_stripes = 2;
2291                 *min_stripes = 4;
2292         }
2293
2294         return 0;
2295 }
2296
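/*
 * Clamp the proposed per-device stripe size: apply the per-type
 * minimum and maximum chunk sizes, cap the chunk at 10% of the
 * writable space and round down to a multiple of BTRFS_STRIPE_LEN.
 */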
2297 static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices,
2298                                     u64 proposed_size, u64 type,
2299                                     int num_stripes, int small_stripe)
2300 {
2301         int min_stripe_size = 1 * 1024 * 1024;
2302         u64 calc_size = proposed_size;
2303         u64 max_chunk_size = calc_size;
2304         int ncopies = 1;
2305
2306         if (type & (BTRFS_BLOCK_GROUP_RAID1 |
2307                     BTRFS_BLOCK_GROUP_DUP |
2308                     BTRFS_BLOCK_GROUP_RAID10))
2309                 ncopies = 2;
2310
2311         if (type & BTRFS_BLOCK_GROUP_DATA) {
2312                 max_chunk_size = 10 * calc_size;
2313                 min_stripe_size = 64 * 1024 * 1024;
2314         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
2315                 max_chunk_size = 256 * 1024 * 1024;
2316                 min_stripe_size = 32 * 1024 * 1024;
2317         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
2318                 calc_size = 8 * 1024 * 1024;
2319                 max_chunk_size = calc_size * 2;
2320                 min_stripe_size = 1 * 1024 * 1024;
2321         }
2322
2323         /* we don't want a chunk larger than 10% of writeable space */
2324         max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
2325                              max_chunk_size);
2326
2327         if (calc_size * num_stripes > max_chunk_size * ncopies) {
2328                 calc_size = max_chunk_size * ncopies;
2329                 do_div(calc_size, num_stripes);
2330                 do_div(calc_size, BTRFS_STRIPE_LEN);
2331                 calc_size *= BTRFS_STRIPE_LEN;
2332         }
2333
2334         /* we don't want tiny stripes */
2335         if (!small_stripe)
2336                 calc_size = max_t(u64, min_stripe_size, calc_size);
2337
2338         /*
2339          * we're about to do_div by BTRFS_STRIPE_LEN, so let's make sure
2340          * we end up with something bigger than a stripe
2341          */
2342         calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN);
2343
2344         do_div(calc_size, BTRFS_STRIPE_LEN);
2345         calc_size *= BTRFS_STRIPE_LEN;
2346
2347         return calc_size;
2348 }
2349
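/*
 * Shrink @map down to @num_stripes stripes, reallocating it to the
 * smaller size when memory allows.
 */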
2350 static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map,
2351                                                       int num_stripes)
2352 {
2353         struct map_lookup *new;
2354         size_t len = map_lookup_size(num_stripes);
2355
2356         BUG_ON(map->num_stripes < num_stripes);
2357
2358         if (map->num_stripes == num_stripes)
2359                 return map;
2360
2361         new = kmalloc(len, GFP_NOFS);
2362         if (!new) {
2363                 /* just change map->num_stripes */
2364                 map->num_stripes = num_stripes;
2365                 return map;
2366         }
2367
2368         memcpy(new, map, len);
2369         new->num_stripes = num_stripes;
2370         kfree(map);
2371         return new;
2372 }
2373
2374 /*
2375  * helper to allocate device space from btrfs_device_info, in which we store
2376  * the max free space of every device. It is used when we cannot allocate
2377  * chunks of the default size.
2378  *
2379  * With this helper, we can allocate a new chunk as large as possible.
2380  */
2381 static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans,
2382                                     struct btrfs_fs_devices *fs_devices,
2383                                     struct btrfs_device_info *devices,
2384                                     int nr_device, u64 type,
2385                                     struct map_lookup **map_lookup,
2386                                     int min_stripes, u64 *stripe_size)
2387 {
2388         int i, index, sort_again = 0;
2389         int min_devices = min_stripes;
2390         u64 max_avail, min_free;
2391         struct map_lookup *map = *map_lookup;
2392         int ret;
2393
2394         if (nr_device < min_stripes)
2395                 return -ENOSPC;
2396
2397         btrfs_descending_sort_devices(devices, nr_device);
2398
2399         max_avail = devices[0].max_avail;
2400         if (!max_avail)
2401                 return -ENOSPC;
2402
2403         for (i = 0; i < nr_device; i++) {
2404                 /*
2405                  * if dev_offset == 0, the free space of this device is less
2406                  * than what we need, and we haven't searched for the max avail
2407                  * extent on this device yet, so do it now.
2408                  */
2409                 if (!devices[i].dev_offset) {
2410                         ret = find_free_dev_extent(trans, devices[i].dev,
2411                                                    max_avail,
2412                                                    &devices[i].dev_offset,
2413                                                    &devices[i].max_avail);
2414                         if (ret != 0 && ret != -ENOSPC)
2415                                 return ret;
2416                         sort_again = 1;
2417                 }
2418         }
2419
2420         /* we updated the max avail free extent of each device, so sort again */
2421         if (sort_again)
2422                 btrfs_descending_sort_devices(devices, nr_device);
2423
2424         if (type & BTRFS_BLOCK_GROUP_DUP)
2425                 min_devices = 1;
2426
2427         if (!devices[min_devices - 1].max_avail)
2428                 return -ENOSPC;
2429
2430         max_avail = devices[min_devices - 1].max_avail;
2431         if (type & BTRFS_BLOCK_GROUP_DUP)
2432                 do_div(max_avail, 2);
2433
2434         max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type,
2435                                              min_stripes, 1);
2436         if (type & BTRFS_BLOCK_GROUP_DUP)
2437                 min_free = max_avail * 2;
2438         else
2439                 min_free = max_avail;
2440
2441         if (min_free > devices[min_devices - 1].max_avail)
2442                 return -ENOSPC;
2443
2444         map = __shrink_map_lookup_stripes(map, min_stripes);
2445         *stripe_size = max_avail;
2446
2447         index = 0;
2448         for (i = 0; i < min_stripes; i++) {
2449                 map->stripes[i].dev = devices[index].dev;
2450                 map->stripes[i].physical = devices[index].dev_offset;
2451                 if (type & BTRFS_BLOCK_GROUP_DUP) {
2452                         i++;
2453                         map->stripes[i].dev = devices[index].dev;
2454                         map->stripes[i].physical = devices[index].dev_offset +
2455                                                    max_avail;
2456                 }
2457                 index++;
2458         }
2459         *map_lookup = map;
2460
2461         return 0;
2462 }
2463
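/*
 * First half of chunk allocation: pick devices and per-device offsets
 * for the new chunk, build the map_lookup, insert the extent mapping
 * and the block group, and allocate the device extents.  Nothing in
 * here modifies the chunk tree itself.
 */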
2464 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2465                                struct btrfs_root *extent_root,
2466                                struct map_lookup **map_ret,
2467                                u64 *num_bytes, u64 *stripe_size,
2468                                u64 start, u64 type)
2469 {
2470         struct btrfs_fs_info *info = extent_root->fs_info;
2471         struct btrfs_device *device = NULL;
2472         struct btrfs_fs_devices *fs_devices = info->fs_devices;
2473         struct list_head *cur;
2474         struct map_lookup *map;
2475         struct extent_map_tree *em_tree;
2476         struct extent_map *em;
2477         struct btrfs_device_info *devices_info;
2478         struct list_head private_devs;
2479         u64 calc_size = 1024 * 1024 * 1024;
2480         u64 min_free;
2481         u64 avail;
2482         u64 dev_offset;
2483         int num_stripes;
2484         int min_stripes;
2485         int sub_stripes;
2486         int min_devices;        /* the min number of devices we need */
2487         int i;
2488         int ret;
2489         int index;
2490
2491         if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
2492             (type & BTRFS_BLOCK_GROUP_DUP)) {
2493                 WARN_ON(1);
2494                 type &= ~BTRFS_BLOCK_GROUP_DUP;
2495         }
2496         if (list_empty(&fs_devices->alloc_list))
2497                 return -ENOSPC;
2498
2499         ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes,
2500                                     &min_stripes, &sub_stripes);
2501         if (ret)
2502                 return ret;
2503
2504         devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
2505                                GFP_NOFS);
2506         if (!devices_info)
2507                 return -ENOMEM;
2508
2509         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2510         if (!map) {
2511                 ret = -ENOMEM;
2512                 goto error;
2513         }
2514         map->num_stripes = num_stripes;
2515
2516         cur = fs_devices->alloc_list.next;
2517         index = 0;
2518         i = 0;
2519
2520         calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type,
2521                                              num_stripes, 0);
2522
2523         if (type & BTRFS_BLOCK_GROUP_DUP) {
2524                 min_free = calc_size * 2;
2525                 min_devices = 1;
2526         } else {
2527                 min_free = calc_size;
2528                 min_devices = min_stripes;
2529         }
2530
2531         INIT_LIST_HEAD(&private_devs);
2532         while (index < num_stripes) {
2533                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2534                 BUG_ON(!device->writeable);
2535                 if (device->total_bytes > device->bytes_used)
2536                         avail = device->total_bytes - device->bytes_used;
2537                 else
2538                         avail = 0;
2539                 cur = cur->next;
2540
2541                 if (device->in_fs_metadata && avail >= min_free) {
2542                         ret = find_free_dev_extent(trans, device, min_free,
2543                                                    &devices_info[i].dev_offset,
2544                                                    &devices_info[i].max_avail);
2545                         if (ret == 0) {
2546                                 list_move_tail(&device->dev_alloc_list,
2547                                                &private_devs);
2548                                 map->stripes[index].dev = device;
2549                                 map->stripes[index].physical =
2550                                                 devices_info[i].dev_offset;
2551                                 index++;
2552                                 if (type & BTRFS_BLOCK_GROUP_DUP) {
2553                                         map->stripes[index].dev = device;
2554                                         map->stripes[index].physical =
2555                                                 devices_info[i].dev_offset +
2556                                                 calc_size;
2557                                         index++;
2558                                 }
2559                         } else if (ret != -ENOSPC)
2560                                 goto error;
2561
2562                         devices_info[i].dev = device;
2563                         i++;
2564                 } else if (device->in_fs_metadata &&
2565                            avail >= BTRFS_STRIPE_LEN) {
2566                         devices_info[i].dev = device;
2567                         devices_info[i].max_avail = avail;
2568                         i++;
2569                 }
2570
2571                 if (cur == &fs_devices->alloc_list)
2572                         break;
2573         }
2574
2575         list_splice(&private_devs, &fs_devices->alloc_list);
2576         if (index < num_stripes) {
2577                 if (index >= min_stripes) {
2578                         num_stripes = index;
2579                         if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2580                                 num_stripes /= sub_stripes;
2581                                 num_stripes *= sub_stripes;
2582                         }
2583
2584                         map = __shrink_map_lookup_stripes(map, num_stripes);
2585                 } else if (i >= min_devices) {
2586                         ret = __btrfs_alloc_tiny_space(trans, fs_devices,
2587                                                        devices_info, i, type,
2588                                                        &map, min_stripes,
2589                                                        &calc_size);
2590                         if (ret)
2591                                 goto error;
2592                 } else {
2593                         ret = -ENOSPC;
2594                         goto error;
2595                 }
2596         }
2597         map->sector_size = extent_root->sectorsize;
2598         map->stripe_len = BTRFS_STRIPE_LEN;
2599         map->io_align = BTRFS_STRIPE_LEN;
2600         map->io_width = BTRFS_STRIPE_LEN;
2601         map->type = type;
2602         map->sub_stripes = sub_stripes;
2603
2604         *map_ret = map;
2605         *stripe_size = calc_size;
2606         *num_bytes = chunk_bytes_by_type(type, calc_size,
2607                                          map->num_stripes, sub_stripes);
2608
2609         em = alloc_extent_map(GFP_NOFS);
2610         if (!em) {
2611                 ret = -ENOMEM;
2612                 goto error;
2613         }
2614         em->bdev = (struct block_device *)map;
2615         em->start = start;
2616         em->len = *num_bytes;
2617         em->block_start = 0;
2618         em->block_len = em->len;
2619
2620         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2621         write_lock(&em_tree->lock);
2622         ret = add_extent_mapping(em_tree, em);
2623         write_unlock(&em_tree->lock);
2624         BUG_ON(ret);
2625         free_extent_map(em);
2626
2627         ret = btrfs_make_block_group(trans, extent_root, 0, type,
2628                                      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2629                                      start, *num_bytes);
2630         BUG_ON(ret);
2631
2632         index = 0;
2633         while (index < map->num_stripes) {
2634                 device = map->stripes[index].dev;
2635                 dev_offset = map->stripes[index].physical;
2636
2637                 ret = btrfs_alloc_dev_extent(trans, device,
2638                                 info->chunk_root->root_key.objectid,
2639                                 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2640                                 start, dev_offset, calc_size);
2641                 BUG_ON(ret);
2642                 index++;
2643         }
2644
2645         kfree(devices_info);
2646         return 0;
2647
2648 error:
2649         kfree(map);
2650         kfree(devices_info);
2651         return ret;
2652 }
2653
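/*
 * Second half of chunk allocation: account the new stripes in each
 * device's bytes_used, build the chunk item and insert it into the
 * chunk tree (and into the sys_chunk_array for system chunks).
 */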
2654 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2655                                 struct btrfs_root *extent_root,
2656                                 struct map_lookup *map, u64 chunk_offset,
2657                                 u64 chunk_size, u64 stripe_size)
2658 {
2659         u64 dev_offset;
2660         struct btrfs_key key;
2661         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2662         struct btrfs_device *device;
2663         struct btrfs_chunk *chunk;
2664         struct btrfs_stripe *stripe;
2665         size_t item_size = btrfs_chunk_item_size(map->num_stripes);
2666         int index = 0;
2667         int ret;
2668
2669         chunk = kzalloc(item_size, GFP_NOFS);
2670         if (!chunk)
2671                 return -ENOMEM;
2672
2673         index = 0;
2674         while (index < map->num_stripes) {
2675                 device = map->stripes[index].dev;
2676                 device->bytes_used += stripe_size;
2677                 ret = btrfs_update_device(trans, device);
2678                 BUG_ON(ret);
2679                 index++;
2680         }
2681
2682         index = 0;
2683         stripe = &chunk->stripe;
2684         while (index < map->num_stripes) {
2685                 device = map->stripes[index].dev;
2686                 dev_offset = map->stripes[index].physical;
2687
2688                 btrfs_set_stack_stripe_devid(stripe, device->devid);
2689                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
2690                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
2691                 stripe++;
2692                 index++;
2693         }
2694
2695         btrfs_set_stack_chunk_length(chunk, chunk_size);
2696         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
2697         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
2698         btrfs_set_stack_chunk_type(chunk, map->type);
2699         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
2700         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
2701         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
2702         btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
2703         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
2704
2705         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2706         key.type = BTRFS_CHUNK_ITEM_KEY;
2707         key.offset = chunk_offset;
2708
2709         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
2710         BUG_ON(ret);
2711
2712         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2713                 ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
2714                                              item_size);
2715                 BUG_ON(ret);
2716         }
2717         kfree(chunk);
2718         return 0;
2719 }
2720
2721 /*
2722  * Chunk allocation falls into two parts. The first part does the work
2723  * that makes the newly allocated chunk usable, but does not do any
2724  * operation that modifies the chunk tree. The second part does the work
2725  * that requires modifying the chunk tree. This division is important for
2726  * the bootstrap process of adding storage to a seed btrfs.
2727  */
2728 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2729                       struct btrfs_root *extent_root, u64 type)
2730 {
2731         u64 chunk_offset;
2732         u64 chunk_size;
2733         u64 stripe_size;
2734         struct map_lookup *map;
2735         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2736         int ret;
2737
2738         ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2739                               &chunk_offset);
2740         if (ret)
2741                 return ret;
2742
2743         ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2744                                   &stripe_size, chunk_offset, type);
2745         if (ret)
2746                 return ret;
2747
2748         ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2749                                    chunk_size, stripe_size);
2750         BUG_ON(ret);
2751         return 0;
2752 }
2753
2754 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
2755                                          struct btrfs_root *root,
2756                                          struct btrfs_device *device)
2757 {
2758         u64 chunk_offset;
2759         u64 sys_chunk_offset;
2760         u64 chunk_size;
2761         u64 sys_chunk_size;
2762         u64 stripe_size;
2763         u64 sys_stripe_size;
2764         u64 alloc_profile;
2765         struct map_lookup *map;
2766         struct map_lookup *sys_map;
2767         struct btrfs_fs_info *fs_info = root->fs_info;
2768         struct btrfs_root *extent_root = fs_info->extent_root;
2769         int ret;
2770
2771         ret = find_next_chunk(fs_info->chunk_root,
2772                               BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
2773         BUG_ON(ret);
2774
2775         alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
2776                         (fs_info->metadata_alloc_profile &
2777                          fs_info->avail_metadata_alloc_bits);
2778         alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2779
2780         ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2781                                   &stripe_size, chunk_offset, alloc_profile);
2782         BUG_ON(ret);
2783
2784         sys_chunk_offset = chunk_offset + chunk_size;
2785
2786         alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
2787                         (fs_info->system_alloc_profile &
2788                          fs_info->avail_system_alloc_bits);
2789         alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2790
2791         ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
2792                                   &sys_chunk_size, &sys_stripe_size,
2793                                   sys_chunk_offset, alloc_profile);
2794         BUG_ON(ret);
2795
2796         ret = btrfs_add_device(trans, fs_info->chunk_root, device);
2797         BUG_ON(ret);
2798
2799         /*
2800          * Modifying the chunk tree requires allocating new blocks from
2801          * both the system block group and the metadata block group. So we
2802          * can only do operations that modify the chunk tree after both
2803          * block groups have been created.
2804          */
2805         ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2806                                    chunk_size, stripe_size);
2807         BUG_ON(ret);
2808
2809         ret = __finish_chunk_alloc(trans, extent_root, sys_map,
2810                                    sys_chunk_offset, sys_chunk_size,
2811                                    sys_stripe_size);
2812         BUG_ON(ret);
2813         return 0;
2814 }
2815
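/*
 * A chunk is considered read-only if any of its stripes lives on a
 * device we cannot write to, unless the filesystem is mounted
 * degraded.
 */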
2816 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
2817 {
2818         struct extent_map *em;
2819         struct map_lookup *map;
2820         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2821         int readonly = 0;
2822         int i;
2823
2824         read_lock(&map_tree->map_tree.lock);
2825         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2826         read_unlock(&map_tree->map_tree.lock);
2827         if (!em)
2828                 return 1;
2829
2830         if (btrfs_test_opt(root, DEGRADED)) {
2831                 free_extent_map(em);
2832                 return 0;
2833         }
2834
2835         map = (struct map_lookup *)em->bdev;
2836         for (i = 0; i < map->num_stripes; i++) {
2837                 if (!map->stripes[i].dev->writeable) {
2838                         readonly = 1;
2839                         break;
2840                 }
2841         }
2842         free_extent_map(em);
2843         return readonly;
2844 }
2845
2846 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
2847 {
2848         extent_map_tree_init(&tree->map_tree, GFP_NOFS);
2849 }
2850
2851 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2852 {
2853         struct extent_map *em;
2854
2855         while (1) {
2856                 write_lock(&tree->map_tree.lock);
2857                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
2858                 if (em)
2859                         remove_extent_mapping(&tree->map_tree, em);
2860                 write_unlock(&tree->map_tree.lock);
2861                 if (!em)
2862                         break;
2863                 kfree(em->bdev);
2864                 /* once for us */
2865                 free_extent_map(em);
2866                 /* once for the tree */
2867                 free_extent_map(em);
2868         }
2869 }
2870
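/*
 * Number of copies of the data at @logical: num_stripes for RAID1 and
 * DUP, sub_stripes for RAID10, otherwise one.
 */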
2871 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
2872 {
2873         struct extent_map *em;
2874         struct map_lookup *map;
2875         struct extent_map_tree *em_tree = &map_tree->map_tree;
2876         int ret;
2877
2878         read_lock(&em_tree->lock);
2879         em = lookup_extent_mapping(em_tree, logical, len);
2880         read_unlock(&em_tree->lock);
2881         BUG_ON(!em);
2882
2883         BUG_ON(em->start > logical || em->start + em->len < logical);
2884         map = (struct map_lookup *)em->bdev;
2885         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
2886                 ret = map->num_stripes;
2887         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2888                 ret = map->sub_stripes;
2889         else
2890                 ret = 1;
2891         free_extent_map(em);
2892         return ret;
2893 }
2894
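/*
 * Pick a mirror to read from: prefer @optimal if its device is still
 * present, otherwise the first stripe in [first, first + num) whose
 * device is present.
 */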
2895 static int find_live_mirror(struct map_lookup *map, int first, int num,
2896                             int optimal)
2897 {
2898         int i;
2899         if (map->stripes[optimal].dev->bdev)
2900                 return optimal;
2901         for (i = first; i < first + num; i++) {
2902                 if (map->stripes[i].dev->bdev)
2903                         return i;
2904         }
2905         /* we couldn't find one that doesn't fail.  Just return something
2906          * and the io error handling code will clean up eventually
2907          */
2908         return optimal;
2909 }
2910
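/*
 * Map a logical byte range onto its physical stripes: look up the
 * chunk mapping, clamp *length so the bio fits inside one stripe for
 * striped profiles, and fill in a btrfs_multi_bio describing every
 * stripe this rw mode and mirror must touch.
 */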
2911 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2912                              u64 logical, u64 *length,
2913                              struct btrfs_multi_bio **multi_ret,
2914                              int mirror_num)
2915 {
2916         struct extent_map *em;
2917         struct map_lookup *map;
2918         struct extent_map_tree *em_tree = &map_tree->map_tree;
2919         u64 offset;
2920         u64 stripe_offset;
2921         u64 stripe_nr;
2922         int stripes_allocated = 8;
2923         int stripes_required = 1;
2924         int stripe_index;
2925         int i;
2926         int num_stripes;
2927         int max_errors = 0;
2928         struct btrfs_multi_bio *multi = NULL;
2929
2930         if (multi_ret && !(rw & REQ_WRITE))
2931                 stripes_allocated = 1;
2932 again:
2933         if (multi_ret) {
2934                 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
2935                                 GFP_NOFS);
2936                 if (!multi)
2937                         return -ENOMEM;
2938
2939                 atomic_set(&multi->error, 0);
2940         }
2941
2942         read_lock(&em_tree->lock);
2943         em = lookup_extent_mapping(em_tree, logical, *length);
2944         read_unlock(&em_tree->lock);
2945
2946         if (!em) {
2947                 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
2948                        (unsigned long long)logical,
2949                        (unsigned long long)*length);
2950                 BUG();
2951         }
2952
2953         BUG_ON(em->start > logical || em->start + em->len < logical);
2954         map = (struct map_lookup *)em->bdev;
2955         offset = logical - em->start;
2956
2957         if (mirror_num > map->num_stripes)
2958                 mirror_num = 0;
2959
2960         /* if our multi bio struct is too small, back off and try again */
2961         if (rw & REQ_WRITE) {
2962                 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
2963                                  BTRFS_BLOCK_GROUP_DUP)) {
2964                         stripes_required = map->num_stripes;
2965                         max_errors = 1;
2966                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2967                         stripes_required = map->sub_stripes;
2968                         max_errors = 1;
2969                 }
2970         }
2971         if (multi_ret && (rw & REQ_WRITE) &&
2972             stripes_allocated < stripes_required) {
2973                 stripes_allocated = map->num_stripes;
2974                 free_extent_map(em);
2975                 kfree(multi);
2976                 goto again;
2977         }
2978         stripe_nr = offset;
2979         /*
2980          * stripe_nr counts the total number of stripes we have to stride
2981          * to get to this block
2982          */
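        /*
         * e.g. with a 64K stripe_len and a 200K offset, stripe_nr ends up
         * as 3 and stripe_offset below works out to 8K into that stripe
         */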
2983         do_div(stripe_nr, map->stripe_len);
2984
2985         stripe_offset = stripe_nr * map->stripe_len;
2986         BUG_ON(offset < stripe_offset);
2987
2988         /* stripe_offset is the offset of this block in its stripe */
2989         stripe_offset = offset - stripe_offset;
2990
2991         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2992                          BTRFS_BLOCK_GROUP_RAID10 |
2993                          BTRFS_BLOCK_GROUP_DUP)) {
2994                 /* we limit the length of each bio to what fits in a stripe */
2995                 *length = min_t(u64, em->len - offset,
2996                               map->stripe_len - stripe_offset);
2997         } else {
2998                 *length = em->len - offset;
2999         }
3000
3001         if (!multi_ret)
3002                 goto out;
3003
3004         num_stripes = 1;
3005         stripe_index = 0;
3006         if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3007                 if (rw & REQ_WRITE)
3008                         num_stripes = map->num_stripes;
3009                 else if (mirror_num)
3010                         stripe_index = mirror_num - 1;
3011                 else {
3012                         stripe_index = find_live_mirror(map, 0,
3013                                             map->num_stripes,
3014                                             current->pid % map->num_stripes);
3015                 }
3016
3017         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3018                 if (rw & REQ_WRITE)
3019                         num_stripes = map->num_stripes;
3020                 else if (mirror_num)
3021                         stripe_index = mirror_num - 1;
3022
3023         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3024                 int factor = map->num_stripes / map->sub_stripes;
3025
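                /*
                 * e.g. with 4 stripes and 2 sub_stripes, factor is 2: even
                 * stripe numbers land on the copies at stripes 0 and 1,
                 * odd ones on the copies at stripes 2 and 3
                 */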
3026                 stripe_index = do_div(stripe_nr, factor);
3027                 stripe_index *= map->sub_stripes;
3028
3029                 if (rw & REQ_WRITE)
3030                         num_stripes = map->sub_stripes;
3031                 else if (mirror_num)
3032                         stripe_index += mirror_num - 1;
3033                 else {
3034                         stripe_index = find_live_mirror(map, stripe_index,
3035                                               map->sub_stripes, stripe_index +
3036                                               current->pid % map->sub_stripes);
3037                 }
3038         } else {
3039                 /*
3040                  * after this do_div call, stripe_nr is the number of stripes
3041                  * on this device we have to walk to find the data, and
3042                  * stripe_index is the number of our device in the stripe array
3043                  */
3044                 stripe_index = do_div(stripe_nr, map->num_stripes);
3045         }
3046         BUG_ON(stripe_index >= map->num_stripes);
3047
3048         for (i = 0; i < num_stripes; i++) {
3049                 multi->stripes[i].physical =
3050                         map->stripes[stripe_index].physical +
3051                         stripe_offset + stripe_nr * map->stripe_len;
3052                 multi->stripes[i].dev = map->stripes[stripe_index].dev;
3053                 stripe_index++;
3054         }
3055         if (multi_ret) {
3056                 *multi_ret = multi;
3057                 multi->num_stripes = num_stripes;
3058                 multi->max_errors = max_errors;
3059         }
3060 out:
3061         free_extent_map(em);
3062         return 0;
3063 }
3064
3065 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3066                       u64 logical, u64 *length,
3067                       struct btrfs_multi_bio **multi_ret, int mirror_num)
3068 {
3069         return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
3070                                  mirror_num);
3071 }
3072
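/*
 * reverse mapping: given a physical offset on a device inside a chunk,
 * collect every logical bytenr that maps onto it.  The caller owns the
 * buffer returned in *logical
 */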
3073 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3074                      u64 chunk_start, u64 physical, u64 devid,
3075                      u64 **logical, int *naddrs, int *stripe_len)
3076 {
3077         struct extent_map_tree *em_tree = &map_tree->map_tree;
3078         struct extent_map *em;
3079         struct map_lookup *map;
3080         u64 *buf;
3081         u64 bytenr;
3082         u64 length;
3083         u64 stripe_nr;
3084         int i, j, nr = 0;
3085
3086         read_lock(&em_tree->lock);
3087         em = lookup_extent_mapping(em_tree, chunk_start, 1);
3088         read_unlock(&em_tree->lock);
3089
3090         BUG_ON(!em || em->start != chunk_start);
3091         map = (struct map_lookup *)em->bdev;
3092
3093         length = em->len;
3094         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3095                 do_div(length, map->num_stripes / map->sub_stripes);
3096         else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3097                 do_div(length, map->num_stripes);
3098
3099         buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3100         BUG_ON(!buf);
3101
3102         for (i = 0; i < map->num_stripes; i++) {
3103                 if (devid && map->stripes[i].dev->devid != devid)
3104                         continue;
3105                 if (map->stripes[i].physical > physical ||
3106                     map->stripes[i].physical + length <= physical)
3107                         continue;
3108
3109                 stripe_nr = physical - map->stripes[i].physical;
3110                 do_div(stripe_nr, map->stripe_len);
3111
3112                 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3113                         stripe_nr = stripe_nr * map->num_stripes + i;
3114                         do_div(stripe_nr, map->sub_stripes);
3115                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3116                         stripe_nr = stripe_nr * map->num_stripes + i;
3117                 }
3118                 bytenr = chunk_start + stripe_nr * map->stripe_len;
3119                 WARN_ON(nr >= map->num_stripes);
3120                 for (j = 0; j < nr; j++) {
3121                         if (buf[j] == bytenr)
3122                                 break;
3123                 }
3124                 if (j == nr) {
3125                         WARN_ON(nr >= map->num_stripes);
3126                         buf[nr++] = bytenr;
3127                 }
3128         }
3129
3130         *logical = buf;
3131         *naddrs = nr;
3132         *stripe_len = map->stripe_len;
3133
3134         free_extent_map(em);
3135         return 0;
3136 }
3137
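/*
 * completion handler for the bios cloned in btrfs_map_bio.  The original
 * bio is only ended once every stripe bio has finished, and -EIO is only
 * reported when more stripes failed than multi->max_errors allows
 */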
3138 static void end_bio_multi_stripe(struct bio *bio, int err)
3139 {
3140         struct btrfs_multi_bio *multi = bio->bi_private;
3141         int is_orig_bio = 0;
3142
3143         if (err)
3144                 atomic_inc(&multi->error);
3145
3146         if (bio == multi->orig_bio)
3147                 is_orig_bio = 1;
3148
3149         if (atomic_dec_and_test(&multi->stripes_pending)) {
3150                 if (!is_orig_bio) {
3151                         bio_put(bio);
3152                         bio = multi->orig_bio;
3153                 }
3154                 bio->bi_private = multi->private;
3155                 bio->bi_end_io = multi->end_io;
3156                 /* only send an error to the higher layers if it is
3157                  * beyond the tolerance of the multi-bio
3158                  */
3159                 if (atomic_read(&multi->error) > multi->max_errors) {
3160                         err = -EIO;
3161                 } else if (err) {
3162                         /*
3163                          * this bio is actually up to date, we didn't
3164                          * go over the max number of errors
3165                          */
3166                         set_bit(BIO_UPTODATE, &bio->bi_flags);
3167                         err = 0;
3168                 }
3169                 kfree(multi);
3170
3171                 bio_endio(bio, err);
3172         } else if (!is_orig_bio) {
3173                 bio_put(bio);
3174         }
3175 }
3176
3177 struct async_sched {
3178         struct bio *bio;
3179         int rw;
3180         struct btrfs_fs_info *info;
3181         struct btrfs_work work;
3182 };
3183
3184 /*
3185  * see run_scheduled_bios for a description of why bios are collected for
3186  * async submit.
3187  *
3188  * This will add one bio to the pending list for a device and make sure
3189  * the work struct is scheduled.
3190  */
3191 static noinline int schedule_bio(struct btrfs_root *root,
3192                                  struct btrfs_device *device,
3193                                  int rw, struct bio *bio)
3194 {
3195         int should_queue = 1;
3196         struct btrfs_pending_bios *pending_bios;
3197
3198         /* don't bother with additional async steps for reads, right now */
3199         if (!(rw & REQ_WRITE)) {
3200                 bio_get(bio);
3201                 submit_bio(rw, bio);
3202                 bio_put(bio);
3203                 return 0;
3204         }
3205
3206         /*
3207          * nr_async_bios allows us to reliably return congestion to the
3208          * higher layers.  Otherwise, the async bio makes it appear we have
3209          * made progress against dirty pages when we've really just put it
3210          * on a queue for later
3211          */
3212         atomic_inc(&root->fs_info->nr_async_bios);
3213         WARN_ON(bio->bi_next);
3214         bio->bi_next = NULL;
3215         bio->bi_rw |= rw;
3216
3217         spin_lock(&device->io_lock);
3218         if (bio->bi_rw & REQ_SYNC)
3219                 pending_bios = &device->pending_sync_bios;
3220         else
3221                 pending_bios = &device->pending_bios;
3222
3223         if (pending_bios->tail)
3224                 pending_bios->tail->bi_next = bio;
3225
3226         pending_bios->tail = bio;
3227         if (!pending_bios->head)
3228                 pending_bios->head = bio;
3229         if (device->running_pending)
3230                 should_queue = 0;
3231
3232         spin_unlock(&device->io_lock);
3233
3234         if (should_queue)
3235                 btrfs_queue_worker(&root->fs_info->submit_workers,
3236                                    &device->work);
3237         return 0;
3238 }
3239
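/*
 * map a bio to its stripes and submit one (possibly cloned) bio per
 * device, either directly or through the async submit workers
 */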
3240 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
3241                   int mirror_num, int async_submit)
3242 {
3243         struct btrfs_mapping_tree *map_tree;
3244         struct btrfs_device *dev;
3245         struct bio *first_bio = bio;
3246         u64 logical = (u64)bio->bi_sector << 9;
3247         u64 length = 0;
3248         u64 map_length;
3249         struct btrfs_multi_bio *multi = NULL;
3250         int ret;
3251         int dev_nr = 0;
3252         int total_devs = 1;
3253
3254         length = bio->bi_size;
3255         map_tree = &root->fs_info->mapping_tree;
3256         map_length = length;
3257
3258         ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
3259                               mirror_num);
3260         BUG_ON(ret);
3261
3262         total_devs = multi->num_stripes;
3263         if (map_length < length) {
3264                 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
3265                        "len %llu\n", (unsigned long long)logical,
3266                        (unsigned long long)length,
3267                        (unsigned long long)map_length);
3268                 BUG();
3269         }
3270         multi->end_io = first_bio->bi_end_io;
3271         multi->private = first_bio->bi_private;
3272         multi->orig_bio = first_bio;
3273         atomic_set(&multi->stripes_pending, multi->num_stripes);
3274
3275         while (dev_nr < total_devs) {
3276                 if (total_devs > 1) {
3277                         if (dev_nr < total_devs - 1) {
3278                                 bio = bio_clone(first_bio, GFP_NOFS);
3279                                 BUG_ON(!bio);
3280                         } else {
3281                                 bio = first_bio;
3282                         }
3283                         bio->bi_private = multi;
3284                         bio->bi_end_io = end_bio_multi_stripe;
3285                 }
3286                 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
3287                 dev = multi->stripes[dev_nr].dev;
3288                 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
3289                         bio->bi_bdev = dev->bdev;
3290                         if (async_submit)
3291                                 schedule_bio(root, dev, rw, bio);
3292                         else
3293                                 submit_bio(rw, bio);
3294                 } else {
3295                         bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
3296                         bio->bi_sector = logical >> 9;
3297                         bio_endio(bio, -EIO);
3298                 }
3299                 dev_nr++;
3300         }
3301         if (total_devs == 1)
3302                 kfree(multi);
3303         return 0;
3304 }
3305
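/*
 * look up a device by devid/uuid, walking the current fs_devices and any
 * chained seed device lists.  A NULL fsid matches any list
 */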
3306 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
3307                                        u8 *uuid, u8 *fsid)
3308 {
3309         struct btrfs_device *device;
3310         struct btrfs_fs_devices *cur_devices;
3311
3312         cur_devices = root->fs_info->fs_devices;
3313         while (cur_devices) {
3314                 if (!fsid ||
3315                     !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3316                         device = __find_device(&cur_devices->devices,
3317                                                devid, uuid);
3318                         if (device)
3319                                 return device;
3320                 }
3321                 cur_devices = cur_devices->seed;
3322         }
3323         return NULL;
3324 }
3325
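/*
 * create a placeholder btrfs_device for a devid the metadata references
 * but that wasn't found at mount time, so degraded mounts can carry on
 */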
3326 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
3327                                             u64 devid, u8 *dev_uuid)
3328 {
3329         struct btrfs_device *device;
3330         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
3331
3332         device = kzalloc(sizeof(*device), GFP_NOFS);
3333         if (!device)
3334                 return NULL;
3335         list_add(&device->dev_list,
3336                  &fs_devices->devices);
3337         device->dev_root = root->fs_info->dev_root;
3338         device->devid = devid;
3339         device->work.func = pending_bios_fn;
3340         device->fs_devices = fs_devices;
3341         device->missing = 1;
3342         fs_devices->num_devices++;
3343         fs_devices->missing_devices++;
3344         spin_lock_init(&device->io_lock);
3345         INIT_LIST_HEAD(&device->dev_alloc_list);
3346         memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
3347         return device;
3348 }
3349
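/*
 * turn one chunk item from the chunk tree (or the sys array) into a
 * map_lookup and insert it into the mapping tree, unless the range is
 * already mapped
 */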
3350 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
3351                           struct extent_buffer *leaf,
3352                           struct btrfs_chunk *chunk)
3353 {
3354         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3355         struct map_lookup *map;
3356         struct extent_map *em;
3357         u64 logical;
3358         u64 length;
3359         u64 devid;
3360         u8 uuid[BTRFS_UUID_SIZE];
3361         int num_stripes;
3362         int ret;
3363         int i;
3364
3365         logical = key->offset;
3366         length = btrfs_chunk_length(leaf, chunk);
3367
3368         read_lock(&map_tree->map_tree.lock);
3369         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
3370         read_unlock(&map_tree->map_tree.lock);
3371
3372         /* already mapped? */
3373         if (em && em->start <= logical && em->start + em->len > logical) {
3374                 free_extent_map(em);
3375                 return 0;
3376         } else if (em) {
3377                 free_extent_map(em);
3378         }
3379
3380         em = alloc_extent_map(GFP_NOFS);
3381         if (!em)
3382                 return -ENOMEM;
3383         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3384         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3385         if (!map) {
3386                 free_extent_map(em);
3387                 return -ENOMEM;
3388         }
3389
3390         em->bdev = (struct block_device *)map;
3391         em->start = logical;
3392         em->len = length;
3393         em->block_start = 0;
3394         em->block_len = em->len;
3395
3396         map->num_stripes = num_stripes;
3397         map->io_width = btrfs_chunk_io_width(leaf, chunk);
3398         map->io_align = btrfs_chunk_io_align(leaf, chunk);
3399         map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
3400         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
3401         map->type = btrfs_chunk_type(leaf, chunk);
3402         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
3403         for (i = 0; i < num_stripes; i++) {
3404                 map->stripes[i].physical =
3405                         btrfs_stripe_offset_nr(leaf, chunk, i);
3406                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
3407                 read_extent_buffer(leaf, uuid, (unsigned long)
3408                                    btrfs_stripe_dev_uuid_nr(chunk, i),
3409                                    BTRFS_UUID_SIZE);
3410                 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
3411                                                         NULL);
3412                 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
3413                         kfree(map);
3414                         free_extent_map(em);
3415                         return -EIO;
3416                 }
3417                 if (!map->stripes[i].dev) {
3418                         map->stripes[i].dev =
3419                                 add_missing_dev(root, devid, uuid);
3420                         if (!map->stripes[i].dev) {
3421                                 kfree(map);
3422                                 free_extent_map(em);
3423                                 return -EIO;
3424                         }
3425                 }
3426                 map->stripes[i].dev->in_fs_metadata = 1;
3427         }
3428
3429         write_lock(&map_tree->map_tree.lock);
3430         ret = add_extent_mapping(&map_tree->map_tree, em);
3431         write_unlock(&map_tree->map_tree.lock);
3432         BUG_ON(ret);
3433         free_extent_map(em);
3434
3435         return 0;
3436 }
3437
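/*
 * copy the fields of an on-disk dev item into the in-memory device
 */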
3438 static int fill_device_from_item(struct extent_buffer *leaf,
3439                                  struct btrfs_dev_item *dev_item,
3440                                  struct btrfs_device *device)
3441 {
3442         unsigned long ptr;
3443
3444         device->devid = btrfs_device_id(leaf, dev_item);
3445         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
3446         device->total_bytes = device->disk_total_bytes;
3447         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
3448         device->type = btrfs_device_type(leaf, dev_item);
3449         device->io_align = btrfs_device_io_align(leaf, dev_item);
3450         device->io_width = btrfs_device_io_width(leaf, dev_item);
3451         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
3452
3453         ptr = (unsigned long)btrfs_device_uuid(dev_item);
3454         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
3455
3456         return 0;
3457 }
3458
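/*
 * find the fs_devices for a seed filesystem referenced by fsid, open it
 * read-only and chain it onto the current fs_devices->seed list
 */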
3459 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
3460 {
3461         struct btrfs_fs_devices *fs_devices;
3462         int ret;
3463
3464         mutex_lock(&uuid_mutex);
3465
3466         fs_devices = root->fs_info->fs_devices->seed;
3467         while (fs_devices) {
3468                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3469                         ret = 0;
3470                         goto out;
3471                 }
3472                 fs_devices = fs_devices->seed;
3473         }
3474
3475         fs_devices = find_fsid(fsid);
3476         if (!fs_devices) {
3477                 ret = -ENOENT;
3478                 goto out;
3479         }
3480
3481         fs_devices = clone_fs_devices(fs_devices);
3482         if (IS_ERR(fs_devices)) {
3483                 ret = PTR_ERR(fs_devices);
3484                 goto out;
3485         }
3486
3487         ret = __btrfs_open_devices(fs_devices, FMODE_READ,
3488                                    root->fs_info->bdev_holder);
3489         if (ret)
3490                 goto out;
3491
3492         if (!fs_devices->seeding) {
3493                 __btrfs_close_devices(fs_devices);
3494                 free_fs_devices(fs_devices);
3495                 ret = -EINVAL;
3496                 goto out;
3497         }
3498
3499         fs_devices->seed = root->fs_info->fs_devices->seed;
3500         root->fs_info->fs_devices->seed = fs_devices;
3501 out:
3502         mutex_unlock(&uuid_mutex);
3503         return ret;
3504 }
3505
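/*
 * read one dev item from the tree, locate (or create a placeholder for)
 * the matching in-memory device and fill it in from the item.  Items that
 * belong to a seed fsid trigger open_seed_devices() first
 */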
3506 static int read_one_dev(struct btrfs_root *root,
3507                         struct extent_buffer *leaf,
3508                         struct btrfs_dev_item *dev_item)
3509 {
3510         struct btrfs_device *device;
3511         u64 devid;
3512         int ret;
3513         u8 fs_uuid[BTRFS_UUID_SIZE];
3514         u8 dev_uuid[BTRFS_UUID_SIZE];
3515
3516         devid = btrfs_device_id(leaf, dev_item);
3517         read_extent_buffer(leaf, dev_uuid,
3518                            (unsigned long)btrfs_device_uuid(dev_item),
3519                            BTRFS_UUID_SIZE);
3520         read_extent_buffer(leaf, fs_uuid,
3521                            (unsigned long)btrfs_device_fsid(dev_item),
3522                            BTRFS_UUID_SIZE);
3523
3524         if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
3525                 ret = open_seed_devices(root, fs_uuid);
3526                 if (ret && !btrfs_test_opt(root, DEGRADED))
3527                         return ret;
3528         }
3529
3530         device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
3531         if (!device || !device->bdev) {
3532                 if (!btrfs_test_opt(root, DEGRADED))
3533                         return -EIO;
3534
3535                 if (!device) {
3536                         printk(KERN_WARNING "warning devid %llu missing\n",
3537                                (unsigned long long)devid);
3538                         device = add_missing_dev(root, devid, dev_uuid);
3539                         if (!device)
3540                                 return -ENOMEM;
3541                 } else if (!device->missing) {
3542                         /*
3543                          * this happens when a device that was properly set up
3544                          * in the device info lists suddenly goes bad.
3545                          * device->bdev is NULL, and so we have to set
3546                          * device->missing to one here
3547                          */
3548                         root->fs_info->fs_devices->missing_devices++;
3549                         device->missing = 1;
3550                 }
3551         }
3552
3553         if (device->fs_devices != root->fs_info->fs_devices) {
3554                 BUG_ON(device->writeable);
3555                 if (device->generation !=
3556                     btrfs_device_generation(leaf, dev_item))
3557                         return -EINVAL;
3558         }
3559
3560         fill_device_from_item(leaf, dev_item, device);
3561         device->dev_root = root->fs_info->dev_root;
3562         device->in_fs_metadata = 1;
3563         if (device->writeable)
3564                 device->fs_devices->total_rw_bytes += device->total_bytes;
3565         ret = 0;
3566         return ret;
3567 }
3568
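/*
 * read the single dev item embedded in the super block
 */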
3569 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
3570 {
3571         struct btrfs_dev_item *dev_item;
3572
3573         dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
3574                                                      dev_item);
3575         return read_one_dev(root, buf, dev_item);
3576 }
3577
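/*
 * parse the sys_chunk_array embedded in the super block so the system
 * chunks can be mapped before the chunk tree itself is readable
 */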
3578 int btrfs_read_sys_array(struct btrfs_root *root)
3579 {
3580         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
3581         struct extent_buffer *sb;
3582         struct btrfs_disk_key *disk_key;
3583         struct btrfs_chunk *chunk;
3584         u8 *ptr;
3585         unsigned long sb_ptr;
3586         int ret = 0;
3587         u32 num_stripes;
3588         u32 array_size;
3589         u32 len = 0;
3590         u32 cur;
3591         struct btrfs_key key;
3592
3593         sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
3594                                           BTRFS_SUPER_INFO_SIZE);
3595         if (!sb)
3596                 return -ENOMEM;
3597         btrfs_set_buffer_uptodate(sb);
3598         btrfs_set_buffer_lockdep_class(sb, 0);
3599
3600         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
3601         array_size = btrfs_super_sys_array_size(super_copy);
3602
3603         ptr = super_copy->sys_chunk_array;
3604         sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
3605         cur = 0;
3606
3607         while (cur < array_size) {
3608                 disk_key = (struct btrfs_disk_key *)ptr;
3609                 btrfs_disk_key_to_cpu(&key, disk_key);
3610
3611                 len = sizeof(*disk_key); ptr += len;
3612                 sb_ptr += len;
3613                 cur += len;
3614
3615                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3616                         chunk = (struct btrfs_chunk *)sb_ptr;
3617                         ret = read_one_chunk(root, &key, sb, chunk);
3618                         if (ret)
3619                                 break;
3620                         num_stripes = btrfs_chunk_num_stripes(sb, chunk);
3621                         len = btrfs_chunk_item_size(num_stripes);
3622                 } else {
3623                         ret = -EIO;
3624                         break;
3625                 }
3626                 ptr += len;
3627                 sb_ptr += len;
3628                 cur += len;
3629         }
3630         free_extent_buffer(sb);
3631         return ret;
3632 }
3633
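/*
 * read every device item and chunk item out of the chunk tree and build
 * the full set of in-memory devices and chunk mappings
 */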
3634 int btrfs_read_chunk_tree(struct btrfs_root *root)
3635 {
3636         struct btrfs_path *path;
3637         struct extent_buffer *leaf;
3638         struct btrfs_key key;
3639         struct btrfs_key found_key;
3640         int ret;
3641         int slot;
3642
3643         root = root->fs_info->chunk_root;
3644
3645         path = btrfs_alloc_path();
3646         if (!path)
3647                 return -ENOMEM;
3648
3649         /* first we search for all of the device items, and then we
3650          * read in all of the chunk items.  This way we can create chunk
3651          * mappings that reference all of the devices that are found
3652          */
3653         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
3654         key.offset = 0;
3655         key.type = 0;
3656 again:
3657         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3658         if (ret < 0)
3659                 goto error;
3660         while (1) {
3661                 leaf = path->nodes[0];
3662                 slot = path->slots[0];
3663                 if (slot >= btrfs_header_nritems(leaf)) {
3664                         ret = btrfs_next_leaf(root, path);
3665                         if (ret == 0)
3666                                 continue;
3667                         if (ret < 0)
3668                                 goto error;
3669                         break;
3670                 }
3671                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3672                 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3673                         if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
3674                                 break;
3675                         if (found_key.type == BTRFS_DEV_ITEM_KEY) {
3676                                 struct btrfs_dev_item *dev_item;
3677                                 dev_item = btrfs_item_ptr(leaf, slot,
3678                                                   struct btrfs_dev_item);
3679                                 ret = read_one_dev(root, leaf, dev_item);
3680                                 if (ret)
3681                                         goto error;
3682                         }
3683                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
3684                         struct btrfs_chunk *chunk;
3685                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3686                         ret = read_one_chunk(root, &found_key, leaf, chunk);
3687                         if (ret)
3688                                 goto error;
3689                 }
3690                 path->slots[0]++;
3691         }
3692         if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3693                 key.objectid = 0;
3694                 btrfs_release_path(root, path);
3695                 goto again;
3696         }
3697         ret = 0;
3698 error:
3699         btrfs_free_path(path);
3700         return ret;
3701 }