// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/btrfs.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
#include "inode-map.h"
#include "backref.h"
#include "rcu-string.h"
#include "send.h"
#include "dev-replace.h"
#include "props.h"
#include "sysfs.h"
#include "qgroup.h"
#include "tree-log.h"
#include "compression.h"

#ifdef CONFIG_64BIT
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
#endif

#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
struct btrfs_ioctl_send_args_32 {
	__s64 send_fd;			/* in */
	__u64 clone_sources_count;	/* in */
	compat_uptr_t clone_sources;	/* in */
	__u64 parent_root;		/* in */
	__u64 flags;			/* in */
	__u64 reserved[4];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
			       struct btrfs_ioctl_send_args_32)
#endif

static int btrfs_clone(struct inode *src, struct inode *inode,
		       u64 off, u64 olen, u64 olen_aligned, u64 destoff,
		       int no_time_update);

/* Mask out flags that are inappropriate for the given type of inode. */
static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
		unsigned int flags)
{
	if (S_ISDIR(inode->i_mode))
		return flags;
	else if (S_ISREG(inode->i_mode))
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}

/*
 * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
 * ioctl.
 */
static unsigned int btrfs_inode_flags_to_fsflags(unsigned int flags)
{
	unsigned int iflags = 0;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;

	if (flags & BTRFS_INODE_NOCOMPRESS)
		iflags |= FS_NOCOMP_FL;
	else if (flags & BTRFS_INODE_COMPRESS)
		iflags |= FS_COMPR_FL;

	return iflags;
}

/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
{
	struct btrfs_inode *binode = BTRFS_I(inode);
	unsigned int new_fl = 0;

	if (binode->flags & BTRFS_INODE_SYNC)
		new_fl |= S_SYNC;
	if (binode->flags & BTRFS_INODE_IMMUTABLE)
		new_fl |= S_IMMUTABLE;
	if (binode->flags & BTRFS_INODE_APPEND)
		new_fl |= S_APPEND;
	if (binode->flags & BTRFS_INODE_NOATIME)
		new_fl |= S_NOATIME;
	if (binode->flags & BTRFS_INODE_DIRSYNC)
		new_fl |= S_DIRSYNC;

	set_mask_bits(&inode->i_flags,
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
		      new_fl);
}

static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
	struct btrfs_inode *binode = BTRFS_I(file_inode(file));
	unsigned int flags = btrfs_inode_flags_to_fsflags(binode->flags);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}
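
/*
 * Example (illustrative sketch, not part of this file): userspace reads
 * these flags through the generic FS_IOC_GETFLAGS ioctl, as lsattr(1)
 * does. A minimal, hedged sketch; "fd" is assumed to be an open
 * descriptor on a btrfs file, and note the handler above copies exactly
 * sizeof(unsigned int) back to the caller:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	unsigned int flags = 0;
 *	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0 &&
 *	    (flags & FS_NOCOW_FL))
 *		printf("NOCOW is set\n");
 */
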
/* Check if @flags are a supported and valid set of FS_*_FL flags */
static int check_fsflags(unsigned int flags)
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL | \
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
		return -EOPNOTSUPP;

	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	return 0;
}

static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_inode *binode = BTRFS_I(inode);
	struct btrfs_root *root = binode->root;
	struct btrfs_trans_handle *trans;
	unsigned int fsflags, old_fsflags;
	int ret;
	u64 old_flags;
	unsigned int old_i_flags;
	umode_t mode;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (copy_from_user(&fsflags, arg, sizeof(fsflags)))
		return -EFAULT;

	ret = check_fsflags(fsflags);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	inode_lock(inode);

	old_flags = binode->flags;
	old_i_flags = inode->i_flags;
	mode = inode->i_mode;

	fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
	old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
	if ((fsflags ^ old_fsflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto out_unlock;
		}
	}

	if (fsflags & FS_SYNC_FL)
		binode->flags |= BTRFS_INODE_SYNC;
	else
		binode->flags &= ~BTRFS_INODE_SYNC;
	if (fsflags & FS_IMMUTABLE_FL)
		binode->flags |= BTRFS_INODE_IMMUTABLE;
	else
		binode->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (fsflags & FS_APPEND_FL)
		binode->flags |= BTRFS_INODE_APPEND;
	else
		binode->flags &= ~BTRFS_INODE_APPEND;
	if (fsflags & FS_NODUMP_FL)
		binode->flags |= BTRFS_INODE_NODUMP;
	else
		binode->flags &= ~BTRFS_INODE_NODUMP;
	if (fsflags & FS_NOATIME_FL)
		binode->flags |= BTRFS_INODE_NOATIME;
	else
		binode->flags &= ~BTRFS_INODE_NOATIME;
	if (fsflags & FS_DIRSYNC_FL)
		binode->flags |= BTRFS_INODE_DIRSYNC;
	else
		binode->flags &= ~BTRFS_INODE_DIRSYNC;
	if (fsflags & FS_NOCOW_FL) {
		if (S_ISREG(mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
				binode->flags |= BTRFS_INODE_NODATACOW
					      | BTRFS_INODE_NODATASUM;
		} else {
			binode->flags |= BTRFS_INODE_NODATACOW;
		}
	} else {
		/*
		 * Revert back under same assumptions as above
		 */
		if (S_ISREG(mode)) {
			if (inode->i_size == 0)
				binode->flags &= ~(BTRFS_INODE_NODATACOW
						 | BTRFS_INODE_NODATASUM);
		} else {
			binode->flags &= ~BTRFS_INODE_NODATACOW;
		}
	}

	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
	 */
	if (fsflags & FS_NOCOMP_FL) {
		binode->flags &= ~BTRFS_INODE_COMPRESS;
		binode->flags |= BTRFS_INODE_NOCOMPRESS;

		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
	} else if (fsflags & FS_COMPR_FL) {
		const char *comp;

		binode->flags |= BTRFS_INODE_COMPRESS;
		binode->flags &= ~BTRFS_INODE_NOCOMPRESS;

		comp = btrfs_compress_type2str(fs_info->compress_type);
		if (!comp || comp[0] == 0)
			comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);

		ret = btrfs_set_prop(inode, "btrfs.compression",
				     comp, strlen(comp), 0);
		if (ret)
			goto out_drop;
	} else {
		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
		binode->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, inode);

	btrfs_end_transaction(trans);
out_drop:
	if (ret) {
		binode->flags = old_flags;
		inode->i_flags = old_i_flags;
	}

out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(file);
	return ret;
}
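
/*
 * Example (illustrative sketch, not part of this file): the NOCOW special
 * case above means FS_NOCOW_FL is only persisted on a regular file while
 * it is still empty; on a non-empty file the flag is quietly not set. A
 * hedged userspace sketch, assuming "fd" is a freshly created zero-length
 * file:
 *
 *	unsigned int flags = 0;
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_NOCOW_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);	// must happen before any write
 */
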
/*
 * Translate btrfs internal inode flags to xflags as expected by the
 * FS_IOC_FSGETXATT ioctl. Filter only the supported ones, unknown flags are
 * silently dropped.
 */
static unsigned int btrfs_inode_flags_to_xflags(unsigned int flags)
{
	unsigned int xflags = 0;

	if (flags & BTRFS_INODE_APPEND)
		xflags |= FS_XFLAG_APPEND;
	if (flags & BTRFS_INODE_IMMUTABLE)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (flags & BTRFS_INODE_NOATIME)
		xflags |= FS_XFLAG_NOATIME;
	if (flags & BTRFS_INODE_NODUMP)
		xflags |= FS_XFLAG_NODUMP;
	if (flags & BTRFS_INODE_SYNC)
		xflags |= FS_XFLAG_SYNC;

	return xflags;
}

/* Check if @flags are a supported and valid set of FS_XFLAGS_* flags */
static int check_xflags(unsigned int flags)
{
	if (flags & ~(FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE | FS_XFLAG_NOATIME |
		      FS_XFLAG_NODUMP | FS_XFLAG_SYNC))
		return -EOPNOTSUPP;
	return 0;
}

/*
 * Set the xflags from the internal inode flags. The remaining items of fsxattr
 * are zeroed.
 */
static int btrfs_ioctl_fsgetxattr(struct file *file, void __user *arg)
{
	struct btrfs_inode *binode = BTRFS_I(file_inode(file));
	struct fsxattr fa;

	memset(&fa, 0, sizeof(fa));
	fa.fsx_xflags = btrfs_inode_flags_to_xflags(binode->flags);

	if (copy_to_user(arg, &fa, sizeof(fa)))
		return -EFAULT;

	return 0;
}

static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_inode *binode = BTRFS_I(inode);
	struct btrfs_root *root = binode->root;
	struct btrfs_trans_handle *trans;
	struct fsxattr fa;
	unsigned old_flags;
	unsigned old_i_flags;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	if (btrfs_root_readonly(root))
		return -EROFS;

	memset(&fa, 0, sizeof(fa));
	if (copy_from_user(&fa, arg, sizeof(fa)))
		return -EFAULT;

	ret = check_xflags(fa.fsx_xflags);
	if (ret)
		return ret;

	if (fa.fsx_extsize != 0 || fa.fsx_projid != 0 || fa.fsx_cowextsize != 0)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	inode_lock(inode);

	old_flags = binode->flags;
	old_i_flags = inode->i_flags;

	/* We need the capabilities to change append-only or immutable inode */
	if (((old_flags & (BTRFS_INODE_APPEND | BTRFS_INODE_IMMUTABLE)) ||
	     (fa.fsx_xflags & (FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE))) &&
	    !capable(CAP_LINUX_IMMUTABLE)) {
		ret = -EPERM;
		goto out_unlock;
	}

	if (fa.fsx_xflags & FS_XFLAG_SYNC)
		binode->flags |= BTRFS_INODE_SYNC;
	else
		binode->flags &= ~BTRFS_INODE_SYNC;
	if (fa.fsx_xflags & FS_XFLAG_IMMUTABLE)
		binode->flags |= BTRFS_INODE_IMMUTABLE;
	else
		binode->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (fa.fsx_xflags & FS_XFLAG_APPEND)
		binode->flags |= BTRFS_INODE_APPEND;
	else
		binode->flags &= ~BTRFS_INODE_APPEND;
	if (fa.fsx_xflags & FS_XFLAG_NODUMP)
		binode->flags |= BTRFS_INODE_NODUMP;
	else
		binode->flags &= ~BTRFS_INODE_NODUMP;
	if (fa.fsx_xflags & FS_XFLAG_NOATIME)
		binode->flags |= BTRFS_INODE_NOATIME;
	else
		binode->flags &= ~BTRFS_INODE_NOATIME;

	/* 1 item for the inode */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, inode);

	btrfs_end_transaction(trans);

out_unlock:
	if (ret) {
		binode->flags = old_flags;
		inode->i_flags = old_i_flags;
	}

	inode_unlock(inode);
	mnt_drop_write_file(file);

	return ret;
}

static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
	struct inode *inode = file_inode(file);

	return put_user(inode->i_generation, arg);
}

static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	u64 num_devices = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
			minlen = min_t(u64, q->limits.discard_granularity,
				     minlen);
		}
	}
	rcu_read_unlock();

	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;
	if (range.start > total_bytes ||
	    range.len < fs_info->sb->s_blocksize)
		return -EINVAL;

	range.len = min(range.len, total_bytes - range.start);
	range.minlen = max(range.minlen, minlen);
	ret = btrfs_trim_fs(fs_info, &range);
	if (ret < 0)
		return ret;

	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}
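
/*
 * Example (illustrative sketch, not part of this file): FITRIM as issued
 * by fstrim(8). A minimal, hedged sketch; "fd" is assumed to refer to any
 * file or directory on the mounted filesystem. On success the kernel
 * writes the number of trimmed bytes back into range.len:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,	// clamped up to the discard granularity above
 *	};
 *	if (ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n",
 *		       (unsigned long long)range.len);
 */
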
int btrfs_is_empty_uuid(u8 *uuid)
{
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
}

static noinline int create_subvol(struct inode *dir,
				  struct dentry *dentry,
				  const char *name, int namelen,
				  u64 *async_transid,
				  struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *new_root;
	struct btrfs_block_rsv block_rsv;
	struct timespec64 cur_time = current_time(dir);
	struct inode *inode;
	int ret;
	int err;
	u64 objectid;
	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
	u64 index = 0;
	uuid_le new_uuid;

	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
	if (!root_item)
		return -ENOMEM;

	ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
	if (ret)
		goto fail_free;

	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
	 * screwed up since it assumes subvolume qgroup's level to be 0.
	 */
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;
		goto fail_free;
	}

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false);
	if (ret)
		goto fail_free;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_subvolume_release_metadata(fs_info, &block_rsv);
		goto fail_free;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
	if (ret)
		goto fail;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}

	btrfs_mark_buffer_dirty(leaf);

	inode_item = &root_item->inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_flags(root_item, 0);
	btrfs_set_root_limit(root_item, 0);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);

	btrfs_set_root_bytenr(root_item, leaf->start);
	btrfs_set_root_generation(root_item, trans->transid);
	btrfs_set_root_level(root_item, 0);
	btrfs_set_root_refs(root_item, 1);
	btrfs_set_root_used(root_item, leaf->len);
	btrfs_set_root_last_snapshot(root_item, 0);

	btrfs_set_root_generation_v2(root_item,
			btrfs_root_generation(root_item));
	uuid_le_gen(&new_uuid);
	memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
	root_item->ctime = root_item->otime;
	btrfs_set_root_ctransid(root_item, trans->transid);
	btrfs_set_root_otransid(root_item, trans->transid);

	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	leaf = NULL;

	btrfs_set_root_dirid(root_item, new_dirid);

	key.objectid = objectid;
	key.offset = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
				root_item);
	if (ret)
		goto fail;

	key.offset = (u64)-1;
	new_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_record_root_in_trans(trans, new_root);

	ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
	if (ret) {
		/* We potentially lose an unused inode item here */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	mutex_lock(&new_root->objectid_mutex);
	new_root->highest_objectid = new_dirid;
	mutex_unlock(&new_root->objectid_mutex);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key,
				    BTRFS_FT_DIR, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

	ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
				 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
	BUG_ON(ret);

	ret = btrfs_uuid_tree_add(trans, root_item->uuid,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret)
		btrfs_abort_transaction(trans, ret);

fail:
	kfree(root_item);
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	btrfs_subvolume_release_metadata(fs_info, &block_rsv);

	if (async_transid) {
		*async_transid = trans->transid;
		err = btrfs_commit_transaction_async(trans, 1);
		if (err)
			err = btrfs_commit_transaction(trans);
	} else {
		err = btrfs_commit_transaction(trans);
	}
	if (err && !ret)
		ret = err;

	if (!ret) {
		inode = btrfs_lookup_dentry(dir, dentry);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
		d_instantiate(dentry, inode);
	}
	return ret;

fail_free:
	kfree(root_item);
	return ret;
}

static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			   struct dentry *dentry,
			   u64 *async_transid, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
	int ret;
	bool snapshot_force_cow = false;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return -EINVAL;

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
	if (!pending_snapshot)
		return -ENOMEM;

	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
			GFP_KERNEL);
	pending_snapshot->path = btrfs_alloc_path();
	if (!pending_snapshot->root_item || !pending_snapshot->path) {
		ret = -ENOMEM;
		goto free_pending;
	}

	/*
	 * Force new buffered writes to reserve space even when NOCOW is
	 * possible. This is to avoid later writeback (running dealloc) to
	 * fallback to COW mode and unexpectedly fail with ENOSPC.
	 */
	atomic_inc(&root->will_be_snapshotted);
	smp_mb__after_atomic();
	/* wait for no snapshot writes */
	wait_event(root->subv_writers->wait,
		   percpu_counter_sum(&root->subv_writers->counter) == 0);

	ret = btrfs_start_delalloc_inodes(root);
	if (ret)
		goto dec_and_free;

	/*
	 * All previous writes have started writeback in NOCOW mode, so now
	 * we force future writes to fallback to COW mode during snapshot
	 * creation.
	 */
	atomic_inc(&root->snapshot_force_cow);
	snapshot_force_cow = true;

	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
	 * 1 - UUID item
	 */
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
					&pending_snapshot->block_rsv, 8,
					false);
	if (ret)
		goto dec_and_free;

	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;
	pending_snapshot->readonly = readonly;
	pending_snapshot->dir = dir;
	pending_snapshot->inherit = inherit;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

	spin_lock(&fs_info->trans_lock);
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
	spin_unlock(&fs_info->trans_lock);
	if (async_transid) {
		*async_transid = trans->transid;
		ret = btrfs_commit_transaction_async(trans, 1);
		if (ret)
			ret = btrfs_commit_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
	if (ret)
		goto fail;

	ret = pending_snapshot->error;
	if (ret)
		goto fail;

	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}

	d_instantiate(dentry, inode);
	ret = 0;
fail:
	btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
dec_and_free:
	if (snapshot_force_cow)
		atomic_dec(&root->snapshot_force_cow);
	if (atomic_dec_and_test(&root->will_be_snapshotted))
		wake_up_var(&root->will_be_snapshotted);
free_pending:
	kfree(pending_snapshot->root_item);
	btrfs_free_path(pending_snapshot->path);
	kfree(pending_snapshot);

	return ret;
}

/*  copy of may_delete in fs/namei.c()
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do anything with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */

static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
{
	int error;

	if (d_really_is_negative(victim))
		return -ENOENT;

	BUG_ON(d_inode(victim->d_parent) != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
	    IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

/* copy of may_create in fs/namei.c() */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * Create a new subvolume below @parent.  This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(const struct path *parent,
				   const char *name, int namelen,
				   struct btrfs_root *snap_src,
				   u64 *async_transid, bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *dir = d_inode(parent->dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct dentry *dentry;
	int error;

	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	dentry = lookup_one_len(name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = btrfs_may_create(dir, dentry);
	if (error)
		goto out_dput;

	/*
	 * even if this name doesn't exist, we may get hash collisions.
	 * check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name,
					       namelen);
	if (error)
		goto out_dput;

	down_read(&fs_info->subvol_sem);

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src) {
		error = create_snapshot(snap_src, dir, dentry,
					async_transid, readonly, inherit);
	} else {
		error = create_subvol(dir, dentry, name, namelen,
				      async_transid, inherit);
	}
	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&fs_info->subvol_sem);
out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(dir);
	return error;
}

/*
 * When we're defragging a range, we don't want to kick it off again
 * if it is really just waiting for delalloc to send it down.
 * If we find a nice big extent or delalloc range for the bytes in the
 * file you want to defrag, we return 0 to let you know to skip this
 * part of the file
 */
static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 end;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
	read_unlock(&em_tree->lock);

	if (em) {
		end = extent_map_end(em);
		free_extent_map(em);
		if (end - offset > thresh)
			return 0;
	}
	/* if we already have a nice delalloc here, just stop */
	thresh /= 2;
	end = count_range_bits(io_tree, &offset, offset + thresh,
			       thresh, EXTENT_DELALLOC, 1);
	if (end >= thresh)
		return 0;
	return 1;
}

/*
 * helper function to walk through a file and find extents
 * newer than a specific transid, and smaller than thresh.
 *
 * This is used by the defragging code to find new and small
 * extents
 */
static int find_new_extents(struct btrfs_root *root,
			    struct inode *inode, u64 newer_than,
			    u64 *off, u32 thresh)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	int type;
	int ret;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = ino;
	min_key.type = BTRFS_EXTENT_DATA_KEY;
	min_key.offset = *off;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, newer_than);
		if (ret != 0)
			goto none;
process_slot:
		if (min_key.objectid != ino)
			goto none;
		if (min_key.type != BTRFS_EXTENT_DATA_KEY)
			goto none;

		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG &&
		    btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
		    check_defrag_in_cache(inode, min_key.offset, thresh)) {
			*off = min_key.offset;
			btrfs_free_path(path);
			return 0;
		}

		path->slots[0]++;
		if (path->slots[0] < btrfs_header_nritems(leaf)) {
			btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
			goto process_slot;
		}

		if (min_key.offset == (u64)-1)
			goto none;

		min_key.offset++;
		btrfs_release_path(path);
	}
none:
	btrfs_free_path(path);
	return -ENOENT;
}

static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	u64 len = PAGE_SIZE;

	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (!em) {
		struct extent_state *cached = NULL;
		u64 end = start + len - 1;

		/* get the big lock and read metadata off disk */
		lock_extent_bits(io_tree, start, end, &cached);
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
		unlock_extent_cached(io_tree, start, end, &cached);

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}

static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
{
	struct extent_map *next;
	bool ret = true;

	/* this is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	next = defrag_lookup_extent(inode, em->start + em->len);
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = false;
	else if ((em->block_start + em->block_len == next->block_start) &&
		 (em->block_len > SZ_128K && next->block_len > SZ_128K))
		ret = false;

	free_extent_map(next);
	return ret;
}

static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
			       u64 *last_len, u64 *skip, u64 *defrag_end,
			       int compress)
{
	struct extent_map *em;
	int ret = 1;
	bool next_mergeable = true;
	bool prev_mergeable = true;

	/*
	 * make sure that once we start defragging an extent, we keep on
	 * defragging it
	 */
	if (start < *defrag_end)
		return 1;

	*skip = 0;

	em = defrag_lookup_extent(inode, start);
	if (!em)
		return 0;

	/* this will cover holes, and inline extents */
	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
		ret = 0;
		goto out;
	}

	if (!*defrag_end)
		prev_mergeable = false;

	next_mergeable = defrag_check_next_extent(inode, em);
	/*
	 * we hit a real extent, if it is big or the next extent is not a
	 * real extent, don't bother defragging it
	 */
	if (!compress && (*last_len == 0 || *last_len >= thresh) &&
	    (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
		ret = 0;
out:
	/*
	 * last_len ends up being a counter of how many bytes we've defragged.
	 * every time we choose not to defrag an extent, we reset *last_len
	 * so that the next tiny extent will force a defrag.
	 *
	 * The end result of this is that tiny extents before a single big
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
		*skip = extent_map_end(em);
	}

	free_extent_map(em);
	return ret;
}

/*
 * it doesn't do much good to defrag one or two pages
 * at a time.  This pulls in a nice chunk of pages
 * to COW and defrag.
 *
 * It also makes sure the delalloc code has enough
 * dirty data to avoid making new small extents as part
 * of the defrag
 *
 * It's a good idea to start RA on this range
 * before calling this.
 */
static int cluster_pages_for_defrag(struct inode *inode,
				    struct page **pages,
				    unsigned long start_index,
				    unsigned long num_pages)
{
	unsigned long file_end;
	u64 isize = i_size_read(inode);
	u64 page_start;
	u64 page_end;
	u64 page_cnt;
	int ret;
	int i;
	int i_done;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_io_tree *tree;
	struct extent_changeset *data_reserved = NULL;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);

	file_end = (isize - 1) >> PAGE_SHIFT;
	if (!isize || start_index > file_end)
		return 0;

	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
			start_index << PAGE_SHIFT,
			page_cnt << PAGE_SHIFT);
	if (ret)
		return ret;
	i_done = 0;
	tree = &BTRFS_I(inode)->io_tree;

	/* step one, lock all the pages */
	for (i = 0; i < page_cnt; i++) {
		struct page *page;
again:
		page = find_or_create_page(inode->i_mapping,
					   start_index + i, mask);
		if (!page)
			break;

		page_start = page_offset(page);
		page_end = page_start + PAGE_SIZE - 1;
		while (1) {
			lock_extent_bits(tree, page_start, page_end,
					 &cached_state);
			ordered = btrfs_lookup_ordered_extent(inode,
							      page_start);
			unlock_extent_cached(tree, page_start, page_end,
					     &cached_state);
			if (!ordered)
				break;

			unlock_page(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			lock_page(page);
			/*
			 * we unlocked the page above, so we need check if
			 * it was released or not.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				put_page(page);
				goto again;
			}
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				ret = -EIO;
				break;
			}
		}

		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			put_page(page);
			goto again;
		}

		pages[i] = page;
		i_done++;
	}
	if (!i_done || ret)
		goto out;

	if (!(inode->i_sb->s_flags & SB_ACTIVE))
		goto out;

	/*
	 * so now we have a nice long stream of locked
	 * and up to date pages, lets wait on them
	 */
	for (i = 0; i < i_done; i++)
		wait_on_page_writeback(pages[i]);

	page_start = page_offset(pages[0]);
	page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;

	lock_extent_bits(&BTRFS_I(inode)->io_tree,
			 page_start, page_end - 1, &cached_state);
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
			  page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
			  &cached_state);

	if (i_done != page_cnt) {
		spin_lock(&BTRFS_I(inode)->lock);
		btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
		spin_unlock(&BTRFS_I(inode)->lock);
		btrfs_delalloc_release_space(inode, data_reserved,
				start_index << PAGE_SHIFT,
				(page_cnt - i_done) << PAGE_SHIFT, true);
	}


	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
			  &cached_state);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
			     page_start, page_end - 1, &cached_state);

	for (i = 0; i < i_done; i++) {
		clear_page_dirty_for_io(pages[i]);
		ClearPageChecked(pages[i]);
		set_page_extent_mapped(pages[i]);
		set_page_dirty(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
				       false);
	extent_changeset_free(data_reserved);
	return i_done;
out:
	for (i = 0; i < i_done; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	btrfs_delalloc_release_space(inode, data_reserved,
			start_index << PAGE_SHIFT,
			page_cnt << PAGE_SHIFT, true);
	btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
				       true);
	extent_changeset_free(data_reserved);
	return ret;

}

int btrfs_defrag_file(struct inode *inode, struct file *file,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct file_ra_state *ra = NULL;
	unsigned long last_index;
	u64 isize = i_size_read(inode);
	u64 last_len = 0;
	u64 skip = 0;
	u64 defrag_end = 0;
	u64 newer_off = range->start;
	unsigned long i;
	unsigned long ra_index = 0;
	int ret;
	int defrag_count = 0;
	int compress_type = BTRFS_COMPRESS_ZLIB;
	u32 extent_thresh = range->extent_thresh;
	unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
	unsigned long cluster = max_cluster;
	u64 new_align = ~((u64)SZ_128K - 1);
	struct page **pages = NULL;
	bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;

	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;

	if (do_compress) {
		if (range->compress_type > BTRFS_COMPRESS_TYPES)
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}

	if (extent_thresh == 0)
		extent_thresh = SZ_256K;

	/*
	 * If we were not given a file, allocate a readahead context. As
	 * readahead is just an optimization, defrag will work without it so
	 * we don't error out.
	 */
	if (!file) {
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
		if (ra)
			file_ra_state_init(ra, inode->i_mapping);
	} else {
		ra = &file->f_ra;
	}

	pages = kmalloc_array(max_cluster, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ra;
	}

	/* find the last page to defrag */
	if (range->start + range->len > range->start) {
		last_index = min_t(u64, isize - 1,
			 range->start + range->len - 1) >> PAGE_SHIFT;
	} else {
		last_index = (isize - 1) >> PAGE_SHIFT;
	}

	if (newer_than) {
		ret = find_new_extents(root, inode, newer_than,
				       &newer_off, SZ_64K);
		if (!ret) {
			range->start = newer_off;
			/*
			 * we always align our defrag to help keep
			 * the extents in the file evenly spaced
			 */
			i = (newer_off & new_align) >> PAGE_SHIFT;
		} else
			goto out_ra;
	} else {
		i = range->start >> PAGE_SHIFT;
	}
	if (!max_to_defrag)
		max_to_defrag = last_index - i + 1;

	/*
	 * make writeback start from i, so the defrag range can be
	 * written sequentially.
	 */
	if (i < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = i;

	while (i <= last_index && defrag_count < max_to_defrag &&
	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
		/*
		 * make sure we stop running if someone unmounts
		 * the FS
		 */
		if (!(inode->i_sb->s_flags & SB_ACTIVE))
			break;

		if (btrfs_defrag_cancelled(fs_info)) {
			btrfs_debug(fs_info, "defrag_file cancelled");
			ret = -EAGAIN;
			break;
		}

		if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
					 extent_thresh, &last_len, &skip,
					 &defrag_end, do_compress)) {
			unsigned long next;
			/*
			 * the should_defrag function tells us how much to skip
			 * bump our counter by the suggested amount
			 */
			next = DIV_ROUND_UP(skip, PAGE_SIZE);
			i = max(i + 1, next);
			continue;
		}

		if (!newer_than) {
			cluster = (PAGE_ALIGN(defrag_end) >>
				   PAGE_SHIFT) - i;
			cluster = min(cluster, max_cluster);
		} else {
			cluster = max_cluster;
		}

		if (i + cluster > ra_index) {
			ra_index = max(i, ra_index);
			if (ra)
				page_cache_sync_readahead(inode->i_mapping, ra,
						file, ra_index, cluster);
			ra_index += cluster;
		}

		inode_lock(inode);
		if (do_compress)
			BTRFS_I(inode)->defrag_compress = compress_type;
		ret = cluster_pages_for_defrag(inode, pages, i, cluster);
		if (ret < 0) {
			inode_unlock(inode);
			goto out_ra;
		}

		defrag_count += ret;
		balance_dirty_pages_ratelimited(inode->i_mapping);
		inode_unlock(inode);

		if (newer_than) {
			if (newer_off == (u64)-1)
				break;

			if (ret > 0)
				i += ret;

			newer_off = max(newer_off + 1,
					(u64)i << PAGE_SHIFT);

			ret = find_new_extents(root, inode, newer_than,
					       &newer_off, SZ_64K);
			if (!ret) {
				range->start = newer_off;
				i = (newer_off & new_align) >> PAGE_SHIFT;
			} else {
				break;
			}
		} else {
			if (ret > 0) {
				i += ret;
				last_len += ret << PAGE_SHIFT;
			} else {
				i++;
				last_len = 0;
			}
		}
	}

	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
		filemap_flush(inode->i_mapping);
		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
			filemap_flush(inode->i_mapping);
	}

	if (range->compress_type == BTRFS_COMPRESS_LZO) {
		btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
	} else if (range->compress_type == BTRFS_COMPRESS_ZSTD) {
		btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
	}

	ret = defrag_count;

out_ra:
	if (do_compress) {
		inode_lock(inode);
		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
		inode_unlock(inode);
	}
	if (!file)
		kfree(ra);
	kfree(pages);
	return ret;
}
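
/*
 * Example (illustrative sketch, not part of this file): the range and
 * threshold parameters above arrive via BTRFS_IOC_DEFRAG_RANGE, roughly
 * what "btrfs filesystem defragment -czstd" issues. A hedged sketch; the
 * numeric compress_type value mirrors the in-kernel BTRFS_COMPRESS_ZSTD
 * constant, which btrfs-progs carries its own copy of:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/btrfs.h>
 *
 *	struct btrfs_ioctl_defrag_range_args args = {
 *		.start = 0,
 *		.len = (__u64)-1,		// whole file
 *		.extent_thresh = 256 * 1024,	// same default as above
 *		.flags = BTRFS_DEFRAG_RANGE_COMPRESS,
 *		.compress_type = 3,		// zstd (assumed constant)
 *	};
 *	ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &args);
 */
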
static noinline int btrfs_ioctl_resize(struct file *file,
					void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
	char *retptr;
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
		mnt_drop_write_file(file);
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
	}

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	sizestr = vol_args->name;
	devstr = strchr(sizestr, ':');
	if (devstr) {
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
			goto out_free;
		if (!devid) {
			ret = -EINVAL;
			goto out_free;
		}
		btrfs_info(fs_info, "resizing devid %llu", devid);
	}

	device = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!device) {
		btrfs_info(fs_info, "resizer unable to find device %llu",
			   devid);
		ret = -ENODEV;
		goto out_free;
	}

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		btrfs_info(fs_info,
			   "resizer unable to apply on readonly device %llu",
		       devid);
		ret = -EPERM;
		goto out_free;
	}

	if (!strcmp(sizestr, "max"))
		new_size = device->bdev->bd_inode->i_size;
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
			ret = -EINVAL;
			goto out_free;
		}
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -EPERM;
		goto out_free;
	}

	old_size = btrfs_device_get_total_bytes(device);

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
			goto out_free;
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
		if (new_size > ULLONG_MAX - old_size) {
			ret = -ERANGE;
			goto out_free;
		}
		new_size = old_size + new_size;
	}

	if (new_size < SZ_256M) {
		ret = -EINVAL;
		goto out_free;
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
		goto out_free;
	}

	new_size = round_down(new_size, fs_info->sectorsize);

	btrfs_info_in_rcu(fs_info, "new size for %s is %llu",
			  rcu_str_deref(device->name), new_size);

	if (new_size > old_size) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out_free;
		}
		ret = btrfs_grow_device(trans, device, new_size);
		btrfs_commit_transaction(trans);
	} else if (new_size < old_size) {
		ret = btrfs_shrink_device(device, new_size);
	} /* equal, nothing need to do */

out_free:
	kfree(vol_args);
out:
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
	mnt_drop_write_file(file);
	return ret;
}
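
/*
 * Example (illustrative sketch, not part of this file): the "devid:size"
 * string parsed above is exactly what "btrfs filesystem resize" passes
 * in. A hedged sketch growing device 1 by 1GiB; "fd" is assumed to be an
 * open descriptor on the mount point, and CAP_SYS_ADMIN is required:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/btrfs.h>
 *
 *	struct btrfs_ioctl_vol_args args = { .fd = 0 };
 *	strncpy(args.name, "1:+1g", BTRFS_PATH_NAME_MAX);
 *	ioctl(fd, BTRFS_IOC_RESIZE, &args);
 */
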
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
				const char *name, unsigned long fd, int subvol,
				u64 *transid, bool readonly,
				struct btrfs_qgroup_inherit *inherit)
{
	int namelen;
	int ret = 0;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	namelen = strlen(name);
	if (strchr(name, '/')) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
		ret = -EEXIST;
		goto out_drop_write;
	}

	if (subvol) {
		ret = btrfs_mksubvol(&file->f_path, name, namelen,
				     NULL, transid, readonly, inherit);
	} else {
		struct fd src = fdget(fd);
		struct inode *src_inode;
		if (!src.file) {
			ret = -EINVAL;
			goto out_drop_write;
		}

		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
				   "Snapshot src from another FS");
			ret = -EXDEV;
		} else if (!inode_owner_or_capable(src_inode)) {
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to own subvolumes only
			 */
			ret = -EPERM;
		} else {
			ret = btrfs_mksubvol(&file->f_path, name, namelen,
					     BTRFS_I(src_inode)->root,
					     transid, readonly, inherit);
		}
		fdput(src);
	}
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}

static noinline int btrfs_ioctl_snap_create(struct file *file,
					    void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol,
					      NULL, false, NULL);

	kfree(vol_args);
	return ret;
}
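
/*
 * Example (illustrative sketch, not part of this file): this v1 entry
 * point backs both BTRFS_IOC_SUBVOL_CREATE (subvol=1, fd unused) and
 * BTRFS_IOC_SNAP_CREATE (subvol=0, fd names the source subvolume). A
 * hedged sketch; "dirfd" is assumed to be the destination directory and
 * "srcfd" the root of the subvolume to snapshot:
 *
 *	struct btrfs_ioctl_vol_args args = { .fd = srcfd };
 *	strncpy(args.name, "snap-of-src", BTRFS_PATH_NAME_MAX);
 *	ioctl(dirfd, BTRFS_IOC_SNAP_CREATE, &args);
 */
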
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
	u64 transid = 0;
	u64 *ptr = NULL;
	bool readonly = false;
	struct btrfs_qgroup_inherit *inherit = NULL;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';

	if (vol_args->flags &
	    ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
	      BTRFS_SUBVOL_QGROUP_INHERIT)) {
		ret = -EOPNOTSUPP;
		goto free_args;
	}

	if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
		ptr = &transid;
	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
		if (vol_args->size > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_args;
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
			goto free_args;
		}
	}

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol, ptr,
					      readonly, inherit);
	if (ret)
		goto free_inherit;

	if (ptr && copy_to_user(arg +
				offsetof(struct btrfs_ioctl_vol_args_v2,
					transid),
				ptr, sizeof(*ptr)))
		ret = -EFAULT;

free_inherit:
	kfree(inherit);
free_args:
	kfree(vol_args);
	return ret;
}
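
/*
 * Example (illustrative sketch, not part of this file): the v2 variant
 * adds flags; this is how "btrfs subvolume snapshot -r" requests a
 * read-only snapshot. A hedged sketch with the same assumed dirfd/srcfd
 * as the v1 example above:
 *
 *	struct btrfs_ioctl_vol_args_v2 args = {
 *		.fd = srcfd,
 *		.flags = BTRFS_SUBVOL_RDONLY,
 *	};
 *	strncpy(args.name, "ro-snap", BTRFS_SUBVOL_NAME_MAX);
 *	ioctl(dirfd, BTRFS_IOC_SNAP_CREATE_V2, &args);
 */
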
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	u64 flags = 0;

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
		return -EINVAL;

	down_read(&fs_info->subvol_sem);
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
	up_read(&fs_info->subvol_sem);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		ret = -EFAULT;

	return ret;
}

static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
					      void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 root_flags;
	u64 flags;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (copy_from_user(&flags, arg, sizeof(flags))) {
		ret = -EFAULT;
		goto out_drop_write;
	}

	if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (flags & ~BTRFS_SUBVOL_RDONLY) {
		ret = -EOPNOTSUPP;
		goto out_drop_write;
	}

	down_write(&fs_info->subvol_sem);

	/* nothing to do */
	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
		goto out_drop_sem;

	root_flags = btrfs_root_flags(&root->root_item);
	if (flags & BTRFS_SUBVOL_RDONLY) {
		btrfs_set_root_flags(&root->root_item,
				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
	} else {
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * send
		 */
		spin_lock(&root->root_item_lock);
		if (root->send_in_progress == 0) {
			btrfs_set_root_flags(&root->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
			spin_unlock(&root->root_item_lock);
		} else {
			spin_unlock(&root->root_item_lock);
			btrfs_warn(fs_info,
			"Attempt to set subvolume %llu read-write during send",
				   root->root_key.objectid);
			ret = -EPERM;
			goto out_drop_sem;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_reset;
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
	if (ret < 0) {
		btrfs_end_transaction(trans);
		goto out_reset;
	}

	ret = btrfs_commit_transaction(trans);

out_reset:
	if (ret)
		btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
	up_write(&fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}
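
/*
 * Example (illustrative sketch, not part of this file): flipping a
 * subvolume between read-only and read-write, as "btrfs property set
 * <path> ro true" does. "fd" must be the subvolume's own root directory,
 * which the BTRFS_FIRST_FREE_OBJECTID check above enforces:
 *
 *	__u64 flags = 0;
 *	ioctl(fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags);
 *	flags |= BTRFS_SUBVOL_RDONLY;
 *	ioctl(fd, BTRFS_IOC_SUBVOL_SETFLAGS, &flags);
 */
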
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;
	return 1;
}

static noinline int copy_to_sk(struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       size_t *buf_size,
			       char __user *ubuf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	struct btrfs_key test;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		if (sizeof(sh) + item_len > *buf_size) {
			if (*num_found) {
				ret = 1;
				goto out;
			}

			/*
			 * return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */

			*buf_size = sizeof(sh) + item_len;
			item_len = 0;
			ret = -EOVERFLOW;
		}

		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
			ret = 1;
			goto out;
		}

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/* copy search result header */
		if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
			ret = -EFAULT;
			goto out;
		}

		*sk_offset += sizeof(sh);

		if (item_len) {
			char __user *up = ubuf + *sk_offset;
			/* copy the item */
			if (read_extent_buffer_to_user(leaf, up,
						       item_off, item_len)) {
				ret = -EFAULT;
				goto out;
			}

			*sk_offset += item_len;
		}
		(*num_found)++;

		if (ret) /* -EOVERFLOW from above */
			goto out;

		if (*num_found >= sk->nr_items) {
			ret = 1;
			goto out;
		}
	}
advance_key:
	ret = 0;
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;
	if (btrfs_comp_cpu_keys(key, &test) >= 0)
		ret = 1;
	else if (key->offset < (u64)-1)
		key->offset++;
	else if (key->type < (u8)-1) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
	/*
	 *  0: all items from this leaf copied, continue with next
	 *  1: * more items can be copied, but unused buffer is too small
	 *     * all items were found
	 *     Either way, it will stop the loop which iterates to the next
	 *     leaf
	 *  -EOVERFLOW: item was too large for buffer
	 *  -EFAULT: could not copy extent buffer back to userspace
	 */
out:
	return ret;
}

static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_key *sk,
				 size_t *buf_size,
				 char __user *ubuf)
{
	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
		*buf_size = sizeof(struct btrfs_ioctl_search_header);
		return -EOVERFLOW;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
		root = BTRFS_I(inode)->root;
	} else {
		key.objectid = sk->tree_id;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(info, &key);
		if (IS_ERR(root)) {
			btrfs_free_path(path);
			return PTR_ERR(root);
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

	while (1) {
		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
		ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
				 &sk_offset, &num_found);
		btrfs_release_path(path);
		if (ret)
			break;
	}
	if (ret > 0)
		ret = 0;
err:
	sk->nr_items = num_found;
	btrfs_free_path(path);
	return ret;
}

static noinline int btrfs_ioctl_tree_search(struct file *file,
					    void __user *argp)
{
	struct btrfs_ioctl_search_args __user *uargs;
	struct btrfs_ioctl_search_key sk;
	struct inode *inode;
	int ret;
	size_t buf_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	uargs = (struct btrfs_ioctl_search_args __user *)argp;

	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
		return -EFAULT;

	buf_size = sizeof(uargs->buf);

	inode = file_inode(file);
	ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);

	/*
	 * In the original implementation an overflow is handled by returning
	 * a search header with a len of zero, so reset ret.
	 */
	if (ret == -EOVERFLOW)
		ret = 0;

	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
		ret = -EFAULT;
	return ret;
}

static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
					       void __user *argp)
{
	struct btrfs_ioctl_search_args_v2 __user *uarg;
	struct btrfs_ioctl_search_args_v2 args;
	struct inode *inode;
	int ret;
	size_t buf_size;
	const size_t buf_limit = SZ_16M;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy search header and buffer size */
	uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	buf_size = args.buf_size;

	/* limit result size to 16MB */
	if (buf_size > buf_limit)
		buf_size = buf_limit;

	inode = file_inode(file);
	ret = search_ioctl(inode, &args.key, &buf_size,
			   (char __user *)(&uarg->buf[0]));
	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
		ret = -EFAULT;
	else if (ret == -EOVERFLOW &&
		copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
		ret = -EFAULT;

	return ret;
}
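
/*
 * Example (illustrative sketch, not part of this file): a minimal
 * BTRFS_IOC_TREE_SEARCH call dumping every ROOT_ITEM in the root tree,
 * roughly what "btrfs subvolume list" builds on. CAP_SYS_ADMIN is
 * required, as checked above. A hedged sketch:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/btrfs.h>
 *	#include <linux/btrfs_tree.h>
 *
 *	struct btrfs_ioctl_search_args args = { 0 };
 *	args.key.tree_id = BTRFS_ROOT_TREE_OBJECTID;
 *	args.key.min_type = args.key.max_type = BTRFS_ROOT_ITEM_KEY;
 *	args.key.max_objectid = (__u64)-1;
 *	args.key.max_offset = (__u64)-1;
 *	args.key.max_transid = (__u64)-1;
 *	args.key.nr_items = 4096;
 *	ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args);
 *	// args.key.nr_items now holds how many headers+items sit in args.buf
 */
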
/*
 * Search INODE_REFs to identify path name of 'dirid' directory
 * in a 'tree_id' tree, and sets the path name in 'name'.
 */
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
				u64 tree_id, u64 dirid, char *name)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	char *ptr;
	int ret = -1;
	int slot;
	int len;
	int total_len = 0;
	struct btrfs_inode_ref *iref;
	struct extent_buffer *l;
	struct btrfs_path *path;

	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
		name[0]='\0';
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];

	key.objectid = tree_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(info, &key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	key.objectid = dirid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		else if (ret > 0) {
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_REF_KEY);
			if (ret < 0)
				goto out;
			else if (ret > 0) {
				ret = -ENOENT;
				goto out;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, slot);

		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(l, iref);
		ptr -= len + 1;
		total_len += len + 1;
		if (ptr < name) {
			ret = -ENAMETOOLONG;
			goto out;
		}

		*(ptr + len) = '/';
		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);

		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
			break;

		btrfs_release_path(path);
		key.objectid = key.offset;
		key.offset = (u64)-1;
		dirid = key.objectid;
	}
	memmove(name, ptr, total_len);
	name[total_len] = '\0';
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_search_path_in_tree_user(struct inode *inode,
				struct btrfs_ioctl_ino_lookup_user_args *args)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct super_block *sb = inode->i_sb;
	struct btrfs_key upper_limit = BTRFS_I(inode)->location;
	u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
	u64 dirid = args->dirid;
	unsigned long item_off;
	unsigned long item_len;
	struct btrfs_inode_ref *iref;
	struct btrfs_root_ref *rref;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key, key2;
	struct extent_buffer *leaf;
	struct inode *temp_inode;
	char *ptr;
	int slot;
	int len;
	int total_len = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * If the bottom subvolume does not exist directly under upper_limit,
	 * construct the path in from the bottom up.
	 */
	if (dirid != upper_limit.objectid) {
		ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];

		key.objectid = treeid;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto out;
		}

		key.objectid = dirid;
		key.type = BTRFS_INODE_REF_KEY;
		key.offset = (u64)-1;
		while (1) {
			ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = btrfs_previous_item(root, path, dirid,
							  BTRFS_INODE_REF_KEY);
				if (ret < 0) {
					goto out;
				} else if (ret > 0) {
					ret = -ENOENT;
					goto out;
				}
			}

			leaf = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(leaf, &key, slot);

			iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref);
			len = btrfs_inode_ref_name_len(leaf, iref);
			ptr -= len + 1;
			total_len += len + 1;
			if (ptr < args->path) {
				ret = -ENAMETOOLONG;
				goto out;
			}

			*(ptr + len) = '/';
			read_extent_buffer(leaf, ptr,
					(unsigned long)(iref + 1), len);

			/* Check the read+exec permission of this directory */
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_ITEM_KEY);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = -ENOENT;
				goto out;
			}

			leaf = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(leaf, &key2, slot);
			if (key2.objectid != dirid) {
				ret = -ENOENT;
				goto out;
			}

			temp_inode = btrfs_iget(sb, &key2, root, NULL);
			if (IS_ERR(temp_inode)) {
				ret = PTR_ERR(temp_inode);
				goto out;
			}
			ret = inode_permission(temp_inode, MAY_READ | MAY_EXEC);
			iput(temp_inode);
			if (ret) {
				ret = -EACCES;
				goto out;
			}

			if (key.offset == upper_limit.objectid)
				break;
			if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
				ret = -EACCES;
				goto out;
			}

			btrfs_release_path(path);
			key.objectid = key.offset;
			key.offset = (u64)-1;
			dirid = key.objectid;
		}

		memmove(args->path, ptr, total_len);
		args->path[total_len] = '\0';
		btrfs_release_path(path);
	}

	/* Get the bottom subvolume's name from ROOT_REF */
	root = fs_info->tree_root;
	key.objectid = treeid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = args->treeid;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_off = btrfs_item_ptr_offset(leaf, slot);
	item_len = btrfs_item_size_nr(leaf, slot);
	/* Check if dirid in ROOT_REF corresponds to passed dirid */
	rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
	if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy subvolume's name */
	item_off += sizeof(struct btrfs_root_ref);
	item_len -= sizeof(struct btrfs_root_ref);
	read_extent_buffer(leaf, args->name, item_off, item_len);
	args->name[item_len] = 0;

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int btrfs_ioctl_ino_lookup(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_args *args;
	struct inode *inode;
	int ret = 0;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	/*
	 * Unprivileged query to obtain the containing subvolume root id. The
	 * path is reset so it's consistent with btrfs_search_path_in_tree.
	 */
	if (args->treeid == 0)
		args->treeid = BTRFS_I(inode)->root->root_key.objectid;

	if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
		args->name[0] = 0;
		goto out;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
					args->treeid, args->objectid,
					args->name);

out:
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}
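
/*
 * Example (illustrative sketch, not part of this file): the unprivileged
 * treeid==0 / objectid==BTRFS_FIRST_FREE_OBJECTID form above is how
 * "btrfs inspect-internal rootid" finds the id of the containing
 * subvolume without CAP_SYS_ADMIN. A hedged sketch:
 *
 *	struct btrfs_ioctl_ino_lookup_args args = {
 *		.treeid = 0,				// "fill in for me"
 *		.objectid = BTRFS_FIRST_FREE_OBJECTID,	// skip path lookup
 *	};
 *	if (ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args) == 0)
 *		printf("subvolume id %llu\n",
 *		       (unsigned long long)args.treeid);
 */
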
/*
 * Version of ino_lookup ioctl (unprivileged)
 *
 * The main differences from ino_lookup ioctl are:
 *
 *   1. Read + Exec permission will be checked using inode_permission() during
 *      path construction. -EACCES will be returned in case of failure.
 *   2. Path construction will be stopped at the inode number which corresponds
 *      to the fd with which this ioctl is called. If constructed path does not
 *      exist under fd's inode, -EACCES will be returned.
 *   3. The name of bottom subvolume is also searched and filled.
 */
static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_user_args *args;
	struct inode *inode;
	int ret;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
	    BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * The subvolume does not exist under fd with which this is
		 * called
		 */
		kfree(args);
		return -EACCES;
	}

	ret = btrfs_search_path_in_tree_user(inode, args);

	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}

2593 /* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */
2594 static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
2596 struct btrfs_ioctl_get_subvol_info_args *subvol_info;
2597 struct btrfs_fs_info *fs_info;
2598 struct btrfs_root *root;
2599 struct btrfs_path *path;
2600 struct btrfs_key key;
2601 struct btrfs_root_item *root_item;
2602 struct btrfs_root_ref *rref;
2603 struct extent_buffer *leaf;
2604 unsigned long item_off;
2605 unsigned long item_len;
2606 struct inode *inode;
2610 path = btrfs_alloc_path();
2614 subvol_info = kzalloc(sizeof(*subvol_info), GFP_KERNEL);
2616 btrfs_free_path(path);
2620 inode = file_inode(file);
2621 fs_info = BTRFS_I(inode)->root->fs_info;
2623 /* Get root_item of inode's subvolume */
2624 key.objectid = BTRFS_I(inode)->root->root_key.objectid;
2625 key.type = BTRFS_ROOT_ITEM_KEY;
2626 key.offset = (u64)-1;
2627 root = btrfs_read_fs_root_no_name(fs_info, &key);
2629 ret = PTR_ERR(root);
2632 root_item = &root->root_item;
2634 subvol_info->treeid = key.objectid;
2636 subvol_info->generation = btrfs_root_generation(root_item);
2637 subvol_info->flags = btrfs_root_flags(root_item);
2639 memcpy(subvol_info->uuid, root_item->uuid, BTRFS_UUID_SIZE);
2640 memcpy(subvol_info->parent_uuid, root_item->parent_uuid,
2642 memcpy(subvol_info->received_uuid, root_item->received_uuid,
2645 subvol_info->ctransid = btrfs_root_ctransid(root_item);
2646 subvol_info->ctime.sec = btrfs_stack_timespec_sec(&root_item->ctime);
2647 subvol_info->ctime.nsec = btrfs_stack_timespec_nsec(&root_item->ctime);
2649 subvol_info->otransid = btrfs_root_otransid(root_item);
2650 subvol_info->otime.sec = btrfs_stack_timespec_sec(&root_item->otime);
2651 subvol_info->otime.nsec = btrfs_stack_timespec_nsec(&root_item->otime);
2653 subvol_info->stransid = btrfs_root_stransid(root_item);
2654 subvol_info->stime.sec = btrfs_stack_timespec_sec(&root_item->stime);
2655 subvol_info->stime.nsec = btrfs_stack_timespec_nsec(&root_item->stime);
2657 subvol_info->rtransid = btrfs_root_rtransid(root_item);
2658 subvol_info->rtime.sec = btrfs_stack_timespec_sec(&root_item->rtime);
2659 subvol_info->rtime.nsec = btrfs_stack_timespec_nsec(&root_item->rtime);
2661 if (key.objectid != BTRFS_FS_TREE_OBJECTID) {
2662 /* Search root tree for ROOT_BACKREF of this subvolume */
2663 root = fs_info->tree_root;
2665 key.type = BTRFS_ROOT_BACKREF_KEY;
2667 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2670 } else if (path->slots[0] >=
2671 btrfs_header_nritems(path->nodes[0])) {
2672 ret = btrfs_next_leaf(root, path);
2675 } else if (ret > 0) {
2681 leaf = path->nodes[0];
2682 slot = path->slots[0];
2683 btrfs_item_key_to_cpu(leaf, &key, slot);
2684 if (key.objectid == subvol_info->treeid &&
2685 key.type == BTRFS_ROOT_BACKREF_KEY) {
2686 subvol_info->parent_id = key.offset;
2688 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2689 subvol_info->dirid = btrfs_root_ref_dirid(leaf, rref);
2691 item_off = btrfs_item_ptr_offset(leaf, slot)
2692 + sizeof(struct btrfs_root_ref);
2693 item_len = btrfs_item_size_nr(leaf, slot)
2694 - sizeof(struct btrfs_root_ref);
2695 read_extent_buffer(leaf, subvol_info->name,
2696 item_off, item_len);
2703 if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
2707 btrfs_free_path(path);
2708 kzfree(subvol_info);
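/*
 * Illustrative userspace sketch (not kernel code): read back the root item
 * data filled in above. The struct and ioctl names follow the btrfs UAPI
 * header; treat them as assumptions:
 *
 *	struct btrfs_ioctl_get_subvol_info_args info;
 *
 *	if (ioctl(fd, BTRFS_IOC_GET_SUBVOL_INFO, &info) == 0)
 *		printf("subvol %llu gen %llu parent %llu name %s\n",
 *		       info.treeid, info.generation, info.parent_id,
 *		       info.name);
 */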
/*
 * Return ROOT_REF information of the subvolume containing this inode,
 * except for the subvolume name.
 */
2716 static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
2718 struct btrfs_ioctl_get_subvol_rootref_args *rootrefs;
2719 struct btrfs_root_ref *rref;
2720 struct btrfs_root *root;
2721 struct btrfs_path *path;
2722 struct btrfs_key key;
2723 struct extent_buffer *leaf;
2724 struct inode *inode;
2730 path = btrfs_alloc_path();
2734 rootrefs = memdup_user(argp, sizeof(*rootrefs));
2735 if (IS_ERR(rootrefs)) {
2736 btrfs_free_path(path);
2737 return PTR_ERR(rootrefs);
2740 inode = file_inode(file);
2741 root = BTRFS_I(inode)->root->fs_info->tree_root;
2742 objectid = BTRFS_I(inode)->root->root_key.objectid;
2744 key.objectid = objectid;
2745 key.type = BTRFS_ROOT_REF_KEY;
2746 key.offset = rootrefs->min_treeid;
2749 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2752 } else if (path->slots[0] >=
2753 btrfs_header_nritems(path->nodes[0])) {
2754 ret = btrfs_next_leaf(root, path);
2757 } else if (ret > 0) {
2763 leaf = path->nodes[0];
2764 slot = path->slots[0];
2766 btrfs_item_key_to_cpu(leaf, &key, slot);
2767 if (key.objectid != objectid || key.type != BTRFS_ROOT_REF_KEY) {
2772 if (found == BTRFS_MAX_ROOTREF_BUFFER_NUM) {
2777 rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
2778 rootrefs->rootref[found].treeid = key.offset;
2779 rootrefs->rootref[found].dirid =
2780 btrfs_root_ref_dirid(leaf, rref);
2783 ret = btrfs_next_item(root, path);
2786 } else if (ret > 0) {
2793 if (!ret || ret == -EOVERFLOW) {
2794 rootrefs->num_items = found;
2795 /* update min_treeid for next search */
2797 rootrefs->min_treeid =
2798 rootrefs->rootref[found - 1].treeid + 1;
2799 if (copy_to_user(argp, rootrefs, sizeof(*rootrefs)))
2804 btrfs_free_path(path);
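/*
 * Illustrative userspace sketch (not kernel code): since the ioctl stops
 * with -EOVERFLOW when the rootref buffer fills up and writes the updated
 * min_treeid back to the args, callers are expected to resubmit until all
 * entries have been returned (field names per the btrfs UAPI header):
 *
 *	struct btrfs_ioctl_get_subvol_rootref_args rr = { .min_treeid = 0 };
 *	int err;
 *
 *	do {
 *		err = ioctl(fd, BTRFS_IOC_GET_SUBVOL_ROOTREF, &rr);
 *		for (int i = 0; i < rr.num_items; i++)
 *			printf("treeid %llu dirid %llu\n",
 *			       rr.rootref[i].treeid, rr.rootref[i].dirid);
 *	} while (err == -1 && errno == EOVERFLOW);
 */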
2809 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2812 struct dentry *parent = file->f_path.dentry;
2813 struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
2814 struct dentry *dentry;
2815 struct inode *dir = d_inode(parent);
2816 struct inode *inode;
2817 struct btrfs_root *root = BTRFS_I(dir)->root;
2818 struct btrfs_root *dest = NULL;
2819 struct btrfs_ioctl_vol_args *vol_args;
2823 if (!S_ISDIR(dir->i_mode))
2826 vol_args = memdup_user(arg, sizeof(*vol_args));
2827 if (IS_ERR(vol_args))
2828 return PTR_ERR(vol_args);
2830 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2831 namelen = strlen(vol_args->name);
2832 if (strchr(vol_args->name, '/') ||
2833 strncmp(vol_args->name, "..", namelen) == 0) {
2838 err = mnt_want_write_file(file);
2843 err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
2845 goto out_drop_write;
2846 dentry = lookup_one_len(vol_args->name, parent, namelen);
2847 if (IS_ERR(dentry)) {
2848 err = PTR_ERR(dentry);
2849 goto out_unlock_dir;
2852 if (d_really_is_negative(dentry)) {
2857 inode = d_inode(dentry);
2858 dest = BTRFS_I(inode)->root;
2859 if (!capable(CAP_SYS_ADMIN)) {
		/*
		 * Regular user.  Only allow this with a special mount
		 * option, when the user has write+exec access to the
		 * subvol root, and when rmdir(2) would have been
		 * allowed.
		 *
		 * Note that this is _not_ a check that the subvol is
		 * empty or doesn't contain data that we wouldn't
		 * otherwise be able to delete.
		 *
		 * Users who want to delete empty subvols should try
		 * rmdir(2).
		 */
2874 if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
		/*
		 * Do not allow deletion if the parent dir is the same
		 * as the dir to be deleted.  That means the ioctl
		 * must be called on the dentry referencing the root
		 * of the subvol, not a random directory contained
		 * within it.
		 */
2888 err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
2893 /* check if subvolume may be deleted by a user */
2894 err = btrfs_may_delete(dir, dentry, 1);
2898 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
2904 err = btrfs_delete_subvolume(dir, dentry);
2905 inode_unlock(inode);
2914 mnt_drop_write_file(file);
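/*
 * Illustrative userspace sketch (not kernel code): destroy a snapshot or
 * subvolume by name, calling the ioctl on an fd of the parent directory:
 *
 *	struct btrfs_ioctl_vol_args args = { 0 };
 *	int dirfd = open("/mnt", O_RDONLY | O_DIRECTORY);
 *
 *	strncpy(args.name, "snap1", BTRFS_PATH_NAME_MAX);
 *	ioctl(dirfd, BTRFS_IOC_SNAP_DESTROY, &args);
 */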
2920 static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
2922 struct inode *inode = file_inode(file);
2923 struct btrfs_root *root = BTRFS_I(inode)->root;
2924 struct btrfs_ioctl_defrag_range_args *range;
2927 ret = mnt_want_write_file(file);
2931 if (btrfs_root_readonly(root)) {
2936 switch (inode->i_mode & S_IFMT) {
2938 if (!capable(CAP_SYS_ADMIN)) {
2942 ret = btrfs_defrag_root(root);
2946 * Note that this does not check the file descriptor for write
2947 * access. This prevents defragmenting executables that are
2948 * running and allows defrag on files open in read-only mode.
2950 if (!capable(CAP_SYS_ADMIN) &&
2951 inode_permission(inode, MAY_WRITE)) {
2956 range = kzalloc(sizeof(*range), GFP_KERNEL);
2963 if (copy_from_user(range, argp,
2969 /* compression requires us to start the IO */
2970 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
2971 range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
2972 range->extent_thresh = (u32)-1;
2975 /* the rest are all set to zero by kzalloc */
2976 range->len = (u64)-1;
2978 ret = btrfs_defrag_file(file_inode(file), file,
2979 range, BTRFS_OLDEST_GENERATION, 0);
2988 mnt_drop_write_file(file);
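/*
 * Illustrative userspace sketch (not kernel code): request a compressing
 * defrag of a whole file. The kernel forces START_IO and a maximal extent
 * threshold for the compression case, as done above. The numeric
 * compress_type value is an assumption mirrored from the kernel's
 * compression enum:
 *
 *	struct btrfs_ioctl_defrag_range_args range = { 0 };
 *
 *	range.len = (__u64)-1;
 *	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
 *	range.compress_type = 1;	// 1 == zlib (assumption)
 *	ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
 */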
2992 static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
2994 struct btrfs_ioctl_vol_args *vol_args;
2997 if (!capable(CAP_SYS_ADMIN))
3000 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
3001 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3003 vol_args = memdup_user(arg, sizeof(*vol_args));
3004 if (IS_ERR(vol_args)) {
3005 ret = PTR_ERR(vol_args);
3009 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3010 ret = btrfs_init_new_device(fs_info, vol_args->name);
3013 btrfs_info(fs_info, "disk added %s", vol_args->name);
3017 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
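/*
 * Illustrative userspace sketch (not kernel code): add a device to a
 * mounted filesystem by passing its device node path in vol_args->name.
 * mnt_fd can be any fd on the mounted filesystem (typically the mount
 * point directory):
 *
 *	struct btrfs_ioctl_vol_args args = { 0 };
 *
 *	strncpy(args.name, "/dev/sdb1", BTRFS_PATH_NAME_MAX);
 *	ioctl(mnt_fd, BTRFS_IOC_ADD_DEV, &args);
 */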
3021 static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
3023 struct inode *inode = file_inode(file);
3024 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3025 struct btrfs_ioctl_vol_args_v2 *vol_args;
3028 if (!capable(CAP_SYS_ADMIN))
3031 ret = mnt_want_write_file(file);
3035 vol_args = memdup_user(arg, sizeof(*vol_args));
3036 if (IS_ERR(vol_args)) {
3037 ret = PTR_ERR(vol_args);
	/* Check for compatibility: reject unknown flags */
3042 if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
3047 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
3048 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3052 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
3053 ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
3055 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
3056 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
3058 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3061 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
3062 btrfs_info(fs_info, "device deleted: id %llu",
3065 btrfs_info(fs_info, "device deleted: %s",
3071 mnt_drop_write_file(file);
3075 static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
3077 struct inode *inode = file_inode(file);
3078 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3079 struct btrfs_ioctl_vol_args *vol_args;
3082 if (!capable(CAP_SYS_ADMIN))
3085 ret = mnt_want_write_file(file);
3089 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
3090 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3091 goto out_drop_write;
3094 vol_args = memdup_user(arg, sizeof(*vol_args));
3095 if (IS_ERR(vol_args)) {
3096 ret = PTR_ERR(vol_args);
3100 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3101 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
3104 btrfs_info(fs_info, "disk deleted %s", vol_args->name);
3107 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3109 mnt_drop_write_file(file);
3114 static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
3117 struct btrfs_ioctl_fs_info_args *fi_args;
3118 struct btrfs_device *device;
3119 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3122 fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
3127 fi_args->num_devices = fs_devices->num_devices;
3129 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
3130 if (device->devid > fi_args->max_id)
3131 fi_args->max_id = device->devid;
3135 memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
3136 fi_args->nodesize = fs_info->nodesize;
3137 fi_args->sectorsize = fs_info->sectorsize;
3138 fi_args->clone_alignment = fs_info->sectorsize;
3140 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
3147 static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
3150 struct btrfs_ioctl_dev_info_args *di_args;
3151 struct btrfs_device *dev;
3153 char *s_uuid = NULL;
3155 di_args = memdup_user(arg, sizeof(*di_args));
3156 if (IS_ERR(di_args))
3157 return PTR_ERR(di_args);
3159 if (!btrfs_is_empty_uuid(di_args->uuid))
3160 s_uuid = di_args->uuid;
3163 dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);
3170 di_args->devid = dev->devid;
3171 di_args->bytes_used = btrfs_device_get_bytes_used(dev);
3172 di_args->total_bytes = btrfs_device_get_total_bytes(dev);
3173 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
3175 strncpy(di_args->path, rcu_str_deref(dev->name),
3176 sizeof(di_args->path) - 1);
3177 di_args->path[sizeof(di_args->path) - 1] = 0;
3179 di_args->path[0] = '\0';
3184 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
3191 static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
3195 page = grab_cache_page(inode->i_mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);
3199 if (!PageUptodate(page)) {
3202 ret = btrfs_readpage(NULL, page);
3204 return ERR_PTR(ret);
3206 if (!PageUptodate(page)) {
3209 return ERR_PTR(-EIO);
3211 if (page->mapping != inode->i_mapping) {
3214 return ERR_PTR(-EAGAIN);
3221 static int gather_extent_pages(struct inode *inode, struct page **pages,
3222 int num_pages, u64 off)
3225 pgoff_t index = off >> PAGE_SHIFT;
3227 for (i = 0; i < num_pages; i++) {
3229 pages[i] = extent_same_get_page(inode, index + i);
3230 if (IS_ERR(pages[i])) {
3231 int err = PTR_ERR(pages[i]);
3242 static int lock_extent_range(struct inode *inode, u64 off, u64 len,
3243 bool retry_range_locking)
	/*
	 * Do any pending delalloc/csum calculations on inode, one way or
	 * another, and lock file content.
	 * The locking order is:
	 *
	 *   1) pages
	 *   2) range in the inode's io tree
	 */
3254 struct btrfs_ordered_extent *ordered;
3255 lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
3256 ordered = btrfs_lookup_first_ordered_extent(inode,
3259 ordered->file_offset + ordered->len <= off ||
3260 ordered->file_offset >= off + len) &&
3261 !test_range_bit(&BTRFS_I(inode)->io_tree, off,
3262 off + len - 1, EXTENT_DELALLOC, 0, NULL)) {
3264 btrfs_put_ordered_extent(ordered);
3267 unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
3269 btrfs_put_ordered_extent(ordered);
3270 if (!retry_range_locking)
3272 btrfs_wait_ordered_range(inode, off, len);
3277 static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
3279 inode_unlock(inode1);
3280 inode_unlock(inode2);
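/*
 * Lock two inodes in a fixed order derived from their addresses, so that
 * concurrent clone/dedupe operations on the same pair of inodes cannot take
 * the two locks in opposite orders and deadlock (the classic ABBA problem).
 * The I_MUTEX_PARENT/I_MUTEX_CHILD nesting annotations tell lockdep the two
 * acquisitions are intentionally nested. The matching unlock above does not
 * need any particular order.
 */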
3283 static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
3285 if (inode1 < inode2)
3286 swap(inode1, inode2);
3288 inode_lock_nested(inode1, I_MUTEX_PARENT);
3289 inode_lock_nested(inode2, I_MUTEX_CHILD);
3292 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
3293 struct inode *inode2, u64 loff2, u64 len)
3295 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
3296 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
3299 static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
3300 struct inode *inode2, u64 loff2, u64 len,
3301 bool retry_range_locking)
3305 if (inode1 < inode2) {
3306 swap(inode1, inode2);
3309 ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
3312 ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
3314 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
3321 struct page **src_pages;
3322 struct page **dst_pages;
3325 static void btrfs_cmp_data_free(struct cmp_pages *cmp)
3330 for (i = 0; i < cmp->num_pages; i++) {
3331 pg = cmp->src_pages[i];
3335 cmp->src_pages[i] = NULL;
3337 pg = cmp->dst_pages[i];
3341 cmp->dst_pages[i] = NULL;
3346 static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
3347 struct inode *dst, u64 dst_loff,
3348 u64 len, struct cmp_pages *cmp)
3351 int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
3353 cmp->num_pages = num_pages;
3355 ret = gather_extent_pages(src, cmp->src_pages, num_pages, loff);
3359 ret = gather_extent_pages(dst, cmp->dst_pages, num_pages, dst_loff);
3363 btrfs_cmp_data_free(cmp);
3367 static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
3371 struct page *src_page, *dst_page;
3372 unsigned int cmp_len = PAGE_SIZE;
3373 void *addr, *dst_addr;
3377 if (len < PAGE_SIZE)
3380 BUG_ON(i >= cmp->num_pages);
3382 src_page = cmp->src_pages[i];
3383 dst_page = cmp->dst_pages[i];
3384 ASSERT(PageLocked(src_page));
3385 ASSERT(PageLocked(dst_page));
3387 addr = kmap_atomic(src_page);
3388 dst_addr = kmap_atomic(dst_page);
3390 flush_dcache_page(src_page);
3391 flush_dcache_page(dst_page);
3393 if (memcmp(addr, dst_addr, cmp_len))
3396 kunmap_atomic(addr);
3397 kunmap_atomic(dst_addr);
3409 static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
3413 u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;
	if (off + olen > inode->i_size || off + olen < off)
		return -EINVAL;
3418 /* if we extend to eof, continue to block boundary */
3419 if (off + len == inode->i_size)
3420 *plen = len = ALIGN(inode->i_size, bs) - off;
3422 /* Check that we are block aligned - btrfs_clone() requires this */
	if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
		return -EINVAL;

	return 0;
}
3429 static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3430 struct inode *dst, u64 dst_loff,
3431 struct cmp_pages *cmp)
3435 bool same_inode = (src == dst);
3436 u64 same_lock_start = 0;
3437 u64 same_lock_len = 0;
3439 ret = extent_same_check_offsets(src, loff, &len, olen);
3443 ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
		/*
		 * Single inode case wants the same checks, except we
		 * don't want our length pushed out past i_size as
		 * comparing that data range makes no sense.
		 *
		 * extent_same_check_offsets() will do this for an
		 * unaligned length at i_size, so catch it here and
		 * reject the request.
		 *
		 * This effectively means we require aligned extents
		 * for the single-inode case, whereas the other cases
		 * allow an unaligned length so long as it ends at
		 * i_size.
		 */
3465 /* Check for overlapping ranges */
3466 if (dst_loff + len > loff && dst_loff < loff + len)
3469 same_lock_start = min_t(u64, loff, dst_loff);
3470 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
	/*
	 * If the source and destination inodes are different, the
	 * source's range end offset matches the source's i_size, that
	 * i_size is not a multiple of the sector size, and the
	 * destination range does not go past the destination's i_size,
	 * we must round down the length to the nearest sector size
	 * multiple.  If we don't do this adjustment we end up replacing
	 * with zeroes the bytes in the range that starts at the
	 * deduplication range's end offset and ends at the next sector
	 * size boundary.
	 */
3484 dst_loff + len < i_size_read(dst)) {
3485 const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
3487 len = round_down(i_size_read(src), sz) - loff;
3493 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, cmp);
3498 ret = lock_extent_range(src, same_lock_start, same_lock_len,
3501 ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
	/*
	 * If one of the inodes has dirty pages in the respective range or
	 * ordered extents, we need to flush delalloc and wait for all ordered
	 * extents in the range.  We must unlock the pages and the ranges in
	 * the io trees to avoid deadlocks when flushing delalloc (requires
	 * locking pages) and when waiting for ordered extents to complete
	 * (they require page locks).
	 */
3511 if (ret == -EAGAIN) {
		/*
		 * Ranges in the io trees already unlocked.  Now unlock all
		 * pages before waiting for all IO to complete.
		 */
3516 btrfs_cmp_data_free(cmp);
3518 btrfs_wait_ordered_range(src, same_lock_start,
3521 btrfs_wait_ordered_range(src, loff, len);
3522 btrfs_wait_ordered_range(dst, dst_loff, len);
3528 /* ranges in the io trees already unlocked */
3529 btrfs_cmp_data_free(cmp);
3533 /* pass original length for comparison so we stay within i_size */
3534 ret = btrfs_cmp_data(olen, cmp);
3536 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
3539 unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
3540 same_lock_start + same_lock_len - 1);
3542 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
3544 btrfs_cmp_data_free(cmp);
3549 #define BTRFS_MAX_DEDUPE_LEN SZ_16M
3551 static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
3552 struct inode *dst, u64 dst_loff)
3555 struct cmp_pages cmp;
3556 int num_pages = PAGE_ALIGN(BTRFS_MAX_DEDUPE_LEN) >> PAGE_SHIFT;
3557 bool same_inode = (src == dst);
3558 u64 i, tail_len, chunk_count;
3566 btrfs_double_inode_lock(src, dst);
3568 /* don't make the dst file partly checksummed */
3569 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3570 (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
3575 tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
3576 chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
3577 if (chunk_count == 0)
3578 num_pages = PAGE_ALIGN(tail_len) >> PAGE_SHIFT;
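	/*
	 * For example (BTRFS_MAX_DEDUPE_LEN is 16M, per the definition
	 * above): olen == 40M gives chunk_count == 2 and tail_len == 8M, so
	 * the main loop below runs twice over 16M chunks and one final pass
	 * handles the 8M tail. With olen < 16M, chunk_count == 0 and the
	 * page arrays are sized for the tail alone.
	 */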
	/*
	 * If deduping ranges in the same inode, locking rules make it
	 * mandatory to always lock pages in ascending order to avoid deadlocks
	 * with concurrent tasks (such as starting writeback/delalloc).
	 */
3585 if (same_inode && dst_loff < loff)
3586 swap(loff, dst_loff);
	/*
	 * We must gather up all the pages before we initiate our extent
	 * locking.  We use an array for the page pointers.  Size of the array
	 * is bounded by len, which is in turn bounded by BTRFS_MAX_DEDUPE_LEN.
	 */
3593 cmp.src_pages = kvmalloc_array(num_pages, sizeof(struct page *),
3594 GFP_KERNEL | __GFP_ZERO);
3595 cmp.dst_pages = kvmalloc_array(num_pages, sizeof(struct page *),
3596 GFP_KERNEL | __GFP_ZERO);
3597 if (!cmp.src_pages || !cmp.dst_pages) {
3602 for (i = 0; i < chunk_count; i++) {
3603 ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
3604 dst, dst_loff, &cmp);
3608 loff += BTRFS_MAX_DEDUPE_LEN;
3609 dst_loff += BTRFS_MAX_DEDUPE_LEN;
3613 ret = btrfs_extent_same_range(src, loff, tail_len, dst,
3617 kvfree(cmp.src_pages);
3618 kvfree(cmp.dst_pages);
3624 btrfs_double_inode_unlock(src, dst);
3629 int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff,
3630 struct file *dst_file, loff_t dst_loff,
3633 struct inode *src = file_inode(src_file);
3634 struct inode *dst = file_inode(dst_file);
3635 u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
3637 if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
3639 * Btrfs does not support blocksize < page_size. As a
3640 * result, btrfs_cmp_data() won't correctly handle
3641 * this situation without an update.
3646 return btrfs_extent_same(src, src_loff, olen, dst, dst_loff);
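/*
 * Illustrative userspace sketch (not kernel code): this function is reached
 * through the VFS dedupe hook, e.g. via the generic FIDEDUPERANGE ioctl
 * (struct file_dedupe_range from linux/fs.h):
 *
 *	struct file_dedupe_range *r;
 *
 *	r = calloc(1, sizeof(*r) + sizeof(struct file_dedupe_range_info));
 *	r->src_offset = 0;
 *	r->src_length = 65536;
 *	r->dest_count = 1;
 *	r->info[0].dest_fd = dst_fd;
 *	r->info[0].dest_offset = 0;
 *	ioctl(src_fd, FIDEDUPERANGE, r);
 *	// r->info[0].status and bytes_deduped report the per-dest outcome
 */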
3649 static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
3650 struct inode *inode,
3656 struct btrfs_root *root = BTRFS_I(inode)->root;
3659 inode_inc_iversion(inode);
3660 if (!no_time_update)
3661 inode->i_mtime = inode->i_ctime = current_time(inode);
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
3666 if (endoff > destoff + olen)
3667 endoff = destoff + olen;
3668 if (endoff > inode->i_size)
3669 btrfs_i_size_write(BTRFS_I(inode), endoff);
3671 ret = btrfs_update_inode(trans, root, inode);
3673 btrfs_abort_transaction(trans, ret);
3674 btrfs_end_transaction(trans);
3677 ret = btrfs_end_transaction(trans);
3682 static void clone_update_extent_map(struct btrfs_inode *inode,
3683 const struct btrfs_trans_handle *trans,
3684 const struct btrfs_path *path,
3685 const u64 hole_offset,
3688 struct extent_map_tree *em_tree = &inode->extent_tree;
3689 struct extent_map *em;
3692 em = alloc_extent_map();
3694 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
3699 struct btrfs_file_extent_item *fi;
3701 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
3702 struct btrfs_file_extent_item);
3703 btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
3704 em->generation = -1;
3705 if (btrfs_file_extent_type(path->nodes[0], fi) ==
3706 BTRFS_FILE_EXTENT_INLINE)
3707 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3708 &inode->runtime_flags);
3710 em->start = hole_offset;
3712 em->ram_bytes = em->len;
3713 em->orig_start = hole_offset;
3714 em->block_start = EXTENT_MAP_HOLE;
3716 em->orig_block_len = 0;
3717 em->compress_type = BTRFS_COMPRESS_NONE;
3718 em->generation = trans->transid;
3722 write_lock(&em_tree->lock);
3723 ret = add_extent_mapping(em_tree, em, 1);
3724 write_unlock(&em_tree->lock);
3725 if (ret != -EEXIST) {
3726 free_extent_map(em);
3729 btrfs_drop_extent_cache(inode, em->start,
3730 em->start + em->len - 1, 0);
3734 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
/*
 * Make sure we do not end up inserting an inline extent into a file that
 * already has other (non-inline) extents.  If a file has an inline extent it
 * cannot have any other extents and the (single) inline extent must start at
 * file offset 0.  Failing to respect these rules will lead to file corruption,
 * resulting in EIO errors on read/write operations, hitting BUG_ON()s in mm,
 * etc.
 *
 * We can have extents that have been already written to disk or we can have
 * dirty ranges still in delalloc, in which case the extent maps and items are
 * created only when we run delalloc, and the delalloc ranges might fall outside
 * the range we are currently locking in the inode's io tree.  So we check the
 * inode's i_size because of that (i_size updates are done while holding the
 * i_mutex, which we are holding here).
 * We also check to see if the inode has a size not greater than "datal" but
 * has extents beyond it, due to a fallocate call with FALLOC_FL_KEEP_SIZE (and
 * we are protected against such concurrent fallocate calls by the i_mutex).
 *
 * If the file has no extents but a size greater than datal, do not allow the
 * copy because we would need to turn the inline extent into a non-inline one
 * (even with NO_HOLES enabled).  If we find our destination inode only has one
 * inline extent, just overwrite it with the source inline extent if its size
 * is less than the source extent's size, or we could copy the source inline
 * extent's data into the destination inode's inline extent if the latter is
 * greater than the former.
 */
3762 static int clone_copy_inline_extent(struct inode *dst,
3763 struct btrfs_trans_handle *trans,
3764 struct btrfs_path *path,
3765 struct btrfs_key *new_key,
3766 const u64 drop_start,
3772 struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
3773 struct btrfs_root *root = BTRFS_I(dst)->root;
3774 const u64 aligned_end = ALIGN(new_key->offset + datal,
3775 fs_info->sectorsize);
3777 struct btrfs_key key;
3779 if (new_key->offset > 0)
3782 key.objectid = btrfs_ino(BTRFS_I(dst));
3783 key.type = BTRFS_EXTENT_DATA_KEY;
3785 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3788 } else if (ret > 0) {
3789 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3790 ret = btrfs_next_leaf(root, path);
3794 goto copy_inline_extent;
3796 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3797 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3798 key.type == BTRFS_EXTENT_DATA_KEY) {
3799 ASSERT(key.offset > 0);
3802 } else if (i_size_read(dst) <= datal) {
3803 struct btrfs_file_extent_item *ei;
		/*
		 * If the file size is <= datal, make sure there are no other
		 * extents following (can happen due to a fallocate call with
		 * the flag FALLOC_FL_KEEP_SIZE).
		 */
3811 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3812 struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent, it cannot have other extents
		 * following it.
		 */
3817 if (btrfs_file_extent_type(path->nodes[0], ei) ==
3818 BTRFS_FILE_EXTENT_INLINE)
3819 goto copy_inline_extent;
3821 ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
3822 if (ext_len > aligned_end)
3825 ret = btrfs_next_item(root, path);
3828 } else if (ret == 0) {
3829 btrfs_item_key_to_cpu(path->nodes[0], &key,
3831 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3832 key.type == BTRFS_EXTENT_DATA_KEY)
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined.  All these cases are dealt with the same way.
	 */
3842 if (i_size_read(dst) > datal) {
		/*
		 * If the destination inode has an inline extent, cloning would
		 * require copying the data from the source inline extent into
		 * the beginning of the destination's inline extent.  But this
		 * is really complex: both extents can be compressed, or just
		 * one of them, which would require decompressing and
		 * re-compressing the data (and that could increase the new
		 * compressed size, not allowing the compressed data to fit
		 * anymore in an inline extent).
		 * So just don't support this case for now (it should be rare,
		 * we are not really saving space when cloning inline extents).
		 */
3858 btrfs_release_path(path);
3859 ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
3862 ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
3867 const u32 start = btrfs_file_extent_calc_inline_size(0);
3869 memmove(inline_data + start, inline_data + start + skip, datal);
3872 write_extent_buffer(path->nodes[0], inline_data,
3873 btrfs_item_ptr_offset(path->nodes[0],
3876 inode_add_bytes(dst, datal);
/**
 * btrfs_clone() - clone a range from one file's inode to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: If nonzero, do not update mtime/ctime on the target inode
 */
3892 static int btrfs_clone(struct inode *src, struct inode *inode,
3893 const u64 off, const u64 olen, const u64 olen_aligned,
3894 const u64 destoff, int no_time_update)
3896 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3897 struct btrfs_root *root = BTRFS_I(inode)->root;
3898 struct btrfs_path *path = NULL;
3899 struct extent_buffer *leaf;
3900 struct btrfs_trans_handle *trans;
3902 struct btrfs_key key;
3906 const u64 len = olen_aligned;
3907 u64 last_dest_end = destoff;
3910 buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
3914 path = btrfs_alloc_path();
3920 path->reada = READA_FORWARD;
3922 key.objectid = btrfs_ino(BTRFS_I(src));
3923 key.type = BTRFS_EXTENT_DATA_KEY;
3927 u64 next_key_min_offset = key.offset + 1;
		/*
		 * Note: the key will change type as we walk through the tree.
		 */
3933 path->leave_spinning = 1;
3934 ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
		/*
		 * First search: if no extent item that starts at offset off
		 * was found but the previous item is an extent item, it's
		 * possible it might overlap our target range, therefore
		 * process it.
		 */
3943 if (key.offset == off && ret > 0 && path->slots[0] > 0) {
3944 btrfs_item_key_to_cpu(path->nodes[0], &key,
3945 path->slots[0] - 1);
3946 if (key.type == BTRFS_EXTENT_DATA_KEY)
3950 nritems = btrfs_header_nritems(path->nodes[0]);
3952 if (path->slots[0] >= nritems) {
3953 ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
3958 nritems = btrfs_header_nritems(path->nodes[0]);
3960 leaf = path->nodes[0];
3961 slot = path->slots[0];
3963 btrfs_item_key_to_cpu(leaf, &key, slot);
3964 if (key.type > BTRFS_EXTENT_DATA_KEY ||
3965 key.objectid != btrfs_ino(BTRFS_I(src)))
3968 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3969 struct btrfs_file_extent_item *extent;
3972 struct btrfs_key new_key;
3973 u64 disko = 0, diskl = 0;
3974 u64 datao = 0, datal = 0;
3978 extent = btrfs_item_ptr(leaf, slot,
3979 struct btrfs_file_extent_item);
3980 comp = btrfs_file_extent_compression(leaf, extent);
3981 type = btrfs_file_extent_type(leaf, extent);
3982 if (type == BTRFS_FILE_EXTENT_REG ||
3983 type == BTRFS_FILE_EXTENT_PREALLOC) {
3984 disko = btrfs_file_extent_disk_bytenr(leaf,
3986 diskl = btrfs_file_extent_disk_num_bytes(leaf,
3988 datao = btrfs_file_extent_offset(leaf, extent);
3989 datal = btrfs_file_extent_num_bytes(leaf,
3991 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3992 /* take upper bound, may be compressed */
3993 datal = btrfs_file_extent_ram_bytes(leaf,
			/*
			 * The first search might have left us at an extent
			 * item that ends before our target range's start; this
			 * can happen if we have holes and the NO_HOLES feature
			 * is enabled.
			 */
4002 if (key.offset + datal <= off) {
4005 } else if (key.offset >= off + len) {
4008 next_key_min_offset = key.offset + datal;
4009 size = btrfs_item_size_nr(leaf, slot);
4010 read_extent_buffer(leaf, buf,
4011 btrfs_item_ptr_offset(leaf, slot),
4014 btrfs_release_path(path);
4015 path->leave_spinning = 0;
4017 memcpy(&new_key, &key, sizeof(new_key));
4018 new_key.objectid = btrfs_ino(BTRFS_I(inode));
4019 if (off <= key.offset)
4020 new_key.offset = key.offset + destoff - off;
4022 new_key.offset = destoff;
			/*
			 * Deal with a hole that doesn't have an extent item
			 * that represents it (NO_HOLES feature enabled).
			 * This hole is either in the middle of the cloning
			 * range or at the beginning (fully overlaps it or
			 * partially overlaps it).
			 */
4031 if (new_key.offset != last_dest_end)
4032 drop_start = last_dest_end;
4034 drop_start = new_key.offset;
			/*
			 * 1 - adjusting old extent (we may have to split it)
			 * 1 - add new extent
			 * 1 - inode update
			 */
4041 trans = btrfs_start_transaction(root, 3);
4042 if (IS_ERR(trans)) {
4043 ret = PTR_ERR(trans);
4047 if (type == BTRFS_FILE_EXTENT_REG ||
4048 type == BTRFS_FILE_EXTENT_PREALLOC) {
4050 * a | --- range to clone ---| b
4051 * | ------------- extent ------------- |
4054 /* subtract range b */
4055 if (key.offset + datal > off + len)
4056 datal = off + len - key.offset;
4058 /* subtract range a */
4059 if (off > key.offset) {
4060 datao += off - key.offset;
4061 datal -= off - key.offset;
4064 ret = btrfs_drop_extents(trans, root, inode,
4066 new_key.offset + datal,
4069 if (ret != -EOPNOTSUPP)
4070 btrfs_abort_transaction(trans,
4072 btrfs_end_transaction(trans);
4076 ret = btrfs_insert_empty_item(trans, root, path,
4079 btrfs_abort_transaction(trans, ret);
4080 btrfs_end_transaction(trans);
4084 leaf = path->nodes[0];
4085 slot = path->slots[0];
4086 write_extent_buffer(leaf, buf,
4087 btrfs_item_ptr_offset(leaf, slot),
4090 extent = btrfs_item_ptr(leaf, slot,
4091 struct btrfs_file_extent_item);
4093 /* disko == 0 means it's a hole */
4097 btrfs_set_file_extent_offset(leaf, extent,
4099 btrfs_set_file_extent_num_bytes(leaf, extent,
4103 inode_add_bytes(inode, datal);
4104 ret = btrfs_inc_extent_ref(trans,
4107 root->root_key.objectid,
4108 btrfs_ino(BTRFS_I(inode)),
4109 new_key.offset - datao);
4111 btrfs_abort_transaction(trans,
4113 btrfs_end_transaction(trans);
4118 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
4122 if (off > key.offset) {
4123 skip = off - key.offset;
4124 new_key.offset += skip;
4127 if (key.offset + datal > off + len)
4128 trim = key.offset + datal - (off + len);
4130 if (comp && (skip || trim)) {
4132 btrfs_end_transaction(trans);
4135 size -= skip + trim;
4136 datal -= skip + trim;
4138 ret = clone_copy_inline_extent(inode,
4145 if (ret != -EOPNOTSUPP)
4146 btrfs_abort_transaction(trans,
4148 btrfs_end_transaction(trans);
4151 leaf = path->nodes[0];
4152 slot = path->slots[0];
4155 /* If we have an implicit hole (NO_HOLES feature). */
4156 if (drop_start < new_key.offset)
4157 clone_update_extent_map(BTRFS_I(inode), trans,
4159 new_key.offset - drop_start);
4161 clone_update_extent_map(BTRFS_I(inode), trans,
4164 btrfs_mark_buffer_dirty(leaf);
4165 btrfs_release_path(path);
4167 last_dest_end = ALIGN(new_key.offset + datal,
4168 fs_info->sectorsize);
4169 ret = clone_finish_inode_update(trans, inode,
4175 if (new_key.offset + datal >= destoff + len)
4178 btrfs_release_path(path);
4179 key.offset = next_key_min_offset;
4181 if (fatal_signal_pending(current)) {
4188 if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole (NO_HOLES feature is enabled) that
		 * fully or partially overlaps our cloning range at its end.
		 */
4193 btrfs_release_path(path);
		/*
		 * 1 - remove extent(s)
		 * 1 - inode update
		 */
4199 trans = btrfs_start_transaction(root, 2);
4200 if (IS_ERR(trans)) {
4201 ret = PTR_ERR(trans);
4204 ret = btrfs_drop_extents(trans, root, inode,
4205 last_dest_end, destoff + len, 1);
4207 if (ret != -EOPNOTSUPP)
4208 btrfs_abort_transaction(trans, ret);
4209 btrfs_end_transaction(trans);
4212 clone_update_extent_map(BTRFS_I(inode), trans, NULL,
4214 destoff + len - last_dest_end);
4215 ret = clone_finish_inode_update(trans, inode, destoff + len,
4216 destoff, olen, no_time_update);
4220 btrfs_free_path(path);
4225 static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
4226 u64 off, u64 olen, u64 destoff)
4228 struct inode *inode = file_inode(file);
4229 struct inode *src = file_inode(file_src);
4230 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4231 struct btrfs_root *root = BTRFS_I(inode)->root;
4234 u64 bs = fs_info->sb->s_blocksize;
4235 int same_inode = src == inode;
	/*
	 * TODO:
	 * - split compressed inline extents.  Annoying: we need to
	 *   decompress into the destination's address_space (the file offset
	 *   may change, so the source mapping won't do), then recompress (or
	 *   otherwise reinsert) a subrange.
	 *
	 * - split destination inode's inline extents.  The inline extents can
	 *   be either compressed or non-compressed.
	 */
4248 if (btrfs_root_readonly(root))
4251 if (file_src->f_path.mnt != file->f_path.mnt ||
4252 src->i_sb != inode->i_sb)
4255 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
4259 btrfs_double_inode_lock(src, inode);
4264 /* don't make the dst file partly checksummed */
4265 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
4266 (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
4271 /* determine range to clone */
	ret = -EINVAL;
	if (off + len > src->i_size || off + len < off)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - off;
4277 /* if we extend to eof, continue to block boundary */
4278 if (off + len == src->i_size)
4279 len = ALIGN(src->i_size, bs) - off;
4286 /* verify the end result is block aligned */
4287 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
4288 !IS_ALIGNED(destoff, bs))
	/* reject ranges that overlap within the same file */
4293 if (destoff + len > off && destoff < off + len)
4297 if (destoff > inode->i_size) {
4298 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
	/*
	 * Lock the target range too.  Right after we replace the file extent
	 * items in the fs tree (which now point to the cloned data), we might
	 * have a worker replace them with extent items relative to a write
	 * operation that was issued before this clone operation (i.e. see
	 * inode.c:btrfs_finish_ordered_io).
	 */
4311 u64 lock_start = min_t(u64, off, destoff);
4312 u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
4314 ret = lock_extent_range(src, lock_start, lock_len, true);
4316 ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
4321 /* ranges in the io trees already unlocked */
4325 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
4328 u64 lock_start = min_t(u64, off, destoff);
4329 u64 lock_end = max_t(u64, off, destoff) + len - 1;
4331 unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
4333 btrfs_double_extent_unlock(src, off, inode, destoff, len);
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
4339 truncate_inode_pages_range(&inode->i_data,
4340 round_down(destoff, PAGE_SIZE),
4341 round_up(destoff + len, PAGE_SIZE) - 1);
4344 btrfs_double_inode_unlock(src, inode);
4350 int btrfs_clone_file_range(struct file *src_file, loff_t off,
4351 struct file *dst_file, loff_t destoff, u64 len)
4353 return btrfs_clone_files(dst_file, src_file, off, len, destoff);
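/*
 * Illustrative userspace sketch (not kernel code): this is the backend of
 * the VFS clone hook, reachable e.g. through the generic FICLONERANGE ioctl
 * (struct file_clone_range from linux/fs.h):
 *
 *	struct file_clone_range cr = {
 *		.src_fd = src_fd,
 *		.src_offset = 0,
 *		.src_length = 0,	// 0 means clone up to source EOF
 *		.dest_offset = 0,
 *	};
 *
 *	ioctl(dst_fd, FICLONERANGE, &cr);
 */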
4356 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
4358 struct inode *inode = file_inode(file);
4359 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4360 struct btrfs_root *root = BTRFS_I(inode)->root;
4361 struct btrfs_root *new_root;
4362 struct btrfs_dir_item *di;
4363 struct btrfs_trans_handle *trans;
4364 struct btrfs_path *path;
4365 struct btrfs_key location;
4366 struct btrfs_disk_key disk_key;
4371 if (!capable(CAP_SYS_ADMIN))
4374 ret = mnt_want_write_file(file);
4378 if (copy_from_user(&objectid, argp, sizeof(objectid))) {
4384 objectid = BTRFS_FS_TREE_OBJECTID;
4386 location.objectid = objectid;
4387 location.type = BTRFS_ROOT_ITEM_KEY;
4388 location.offset = (u64)-1;
4390 new_root = btrfs_read_fs_root_no_name(fs_info, &location);
4391 if (IS_ERR(new_root)) {
4392 ret = PTR_ERR(new_root);
4395 if (!is_fstree(new_root->root_key.objectid)) {
4400 path = btrfs_alloc_path();
4405 path->leave_spinning = 1;
4407 trans = btrfs_start_transaction(root, 1);
4408 if (IS_ERR(trans)) {
4409 btrfs_free_path(path);
4410 ret = PTR_ERR(trans);
4414 dir_id = btrfs_super_root_dir(fs_info->super_copy);
4415 di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
4416 dir_id, "default", 7, 1);
4417 if (IS_ERR_OR_NULL(di)) {
4418 btrfs_free_path(path);
4419 btrfs_end_transaction(trans);
4421 "Umm, you don't have the default diritem, this isn't going to work");
4426 btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
4427 btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
4428 btrfs_mark_buffer_dirty(path->nodes[0]);
4429 btrfs_free_path(path);
4431 btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
4432 btrfs_end_transaction(trans);
4434 mnt_drop_write_file(file);
4438 static void get_block_group_info(struct list_head *groups_list,
4439 struct btrfs_ioctl_space_info *space)
4441 struct btrfs_block_group_cache *block_group;
4443 space->total_bytes = 0;
4444 space->used_bytes = 0;
4446 list_for_each_entry(block_group, groups_list, list) {
4447 space->flags = block_group->flags;
4448 space->total_bytes += block_group->key.offset;
4449 space->used_bytes +=
4450 btrfs_block_group_used(&block_group->item);
4454 static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
4457 struct btrfs_ioctl_space_args space_args;
4458 struct btrfs_ioctl_space_info space;
4459 struct btrfs_ioctl_space_info *dest;
4460 struct btrfs_ioctl_space_info *dest_orig;
4461 struct btrfs_ioctl_space_info __user *user_dest;
4462 struct btrfs_space_info *info;
4463 static const u64 types[] = {
4464 BTRFS_BLOCK_GROUP_DATA,
4465 BTRFS_BLOCK_GROUP_SYSTEM,
4466 BTRFS_BLOCK_GROUP_METADATA,
4467 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA
4475 if (copy_from_user(&space_args,
4476 (struct btrfs_ioctl_space_args __user *)arg,
4477 sizeof(space_args)))
4480 for (i = 0; i < num_types; i++) {
4481 struct btrfs_space_info *tmp;
4485 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4487 if (tmp->flags == types[i]) {
4497 down_read(&info->groups_sem);
4498 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4499 if (!list_empty(&info->block_groups[c]))
4502 up_read(&info->groups_sem);
	/*
	 * Global block reserve, exported as a space_info
	 */
	slot_count++;
4510 /* space_slots == 0 means they are asking for a count */
4511 if (space_args.space_slots == 0) {
4512 space_args.total_spaces = slot_count;
4516 slot_count = min_t(u64, space_args.space_slots, slot_count);
4518 alloc_size = sizeof(*dest) * slot_count;
	/*
	 * We generally have at most 6 or so space infos, one for each raid
	 * level.  So, a whole page should be more than enough for everyone.
	 */
4523 if (alloc_size > PAGE_SIZE)
4526 space_args.total_spaces = 0;
4527 dest = kmalloc(alloc_size, GFP_KERNEL);
4532 /* now we have a buffer to copy into */
4533 for (i = 0; i < num_types; i++) {
4534 struct btrfs_space_info *tmp;
4541 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4543 if (tmp->flags == types[i]) {
4552 down_read(&info->groups_sem);
4553 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4554 if (!list_empty(&info->block_groups[c])) {
4555 get_block_group_info(&info->block_groups[c],
4557 memcpy(dest, &space, sizeof(space));
4559 space_args.total_spaces++;
4565 up_read(&info->groups_sem);
	/*
	 * Add global block reserve
	 */
	if (slot_count) {
4572 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4574 spin_lock(&block_rsv->lock);
4575 space.total_bytes = block_rsv->size;
4576 space.used_bytes = block_rsv->size - block_rsv->reserved;
4577 spin_unlock(&block_rsv->lock);
4578 space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
4579 memcpy(dest, &space, sizeof(space));
4580 space_args.total_spaces++;
4583 user_dest = (struct btrfs_ioctl_space_info __user *)
4584 (arg + sizeof(struct btrfs_ioctl_space_args));
4586 if (copy_to_user(user_dest, dest_orig, alloc_size))
4591 if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
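/*
 * Illustrative userspace sketch (not kernel code): the expected calling
 * convention is two passes, first with space_slots == 0 to learn the slot
 * count, then with a buffer large enough for that many trailing entries:
 *
 *	struct btrfs_ioctl_space_args probe = { .space_slots = 0 };
 *	struct btrfs_ioctl_space_args *sargs;
 *
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, &probe);
 *	sargs = calloc(1, sizeof(*sargs) + probe.total_spaces *
 *		       sizeof(struct btrfs_ioctl_space_info));
 *	sargs->space_slots = probe.total_spaces;
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, sargs);
 */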
4597 static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
4600 struct btrfs_trans_handle *trans;
4604 trans = btrfs_attach_transaction_barrier(root);
4605 if (IS_ERR(trans)) {
4606 if (PTR_ERR(trans) != -ENOENT)
4607 return PTR_ERR(trans);
4609 /* No running transaction, don't bother */
4610 transid = root->fs_info->last_trans_committed;
4613 transid = trans->transid;
4614 ret = btrfs_commit_transaction_async(trans, 0);
4616 btrfs_end_transaction(trans);
4621 if (copy_to_user(argp, &transid, sizeof(transid)))
4626 static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
4632 if (copy_from_user(&transid, argp, sizeof(transid)))
4635 transid = 0; /* current trans */
4637 return btrfs_wait_for_commit(fs_info, transid);
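/*
 * Illustrative userspace sketch (not kernel code): the two ioctls above
 * pair up to implement an asynchronous sync. START_SYNC kicks off a commit
 * and returns its transid; WAIT_SYNC blocks until that transid has been
 * committed (passing 0 waits for the current transaction):
 *
 *	__u64 transid;
 *
 *	ioctl(fd, BTRFS_IOC_START_SYNC, &transid);
 *	// ... do other work while the commit proceeds ...
 *	ioctl(fd, BTRFS_IOC_WAIT_SYNC, &transid);
 */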
4640 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
4642 struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
4643 struct btrfs_ioctl_scrub_args *sa;
4646 if (!capable(CAP_SYS_ADMIN))
	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);
4653 if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
4654 ret = mnt_want_write_file(file);
4659 ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
4660 &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
4663 if (copy_to_user(arg, sa, sizeof(*sa)))
4666 if (!(sa->flags & BTRFS_SCRUB_READONLY))
4667 mnt_drop_write_file(file);
4673 static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
4675 if (!capable(CAP_SYS_ADMIN))
4678 return btrfs_scrub_cancel(fs_info);
4681 static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
4684 struct btrfs_ioctl_scrub_args *sa;
4687 if (!capable(CAP_SYS_ADMIN))
	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);
4694 ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
4696 if (copy_to_user(arg, sa, sizeof(*sa)))
4703 static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
4706 struct btrfs_ioctl_get_dev_stats *sa;
	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);
4713 if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
4718 ret = btrfs_get_dev_stats(fs_info, sa);
4720 if (copy_to_user(arg, sa, sizeof(*sa)))
4727 static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
4730 struct btrfs_ioctl_dev_replace_args *p;
4733 if (!capable(CAP_SYS_ADMIN))
	p = memdup_user(arg, sizeof(*p));
	if (IS_ERR(p))
		return PTR_ERR(p);

	switch (p->cmd) {
4741 case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
4742 if (sb_rdonly(fs_info->sb)) {
4746 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
4747 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4749 ret = btrfs_dev_replace_by_ioctl(fs_info, p);
4750 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4753 case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4754 btrfs_dev_replace_status(fs_info, p);
4757 case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4758 p->result = btrfs_dev_replace_cancel(fs_info);
4766 if (copy_to_user(arg, p, sizeof(*p)))
4773 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
4779 struct btrfs_ioctl_ino_path_args *ipa = NULL;
4780 struct inode_fs_paths *ipath = NULL;
4781 struct btrfs_path *path;
4783 if (!capable(CAP_DAC_READ_SEARCH))
4786 path = btrfs_alloc_path();
4792 ipa = memdup_user(arg, sizeof(*ipa));
4799 size = min_t(u32, ipa->size, 4096);
4800 ipath = init_ipath(size, root, path);
4801 if (IS_ERR(ipath)) {
4802 ret = PTR_ERR(ipath);
4807 ret = paths_from_inode(ipa->inum, ipath);
4811 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
4812 rel_ptr = ipath->fspath->val[i] -
4813 (u64)(unsigned long)ipath->fspath->val;
4814 ipath->fspath->val[i] = rel_ptr;
4817 ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
4818 ipath->fspath, size);
4825 btrfs_free_path(path);
4832 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
4834 struct btrfs_data_container *inodes = ctx;
4835 const size_t c = 3 * sizeof(u64);
4837 if (inodes->bytes_left >= c) {
4838 inodes->bytes_left -= c;
4839 inodes->val[inodes->elem_cnt] = inum;
4840 inodes->val[inodes->elem_cnt + 1] = offset;
4841 inodes->val[inodes->elem_cnt + 2] = root;
4842 inodes->elem_cnt += 3;
4844 inodes->bytes_missing += c - inodes->bytes_left;
4845 inodes->bytes_left = 0;
4846 inodes->elem_missed += 3;
4852 static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
4853 void __user *arg, int version)
4857 struct btrfs_ioctl_logical_ino_args *loi;
4858 struct btrfs_data_container *inodes = NULL;
4859 struct btrfs_path *path = NULL;
4862 if (!capable(CAP_SYS_ADMIN))
4865 loi = memdup_user(arg, sizeof(*loi));
	if (IS_ERR(loi))
		return PTR_ERR(loi);
4870 ignore_offset = false;
4871 size = min_t(u32, loi->size, SZ_64K);
4873 /* All reserved bits must be 0 for now */
4874 if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) {
4878 /* Only accept flags we have defined so far */
4879 if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) {
4883 ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
4884 size = min_t(u32, loi->size, SZ_16M);
4887 path = btrfs_alloc_path();
4893 inodes = init_data_container(size);
4894 if (IS_ERR(inodes)) {
4895 ret = PTR_ERR(inodes);
4900 ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
4901 build_ino_list, inodes, ignore_offset);
4907 ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes,
4913 btrfs_free_path(path);
4921 void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
4922 struct btrfs_ioctl_balance_args *bargs)
4924 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4926 bargs->flags = bctl->flags;
4928 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags))
4929 bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
4930 if (atomic_read(&fs_info->balance_pause_req))
4931 bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
4932 if (atomic_read(&fs_info->balance_cancel_req))
4933 bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
4935 memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
4936 memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
4937 memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
4939 spin_lock(&fs_info->balance_lock);
4940 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4941 spin_unlock(&fs_info->balance_lock);
4944 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
4946 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4947 struct btrfs_fs_info *fs_info = root->fs_info;
4948 struct btrfs_ioctl_balance_args *bargs;
4949 struct btrfs_balance_control *bctl;
4950 bool need_unlock; /* for mut. excl. ops lock */
4953 if (!capable(CAP_SYS_ADMIN))
4956 ret = mnt_want_write_file(file);
4961 if (!test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
4962 mutex_lock(&fs_info->balance_mutex);
		/*
		 * mut. excl. ops lock is locked.  Three possibilities:
		 * (1) some other op is running
		 * (2) balance is running
		 * (3) balance is paused -- special case (think resume)
		 */
4973 mutex_lock(&fs_info->balance_mutex);
4974 if (fs_info->balance_ctl) {
4975 /* this is either (2) or (3) */
4976 if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4977 mutex_unlock(&fs_info->balance_mutex);
				/*
				 * Lock released to allow other waiters to
				 * continue; we'll re-examine the status once
				 * we reacquire the lock.
				 */
4982 mutex_lock(&fs_info->balance_mutex);
4984 if (fs_info->balance_ctl &&
4985 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4987 need_unlock = false;
4991 mutex_unlock(&fs_info->balance_mutex);
4995 mutex_unlock(&fs_info->balance_mutex);
5001 mutex_unlock(&fs_info->balance_mutex);
5002 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
5007 BUG_ON(!test_bit(BTRFS_FS_EXCL_OP, &fs_info->flags));
5010 bargs = memdup_user(arg, sizeof(*bargs));
5011 if (IS_ERR(bargs)) {
5012 ret = PTR_ERR(bargs);
5016 if (bargs->flags & BTRFS_BALANCE_RESUME) {
5017 if (!fs_info->balance_ctl) {
5022 bctl = fs_info->balance_ctl;
5023 spin_lock(&fs_info->balance_lock);
5024 bctl->flags |= BTRFS_BALANCE_RESUME;
5025 spin_unlock(&fs_info->balance_lock);
5033 if (fs_info->balance_ctl) {
5038 bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
5045 memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
5046 memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
5047 memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
5049 bctl->flags = bargs->flags;
5051 /* balance everything - no filters */
5052 bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
5055 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
	/*
	 * Ownership of bctl and the filesystem flag BTRFS_FS_EXCL_OP goes to
	 * btrfs_balance.  bctl is freed in reset_balance_state, or, if the
	 * restriper was paused all the way until unmount, in free_fs_info.
	 * The flag should be cleared after reset_balance_state.
	 */
5067 need_unlock = false;
5069 ret = btrfs_balance(fs_info, bctl, bargs);
5073 if (copy_to_user(arg, bargs, sizeof(*bargs)))
5082 mutex_unlock(&fs_info->balance_mutex);
5084 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
5086 mnt_drop_write_file(file);
5090 static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
5092 if (!capable(CAP_SYS_ADMIN))
5096 case BTRFS_BALANCE_CTL_PAUSE:
5097 return btrfs_pause_balance(fs_info);
5098 case BTRFS_BALANCE_CTL_CANCEL:
5099 return btrfs_cancel_balance(fs_info);
5105 static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
5108 struct btrfs_ioctl_balance_args *bargs;
5111 if (!capable(CAP_SYS_ADMIN))
5114 mutex_lock(&fs_info->balance_mutex);
5115 if (!fs_info->balance_ctl) {
5120 bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
5126 btrfs_update_ioctl_balance_args(fs_info, bargs);
5128 if (copy_to_user(arg, bargs, sizeof(*bargs)))
5133 mutex_unlock(&fs_info->balance_mutex);
5137 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
5139 struct inode *inode = file_inode(file);
5140 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5141 struct btrfs_ioctl_quota_ctl_args *sa;
5144 if (!capable(CAP_SYS_ADMIN))
5147 ret = mnt_want_write_file(file);
5151 sa = memdup_user(arg, sizeof(*sa));
5157 down_write(&fs_info->subvol_sem);
5160 case BTRFS_QUOTA_CTL_ENABLE:
5161 ret = btrfs_quota_enable(fs_info);
5163 case BTRFS_QUOTA_CTL_DISABLE:
5164 ret = btrfs_quota_disable(fs_info);
5172 up_write(&fs_info->subvol_sem);
5174 mnt_drop_write_file(file);
5178 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
5180 struct inode *inode = file_inode(file);
5181 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5182 struct btrfs_root *root = BTRFS_I(inode)->root;
5183 struct btrfs_ioctl_qgroup_assign_args *sa;
5184 struct btrfs_trans_handle *trans;
5188 if (!capable(CAP_SYS_ADMIN))
5191 ret = mnt_want_write_file(file);
5195 sa = memdup_user(arg, sizeof(*sa));
5201 trans = btrfs_join_transaction(root);
5202 if (IS_ERR(trans)) {
5203 ret = PTR_ERR(trans);
5208 ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
5210 ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
5213 /* update qgroup status and info */
5214 err = btrfs_run_qgroups(trans);
5216 btrfs_handle_fs_error(fs_info, err,
5217 "failed to update qgroup status and info");
5218 err = btrfs_end_transaction(trans);
5225 mnt_drop_write_file(file);
5229 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
5231 struct inode *inode = file_inode(file);
5232 struct btrfs_root *root = BTRFS_I(inode)->root;
5233 struct btrfs_ioctl_qgroup_create_args *sa;
5234 struct btrfs_trans_handle *trans;
5238 if (!capable(CAP_SYS_ADMIN))
5241 ret = mnt_want_write_file(file);
5245 sa = memdup_user(arg, sizeof(*sa));
5251 if (!sa->qgroupid) {
5256 trans = btrfs_join_transaction(root);
5257 if (IS_ERR(trans)) {
5258 ret = PTR_ERR(trans);
5263 ret = btrfs_create_qgroup(trans, sa->qgroupid);
5265 ret = btrfs_remove_qgroup(trans, sa->qgroupid);
5268 err = btrfs_end_transaction(trans);
5275 mnt_drop_write_file(file);
5279 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
5281 struct inode *inode = file_inode(file);
5282 struct btrfs_root *root = BTRFS_I(inode)->root;
5283 struct btrfs_ioctl_qgroup_limit_args *sa;
5284 struct btrfs_trans_handle *trans;
5289 if (!capable(CAP_SYS_ADMIN))
5292 ret = mnt_want_write_file(file);
5296 sa = memdup_user(arg, sizeof(*sa));
5302 trans = btrfs_join_transaction(root);
5303 if (IS_ERR(trans)) {
5304 ret = PTR_ERR(trans);
5308 qgroupid = sa->qgroupid;
5310 /* take the current subvol as qgroup */
5311 qgroupid = root->root_key.objectid;
5314 ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
5316 err = btrfs_end_transaction(trans);
5323 mnt_drop_write_file(file);
5327 static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
5329 struct inode *inode = file_inode(file);
5330 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5331 struct btrfs_ioctl_quota_rescan_args *qsa;
5334 if (!capable(CAP_SYS_ADMIN))
5337 ret = mnt_want_write_file(file);
5341 qsa = memdup_user(arg, sizeof(*qsa));
5352 ret = btrfs_qgroup_rescan(fs_info);
5357 mnt_drop_write_file(file);
5361 static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
5363 struct inode *inode = file_inode(file);
5364 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5365 struct btrfs_ioctl_quota_rescan_args *qsa;
5368 if (!capable(CAP_SYS_ADMIN))
5371 qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
5375 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
5377 qsa->progress = fs_info->qgroup_rescan_progress.objectid;
5380 if (copy_to_user(arg, qsa, sizeof(*qsa)))
5387 static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
5389 struct inode *inode = file_inode(file);
5390 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5392 if (!capable(CAP_SYS_ADMIN))
5395 return btrfs_qgroup_wait_for_completion(fs_info, true);
5398 static long _btrfs_ioctl_set_received_subvol(struct file *file,
5399 struct btrfs_ioctl_received_subvol_args *sa)
5401 struct inode *inode = file_inode(file);
5402 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5403 struct btrfs_root *root = BTRFS_I(inode)->root;
5404 struct btrfs_root_item *root_item = &root->root_item;
5405 struct btrfs_trans_handle *trans;
5406 struct timespec64 ct = current_time(inode);
5408 int received_uuid_changed;
5410 if (!inode_owner_or_capable(inode))
5413 ret = mnt_want_write_file(file);
5417 down_write(&fs_info->subvol_sem);
5419 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
5424 if (btrfs_root_readonly(root)) {
	/*
	 * 1 - root item
	 * 2 - uuid items (received uuid + subvol uuid)
	 */
5433 trans = btrfs_start_transaction(root, 3);
5434 if (IS_ERR(trans)) {
5435 ret = PTR_ERR(trans);
5440 sa->rtransid = trans->transid;
5441 sa->rtime.sec = ct.tv_sec;
5442 sa->rtime.nsec = ct.tv_nsec;
5444 received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
5446 if (received_uuid_changed &&
5447 !btrfs_is_empty_uuid(root_item->received_uuid)) {
5448 ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
5449 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5450 root->root_key.objectid);
5451 if (ret && ret != -ENOENT) {
5452 btrfs_abort_transaction(trans, ret);
5453 btrfs_end_transaction(trans);
5457 memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
5458 btrfs_set_root_stransid(root_item, sa->stransid);
5459 btrfs_set_root_rtransid(root_item, sa->rtransid);
5460 btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
5461 btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
5462 btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
5463 btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
5465 ret = btrfs_update_root(trans, fs_info->tree_root,
5466 &root->root_key, &root->root_item);
5468 btrfs_end_transaction(trans);
5471 if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
5472 ret = btrfs_uuid_tree_add(trans, sa->uuid,
5473 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5474 root->root_key.objectid);
5475 if (ret < 0 && ret != -EEXIST) {
5476 btrfs_abort_transaction(trans, ret);
5477 btrfs_end_transaction(trans);
5481 ret = btrfs_commit_transaction(trans);
5483 up_write(&fs_info->subvol_sem);
5484 mnt_drop_write_file(file);
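/*
 * The ioctl comes in a native 64-bit flavour and, on 64-bit kernels, a
 * packed 32-bit compat flavour. Both wrappers below convert their argument
 * to the 64-bit struct and funnel it through
 * _btrfs_ioctl_set_received_subvol() above.
 */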
#ifdef CONFIG_64BIT
static long btrfs_ioctl_set_received_subvol_32(struct file *file,
						void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
	struct btrfs_ioctl_received_subvol_args *args64 = NULL;
	int ret = 0;

	args32 = memdup_user(arg, sizeof(*args32));
	if (IS_ERR(args32))
		return PTR_ERR(args32);

	args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
	if (!args64) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
	args64->stransid = args32->stransid;
	args64->rtransid = args32->rtransid;
	args64->stime.sec = args32->stime.sec;
	args64->stime.nsec = args32->stime.nsec;
	args64->rtime.sec = args32->rtime.sec;
	args64->rtime.nsec = args32->rtime.nsec;
	args64->flags = args32->flags;

	ret = _btrfs_ioctl_set_received_subvol(file, args64);
	if (ret)
		goto out;

	memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
	args32->stransid = args64->stransid;
	args32->rtransid = args64->rtransid;
	args32->stime.sec = args64->stime.sec;
	args32->stime.nsec = args64->stime.nsec;
	args32->rtime.sec = args64->rtime.sec;
	args32->rtime.nsec = args64->rtime.nsec;
	args32->flags = args64->flags;

	ret = copy_to_user(arg, args32, sizeof(*args32));
	if (ret)
		ret = -EFAULT;

out:
	kfree(args32);
	kfree(args64);
	return ret;
}
#endif
static long btrfs_ioctl_set_received_subvol(struct file *file,
					    void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args *sa = NULL;
	int ret = 0;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	ret = _btrfs_ioctl_set_received_subvol(file, sa);
	if (ret)
		goto out;

	ret = copy_to_user(arg, sa, sizeof(*sa));
	if (ret)
		ret = -EFAULT;

out:
	kfree(sa);
	return ret;
}
static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	size_t len;
	int ret;
	char label[BTRFS_LABEL_SIZE];

	spin_lock(&fs_info->super_lock);
	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
	spin_unlock(&fs_info->super_lock);

	len = strnlen(label, BTRFS_LABEL_SIZE);

	if (len == BTRFS_LABEL_SIZE) {
		btrfs_warn(fs_info,
			   "label is too long, return the first %zu bytes",
			   --len);
	}

	ret = copy_to_user(arg, label, len);

	return ret ? -EFAULT : 0;
}
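/*
 * Illustrative userspace sketch (an assumption, not part of this file).
 * Only the label bytes themselves are copied out, so the caller should
 * zero the buffer first to guarantee NUL termination:
 *
 *	char label[BTRFS_LABEL_SIZE] = { 0 };
 *
 *	if (ioctl(fd, BTRFS_IOC_GET_FSLABEL, label) == 0)
 *		printf("%s\n", label);
 */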
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_trans_handle *trans;
	char label[BTRFS_LABEL_SIZE];
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(label, arg, sizeof(label)))
		return -EFAULT;

	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
		btrfs_err(fs_info,
			  "unable to set label with more than %d bytes",
			  BTRFS_LABEL_SIZE - 1);
		return -EINVAL;
	}

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	spin_lock(&fs_info->super_lock);
	strcpy(super_block->label, label);
	spin_unlock(&fs_info->super_lock);
	ret = btrfs_commit_transaction(trans);

out_unlock:
	mnt_drop_write_file(file);
	return ret;
}
#define INIT_FEATURE_FLAGS(suffix) \
	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }

int btrfs_ioctl_get_supported_features(void __user *arg)
{
	static const struct btrfs_ioctl_feature_flags features[3] = {
		INIT_FEATURE_FLAGS(SUPP),
		INIT_FEATURE_FLAGS(SAFE_SET),
		INIT_FEATURE_FLAGS(SAFE_CLEAR)
	};

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}
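/*
 * btrfs_ioctl_get_supported_features() returns three entries: features[0]
 * holds every feature bit this kernel knows about, features[1] the bits
 * that may be set and features[2] the bits that may be cleared while the
 * filesystem is mounted.
 */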
static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags features;

	features.compat_flags = btrfs_super_compat_flags(super_block);
	features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
	features.incompat_flags = btrfs_super_incompat_flags(super_block);

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}
static int check_feature_bits(struct btrfs_fs_info *fs_info,
			      enum btrfs_feature_set set,
			      u64 change_mask, u64 flags, u64 supported_flags,
			      u64 safe_set, u64 safe_clear)
{
	const char *type = btrfs_feature_set_names[set];
	char *names;
	u64 disallowed, unsupported;
	u64 set_mask = flags & change_mask;
	u64 clear_mask = ~flags & change_mask;

	unsupported = set_mask & ~supported_flags;
	if (unsupported) {
		names = btrfs_printable_features(set, unsupported);
		if (names) {
			btrfs_warn(fs_info,
				   "this kernel does not support the %s feature bit%s",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "this kernel does not support %s bits 0x%llx",
				   type, unsupported);
		return -EOPNOTSUPP;
	}

	disallowed = set_mask & ~safe_set;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't set the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't set %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	disallowed = clear_mask & ~safe_clear;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't clear the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't clear %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	return 0;
}
#define check_feature(fs_info, change_mask, flags, mask_base)	\
check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags,	\
		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
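/*
 * For example, check_feature(fs_info, change_mask, flags, COMPAT_RO)
 * expands to:
 *
 *	check_feature_bits(fs_info, FEAT_COMPAT_RO, change_mask, flags,
 *			   BTRFS_FEATURE_COMPAT_RO_SUPP,
 *			   BTRFS_FEATURE_COMPAT_RO_SAFE_SET,
 *			   BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR);
 */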
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;

	ret = check_feature(fs_info, flags[0].compat_flags,
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].compat_ro_flags,
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].incompat_flags,
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop_write;
	}

	spin_lock(&fs_info->super_lock);
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
	spin_unlock(&fs_info->super_lock);

	ret = btrfs_commit_transaction(trans);
out_drop_write:
	mnt_drop_write_file(file);

	return ret;
}
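/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * flags[0] selects which bits to change and flags[1] supplies their new
 * values, so setting the same bit in both turns a feature on. The example
 * assumes EXTENDED_IREF is in the incompat safe-set mask:
 *
 *	struct btrfs_ioctl_feature_flags flags[2] = { { 0 }, { 0 } };
 *
 *	flags[0].incompat_flags = BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF;
 *	flags[1].incompat_flags = BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF;
 *	ioctl(fd, BTRFS_IOC_SET_FEATURES, &flags);
 */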
static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
{
	struct btrfs_ioctl_send_args *arg;
	int ret;

	if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
		struct btrfs_ioctl_send_args_32 args32;

		ret = copy_from_user(&args32, argp, sizeof(args32));
		if (ret)
			return -EFAULT;
		arg = kzalloc(sizeof(*arg), GFP_KERNEL);
		if (!arg)
			return -ENOMEM;
		arg->send_fd = args32.send_fd;
		arg->clone_sources_count = args32.clone_sources_count;
		arg->clone_sources = compat_ptr(args32.clone_sources);
		arg->parent_root = args32.parent_root;
		arg->flags = args32.flags;
		memcpy(arg->reserved, args32.reserved,
		       sizeof(args32.reserved));
#else
		return -ENOTTY;
#endif
	} else {
		arg = memdup_user(argp, sizeof(*arg));
		if (IS_ERR(arg))
			return PTR_ERR(arg);
	}
	ret = btrfs_ioctl_send(file, arg);
	kfree(arg);
	return ret;
}
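/*
 * The compat and native send argument structs differ only in the width of
 * the clone_sources pointer, so the conversion above is a plain
 * field-by-field copy, with compat_ptr() widening the 32-bit pointer.
 */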
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return btrfs_ioctl_getflags(file, argp);
	case FS_IOC_SETFLAGS:
		return btrfs_ioctl_setflags(file, argp);
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
	case FITRIM:
		return btrfs_ioctl_fitrim(file, argp);
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SNAP_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp);
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(file, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(file, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(fs_info, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(file, argp);
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
	case BTRFS_IOC_FS_INFO:
		return btrfs_ioctl_fs_info(fs_info, argp);
	case BTRFS_IOC_DEV_INFO:
		return btrfs_ioctl_dev_info(fs_info, argp);
	case BTRFS_IOC_BALANCE:
		return btrfs_ioctl_balance(file, NULL);
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(file, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
	case BTRFS_IOC_LOGICAL_INO_V2:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(fs_info, argp);
	case BTRFS_IOC_SYNC: {
		int ret;

		ret = btrfs_start_delalloc_roots(fs_info, -1);
		if (ret)
			return ret;
		ret = btrfs_sync_fs(inode->i_sb, 1);
		/*
		 * The transaction thread may want to do more work,
		 * namely it pokes the cleaner kthread that will start
		 * processing uncleaned subvols.
		 */
		wake_up_process(fs_info->transaction_kthread);
		return ret;
	}
	case BTRFS_IOC_START_SYNC:
		return btrfs_ioctl_start_sync(root, argp);
	case BTRFS_IOC_WAIT_SYNC:
		return btrfs_ioctl_wait_sync(fs_info, argp);
	case BTRFS_IOC_SCRUB:
		return btrfs_ioctl_scrub(file, argp);
	case BTRFS_IOC_SCRUB_CANCEL:
		return btrfs_ioctl_scrub_cancel(fs_info);
	case BTRFS_IOC_SCRUB_PROGRESS:
		return btrfs_ioctl_scrub_progress(fs_info, argp);
	case BTRFS_IOC_BALANCE_V2:
		return btrfs_ioctl_balance(file, argp);
	case BTRFS_IOC_BALANCE_CTL:
		return btrfs_ioctl_balance_ctl(fs_info, arg);
	case BTRFS_IOC_BALANCE_PROGRESS:
		return btrfs_ioctl_balance_progress(fs_info, argp);
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
	case BTRFS_IOC_SEND:
		return _btrfs_ioctl_send(file, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_SEND_32:
		return _btrfs_ioctl_send(file, argp, true);
#endif
	case BTRFS_IOC_GET_DEV_STATS:
		return btrfs_ioctl_get_dev_stats(fs_info, argp);
	case BTRFS_IOC_QUOTA_CTL:
		return btrfs_ioctl_quota_ctl(file, argp);
	case BTRFS_IOC_QGROUP_ASSIGN:
		return btrfs_ioctl_qgroup_assign(file, argp);
	case BTRFS_IOC_QGROUP_CREATE:
		return btrfs_ioctl_qgroup_create(file, argp);
	case BTRFS_IOC_QGROUP_LIMIT:
		return btrfs_ioctl_qgroup_limit(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
		return btrfs_ioctl_quota_rescan_status(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
		return btrfs_ioctl_quota_rescan_wait(file, argp);
	case BTRFS_IOC_DEV_REPLACE:
		return btrfs_ioctl_dev_replace(fs_info, argp);
	case BTRFS_IOC_GET_FSLABEL:
		return btrfs_ioctl_get_fslabel(file, argp);
	case BTRFS_IOC_SET_FSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		return btrfs_ioctl_get_supported_features(argp);
	case BTRFS_IOC_GET_FEATURES:
		return btrfs_ioctl_get_features(file, argp);
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
	case FS_IOC_FSGETXATTR:
		return btrfs_ioctl_fsgetxattr(file, argp);
	case FS_IOC_FSSETXATTR:
		return btrfs_ioctl_fssetxattr(file, argp);
	case BTRFS_IOC_GET_SUBVOL_INFO:
		return btrfs_ioctl_get_subvol_info(file, argp);
	case BTRFS_IOC_GET_SUBVOL_ROOTREF:
		return btrfs_ioctl_get_subvol_rootref(file, argp);
	case BTRFS_IOC_INO_LOOKUP_USER:
		return btrfs_ioctl_ino_lookup_user(file, argp);
	}

	return -ENOTTY;
}
#ifdef CONFIG_COMPAT
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * These all access 32-bit values anyway so no further
	 * handling is necessary.
	 */
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	}

	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif