// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
        u64 ret = 0;
        int i;

        for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
                ret += qgroup->rsv.values[i];

        return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
        if (type == BTRFS_QGROUP_RSV_DATA)
                return "data";
        if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
                return "meta_pertrans";
        if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
                return "meta_prealloc";
        return NULL;
}
#endif
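
/*
 * For reference, the three reservation types summed above are (see
 * qgroup_rsv_type_str()): BTRFS_QGROUP_RSV_DATA for data reservations,
 * BTRFS_QGROUP_RSV_META_PERTRANS for metadata reservations released at
 * transaction commit, and BTRFS_QGROUP_RSV_META_PREALLOC for metadata
 * reserved ahead of time.  This is a rough summary, not a complete
 * description of the reservation lifetimes.
 */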

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
                           struct btrfs_qgroup *qgroup, u64 num_bytes,
                           enum btrfs_qgroup_rsv_type type)
{
        trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
        qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
                               struct btrfs_qgroup *qgroup, u64 num_bytes,
                               enum btrfs_qgroup_rsv_type type)
{
        trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
        if (qgroup->rsv.values[type] >= num_bytes) {
                qgroup->rsv.values[type] -= num_bytes;
                return;
        }
#ifdef CONFIG_BTRFS_DEBUG
        WARN_RATELIMIT(1,
                "qgroup %llu %s reserved space underflow, have %llu to free %llu",
                qgroup->qgroupid, qgroup_rsv_type_str(type),
                qgroup->rsv.values[type], num_bytes);
#endif
        qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
                                     struct btrfs_qgroup *dest,
                                     struct btrfs_qgroup *src)
{
        int i;

        for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
                qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
                                         struct btrfs_qgroup *dest,
                                         struct btrfs_qgroup *src)
{
        int i;

        for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
                qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->old_refcnt < seq)
                qg->old_refcnt = seq;
        qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->new_refcnt < seq)
                qg->new_refcnt = seq;
        qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->old_refcnt < seq)
                return 0;
        return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->new_refcnt < seq)
                return 0;
        return qg->new_refcnt - seq;
}
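
/*
 * Worked example of the seq-based refcnt scheme above: each accounting
 * run uses a fresh sequence number, say seq == 100.  A qgroup whose
 * stored refcnt is stale (< seq) is reset to seq on its first update,
 * so after
 *
 *      btrfs_qgroup_update_old_refcnt(qg, 100, 1);
 *      btrfs_qgroup_update_old_refcnt(qg, 100, 1);
 *
 * btrfs_qgroup_get_old_refcnt(qg, 100) returns 2, while a qgroup never
 * touched in this run still has a refcnt below seq and reads back as 0.
 * This avoids having to clear every counter between runs.
 */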

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
        struct list_head next_group;
        struct list_head next_member;
        struct btrfs_qgroup *group;
        struct btrfs_qgroup *member;
};
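
/*
 * Each btrfs_qgroup_list instance represents one (member, group) edge
 * and is threaded onto two lists at once: next_group links it into
 * member->groups (the member's parents) and next_member links it into
 * group->members (the group's children).  Tearing down a relation
 * therefore has to unlink both, as __del_qgroup_rb() and
 * del_relation_rb() below do.
 */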

static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
        return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
        return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
                   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
                                           u64 qgroupid)
{
        struct rb_node *n = fs_info->qgroup_tree.rb_node;
        struct btrfs_qgroup *qgroup;

        while (n) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                if (qgroup->qgroupid < qgroupid)
                        n = n->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        n = n->rb_right;
                else
                        return qgroup;
        }
        return NULL;
}
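
/*
 * Note that the comparisons above (and in add_qgroup_rb() below) put
 * larger qgroupids in the left subtree, the mirror image of the usual
 * rbtree convention.  That is harmless as long as lookup and insert
 * agree on the ordering, which they do.
 */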

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
                                          u64 qgroupid)
{
        struct rb_node **p = &fs_info->qgroup_tree.rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_qgroup *qgroup;

        while (*p) {
                parent = *p;
                qgroup = rb_entry(parent, struct btrfs_qgroup, node);

                if (qgroup->qgroupid < qgroupid)
                        p = &(*p)->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        p = &(*p)->rb_right;
                else
                        return qgroup;
        }

        qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
        if (!qgroup)
                return ERR_PTR(-ENOMEM);

        qgroup->qgroupid = qgroupid;
        INIT_LIST_HEAD(&qgroup->groups);
        INIT_LIST_HEAD(&qgroup->members);
        INIT_LIST_HEAD(&qgroup->dirty);

        rb_link_node(&qgroup->node, parent, p);
        rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

        return qgroup;
}

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
        struct btrfs_qgroup_list *list;

        list_del(&qgroup->dirty);
        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }

        while (!list_empty(&qgroup->members)) {
                list = list_first_entry(&qgroup->members,
                                        struct btrfs_qgroup_list, next_member);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }
        kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

        if (!qgroup)
                return -ENOENT;

        rb_erase(&qgroup->node, &fs_info->qgroup_tree);
        __del_qgroup_rb(qgroup);
        return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list = kzalloc(sizeof(*list), GFP_ATOMIC);
        if (!list)
                return -ENOMEM;

        list->group = parent;
        list->member = member;
        list_add_tail(&list->next_group, &member->groups);
        list_add_tail(&list->next_member, &parent->members);

        return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        list_del(&list->next_group);
                        list_del(&list->next_member);
                        kfree(list);
                        return 0;
                }
        }
        return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
                               u64 rfer, u64 excl)
{
        struct btrfs_qgroup *qgroup;

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup)
                return -EINVAL;
        if (qgroup->rfer != rfer || qgroup->excl != excl)
                return -EINVAL;
        return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_path *path = NULL;
        struct extent_buffer *l;
        int slot;
        int ret = 0;
        u64 flags = 0;
        u64 rescan_progress = 0;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        /* default this to quota off, in case no status key is found */
        fs_info->qgroup_flags = 0;

        /*
         * pass 1: read status, all qgroup infos and limits
         */
        key.objectid = 0;
        key.type = 0;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
        if (ret)
                goto out;

        while (1) {
                struct btrfs_qgroup *qgroup;

                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
                        struct btrfs_qgroup_status_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_status_item);

                        if (btrfs_qgroup_status_version(l, ptr) !=
                            BTRFS_QGROUP_STATUS_VERSION) {
                                btrfs_err(fs_info,
                                 "old qgroup version, quota disabled");
                                goto out;
                        }
                        if (btrfs_qgroup_status_generation(l, ptr) !=
                            fs_info->generation) {
                                flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                                btrfs_err(fs_info,
                                        "qgroup generation mismatch, marked as inconsistent");
                        }
                        fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
                                                                          ptr);
                        rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
                        goto next1;
                }

                if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
                    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
                        goto next1;

                qgroup = find_qgroup_rb(fs_info, found_key.offset);
                if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
                    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
                        btrfs_err(fs_info, "inconsistent qgroup config");
                        flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                }
                if (!qgroup) {
                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out;
                        }
                }
                switch (found_key.type) {
                case BTRFS_QGROUP_INFO_KEY: {
                        struct btrfs_qgroup_info_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_info_item);
                        qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
                        qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
                        qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
                        qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
                        /* generation currently unused */
                        break;
                }
                case BTRFS_QGROUP_LIMIT_KEY: {
                        struct btrfs_qgroup_limit_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_limit_item);
                        qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
                        qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
                        qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
                        qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
                        qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
                        break;
                }
                }
next1:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
        btrfs_release_path(path);

        /*
         * pass 2: read all qgroup relations
         */
        key.objectid = 0;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
        if (ret)
                goto out;
        while (1) {
                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
                        goto next2;

                if (found_key.objectid > found_key.offset) {
                        /* parent <- member, not needed to build config */
                        /* FIXME should we omit the key completely? */
                        goto next2;
                }

                ret = add_relation_rb(fs_info, found_key.objectid,
                                      found_key.offset);
                if (ret == -ENOENT) {
                        btrfs_warn(fs_info,
                                "orphan qgroup relation 0x%llx->0x%llx",
                                found_key.objectid, found_key.offset);
                        ret = 0;        /* ignore the error */
                }
                if (ret)
                        goto out;
next2:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
out:
        fs_info->qgroup_flags |= flags;
        if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
                clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
                 ret >= 0)
                ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
        btrfs_free_path(path);

        if (ret < 0) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
        }

        return ret < 0 ? ret : 0;
}
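
/*
 * For reference, the quota tree items walked above use the following
 * keys (roughly):
 *
 *      (0,   BTRFS_QGROUP_STATUS_KEY,   0)        global status item
 *      (0,   BTRFS_QGROUP_INFO_KEY,     qgroupid) usage counters
 *      (0,   BTRFS_QGROUP_LIMIT_KEY,    qgroupid) limits
 *      (src, BTRFS_QGROUP_RELATION_KEY, dst)      one item per direction
 *
 * which is why pass 1 can start at key (0, 0, 0) and pick up status,
 * info and limit items in a single forward scan before pass 2 reads
 * the relations.
 */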

/*
 * Called from close_ctree(), open_ctree() or btrfs_quota_disable(); the
 * first two run in single-threaded paths. For the third, quota_root has
 * already been set to NULL with qgroup_lock held, so it is safe to clean
 * up the in-memory structures without taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct rb_node *n;
        struct btrfs_qgroup *qgroup;

        while ((n = rb_first(&fs_info->qgroup_tree))) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                rb_erase(n, &fs_info->qgroup_tree);
                __del_qgroup_rb(qgroup);
        }
        /*
         * we call btrfs_free_qgroup_config() when umounting
         * filesystem and disabling quota, so we set qgroup_ulist
         * to be null here to avoid double free.
         */
        ulist_free(fs_info->qgroup_ulist);
        fs_info->qgroup_ulist = NULL;
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *quota_root,
                                    u64 src, u64 dst)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_free_path(path);
        return ret;
}
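
/*
 * Relation items are stored in both directions: btrfs_add_qgroup_relation()
 * inserts both a (src, dst) and a (dst, src) item, and
 * btrfs_read_qgroup_config() skips the objectid > offset copy when it
 * rebuilds the in-memory config.
 */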

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *quota_root,
                                    u64 src, u64 dst)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
out:
        btrfs_free_path(path);
        return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_qgroup_info_item *qgroup_info;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        if (btrfs_is_testing(quota_root->fs_info))
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;

        /*
         * Avoid a transaction abort by catching -EEXIST here. In that
         * case, we proceed by re-initializing the existing structure
         * on disk.
         */

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_info));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

        btrfs_mark_buffer_dirty(leaf);

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_limit));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
        if (ret)
                goto out;

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_qgroup *qgroup)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_LIMIT_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
        btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
        btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
        btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
        btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_qgroup *qgroup)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_info_item *qgroup_info;
        int ret;
        int slot;

        if (btrfs_is_testing(root->fs_info))
                return 0;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
        btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
        btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
        btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info,
                                     struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_status_item *ptr;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
        btrfs_set_qgroup_status_rescan(l, ptr,
                                fs_info->qgroup_rescan_progress.objectid);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *leaf = NULL;
        int ret;
        int nr = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;

        key.objectid = 0;
        key.offset = 0;
        key.type = 0;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0)
                        goto out;
                leaf = path->nodes[0];
                nr = btrfs_header_nritems(leaf);
                if (!nr)
                        break;
                /*
                 * Delete the leaves one by one, since the whole
                 * tree is going to be deleted.
                 */
                path->slots[0] = 0;
                ret = btrfs_del_items(trans, root, path, 0, nr);
                if (ret)
                        goto out;

                btrfs_release_path(path);
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

int btrfs_quota_enable(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path = NULL;
        struct btrfs_qgroup_status_item *ptr;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_qgroup *qgroup = NULL;
        int ret = 0;
        int slot;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (fs_info->quota_root)
                goto out;

        fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * initially create the quota tree
         */
        quota_root = btrfs_create_tree(trans, fs_info,
                                       BTRFS_QUOTA_TREE_OBJECTID);
        if (IS_ERR(quota_root)) {
                ret = PTR_ERR(quota_root);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out_free_root;
        }

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*ptr));
        if (ret)
                goto out_free_path;

        leaf = path->nodes[0];
        ptr = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
        btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
        fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
                                BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

        btrfs_mark_buffer_dirty(leaf);

        key.objectid = 0;
        key.type = BTRFS_ROOT_REF_KEY;
        key.offset = 0;

        btrfs_release_path(path);
        ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
        if (ret > 0)
                goto out_add_root;
        if (ret < 0)
                goto out_free_path;

        while (1) {
                slot = path->slots[0];
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.type == BTRFS_ROOT_REF_KEY) {
                        ret = add_qgroup_item(trans, quota_root,
                                              found_key.offset);
                        if (ret)
                                goto out_free_path;

                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out_free_path;
                        }
                }
                ret = btrfs_next_item(tree_root, path);
                if (ret < 0)
                        goto out_free_path;
                if (ret)
                        break;
        }

out_add_root:
        btrfs_release_path(path);
        ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
        if (ret)
                goto out_free_path;

        qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
        if (IS_ERR(qgroup)) {
                ret = PTR_ERR(qgroup);
                goto out_free_path;
        }
        spin_lock(&fs_info->qgroup_lock);
        fs_info->quota_root = quota_root;
        set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        spin_unlock(&fs_info->qgroup_lock);
        ret = qgroup_rescan_init(fs_info, 0, 1);
        if (!ret) {
                qgroup_rescan_zero_tracking(fs_info);
                btrfs_queue_work(fs_info->qgroup_rescan_workers,
                                 &fs_info->qgroup_rescan_work);
        }

out_free_path:
        btrfs_free_path(path);
out_free_root:
        if (ret) {
                free_extent_buffer(quota_root->node);
                free_extent_buffer(quota_root->commit_root);
                kfree(quota_root);
        }
out:
        if (ret) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
        }
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_quota_disable(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!fs_info->quota_root)
                goto out;
        clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        btrfs_qgroup_wait_for_completion(fs_info, false);
        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
        fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
        spin_unlock(&fs_info->qgroup_lock);

        btrfs_free_qgroup_config(fs_info);

        ret = btrfs_clean_quota_tree(trans, quota_root);
        if (ret)
                goto out;

        ret = btrfs_del_root(trans, fs_info, &quota_root->root_key);
        if (ret)
                goto out;

        list_del(&quota_root->dirty_list);

        btrfs_tree_lock(quota_root->node);
        clean_tree_block(fs_info, quota_root->node);
        btrfs_tree_unlock(quota_root->node);
        btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

        free_extent_buffer(quota_root->node);
        free_extent_buffer(quota_root->commit_root);
        kfree(quota_root);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
                         struct btrfs_qgroup *qgroup)
{
        if (list_empty(&qgroup->dirty))
                list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting: we are updating a qgroup relationship where the
 * child qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents are also exclusive for the
 * parent, so excl/rfer can simply be added or removed.
 *
 * The same holds for qgroup reservation space, which must be
 * added/removed for the parent as well; otherwise, when the child
 * releases reservation space, the parent would underflow its
 * reservation (in the relation-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                                    struct ulist *tmp, u64 ref_root,
                                    struct btrfs_qgroup *src, int sign)
{
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *glist;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        u64 num_bytes = src->excl;
        int ret = 0;

        qgroup = find_qgroup_rb(fs_info, ref_root);
        if (!qgroup)
                goto out;

        qgroup->rfer += sign * num_bytes;
        qgroup->rfer_cmpr += sign * num_bytes;

        WARN_ON(sign < 0 && qgroup->excl < num_bytes);
        qgroup->excl += sign * num_bytes;
        qgroup->excl_cmpr += sign * num_bytes;

        if (sign > 0)
                qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
        else
                qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

        qgroup_dirty(fs_info, qgroup);

        /* Get all of the parent groups that contain this qgroup */
        list_for_each_entry(glist, &qgroup->groups, next_group) {
                ret = ulist_add(tmp, glist->group->qgroupid,
                                qgroup_to_aux(glist->group), GFP_ATOMIC);
                if (ret < 0)
                        goto out;
        }

        /* Iterate all of the parents and adjust their reference counts */
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(tmp, &uiter))) {
                qgroup = unode_aux_to_qgroup(unode);
                qgroup->rfer += sign * num_bytes;
                qgroup->rfer_cmpr += sign * num_bytes;
                WARN_ON(sign < 0 && qgroup->excl < num_bytes);
                qgroup->excl += sign * num_bytes;
                if (sign > 0)
                        qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
                else
                        qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
                qgroup->excl_cmpr += sign * num_bytes;
                qgroup_dirty(fs_info, qgroup);

                /* Add any parents of the parents */
                list_for_each_entry(glist, &qgroup->groups, next_group) {
                        ret = ulist_add(tmp, glist->group->qgroupid,
                                        qgroup_to_aux(glist->group), GFP_ATOMIC);
                        if (ret < 0)
                                goto out;
                }
        }
        ret = 0;
out:
        return ret;
}
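
/*
 * Worked example for the helper above: suppose qgroup 0/5 has
 * rfer == excl == 1M (everything exclusive) and is assigned to a new
 * parent 1/100.  Then
 *
 *      __qgroup_excl_accounting(fs_info, tmp, qgroupid of 1/100,
 *                               qgroup of 0/5, +1);
 *
 * adds 1M to the parent's rfer/excl (and copies the child's
 * reservations up), then walks 'tmp' to do the same for every ancestor
 * of 1/100.  Removing the relation runs the same walk with sign == -1.
 */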

/*
 * Quick path for updating a qgroup with only exclusive refs.
 *
 * In that case, just updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, >0 if a full rescan is needed
 * (the INCONSISTENT flag is set in that case).
 * Return <0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
                                   struct ulist *tmp, u64 src, u64 dst,
                                   int sign)
{
        struct btrfs_qgroup *qgroup;
        int ret = 1;
        int err = 0;

        qgroup = find_qgroup_rb(fs_info, src);
        if (!qgroup)
                goto out;
        if (qgroup->excl == qgroup->rfer) {
                ret = 0;
                err = __qgroup_excl_accounting(fs_info, tmp, dst,
                                               qgroup, sign);
                if (err < 0) {
                        ret = err;
                        goto out;
                }
        }
out:
        if (ret)
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        return ret;
}
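
/*
 * The excl == rfer check above is what makes the fast path safe: if
 * every byte the child refers to is also exclusive to it, none of its
 * extents are shared with another tree, so the parents' counters can
 * be fixed up by plain addition/subtraction without re-walking extent
 * ownership.
 */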

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;

        /* Check the level of src and dst first */
        if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
                return -EINVAL;

        tmp = ulist_alloc(GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* Check whether such a qgroup relation already exists */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        ret = add_qgroup_relation_item(trans, quota_root, src, dst);
        if (ret)
                goto out;

        ret = add_qgroup_relation_item(trans, quota_root, dst, src);
        if (ret) {
                del_qgroup_relation_item(trans, quota_root, src, dst);
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        ret = add_relation_rb(fs_info, src, dst);
        if (ret < 0) {
                spin_unlock(&fs_info->qgroup_lock);
                goto out;
        }
        ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        ulist_free(tmp);
        return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;
        int err;

        tmp = ulist_alloc(GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* Check whether such a qgroup relation exists first */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent)
                        goto exist;
        }
        ret = -ENOENT;
        goto out;
exist:
        ret = del_qgroup_relation_item(trans, quota_root, src, dst);
        err = del_qgroup_relation_item(trans, quota_root, dst, src);
        if (err && !ret)
                ret = err;

        spin_lock(&fs_info->qgroup_lock);
        del_relation_rb(fs_info, src, dst);
        ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        ulist_free(tmp);
        return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        ret = __del_qgroup_relation(trans, fs_info, src, dst);
        mutex_unlock(&fs_info->qgroup_ioctl_lock);

        return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (qgroup) {
                ret = -EEXIST;
                goto out;
        }

        ret = add_qgroup_item(trans, quota_root, qgroupid);
        if (ret)
                goto out;

        spin_lock(&fs_info->qgroup_lock);
        qgroup = add_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);

        if (IS_ERR(qgroup))
                ret = PTR_ERR(qgroup);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *list;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        } else {
                /* check if there are no children of this qgroup */
                if (!list_empty(&qgroup->members)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        ret = del_qgroup_item(trans, quota_root, qgroupid);
        if (ret && ret != -ENOENT)
                goto out;

        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                ret = __del_qgroup_relation(trans, fs_info,
                                            qgroupid,
                                            list->group->qgroupid);
                if (ret)
                        goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        del_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info, u64 qgroupid,
                       struct btrfs_qgroup_limit *limit)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;
        /*
         * Sometimes we want to clear the limit on this qgroup.
         * To meet this requirement, we treat -1 as a special value
         * which tells the kernel to clear the limit on this qgroup.
         */
        const u64 CLEAR_VALUE = -1;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
                if (limit->max_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        qgroup->max_rfer = 0;
                } else {
                        qgroup->max_rfer = limit->max_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
                if (limit->max_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        qgroup->max_excl = 0;
                } else {
                        qgroup->max_excl = limit->max_excl;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
                if (limit->rsv_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        qgroup->rsv_rfer = 0;
                } else {
                        qgroup->rsv_rfer = limit->rsv_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
                if (limit->rsv_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        qgroup->rsv_excl = 0;
                } else {
                        qgroup->rsv_excl = limit->rsv_excl;
                }
        }
        qgroup->lim_flags |= limit->flags;

        spin_unlock(&fs_info->qgroup_lock);

        ret = update_qgroup_limit_item(trans, quota_root, qgroup);
        if (ret) {
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                btrfs_info(fs_info, "unable to update quota limit for %llu",
                           qgroupid);
        }

out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
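
/*
 * Illustrative use of the CLEAR_VALUE convention above (hypothetical
 * caller, not taken from this file): to drop only the max_rfer limit,
 * a caller would pass
 *
 *      limit->flags    = BTRFS_QGROUP_LIMIT_MAX_RFER;
 *      limit->max_rfer = (u64)-1;
 *
 * and btrfs_limit_qgroup() clears the flag and zeroes max_rfer instead
 * of storing the -1 as a limit.
 */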

int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
                                struct btrfs_delayed_ref_root *delayed_refs,
                                struct btrfs_qgroup_extent_record *record)
{
        struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_qgroup_extent_record *entry;
        u64 bytenr = record->bytenr;

        lockdep_assert_held(&delayed_refs->lock);
        trace_btrfs_qgroup_trace_extent(fs_info, record);

        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
                                 node);
                if (bytenr < entry->bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->bytenr)
                        p = &(*p)->rb_right;
                else
                        return 1;
        }

        rb_link_node(&record->node, parent_node, p);
        rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
        return 0;
}
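
/*
 * Note the return convention above: 1 means an extent with the same
 * bytenr was already recorded and the caller still owns 'record'
 * (btrfs_qgroup_trace_extent() below frees it in that case), while 0
 * means the record is now owned by the dirty_extent_root tree.
 */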
1493
1494 int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
1495                                    struct btrfs_qgroup_extent_record *qrecord)
1496 {
1497         struct ulist *old_root;
1498         u64 bytenr = qrecord->bytenr;
1499         int ret;
1500
1501         ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
1502         if (ret < 0) {
1503                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1504                 btrfs_warn(fs_info,
1505 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
1506                         ret);
1507                 return 0;
1508         }
1509
1510         /*
1511          * Here we don't need to get the lock of
1512          * trans->transaction->delayed_refs, since inserted qrecord won't
1513          * be deleted, only qrecord->node may be modified (new qrecord insert)
1514          *
1515          * So modifying qrecord->old_roots is safe here
1516          */
1517         qrecord->old_roots = old_root;
1518         return 0;
1519 }
1520
1521 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
1522                 struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
1523                 gfp_t gfp_flag)
1524 {
1525         struct btrfs_qgroup_extent_record *record;
1526         struct btrfs_delayed_ref_root *delayed_refs;
1527         int ret;
1528
1529         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
1530             || bytenr == 0 || num_bytes == 0)
1531                 return 0;
1532         if (WARN_ON(trans == NULL))
1533                 return -EINVAL;
1534         record = kmalloc(sizeof(*record), gfp_flag);
1535         if (!record)
1536                 return -ENOMEM;
1537
1538         delayed_refs = &trans->transaction->delayed_refs;
1539         record->bytenr = bytenr;
1540         record->num_bytes = num_bytes;
1541         record->old_roots = NULL;
1542
1543         spin_lock(&delayed_refs->lock);
1544         ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
1545         spin_unlock(&delayed_refs->lock);
1546         if (ret > 0) {
1547                 kfree(record);
1548                 return 0;
1549         }
1550         return btrfs_qgroup_trace_extent_post(fs_info, record);
1551 }
1552
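/*
 * Trace all file extent items of leaf @eb so the data extents they
 * reference get accounted for qgroups in the current transaction.
 */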
1553 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
1554                                   struct btrfs_fs_info *fs_info,
1555                                   struct extent_buffer *eb)
1556 {
1557         int nr = btrfs_header_nritems(eb);
1558         int i, extent_type, ret;
1559         struct btrfs_key key;
1560         struct btrfs_file_extent_item *fi;
1561         u64 bytenr, num_bytes;
1562
1563         /* We can be called directly from walk_up_proc() */
1564         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1565                 return 0;
1566
1567         for (i = 0; i < nr; i++) {
1568                 btrfs_item_key_to_cpu(eb, &key, i);
1569
1570                 if (key.type != BTRFS_EXTENT_DATA_KEY)
1571                         continue;
1572
1573                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
1574                 /* Filter out non-qgroup-accountable extents */
1575                 extent_type = btrfs_file_extent_type(eb, fi);
1576
1577                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1578                         continue;
1579
1580                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1581                 if (!bytenr)
1582                         continue;
1583
1584                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1585
1586                 ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
1587                                                 num_bytes, GFP_NOFS);
1588                 if (ret)
1589                         return ret;
1590         }
1591         cond_resched();
1592         return 0;
1593 }
1594
1595 /*
1596  * Walk up the tree from the bottom, freeing leaves and any interior
1597  * nodes which have had all slots visited. If a node (leaf or
1598  * interior) is freed, the node above it will have its slot
1599  * incremented. The root node will never be freed.
1600  *
1601  * At the end of this function, we should have a path which has all
1602  * slots incremented to the next position for a search. If we need to
1603  * read a new node it will be NULL and the node above it will have the
1604  * correct slot selected for a later read.
1605  *
1606  * If we increment the root node's slot counter past the number of
1607  * elements, 1 is returned to signal completion of the search.
1608  */
1609 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
1610 {
1611         int level = 0;
1612         int nr, slot;
1613         struct extent_buffer *eb;
1614
1615         if (root_level == 0)
1616                 return 1;
1617
1618         while (level <= root_level) {
1619                 eb = path->nodes[level];
1620                 nr = btrfs_header_nritems(eb);
1621                 path->slots[level]++;
1622                 slot = path->slots[level];
1623                 if (slot >= nr || level == 0) {
1624                         /*
1625                          * Don't free the root - we will detect this
1626                          * condition after our loop and return a
1627                          * positive value for caller to stop walking the tree.
1628                          */
1629                         if (level != root_level) {
1630                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
1631                                 path->locks[level] = 0;
1632
1633                                 free_extent_buffer(eb);
1634                                 path->nodes[level] = NULL;
1635                                 path->slots[level] = 0;
1636                         }
1637                 } else {
1638                         /*
1639                          * We have a valid slot to walk back down
1640                          * from. Stop here so caller can process these
1641                          * new nodes.
1642                          */
1643                         break;
1644                 }
1645
1646                 level++;
1647         }
1648
1649         eb = path->nodes[root_level];
1650         if (path->slots[root_level] >= btrfs_header_nritems(eb))
1651                 return 1;
1652
1653         return 0;
1654 }
1655
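/*
 * Trace a whole subtree for qgroup accounting: walk the tree rooted at
 * @root_eb (of generation @root_gen, at level @root_level), tracing
 * every tree block read on the way down and the file extent items of
 * every leaf reached.
 */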
1656 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
1657                                struct btrfs_root *root,
1658                                struct extent_buffer *root_eb,
1659                                u64 root_gen, int root_level)
1660 {
1661         struct btrfs_fs_info *fs_info = root->fs_info;
1662         int ret = 0;
1663         int level;
1664         struct extent_buffer *eb = root_eb;
1665         struct btrfs_path *path = NULL;
1666
1667         BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
1668         BUG_ON(root_eb == NULL);
1669
1670         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1671                 return 0;
1672
1673         if (!extent_buffer_uptodate(root_eb)) {
1674                 ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
1675                 if (ret)
1676                         goto out;
1677         }
1678
1679         if (root_level == 0) {
1680                 ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
1681                 goto out;
1682         }
1683
1684         path = btrfs_alloc_path();
1685         if (!path)
1686                 return -ENOMEM;
1687
1688         /*
1689          * Walk down the tree.  Missing extent blocks are filled in as
1690          * we go. Metadata is accounted every time we read a new
1691          * extent block.
1692          *
1693          * When we reach a leaf, we account for file extent items in it,
1694          * walk back up the tree (adjusting slot pointers as we go)
1695          * and restart the search process.
1696          */
1697         extent_buffer_get(root_eb); /* For path */
1698         path->nodes[root_level] = root_eb;
1699         path->slots[root_level] = 0;
1700         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
1701 walk_down:
1702         level = root_level;
1703         while (level >= 0) {
1704                 if (path->nodes[level] == NULL) {
1705                         struct btrfs_key first_key;
1706                         int parent_slot;
1707                         u64 child_gen;
1708                         u64 child_bytenr;
1709
1710                         /*
1711                          * We need to get child blockptr/gen from parent before
1712                          * we can read it.
1713                          */
1714                         eb = path->nodes[level + 1];
1715                         parent_slot = path->slots[level + 1];
1716                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
1717                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
1718                         btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
1719
1720                         eb = read_tree_block(fs_info, child_bytenr, child_gen,
1721                                              level, &first_key);
1722                         if (IS_ERR(eb)) {
1723                                 ret = PTR_ERR(eb);
1724                                 goto out;
1725                         } else if (!extent_buffer_uptodate(eb)) {
1726                                 free_extent_buffer(eb);
1727                                 ret = -EIO;
1728                                 goto out;
1729                         }
1730
1731                         path->nodes[level] = eb;
1732                         path->slots[level] = 0;
1733
1734                         btrfs_tree_read_lock(eb);
1735                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1736                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
1737
1738                         ret = btrfs_qgroup_trace_extent(trans, fs_info,
1739                                                         child_bytenr,
1740                                                         fs_info->nodesize,
1741                                                         GFP_NOFS);
1742                         if (ret)
1743                                 goto out;
1744                 }
1745
1746                 if (level == 0) {
1747                         ret = btrfs_qgroup_trace_leaf_items(trans, fs_info,
1748                                                            path->nodes[level]);
1749                         if (ret)
1750                                 goto out;
1751
1752                         /* Nonzero return here means we completed our search */
1753                         ret = adjust_slots_upwards(path, root_level);
1754                         if (ret)
1755                                 break;
1756
1757                         /* Restart search with new slots */
1758                         goto walk_down;
1759                 }
1760
1761                 level--;
1762         }
1763
1764         ret = 0;
1765 out:
1766         btrfs_free_path(path);
1767
1768         return ret;
1769 }
1770
1771 #define UPDATE_NEW      0
1772 #define UPDATE_OLD      1
1773 /*
1774  * Walk all of the roots that point to the bytenr and adjust their refcnts.
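 *
 * For each root, the matching qgroup and all of its ancestor groups get
 * their old or new refcnt at @seq bumped by one and are collected into
 * @qgroups for the later qgroup_update_counters() pass.  @tmp is only
 * scratch space.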
1775  */
1776 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
1777                                 struct ulist *roots, struct ulist *tmp,
1778                                 struct ulist *qgroups, u64 seq, int update_old)
1779 {
1780         struct ulist_node *unode;
1781         struct ulist_iterator uiter;
1782         struct ulist_node *tmp_unode;
1783         struct ulist_iterator tmp_uiter;
1784         struct btrfs_qgroup *qg;
1785         int ret = 0;
1786
1787         if (!roots)
1788                 return 0;
1789         ULIST_ITER_INIT(&uiter);
1790         while ((unode = ulist_next(roots, &uiter))) {
1791                 qg = find_qgroup_rb(fs_info, unode->val);
1792                 if (!qg)
1793                         continue;
1794
1795                 ulist_reinit(tmp);
1796                 ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
1797                                 GFP_ATOMIC);
1798                 if (ret < 0)
1799                         return ret;
1800                 ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
1801                 if (ret < 0)
1802                         return ret;
1803                 ULIST_ITER_INIT(&tmp_uiter);
1804                 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1805                         struct btrfs_qgroup_list *glist;
1806
1807                         qg = unode_aux_to_qgroup(tmp_unode);
1808                         if (update_old)
1809                                 btrfs_qgroup_update_old_refcnt(qg, seq, 1);
1810                         else
1811                                 btrfs_qgroup_update_new_refcnt(qg, seq, 1);
1812                         list_for_each_entry(glist, &qg->groups, next_group) {
1813                                 ret = ulist_add(qgroups, glist->group->qgroupid,
1814                                                 qgroup_to_aux(glist->group),
1815                                                 GFP_ATOMIC);
1816                                 if (ret < 0)
1817                                         return ret;
1818                                 ret = ulist_add(tmp, glist->group->qgroupid,
1819                                                 qgroup_to_aux(glist->group),
1820                                                 GFP_ATOMIC);
1821                                 if (ret < 0)
1822                                         return ret;
1823                         }
1824                 }
1825         }
1826         return 0;
1827 }
1828
1829 /*
1830  * Update qgroup rfer/excl counters.
1831  * Rfer update is easy, the code can explain itself.
1832  *
1833  * Excl update is tricky, the update is split into 2 parts.
1834  * Part 1: Possible exclusive <-> sharing detection:
1835  *      |       A       |       !A      |
1836  *  -------------------------------------
1837  *  B   |       *       |       -       |
1838  *  -------------------------------------
1839  *  !B  |       +       |       **      |
1840  *  -------------------------------------
1841  *
1842  * Conditions:
1843  * A:   cur_old_roots < nr_old_roots    (not exclusive before)
1844  * !A:  cur_old_roots == nr_old_roots   (possible exclusive before)
1845  * B:   cur_new_roots < nr_new_roots    (not exclusive now)
1846  * !B:  cur_new_roots == nr_new_roots   (possible exclusive now)
1847  *
1848  * Results:
1849  * +: Possible sharing -> exclusive     -: Possible exclusive -> sharing
1850  * *: Definitely not changed.           **: Possible unchanged.
1851  *
1852  * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
1853  *
1854  * To make the logic clear, we first use conditions A and B to split the
1855  * combinations into 4 results.
1856  *
1857  * Then, for results "+" and "-", check the old/new_roots == 0 case, as
1858  * in those results only one of the two counts may be 0.
1859  *
1860  * Lastly, check result **; since there are 2 counts that may be 0, split
1861  * it again (2x2).
1862  * But this time we don't need to consider other things; the code and
1863  * logic are easy to understand now.
1864  */
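/*
 * Example: an extent of num_bytes is referenced by subvolume qgroups A
 * and B, and B drops its reference (nr_old_roots = 2, nr_new_roots = 1).
 * For A, cur_old_count = 1 < nr_old_roots while cur_new_count = 1 ==
 * nr_new_roots, so A gains num_bytes of excl.  For B, cur_new_count = 0,
 * so B loses num_bytes of rfer while its excl is untouched, as the
 * extent was never exclusive to it.
 */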
1865 static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
1866                                   struct ulist *qgroups,
1867                                   u64 nr_old_roots,
1868                                   u64 nr_new_roots,
1869                                   u64 num_bytes, u64 seq)
1870 {
1871         struct ulist_node *unode;
1872         struct ulist_iterator uiter;
1873         struct btrfs_qgroup *qg;
1874         u64 cur_new_count, cur_old_count;
1875
1876         ULIST_ITER_INIT(&uiter);
1877         while ((unode = ulist_next(qgroups, &uiter))) {
1878                 bool dirty = false;
1879
1880                 qg = unode_aux_to_qgroup(unode);
1881                 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
1882                 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
1883
1884                 trace_qgroup_update_counters(fs_info, qg->qgroupid,
1885                                              cur_old_count, cur_new_count);
1886
1887                 /* Rfer update part */
1888                 if (cur_old_count == 0 && cur_new_count > 0) {
1889                         qg->rfer += num_bytes;
1890                         qg->rfer_cmpr += num_bytes;
1891                         dirty = true;
1892                 }
1893                 if (cur_old_count > 0 && cur_new_count == 0) {
1894                         qg->rfer -= num_bytes;
1895                         qg->rfer_cmpr -= num_bytes;
1896                         dirty = true;
1897                 }
1898
1899                 /* Excl update part */
1900                 /* Exclusive/none -> shared case */
1901                 if (cur_old_count == nr_old_roots &&
1902                     cur_new_count < nr_new_roots) {
1903                         /* Exclusive -> shared */
1904                         if (cur_old_count != 0) {
1905                                 qg->excl -= num_bytes;
1906                                 qg->excl_cmpr -= num_bytes;
1907                                 dirty = true;
1908                         }
1909                 }
1910
1911                 /* Shared -> exclusive/none case */
1912                 if (cur_old_count < nr_old_roots &&
1913                     cur_new_count == nr_new_roots) {
1914                         /* Shared->exclusive */
1915                         if (cur_new_count != 0) {
1916                                 qg->excl += num_bytes;
1917                                 qg->excl_cmpr += num_bytes;
1918                                 dirty = true;
1919                         }
1920                 }
1921
1922                 /* Exclusive/none -> exclusive/none case */
1923                 if (cur_old_count == nr_old_roots &&
1924                     cur_new_count == nr_new_roots) {
1925                         if (cur_old_count == 0) {
1926                                 /* None -> exclusive/none */
1927
1928                                 if (cur_new_count != 0) {
1929                                         /* None -> exclusive */
1930                                         qg->excl += num_bytes;
1931                                         qg->excl_cmpr += num_bytes;
1932                                         dirty = true;
1933                                 }
1934                                 /* None -> none, nothing changed */
1935                         } else {
1936                                 /* Exclusive -> exclusive/none */
1937
1938                                 if (cur_new_count == 0) {
1939                                         /* Exclusive -> none */
1940                                         qg->excl -= num_bytes;
1941                                         qg->excl_cmpr -= num_bytes;
1942                                         dirty = true;
1943                                 }
1944                                 /* Exclusive -> exclusive, nothing changed */
1945                         }
1946                 }
1947
1948                 if (dirty)
1949                         qgroup_dirty(fs_info, qg);
1950         }
1951         return 0;
1952 }
1953
1954 /*
1955  * Check if @roots could be a list of fs tree roots
1956  *
1957  * Return 0 if the ulist definitely contains no fs/subvol tree roots
1958  * Return 1 if the list may contain fs/subvol tree roots (an empty list
1959  *          counts as well)
1960  */
1961 static int maybe_fs_roots(struct ulist *roots)
1962 {
1963         struct ulist_node *unode;
1964         struct ulist_iterator uiter;
1965
1966         /* Empty one, still possible for fs roots */
1967         if (!roots || roots->nnodes == 0)
1968                 return 1;
1969
1970         ULIST_ITER_INIT(&uiter);
1971         unode = ulist_next(roots, &uiter);
1972         if (!unode)
1973                 return 1;
1974
1975         /*
1976          * If it contains fs tree roots, then it must belong to fs/subvol
1977          * trees.
1978          * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
1979          */
1980         return is_fstree(unode->val);
1981 }
1982
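/*
 * Account one extent against the qgroups: compare the set of roots
 * referencing it before (@old_roots) and after (@new_roots) the change
 * and update the rfer/excl counters of every affected qgroup.  Both
 * ulists are consumed (freed) here, even on error.
 */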
1983 int
1984 btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
1985                             struct btrfs_fs_info *fs_info,
1986                             u64 bytenr, u64 num_bytes,
1987                             struct ulist *old_roots, struct ulist *new_roots)
1988 {
1989         struct ulist *qgroups = NULL;
1990         struct ulist *tmp = NULL;
1991         u64 seq;
1992         u64 nr_new_roots = 0;
1993         u64 nr_old_roots = 0;
1994         int ret = 0;
1995
1996         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1997                 return 0;
1998
1999         if (new_roots) {
2000                 if (!maybe_fs_roots(new_roots))
2001                         goto out_free;
2002                 nr_new_roots = new_roots->nnodes;
2003         }
2004         if (old_roots) {
2005                 if (!maybe_fs_roots(old_roots))
2006                         goto out_free;
2007                 nr_old_roots = old_roots->nnodes;
2008         }
2009
2010         /* Quick exit, either not fs tree roots, or won't affect any qgroup */
2011         if (nr_old_roots == 0 && nr_new_roots == 0)
2012                 goto out_free;
2013
2014         BUG_ON(!fs_info->quota_root);
2015
2016         trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes,
2017                                           nr_old_roots, nr_new_roots);
2018
2019         qgroups = ulist_alloc(GFP_NOFS);
2020         if (!qgroups) {
2021                 ret = -ENOMEM;
2022                 goto out_free;
2023         }
2024         tmp = ulist_alloc(GFP_NOFS);
2025         if (!tmp) {
2026                 ret = -ENOMEM;
2027                 goto out_free;
2028         }
2029
2030         mutex_lock(&fs_info->qgroup_rescan_lock);
2031         if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2032                 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2033                         mutex_unlock(&fs_info->qgroup_rescan_lock);
2034                         ret = 0;
2035                         goto out_free;
2036                 }
2037         }
2038         mutex_unlock(&fs_info->qgroup_rescan_lock);
2039
2040         spin_lock(&fs_info->qgroup_lock);
2041         seq = fs_info->qgroup_seq;
2042
2043         /* Update old refcnts using old_roots */
2044         ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
2045                                    UPDATE_OLD);
2046         if (ret < 0)
2047                 goto out;
2048
2049         /* Update new refcnts using new_roots */
2050         ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
2051                                    UPDATE_NEW);
2052         if (ret < 0)
2053                 goto out;
2054
2055         qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
2056                                num_bytes, seq);
2057
2058         /*
2059          * Bump qgroup_seq to avoid seq overlap
2060          */
2061         fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2062 out:
2063         spin_unlock(&fs_info->qgroup_lock);
2064 out_free:
2065         ulist_free(tmp);
2066         ulist_free(qgroups);
2067         ulist_free(old_roots);
2068         ulist_free(new_roots);
2069         return ret;
2070 }
2071
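/*
 * Account all dirty extent records collected in the current
 * transaction: look up each record's roots as of now and feed the
 * old/new root sets to btrfs_qgroup_account_extent().
 */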
2072 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2073 {
2074         struct btrfs_fs_info *fs_info = trans->fs_info;
2075         struct btrfs_qgroup_extent_record *record;
2076         struct btrfs_delayed_ref_root *delayed_refs;
2077         struct ulist *new_roots = NULL;
2078         struct rb_node *node;
2079         u64 qgroup_to_skip;
2080         int ret = 0;
2081
2082         delayed_refs = &trans->transaction->delayed_refs;
2083         qgroup_to_skip = delayed_refs->qgroup_to_skip;
2084         while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2085                 record = rb_entry(node, struct btrfs_qgroup_extent_record,
2086                                   node);
2087
2088                 trace_btrfs_qgroup_account_extents(fs_info, record);
2089
2090                 if (!ret) {
2091                         /*
2092                          * Old roots should be searched when inserting qgroup
2093                          * extent record
2094                          */
2095                         if (WARN_ON(!record->old_roots)) {
2096                                 /* Search commit root to find old_roots */
2097                                 ret = btrfs_find_all_roots(NULL, fs_info,
2098                                                 record->bytenr, 0,
2099                                                 &record->old_roots, false);
2100                                 if (ret < 0)
2101                                         goto cleanup;
2102                         }
2103
2104                         /*
2105                          * Use SEQ_LAST as time_seq to do a special search that
2106                          * doesn't lock the tree or delayed_refs and searches
2107                          * the current root.  It's safe inside commit_transaction().
2108                          */
2109                         ret = btrfs_find_all_roots(trans, fs_info,
2110                                 record->bytenr, SEQ_LAST, &new_roots, false);
2111                         if (ret < 0)
2112                                 goto cleanup;
2113                         if (qgroup_to_skip) {
2114                                 ulist_del(new_roots, qgroup_to_skip, 0);
2115                                 ulist_del(record->old_roots, qgroup_to_skip,
2116                                           0);
2117                         }
2118                         ret = btrfs_qgroup_account_extent(trans, fs_info,
2119                                         record->bytenr, record->num_bytes,
2120                                         record->old_roots, new_roots);
2121                         record->old_roots = NULL;
2122                         new_roots = NULL;
2123                 }
2124 cleanup:
2125                 ulist_free(record->old_roots);
2126                 ulist_free(new_roots);
2127                 new_roots = NULL;
2128                 rb_erase(node, &delayed_refs->dirty_extent_root);
2129                 kfree(record);
2130
2131         }
2132         return ret;
2133 }
2134
2135 /*
2136  * Called from commit_transaction(). Writes all changed qgroups to disk.
2137  */
2138 int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
2139                       struct btrfs_fs_info *fs_info)
2140 {
2141         struct btrfs_root *quota_root = fs_info->quota_root;
2142         int ret = 0;
2143
2144         if (!quota_root)
2145                 return ret;
2146
2147         spin_lock(&fs_info->qgroup_lock);
2148         while (!list_empty(&fs_info->dirty_qgroups)) {
2149                 struct btrfs_qgroup *qgroup;
2150                 qgroup = list_first_entry(&fs_info->dirty_qgroups,
2151                                           struct btrfs_qgroup, dirty);
2152                 list_del_init(&qgroup->dirty);
2153                 spin_unlock(&fs_info->qgroup_lock);
2154                 ret = update_qgroup_info_item(trans, quota_root, qgroup);
2155                 if (ret)
2156                         fs_info->qgroup_flags |=
2157                                         BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2158                 ret = update_qgroup_limit_item(trans, quota_root, qgroup);
2159                 if (ret)
2160                         fs_info->qgroup_flags |=
2161                                         BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2162                 spin_lock(&fs_info->qgroup_lock);
2163         }
2164         if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2165                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2166         else
2167                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2168         spin_unlock(&fs_info->qgroup_lock);
2169
2170         ret = update_qgroup_status_item(trans, fs_info, quota_root);
2171         if (ret)
2172                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2173
2174         return ret;
2175 }
2176
2177 /*
2178  * Copy the accounting information between qgroups. This is necessary
2179  * when a snapshot or a subvolume is created. Throwing an error will
2180  * cause a transaction abort so we take extra care here to only error
2181  * when a readonly fs is a reasonable outcome.
2182  */
2183 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
2184                          struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
2185                          struct btrfs_qgroup_inherit *inherit)
2186 {
2187         int ret = 0;
2188         int i;
2189         u64 *i_qgroups;
2190         struct btrfs_root *quota_root = fs_info->quota_root;
2191         struct btrfs_qgroup *srcgroup;
2192         struct btrfs_qgroup *dstgroup;
2193         u32 level_size = 0;
2194         u64 nums;
2195
2196         mutex_lock(&fs_info->qgroup_ioctl_lock);
2197         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2198                 goto out;
2199
2200         if (!quota_root) {
2201                 ret = -EINVAL;
2202                 goto out;
2203         }
2204
2205         if (inherit) {
2206                 i_qgroups = (u64 *)(inherit + 1);
2207                 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2208                        2 * inherit->num_excl_copies;
2209                 for (i = 0; i < nums; ++i) {
2210                         srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2211
2212                         /*
2213                          * Zero out invalid groups so we can ignore
2214                          * them later.
2215                          */
2216                         if (!srcgroup ||
2217                             ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
2218                                 *i_qgroups = 0ULL;
2219
2220                         ++i_qgroups;
2221                 }
2222         }
2223
2224         /*
2225          * create a tracking group for the subvol itself
2226          */
2227         ret = add_qgroup_item(trans, quota_root, objectid);
2228         if (ret)
2229                 goto out;
2230
2231         if (srcid) {
2232                 struct btrfs_root *srcroot;
2233                 struct btrfs_key srckey;
2234
2235                 srckey.objectid = srcid;
2236                 srckey.type = BTRFS_ROOT_ITEM_KEY;
2237                 srckey.offset = (u64)-1;
2238                 srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
2239                 if (IS_ERR(srcroot)) {
2240                         ret = PTR_ERR(srcroot);
2241                         goto out;
2242                 }
2243
2244                 level_size = fs_info->nodesize;
2245         }
2246
2247         /*
2248          * add qgroup to all inherited groups
2249          */
2250         if (inherit) {
2251                 i_qgroups = (u64 *)(inherit + 1);
2252                 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
2253                         if (*i_qgroups == 0)
2254                                 continue;
2255                         ret = add_qgroup_relation_item(trans, quota_root,
2256                                                        objectid, *i_qgroups);
2257                         if (ret && ret != -EEXIST)
2258                                 goto out;
2259                         ret = add_qgroup_relation_item(trans, quota_root,
2260                                                        *i_qgroups, objectid);
2261                         if (ret && ret != -EEXIST)
2262                                 goto out;
2263                 }
2264                 ret = 0;
2265         }
2266
2267
2268         spin_lock(&fs_info->qgroup_lock);
2269
2270         dstgroup = add_qgroup_rb(fs_info, objectid);
2271         if (IS_ERR(dstgroup)) {
2272                 ret = PTR_ERR(dstgroup);
2273                 goto unlock;
2274         }
2275
2276         if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2277                 dstgroup->lim_flags = inherit->lim.flags;
2278                 dstgroup->max_rfer = inherit->lim.max_rfer;
2279                 dstgroup->max_excl = inherit->lim.max_excl;
2280                 dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2281                 dstgroup->rsv_excl = inherit->lim.rsv_excl;
2282
2283                 ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
2284                 if (ret) {
2285                         fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2286                         btrfs_info(fs_info,
2287                                    "unable to update quota limit for %llu",
2288                                    dstgroup->qgroupid);
2289                         goto unlock;
2290                 }
2291         }
2292
2293         if (srcid) {
2294                 srcgroup = find_qgroup_rb(fs_info, srcid);
2295                 if (!srcgroup)
2296                         goto unlock;
2297
2298                 /*
2299                  * We call inherit after we clone the root in order to make sure
2300                  * our counts don't go crazy, so at this point the only
2301                  * difference between the two roots should be the root node.
2302                  */
2303                 dstgroup->rfer = srcgroup->rfer;
2304                 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2305                 dstgroup->excl = level_size;
2306                 dstgroup->excl_cmpr = level_size;
2307                 srcgroup->excl = level_size;
2308                 srcgroup->excl_cmpr = level_size;
2309
2310                 /* inherit the limit info */
2311                 dstgroup->lim_flags = srcgroup->lim_flags;
2312                 dstgroup->max_rfer = srcgroup->max_rfer;
2313                 dstgroup->max_excl = srcgroup->max_excl;
2314                 dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2315                 dstgroup->rsv_excl = srcgroup->rsv_excl;
2316
2317                 qgroup_dirty(fs_info, dstgroup);
2318                 qgroup_dirty(fs_info, srcgroup);
2319         }
2320
2321         if (!inherit)
2322                 goto unlock;
2323
2324         i_qgroups = (u64 *)(inherit + 1);
2325         for (i = 0; i < inherit->num_qgroups; ++i) {
2326                 if (*i_qgroups) {
2327                         ret = add_relation_rb(fs_info, objectid, *i_qgroups);
2328                         if (ret)
2329                                 goto unlock;
2330                 }
2331                 ++i_qgroups;
2332         }
2333
2334         for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
2335                 struct btrfs_qgroup *src;
2336                 struct btrfs_qgroup *dst;
2337
2338                 if (!i_qgroups[0] || !i_qgroups[1])
2339                         continue;
2340
2341                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2342                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2343
2344                 if (!src || !dst) {
2345                         ret = -EINVAL;
2346                         goto unlock;
2347                 }
2348
2349                 dst->rfer = src->rfer - level_size;
2350                 dst->rfer_cmpr = src->rfer_cmpr - level_size;
2351         }
2352         for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
2353                 struct btrfs_qgroup *src;
2354                 struct btrfs_qgroup *dst;
2355
2356                 if (!i_qgroups[0] || !i_qgroups[1])
2357                         continue;
2358
2359                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2360                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2361
2362                 if (!src || !dst) {
2363                         ret = -EINVAL;
2364                         goto unlock;
2365                 }
2366
2367                 dst->excl = src->excl + level_size;
2368                 dst->excl_cmpr = src->excl_cmpr + level_size;
2369         }
2370
2371 unlock:
2372         spin_unlock(&fs_info->qgroup_lock);
2373 out:
2374         mutex_unlock(&fs_info->qgroup_ioctl_lock);
2375         return ret;
2376 }
2377
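/*
 * Check whether adding @num_bytes would push @qg past its configured
 * max_rfer/max_excl limits, counting space that is already reserved.
 * Returns true if the new reservation would still fit.
 */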
2378 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
2379 {
2380         if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2381             qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
2382                 return false;
2383
2384         if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2385             qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
2386                 return false;
2387
2388         return true;
2389 }
2390
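/*
 * Try to reserve @num_bytes of @type for the qgroup of @root and all of
 * its ancestor groups.  With @enforce set, the reservation fails with
 * -EDQUOT as soon as any affected qgroup would exceed its limits.
 */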
2391 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
2392                           enum btrfs_qgroup_rsv_type type)
2393 {
2394         struct btrfs_root *quota_root;
2395         struct btrfs_qgroup *qgroup;
2396         struct btrfs_fs_info *fs_info = root->fs_info;
2397         u64 ref_root = root->root_key.objectid;
2398         int ret = 0;
2399         struct ulist_node *unode;
2400         struct ulist_iterator uiter;
2401
2402         if (!is_fstree(ref_root))
2403                 return 0;
2404
2405         if (num_bytes == 0)
2406                 return 0;
2407
2408         if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
2409             capable(CAP_SYS_RESOURCE))
2410                 enforce = false;
2411
2412         spin_lock(&fs_info->qgroup_lock);
2413         quota_root = fs_info->quota_root;
2414         if (!quota_root)
2415                 goto out;
2416
2417         qgroup = find_qgroup_rb(fs_info, ref_root);
2418         if (!qgroup)
2419                 goto out;
2420
2421         /*
2422          * In a first step, check all affected qgroups to see if any of
2423          * their limits would be exceeded.
2424          */
2425         ulist_reinit(fs_info->qgroup_ulist);
2426         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2427                         qgroup_to_aux(qgroup), GFP_ATOMIC);
2428         if (ret < 0)
2429                 goto out;
2430         ULIST_ITER_INIT(&uiter);
2431         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2432                 struct btrfs_qgroup *qg;
2433                 struct btrfs_qgroup_list *glist;
2434
2435                 qg = unode_aux_to_qgroup(unode);
2436
2437                 if (enforce && !qgroup_check_limits(qg, num_bytes)) {
2438                         ret = -EDQUOT;
2439                         goto out;
2440                 }
2441
2442                 list_for_each_entry(glist, &qg->groups, next_group) {
2443                         ret = ulist_add(fs_info->qgroup_ulist,
2444                                         glist->group->qgroupid,
2445                                         qgroup_to_aux(glist->group), GFP_ATOMIC);
2446                         if (ret < 0)
2447                                 goto out;
2448                 }
2449         }
2450         ret = 0;
2451         /*
2452          * no limits exceeded, now record the reservation into all qgroups
2453          */
2454         ULIST_ITER_INIT(&uiter);
2455         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2456                 struct btrfs_qgroup *qg;
2457
2458                 qg = unode_aux_to_qgroup(unode);
2459
2460                 qgroup_rsv_add(fs_info, qg, num_bytes, type);
2462         }
2463
2464 out:
2465         spin_unlock(&fs_info->qgroup_lock);
2466         return ret;
2467 }
2468
2469 /*
2470  * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
2471  * qgroup).
2472  *
2473  * Will handle all higher level qgroup too.
2474  *
2475  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
2476  * This special case is only used for META_PERTRANS type.
2477  */
2478 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
2479                                u64 ref_root, u64 num_bytes,
2480                                enum btrfs_qgroup_rsv_type type)
2481 {
2482         struct btrfs_root *quota_root;
2483         struct btrfs_qgroup *qgroup;
2484         struct ulist_node *unode;
2485         struct ulist_iterator uiter;
2486         int ret = 0;
2487
2488         if (!is_fstree(ref_root))
2489                 return;
2490
2491         if (num_bytes == 0)
2492                 return;
2493
2494         if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
2495                 WARN(1, "%s: Invalid type to free", __func__);
2496                 return;
2497         }
2498         spin_lock(&fs_info->qgroup_lock);
2499
2500         quota_root = fs_info->quota_root;
2501         if (!quota_root)
2502                 goto out;
2503
2504         qgroup = find_qgroup_rb(fs_info, ref_root);
2505         if (!qgroup)
2506                 goto out;
2507
2508         if (num_bytes == (u64)-1)
2509                 /*
2510                  * We're freeing all pertrans rsv; use the reserved value of
2511                  * the level-0 qgroup as the real num_bytes to free.
2512                  */
2513                 num_bytes = qgroup->rsv.values[type];
2514
2515         ulist_reinit(fs_info->qgroup_ulist);
2516         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2517                         qgroup_to_aux(qgroup), GFP_ATOMIC);
2518         if (ret < 0)
2519                 goto out;
2520         ULIST_ITER_INIT(&uiter);
2521         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2522                 struct btrfs_qgroup *qg;
2523                 struct btrfs_qgroup_list *glist;
2524
2525                 qg = unode_aux_to_qgroup(unode);
2526
2527                 qgroup_rsv_release(fs_info, qg, num_bytes, type);
2529
2530                 list_for_each_entry(glist, &qg->groups, next_group) {
2531                         ret = ulist_add(fs_info->qgroup_ulist,
2532                                         glist->group->qgroupid,
2533                                         qgroup_to_aux(glist->group), GFP_ATOMIC);
2534                         if (ret < 0)
2535                                 goto out;
2536                 }
2537         }
2538
2539 out:
2540         spin_unlock(&fs_info->qgroup_lock);
2541 }
2542
2543 /*
2544  * returns < 0 on error, 0 when more leaves are to be scanned.
2545  * returns 1 when done.
2546  */
2547 static int
2548 qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2549                    struct btrfs_trans_handle *trans)
2550 {
2551         struct btrfs_key found;
2552         struct extent_buffer *scratch_leaf = NULL;
2553         struct ulist *roots = NULL;
2554         struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2555         u64 num_bytes;
2556         int slot;
2557         int ret;
2558
2559         mutex_lock(&fs_info->qgroup_rescan_lock);
2560         ret = btrfs_search_slot_for_read(fs_info->extent_root,
2561                                          &fs_info->qgroup_rescan_progress,
2562                                          path, 1, 0);
2563
2564         btrfs_debug(fs_info,
2565                 "current progress key (%llu %u %llu), search_slot ret %d",
2566                 fs_info->qgroup_rescan_progress.objectid,
2567                 fs_info->qgroup_rescan_progress.type,
2568                 fs_info->qgroup_rescan_progress.offset, ret);
2569
2570         if (ret) {
2571                 /*
2572                  * The rescan is about to end, we will not be scanning any
2573                  * further blocks. We cannot unset the RESCAN flag here, because
2574                  * we want to commit the transaction if everything went well.
2575                  * To make the live accounting work in this phase, we set our
2576                  * scan progress pointer such that every real extent objectid
2577                  * will be smaller.
2578                  */
2579                 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
2580                 btrfs_release_path(path);
2581                 mutex_unlock(&fs_info->qgroup_rescan_lock);
2582                 return ret;
2583         }
2584
2585         btrfs_item_key_to_cpu(path->nodes[0], &found,
2586                               btrfs_header_nritems(path->nodes[0]) - 1);
2587         fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
2588
2589         btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2590         scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
2591         if (!scratch_leaf) {
2592                 ret = -ENOMEM;
2593                 mutex_unlock(&fs_info->qgroup_rescan_lock);
2594                 goto out;
2595         }
2596         extent_buffer_get(scratch_leaf);
2597         btrfs_tree_read_lock(scratch_leaf);
2598         btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
2599         slot = path->slots[0];
2600         btrfs_release_path(path);
2601         mutex_unlock(&fs_info->qgroup_rescan_lock);
2602
2603         for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
2604                 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
2605                 if (found.type != BTRFS_EXTENT_ITEM_KEY &&
2606                     found.type != BTRFS_METADATA_ITEM_KEY)
2607                         continue;
2608                 if (found.type == BTRFS_METADATA_ITEM_KEY)
2609                         num_bytes = fs_info->nodesize;
2610                 else
2611                         num_bytes = found.offset;
2612
2613                 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
2614                                            &roots, false);
2615                 if (ret < 0)
2616                         goto out;
2617                 /* For rescan, just pass old_roots as NULL */
2618                 ret = btrfs_qgroup_account_extent(trans, fs_info,
2619                                 found.objectid, num_bytes, NULL, roots);
2620                 if (ret < 0)
2621                         goto out;
2622         }
2623 out:
2624         if (scratch_leaf) {
2625                 btrfs_tree_read_unlock_blocking(scratch_leaf);
2626                 free_extent_buffer(scratch_leaf);
2627         }
2628         btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2629
2630         return ret;
2631 }
2632
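/*
 * Rescan worker: scans extent tree leaves in batches, each in its own
 * transaction, until qgroup_rescan_leaf() reports completion or an
 * error, then persists the qgroup status item and clears the RESCAN
 * state.
 */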
2633 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
2634 {
2635         struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
2636                                                      qgroup_rescan_work);
2637         struct btrfs_path *path;
2638         struct btrfs_trans_handle *trans = NULL;
2639         int err = -ENOMEM;
2640         int ret = 0;
2641
2642         path = btrfs_alloc_path();
2643         if (!path)
2644                 goto out;
2645
2646         err = 0;
2647         while (!err && !btrfs_fs_closing(fs_info)) {
2648                 trans = btrfs_start_transaction(fs_info->fs_root, 0);
2649                 if (IS_ERR(trans)) {
2650                         err = PTR_ERR(trans);
2651                         break;
2652                 }
2653                 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
2654                         err = -EINTR;
2655                 } else {
2656                         err = qgroup_rescan_leaf(fs_info, path, trans);
2657                 }
2658                 if (err > 0)
2659                         btrfs_commit_transaction(trans);
2660                 else
2661                         btrfs_end_transaction(trans);
2662         }
2663
2664 out:
2665         btrfs_free_path(path);
2666
2667         mutex_lock(&fs_info->qgroup_rescan_lock);
2668         if (!btrfs_fs_closing(fs_info))
2669                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2670
2671         if (err > 0 &&
2672             fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
2673                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2674         } else if (err < 0) {
2675                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2676         }
2677         mutex_unlock(&fs_info->qgroup_rescan_lock);
2678
2679         /*
2680  * Only update the status item, since the previous part has already
2681  * updated the qgroup info.
2682          */
2683         trans = btrfs_start_transaction(fs_info->quota_root, 1);
2684         if (IS_ERR(trans)) {
2685                 err = PTR_ERR(trans);
2686                 btrfs_err(fs_info,
2687                           "failed to start transaction for status update: %d",
2688                           err);
2689                 goto done;
2690         }
2691         ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
2692         if (ret < 0) {
2693                 err = ret;
2694                 btrfs_err(fs_info, "failed to update qgroup status: %d", err);
2695         }
2696         btrfs_end_transaction(trans);
2697
2698         if (btrfs_fs_closing(fs_info)) {
2699                 btrfs_info(fs_info, "qgroup scan paused");
2700         } else if (err >= 0) {
2701                 btrfs_info(fs_info, "qgroup scan completed%s",
2702                         err > 0 ? " (inconsistency flag cleared)" : "");
2703         } else {
2704                 btrfs_err(fs_info, "qgroup scan failed with %d", err);
2705         }
2706
2707 done:
2708         mutex_lock(&fs_info->qgroup_rescan_lock);
2709         fs_info->qgroup_rescan_running = false;
2710         mutex_unlock(&fs_info->qgroup_rescan_lock);
2711         complete_all(&fs_info->qgroup_rescan_completion);
2712 }
2713
2714 /*
2715  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
2716  * memory required for the rescan context.
2717  */
2718 static int
2719 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
2720                    int init_flags)
2721 {
2722         int ret = 0;
2723
2724         if (!init_flags &&
2725             (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
2726              !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
2727                 ret = -EINVAL;
2728                 goto err;
2729         }
2730
2731         mutex_lock(&fs_info->qgroup_rescan_lock);
2732         spin_lock(&fs_info->qgroup_lock);
2733
2734         if (init_flags) {
2735                 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2736                         ret = -EINPROGRESS;
2737                 else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
2738                         ret = -EINVAL;
2739
2740                 if (ret) {
2741                         spin_unlock(&fs_info->qgroup_lock);
2742                         mutex_unlock(&fs_info->qgroup_rescan_lock);
2743                         goto err;
2744                 }
2745                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2746         }
2747
2748         memset(&fs_info->qgroup_rescan_progress, 0,
2749                 sizeof(fs_info->qgroup_rescan_progress));
2750         fs_info->qgroup_rescan_progress.objectid = progress_objectid;
2751         init_completion(&fs_info->qgroup_rescan_completion);
2752         fs_info->qgroup_rescan_running = true;
2753
2754         spin_unlock(&fs_info->qgroup_lock);
2755         mutex_unlock(&fs_info->qgroup_rescan_lock);
2756
2757         memset(&fs_info->qgroup_rescan_work, 0,
2758                sizeof(fs_info->qgroup_rescan_work));
2759         btrfs_init_work(&fs_info->qgroup_rescan_work,
2760                         btrfs_qgroup_rescan_helper,
2761                         btrfs_qgroup_rescan_worker, NULL, NULL);
2762
2763         if (ret) {
2764 err:
2765                 btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
2766                 return ret;
2767         }
2768
2769         return 0;
2770 }
2771
2772 static void
2773 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
2774 {
2775         struct rb_node *n;
2776         struct btrfs_qgroup *qgroup;
2777
2778         spin_lock(&fs_info->qgroup_lock);
2779         /* clear all current qgroup tracking information */
2780         for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
2781                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
2782                 qgroup->rfer = 0;
2783                 qgroup->rfer_cmpr = 0;
2784                 qgroup->excl = 0;
2785                 qgroup->excl_cmpr = 0;
2786         }
2787         spin_unlock(&fs_info->qgroup_lock);
2788 }
2789
2790 int
2791 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
2792 {
2793         int ret = 0;
2794         struct btrfs_trans_handle *trans;
2795
2796         ret = qgroup_rescan_init(fs_info, 0, 1);
2797         if (ret)
2798                 return ret;
2799
2800         /*
2801          * We have set the rescan_progress to 0, which means no more
2802          * delayed refs will be accounted by btrfs_qgroup_account_extent().
2803          * However, btrfs_qgroup_account_extent() may be right after its
2804          * call to btrfs_find_all_roots, in which case it would still do
2805          * the accounting.
2806          * To solve this, we're committing the transaction, which will
2807          * ensure we run all delayed refs and only after that, we are
2808          * going to clear all tracking information for a clean start.
2809          */
2810
2811         trans = btrfs_join_transaction(fs_info->fs_root);
2812         if (IS_ERR(trans)) {
2813                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2814                 return PTR_ERR(trans);
2815         }
2816         ret = btrfs_commit_transaction(trans);
2817         if (ret) {
2818                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2819                 return ret;
2820         }
2821
2822         qgroup_rescan_zero_tracking(fs_info);
2823
2824         btrfs_queue_work(fs_info->qgroup_rescan_workers,
2825                          &fs_info->qgroup_rescan_work);
2826
2827         return 0;
2828 }
2829
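/*
 * Wait for a running rescan to finish.  With @interruptible set the
 * wait can be cut short by a signal, in which case -ERESTARTSYS is
 * returned.
 */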
2830 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
2831                                      bool interruptible)
2832 {
2833         int running;
2834         int ret = 0;
2835
2836         mutex_lock(&fs_info->qgroup_rescan_lock);
2837         spin_lock(&fs_info->qgroup_lock);
2838         running = fs_info->qgroup_rescan_running;
2839         spin_unlock(&fs_info->qgroup_lock);
2840         mutex_unlock(&fs_info->qgroup_rescan_lock);
2841
2842         if (!running)
2843                 return 0;
2844
2845         if (interruptible)
2846                 ret = wait_for_completion_interruptible(
2847                                         &fs_info->qgroup_rescan_completion);
2848         else
2849                 wait_for_completion(&fs_info->qgroup_rescan_completion);
2850
2851         return ret;
2852 }
2853
2854 /*
2855  * This is only called from open_ctree() where we're still single threaded,
2856  * thus locking is omitted here.
2857  */
2858 void
2859 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
2860 {
2861         if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2862                 btrfs_queue_work(fs_info->qgroup_rescan_workers,
2863                                  &fs_info->qgroup_rescan_work);
2864 }
2865
2866 /*
2867  * Reserve qgroup space for range [start, start + len).
2868  *
2869  * This function will either reserve space from related qgroups or do
2870  * nothing if the range is already reserved.
2871  *
2872  * Return 0 for successful reserve
2873  * Return <0 for error (including -EDQUOT)
2874  *
2875  * NOTE: this function may sleep for memory allocation.
2876  *       If btrfs_qgroup_reserve_data() is called multiple times with the
2877  *       same @reserved, the caller must ensure it's OK to free *ALL*
2878  *       reserved space when an error happens.
2879  */
2880 int btrfs_qgroup_reserve_data(struct inode *inode,
2881                         struct extent_changeset **reserved_ret, u64 start,
2882                         u64 len)
2883 {
2884         struct btrfs_root *root = BTRFS_I(inode)->root;
2885         struct ulist_node *unode;
2886         struct ulist_iterator uiter;
2887         struct extent_changeset *reserved;
2888         u64 orig_reserved;
2889         u64 to_reserve;
2890         int ret;
2891
2892         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
2893             !is_fstree(root->objectid) || len == 0)
2894                 return 0;
2895
2896         /* @reserved parameter is mandatory for qgroup */
2897         if (WARN_ON(!reserved_ret))
2898                 return -EINVAL;
2899         if (!*reserved_ret) {
2900                 *reserved_ret = extent_changeset_alloc();
2901                 if (!*reserved_ret)
2902                         return -ENOMEM;
2903         }
2904         reserved = *reserved_ret;
2905         /* Record already reserved space */
2906         orig_reserved = reserved->bytes_changed;
2907         ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2908                         start + len - 1, EXTENT_QGROUP_RESERVED, reserved);
2909
2910         /* Newly reserved space */
2911         to_reserve = reserved->bytes_changed - orig_reserved;
2912         trace_btrfs_qgroup_reserve_data(inode, start, len,
2913                                         to_reserve, QGROUP_RESERVE);
2914         if (ret < 0)
2915                 goto cleanup;
2916         ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
2917         if (ret < 0)
2918                 goto cleanup;
2919
2920         return ret;
2921
2922 cleanup:
2923         /* Clean up *ALL* already reserved ranges */
2924         ULIST_ITER_INIT(&uiter);
2925         while ((unode = ulist_next(&reserved->range_changed, &uiter)))
2926                 clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
2927                                  unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
2928         extent_changeset_release(reserved);
2929         return ret;
2930 }
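/*
 * Illustrative sketch (not part of the original file): a typical data
 * reservation pattern before a buffered write. The function name is
 * hypothetical; per the NOTE above btrfs_qgroup_reserve_data(), on error
 * it must be OK to free *ALL* space recorded in @reserved.
 */
static int __maybe_unused qgroup_example_reserve_write(struct inode *inode,
						       u64 start, u64 len)
{
	struct extent_changeset *reserved = NULL;
	int ret;

	/* Reserves only the not-yet-reserved parts of [start, start + len) */
	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
	if (ret < 0)
		goto out;

	/* ... perform the buffered write here, setting ret on failure ... */

	if (ret < 0)
		/* On error, every range recorded in @reserved is safe to free */
		btrfs_qgroup_free_data(inode, reserved, start, len);
out:
	extent_changeset_free(reserved);
	return ret;
}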
2931
2932 /* Free ranges specified by @reserved, normally in the error path */
2933 static int qgroup_free_reserved_data(struct inode *inode,
2934                         struct extent_changeset *reserved, u64 start, u64 len)
2935 {
2936         struct btrfs_root *root = BTRFS_I(inode)->root;
2937         struct ulist_node *unode;
2938         struct ulist_iterator uiter;
2939         struct extent_changeset changeset;
2940         int freed = 0;
2941         int ret;
2942
2943         extent_changeset_init(&changeset);
2944         len = round_up(start + len, root->fs_info->sectorsize);
2945         start = round_down(start, root->fs_info->sectorsize);
2946
2947         ULIST_ITER_INIT(&uiter);
2948         while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
2949                 u64 range_start = unode->val;
2950                 /* unode->aux is the inclusive end */
2951                 u64 range_len = unode->aux - range_start + 1;
2952                 u64 free_start;
2953                 u64 free_len;
2954
2955                 extent_changeset_release(&changeset);
2956
2957                 /* Only free ranges within [start, start + len) */
2958                 if (range_start >= start + len ||
2959                     range_start + range_len <= start)
2960                         continue;
2961                 free_start = max(range_start, start);
2962                 free_len = min(start + len, range_start + range_len) -
2963                            free_start;
2964                 /*
2965                  * TODO: Also modify reserved->ranges_reserved to reflect
2966                  * the modification.
2967                  *
2968                  * However, as long as we free qgroup reserved space
2969                  * according to EXTENT_QGROUP_RESERVED, we won't double
2970                  * free. So there is no need to rush.
2971                  */
2972                 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
2973                                 free_start, free_start + free_len - 1,
2974                                 EXTENT_QGROUP_RESERVED, &changeset);
2975                 if (ret < 0)
2976                         goto out;
2977                 freed += changeset.bytes_changed;
2978         }
2979         btrfs_qgroup_free_refroot(root->fs_info, root->objectid, freed,
2980                                   BTRFS_QGROUP_RSV_DATA);
2981         ret = freed;
2982 out:
2983         extent_changeset_release(&changeset);
2984         return ret;
2985 }
2986
2987 static int __btrfs_qgroup_release_data(struct inode *inode,
2988                         struct extent_changeset *reserved, u64 start, u64 len,
2989                         int free)
2990 {
2991         struct extent_changeset changeset;
2992         int trace_op = QGROUP_RELEASE;
2993         int ret;
2994
2995         /* In the release case, we shouldn't have @reserved */
2996         WARN_ON(!free && reserved);
2997         if (free && reserved)
2998                 return qgroup_free_reserved_data(inode, reserved, start, len);
2999         extent_changeset_init(&changeset);
3000         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
3001                         start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
3002         if (ret < 0)
3003                 goto out;
3004
3005         if (free)
3006                 trace_op = QGROUP_FREE;
3007         trace_btrfs_qgroup_release_data(inode, start, len,
3008                                         changeset.bytes_changed, trace_op);
3009         if (free)
3010                 btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
3011                                 BTRFS_I(inode)->root->objectid,
3012                                 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3013         ret = changeset.bytes_changed;
3014 out:
3015         extent_changeset_release(&changeset);
3016         return ret;
3017 }
3018
3019 /*
3020  * Free a reserved space range from io_tree and related qgroups
3021  *
3022  * Should be called when a range of pages gets invalidated before reaching
3023  * disk, or for error cleanup.
3024  * If @reserved is given, only the reserved range in [@start, @start + @len)
3025  * will be freed.
3026  *
3027  * For data written to disk, use btrfs_qgroup_release_data().
3028  *
3029  * NOTE: This function may sleep for memory allocation.
3030  */
3031 int btrfs_qgroup_free_data(struct inode *inode,
3032                         struct extent_changeset *reserved, u64 start, u64 len)
3033 {
3034         return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
3035 }
3036
3037 /*
3038  * Release a reserved space range from io_tree only.
3039  *
3040  * Should be called when a range of pages gets written to disk and the
3041  * corresponding FILE_EXTENT item is inserted into the corresponding root.
3042  *
3043  * Since the new qgroup accounting framework only updates qgroup numbers at
3044  * commit_transaction() time, the reserved space shouldn't be freed from the
3045  * related qgroups yet.
3046  *
3047  * But we should release the range from the io_tree, to allow further writes
3048  * to be COWed.
3049  *
3050  * NOTE: This function may sleep for memory allocation.
3051  */
3052 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
3053 {
3054         return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
3055 }
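/*
 * Illustrative sketch (not part of the original file): the two ways a data
 * reservation ends. The function name and the @written flag are hypothetical;
 * only the two btrfs_qgroup_*_data() calls are real.
 */
static void __maybe_unused qgroup_example_finish_range(struct inode *inode,
						       u64 start, u64 len,
						       bool written)
{
	if (written)
		/* Data hit disk: clear io_tree bits, keep the qgroup rsv */
		btrfs_qgroup_release_data(inode, start, len);
	else
		/* Pages invalidated: clear io_tree bits and the qgroup rsv */
		btrfs_qgroup_free_data(inode, NULL, start, len);
}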
3056
3057 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3058                               enum btrfs_qgroup_rsv_type type)
3059 {
3060         if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3061             type != BTRFS_QGROUP_RSV_META_PERTRANS)
3062                 return;
3063         if (num_bytes == 0)
3064                 return;
3065
3066         spin_lock(&root->qgroup_meta_rsv_lock);
3067         if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
3068                 root->qgroup_meta_rsv_prealloc += num_bytes;
3069         else
3070                 root->qgroup_meta_rsv_pertrans += num_bytes;
3071         spin_unlock(&root->qgroup_meta_rsv_lock);
3072 }
3073
3074 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3075                              enum btrfs_qgroup_rsv_type type)
3076 {
3077         if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3078             type != BTRFS_QGROUP_RSV_META_PERTRANS)
3079                 return 0;
3080         if (num_bytes == 0)
3081                 return 0;
3082
3083         spin_lock(&root->qgroup_meta_rsv_lock);
3084         if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
3085                 num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
3086                                   num_bytes);
3087                 root->qgroup_meta_rsv_prealloc -= num_bytes;
3088         } else {
3089                 num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
3090                                   num_bytes);
3091                 root->qgroup_meta_rsv_pertrans -= num_bytes;
3092         }
3093         spin_unlock(&root->qgroup_meta_rsv_lock);
3094         return num_bytes;
3095 }
3096
3097 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3098                                 enum btrfs_qgroup_rsv_type type, bool enforce)
3099 {
3100         struct btrfs_fs_info *fs_info = root->fs_info;
3101         int ret;
3102
3103         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3104             !is_fstree(root->objectid) || num_bytes == 0)
3105                 return 0;
3106
3107         BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3108         trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
3109         ret = qgroup_reserve(root, num_bytes, enforce, type);
3110         if (ret < 0)
3111                 return ret;
3112         /*
3113          * Record what we have reserved into the root.
3114          *
3115          * This avoids underflow across a quota disabled -> enabled
3116          * transition: we may try to free space we haven't reserved (since
3117          * quota was disabled), so record what we reserved into the root
3118          * and ensure a later release won't underflow this number.
3119          */
3120         add_root_meta_rsv(root, num_bytes, type);
3121         return ret;
3122 }
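/*
 * Illustrative sketch (not part of the original file): reserving metadata
 * space before starting a tree operation. The function name is hypothetical;
 * callers would normally go through the wrappers in qgroup.h, but the direct
 * call below shows the arguments explicitly.
 */
static int __maybe_unused qgroup_example_reserve_meta(struct btrfs_root *root,
						      int nr_items)
{
	/* num_bytes must be nodesize aligned, see the BUG_ON() above */
	int num_bytes = nr_items * root->fs_info->nodesize;

	/* enforce == true: fail with -EDQUOT instead of exceeding limits */
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
					   BTRFS_QGROUP_RSV_META_PREALLOC, true);
}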
3123
3124 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
3125 {
3126         struct btrfs_fs_info *fs_info = root->fs_info;
3127
3128         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3129             !is_fstree(root->objectid))
3130                 return;
3131
3132         /* TODO: Update trace point to handle such free */
3133         trace_qgroup_meta_free_all_pertrans(root);
3134         /* Special value -1 means to free all reserved space */
3135         btrfs_qgroup_free_refroot(fs_info, root->objectid, (u64)-1,
3136                                   BTRFS_QGROUP_RSV_META_PERTRANS);
3137 }
3138
3139 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
3140                               enum btrfs_qgroup_rsv_type type)
3141 {
3142         struct btrfs_fs_info *fs_info = root->fs_info;
3143
3144         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3145             !is_fstree(root->objectid))
3146                 return;
3147
3148         /*
3149          * Reservations for META_PREALLOC can happen before quota is
3150          * enabled, which can lead to underflow.
3151          * Ensure we only free what we have really reserved.
3152          */
3153         num_bytes = sub_root_meta_rsv(root, num_bytes, type);
3154         BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3155         trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
3156         btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes, type);
3157 }
3158
3159 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
3160                                 int num_bytes)
3161 {
3162         struct btrfs_root *quota_root = fs_info->quota_root;
3163         struct btrfs_qgroup *qgroup;
3164         struct ulist_node *unode;
3165         struct ulist_iterator uiter;
3166         int ret = 0;
3167
3168         if (num_bytes == 0)
3169                 return;
3170         if (!quota_root)
3171                 return;
3172
3173         spin_lock(&fs_info->qgroup_lock);
3174         qgroup = find_qgroup_rb(fs_info, ref_root);
3175         if (!qgroup)
3176                 goto out;
3177         ulist_reinit(fs_info->qgroup_ulist);
3178         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3179                        qgroup_to_aux(qgroup), GFP_ATOMIC);
3180         if (ret < 0)
3181                 goto out;
3182         ULIST_ITER_INIT(&uiter);
3183         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3184                 struct btrfs_qgroup *qg;
3185                 struct btrfs_qgroup_list *glist;
3186
3187                 qg = unode_aux_to_qgroup(unode);
3188
3189                 qgroup_rsv_release(fs_info, qg, num_bytes,
3190                                 BTRFS_QGROUP_RSV_META_PREALLOC);
3191                 qgroup_rsv_add(fs_info, qg, num_bytes,
3192                                 BTRFS_QGROUP_RSV_META_PERTRANS);
3193                 list_for_each_entry(glist, &qg->groups, next_group) {
3194                         ret = ulist_add(fs_info->qgroup_ulist,
3195                                         glist->group->qgroupid,
3196                                         qgroup_to_aux(glist->group), GFP_ATOMIC);
3197                         if (ret < 0)
3198                                 goto out;
3199                 }
3200         }
3201 out:
3202         spin_unlock(&fs_info->qgroup_lock);
3203 }
3204
3205 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
3206 {
3207         struct btrfs_fs_info *fs_info = root->fs_info;
3208
3209         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3210             !is_fstree(root->objectid))
3211                 return;
3212         /* Same as btrfs_qgroup_free_meta_prealloc() */
3213         num_bytes = sub_root_meta_rsv(root, num_bytes,
3214                                       BTRFS_QGROUP_RSV_META_PREALLOC);
3215         trace_qgroup_meta_convert(root, num_bytes);
3216         qgroup_convert_meta(fs_info, root->objectid, num_bytes);
3217 }
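/*
 * Illustrative sketch (not part of the original file): the prealloc ->
 * pertrans lifecycle. Metadata space is reserved as PREALLOC before a
 * change, converted to PERTRANS once the change is committed to the
 * transaction, and the whole PERTRANS pool is dropped at transaction commit
 * by btrfs_qgroup_free_meta_all_pertrans(). The function name and the @used
 * flag are hypothetical.
 */
static void __maybe_unused qgroup_example_meta_lifecycle(struct btrfs_root *root,
							 int num_bytes, bool used)
{
	if (used)
		/* Reservation was used: move it from PREALLOC to PERTRANS */
		btrfs_qgroup_convert_reserved_meta(root, num_bytes);
	else
		/* Reservation unused: give the PREALLOC bytes back now */
		__btrfs_qgroup_free_meta(root, num_bytes,
					 BTRFS_QGROUP_RSV_META_PREALLOC);
}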
3218
3219 /*
3220  * Check for leaked qgroup reserved space, normally at inode destruction
3221  * time.
3222  */
3223 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
3224 {
3225         struct extent_changeset changeset;
3226         struct ulist_node *unode;
3227         struct ulist_iterator iter;
3228         int ret;
3229
3230         extent_changeset_init(&changeset);
3231         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
3232                         EXTENT_QGROUP_RESERVED, &changeset);
3233
3234         WARN_ON(ret < 0);
3235         if (WARN_ON(changeset.bytes_changed)) {
3236                 ULIST_ITER_INIT(&iter);
3237                 while ((unode = ulist_next(&changeset.range_changed, &iter))) {
3238                         btrfs_warn(BTRFS_I(inode)->root->fs_info,
3239                                 "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
3240                                 inode->i_ino, unode->val, unode->aux);
3241                 }
3242                 btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
3243                                 BTRFS_I(inode)->root->objectid,
3244                                 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3245
3246         }
3247         extent_changeset_release(&changeset);
3248 }