btrfs: use helper to set ulist aux from a qgroup
[sfrench/cifs-2.6.git] / fs / btrfs / qgroup.c
1 /*
2  * Copyright (C) 2011 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/sched.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/blkdev.h>
23 #include <linux/rbtree.h>
24 #include <linux/slab.h>
25 #include <linux/workqueue.h>
26 #include <linux/btrfs.h>
27
28 #include "ctree.h"
29 #include "transaction.h"
30 #include "disk-io.h"
31 #include "locking.h"
32 #include "ulist.h"
33 #include "backref.h"
34 #include "extent_io.h"
35 #include "qgroup.h"
36
37
38 /* TODO XXX FIXME
39  *  - subvol delete -> delete when ref goes to 0? delete limits also?
40  *  - reorganize keys
41  *  - compressed
42  *  - sync
43  *  - copy also limits on subvol creation
44  *  - limit
45  *  - caches fuer ulists
46  *  - performance benchmarks
47  *  - check all ioctl parameters
48  */
49
50 /*
51  * Helpers to access qgroup reservation
52  *
53  * Callers should ensure the lock context and type are valid
54  */
55
56 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
57 {
58         u64 ret = 0;
59         int i;
60
61         for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
62                 ret += qgroup->rsv.values[i];
63
64         return ret;
65 }
66
67 #ifdef CONFIG_BTRFS_DEBUG
68 static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
69 {
70         if (type == BTRFS_QGROUP_RSV_DATA)
71                 return "data";
72         if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
73                 return "meta_pertrans";
74         if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
75                 return "meta_prealloc";
76         return NULL;
77 }
78 #endif
79
80 static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
81                            struct btrfs_qgroup *qgroup, u64 num_bytes,
82                            enum btrfs_qgroup_rsv_type type)
83 {
84         trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
85         qgroup->rsv.values[type] += num_bytes;
86 }
87
88 static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
89                                struct btrfs_qgroup *qgroup, u64 num_bytes,
90                                enum btrfs_qgroup_rsv_type type)
91 {
92         trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
93         if (qgroup->rsv.values[type] >= num_bytes) {
94                 qgroup->rsv.values[type] -= num_bytes;
95                 return;
96         }
97 #ifdef CONFIG_BTRFS_DEBUG
98         WARN_RATELIMIT(1,
99                 "qgroup %llu %s reserved space underflow, have %llu to free %llu",
100                 qgroup->qgroupid, qgroup_rsv_type_str(type),
101                 qgroup->rsv.values[type], num_bytes);
102 #endif
103         qgroup->rsv.values[type] = 0;
104 }
105
106 static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
107                                      struct btrfs_qgroup *dest,
108                                      struct btrfs_qgroup *src)
109 {
110         int i;
111
112         for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
113                 qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
114 }
115
116 static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
117                                          struct btrfs_qgroup *dest,
118                                           struct btrfs_qgroup *src)
119 {
120         int i;
121
122         for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
123                 qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
124 }
125
126 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
127                                            int mod)
128 {
129         if (qg->old_refcnt < seq)
130                 qg->old_refcnt = seq;
131         qg->old_refcnt += mod;
132 }
133
134 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
135                                            int mod)
136 {
137         if (qg->new_refcnt < seq)
138                 qg->new_refcnt = seq;
139         qg->new_refcnt += mod;
140 }
141
142 static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
143 {
144         if (qg->old_refcnt < seq)
145                 return 0;
146         return qg->old_refcnt - seq;
147 }
148
149 static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
150 {
151         if (qg->new_refcnt < seq)
152                 return 0;
153         return qg->new_refcnt - seq;
154 }
155
156 /*
157  * glue structure to represent the relations between qgroups.
158  */
159 struct btrfs_qgroup_list {
160         struct list_head next_group;
161         struct list_head next_member;
162         struct btrfs_qgroup *group;
163         struct btrfs_qgroup *member;
164 };
165
166 static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
167 {
168         return (u64)(uintptr_t)qg;
169 }
170
171 static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n)
172 {
173         return (struct btrfs_qgroup *)(uintptr_t)n->aux;
174 }
175
176 static int
177 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
178                    int init_flags);
179 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
180
181 /* must be called with qgroup_ioctl_lock held */
182 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
183                                            u64 qgroupid)
184 {
185         struct rb_node *n = fs_info->qgroup_tree.rb_node;
186         struct btrfs_qgroup *qgroup;
187
188         while (n) {
189                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
190                 if (qgroup->qgroupid < qgroupid)
191                         n = n->rb_left;
192                 else if (qgroup->qgroupid > qgroupid)
193                         n = n->rb_right;
194                 else
195                         return qgroup;
196         }
197         return NULL;
198 }
199
200 /* must be called with qgroup_lock held */
201 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
202                                           u64 qgroupid)
203 {
204         struct rb_node **p = &fs_info->qgroup_tree.rb_node;
205         struct rb_node *parent = NULL;
206         struct btrfs_qgroup *qgroup;
207
208         while (*p) {
209                 parent = *p;
210                 qgroup = rb_entry(parent, struct btrfs_qgroup, node);
211
212                 if (qgroup->qgroupid < qgroupid)
213                         p = &(*p)->rb_left;
214                 else if (qgroup->qgroupid > qgroupid)
215                         p = &(*p)->rb_right;
216                 else
217                         return qgroup;
218         }
219
220         qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
221         if (!qgroup)
222                 return ERR_PTR(-ENOMEM);
223
224         qgroup->qgroupid = qgroupid;
225         INIT_LIST_HEAD(&qgroup->groups);
226         INIT_LIST_HEAD(&qgroup->members);
227         INIT_LIST_HEAD(&qgroup->dirty);
228
229         rb_link_node(&qgroup->node, parent, p);
230         rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
231
232         return qgroup;
233 }
234
235 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
236 {
237         struct btrfs_qgroup_list *list;
238
239         list_del(&qgroup->dirty);
240         while (!list_empty(&qgroup->groups)) {
241                 list = list_first_entry(&qgroup->groups,
242                                         struct btrfs_qgroup_list, next_group);
243                 list_del(&list->next_group);
244                 list_del(&list->next_member);
245                 kfree(list);
246         }
247
248         while (!list_empty(&qgroup->members)) {
249                 list = list_first_entry(&qgroup->members,
250                                         struct btrfs_qgroup_list, next_member);
251                 list_del(&list->next_group);
252                 list_del(&list->next_member);
253                 kfree(list);
254         }
255         kfree(qgroup);
256 }
257
258 /* must be called with qgroup_lock held */
259 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
260 {
261         struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
262
263         if (!qgroup)
264                 return -ENOENT;
265
266         rb_erase(&qgroup->node, &fs_info->qgroup_tree);
267         __del_qgroup_rb(qgroup);
268         return 0;
269 }
270
271 /* must be called with qgroup_lock held */
272 static int add_relation_rb(struct btrfs_fs_info *fs_info,
273                            u64 memberid, u64 parentid)
274 {
275         struct btrfs_qgroup *member;
276         struct btrfs_qgroup *parent;
277         struct btrfs_qgroup_list *list;
278
279         member = find_qgroup_rb(fs_info, memberid);
280         parent = find_qgroup_rb(fs_info, parentid);
281         if (!member || !parent)
282                 return -ENOENT;
283
284         list = kzalloc(sizeof(*list), GFP_ATOMIC);
285         if (!list)
286                 return -ENOMEM;
287
288         list->group = parent;
289         list->member = member;
290         list_add_tail(&list->next_group, &member->groups);
291         list_add_tail(&list->next_member, &parent->members);
292
293         return 0;
294 }
295
296 /* must be called with qgroup_lock held */
297 static int del_relation_rb(struct btrfs_fs_info *fs_info,
298                            u64 memberid, u64 parentid)
299 {
300         struct btrfs_qgroup *member;
301         struct btrfs_qgroup *parent;
302         struct btrfs_qgroup_list *list;
303
304         member = find_qgroup_rb(fs_info, memberid);
305         parent = find_qgroup_rb(fs_info, parentid);
306         if (!member || !parent)
307                 return -ENOENT;
308
309         list_for_each_entry(list, &member->groups, next_group) {
310                 if (list->group == parent) {
311                         list_del(&list->next_group);
312                         list_del(&list->next_member);
313                         kfree(list);
314                         return 0;
315                 }
316         }
317         return -ENOENT;
318 }
319
320 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
321 int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
322                                u64 rfer, u64 excl)
323 {
324         struct btrfs_qgroup *qgroup;
325
326         qgroup = find_qgroup_rb(fs_info, qgroupid);
327         if (!qgroup)
328                 return -EINVAL;
329         if (qgroup->rfer != rfer || qgroup->excl != excl)
330                 return -EINVAL;
331         return 0;
332 }
333 #endif
334
335 /*
336  * The full config is read in one go, only called from open_ctree()
337  * It doesn't use any locking, as at this point we're still single-threaded
338  */
339 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
340 {
341         struct btrfs_key key;
342         struct btrfs_key found_key;
343         struct btrfs_root *quota_root = fs_info->quota_root;
344         struct btrfs_path *path = NULL;
345         struct extent_buffer *l;
346         int slot;
347         int ret = 0;
348         u64 flags = 0;
349         u64 rescan_progress = 0;
350
351         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
352                 return 0;
353
354         fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
355         if (!fs_info->qgroup_ulist) {
356                 ret = -ENOMEM;
357                 goto out;
358         }
359
360         path = btrfs_alloc_path();
361         if (!path) {
362                 ret = -ENOMEM;
363                 goto out;
364         }
365
366         /* default this to quota off, in case no status key is found */
367         fs_info->qgroup_flags = 0;
368
369         /*
370          * pass 1: read status, all qgroup infos and limits
371          */
372         key.objectid = 0;
373         key.type = 0;
374         key.offset = 0;
375         ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
376         if (ret)
377                 goto out;
378
379         while (1) {
380                 struct btrfs_qgroup *qgroup;
381
382                 slot = path->slots[0];
383                 l = path->nodes[0];
384                 btrfs_item_key_to_cpu(l, &found_key, slot);
385
386                 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
387                         struct btrfs_qgroup_status_item *ptr;
388
389                         ptr = btrfs_item_ptr(l, slot,
390                                              struct btrfs_qgroup_status_item);
391
392                         if (btrfs_qgroup_status_version(l, ptr) !=
393                             BTRFS_QGROUP_STATUS_VERSION) {
394                                 btrfs_err(fs_info,
395                                  "old qgroup version, quota disabled");
396                                 goto out;
397                         }
398                         if (btrfs_qgroup_status_generation(l, ptr) !=
399                             fs_info->generation) {
400                                 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
401                                 btrfs_err(fs_info,
402                                         "qgroup generation mismatch, marked as inconsistent");
403                         }
404                         fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
405                                                                           ptr);
406                         rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
407                         goto next1;
408                 }
409
410                 if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
411                     found_key.type != BTRFS_QGROUP_LIMIT_KEY)
412                         goto next1;
413
414                 qgroup = find_qgroup_rb(fs_info, found_key.offset);
415                 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
416                     (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
417                         btrfs_err(fs_info, "inconsistent qgroup config");
418                         flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
419                 }
420                 if (!qgroup) {
421                         qgroup = add_qgroup_rb(fs_info, found_key.offset);
422                         if (IS_ERR(qgroup)) {
423                                 ret = PTR_ERR(qgroup);
424                                 goto out;
425                         }
426                 }
427                 switch (found_key.type) {
428                 case BTRFS_QGROUP_INFO_KEY: {
429                         struct btrfs_qgroup_info_item *ptr;
430
431                         ptr = btrfs_item_ptr(l, slot,
432                                              struct btrfs_qgroup_info_item);
433                         qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
434                         qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
435                         qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
436                         qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
437                         /* generation currently unused */
438                         break;
439                 }
440                 case BTRFS_QGROUP_LIMIT_KEY: {
441                         struct btrfs_qgroup_limit_item *ptr;
442
443                         ptr = btrfs_item_ptr(l, slot,
444                                              struct btrfs_qgroup_limit_item);
445                         qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
446                         qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
447                         qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
448                         qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
449                         qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
450                         break;
451                 }
452                 }
453 next1:
454                 ret = btrfs_next_item(quota_root, path);
455                 if (ret < 0)
456                         goto out;
457                 if (ret)
458                         break;
459         }
460         btrfs_release_path(path);
461
462         /*
463          * pass 2: read all qgroup relations
464          */
465         key.objectid = 0;
466         key.type = BTRFS_QGROUP_RELATION_KEY;
467         key.offset = 0;
468         ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
469         if (ret)
470                 goto out;
471         while (1) {
472                 slot = path->slots[0];
473                 l = path->nodes[0];
474                 btrfs_item_key_to_cpu(l, &found_key, slot);
475
476                 if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
477                         goto next2;
478
479                 if (found_key.objectid > found_key.offset) {
480                         /* parent <- member, not needed to build config */
481                         /* FIXME should we omit the key completely? */
482                         goto next2;
483                 }
484
485                 ret = add_relation_rb(fs_info, found_key.objectid,
486                                       found_key.offset);
487                 if (ret == -ENOENT) {
488                         btrfs_warn(fs_info,
489                                 "orphan qgroup relation 0x%llx->0x%llx",
490                                 found_key.objectid, found_key.offset);
491                         ret = 0;        /* ignore the error */
492                 }
493                 if (ret)
494                         goto out;
495 next2:
496                 ret = btrfs_next_item(quota_root, path);
497                 if (ret < 0)
498                         goto out;
499                 if (ret)
500                         break;
501         }
502 out:
503         fs_info->qgroup_flags |= flags;
504         if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
505                 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
506         else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
507                  ret >= 0)
508                 ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
509         btrfs_free_path(path);
510
511         if (ret < 0) {
512                 ulist_free(fs_info->qgroup_ulist);
513                 fs_info->qgroup_ulist = NULL;
514                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
515         }
516
517         return ret < 0 ? ret : 0;
518 }
519
520 /*
521  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
522  * first two are in single-threaded paths.And for the third one, we have set
523  * quota_root to be null with qgroup_lock held before, so it is safe to clean
524  * up the in-memory structures without qgroup_lock held.
525  */
526 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
527 {
528         struct rb_node *n;
529         struct btrfs_qgroup *qgroup;
530
531         while ((n = rb_first(&fs_info->qgroup_tree))) {
532                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
533                 rb_erase(n, &fs_info->qgroup_tree);
534                 __del_qgroup_rb(qgroup);
535         }
536         /*
537          * we call btrfs_free_qgroup_config() when umounting
538          * filesystem and disabling quota, so we set qgroup_ulist
539          * to be null here to avoid double free.
540          */
541         ulist_free(fs_info->qgroup_ulist);
542         fs_info->qgroup_ulist = NULL;
543 }
544
545 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
546                                     struct btrfs_root *quota_root,
547                                     u64 src, u64 dst)
548 {
549         int ret;
550         struct btrfs_path *path;
551         struct btrfs_key key;
552
553         path = btrfs_alloc_path();
554         if (!path)
555                 return -ENOMEM;
556
557         key.objectid = src;
558         key.type = BTRFS_QGROUP_RELATION_KEY;
559         key.offset = dst;
560
561         ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
562
563         btrfs_mark_buffer_dirty(path->nodes[0]);
564
565         btrfs_free_path(path);
566         return ret;
567 }
568
569 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
570                                     struct btrfs_root *quota_root,
571                                     u64 src, u64 dst)
572 {
573         int ret;
574         struct btrfs_path *path;
575         struct btrfs_key key;
576
577         path = btrfs_alloc_path();
578         if (!path)
579                 return -ENOMEM;
580
581         key.objectid = src;
582         key.type = BTRFS_QGROUP_RELATION_KEY;
583         key.offset = dst;
584
585         ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
586         if (ret < 0)
587                 goto out;
588
589         if (ret > 0) {
590                 ret = -ENOENT;
591                 goto out;
592         }
593
594         ret = btrfs_del_item(trans, quota_root, path);
595 out:
596         btrfs_free_path(path);
597         return ret;
598 }
599
600 static int add_qgroup_item(struct btrfs_trans_handle *trans,
601                            struct btrfs_root *quota_root, u64 qgroupid)
602 {
603         int ret;
604         struct btrfs_path *path;
605         struct btrfs_qgroup_info_item *qgroup_info;
606         struct btrfs_qgroup_limit_item *qgroup_limit;
607         struct extent_buffer *leaf;
608         struct btrfs_key key;
609
610         if (btrfs_is_testing(quota_root->fs_info))
611                 return 0;
612
613         path = btrfs_alloc_path();
614         if (!path)
615                 return -ENOMEM;
616
617         key.objectid = 0;
618         key.type = BTRFS_QGROUP_INFO_KEY;
619         key.offset = qgroupid;
620
621         /*
622          * Avoid a transaction abort by catching -EEXIST here. In that
623          * case, we proceed by re-initializing the existing structure
624          * on disk.
625          */
626
627         ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
628                                       sizeof(*qgroup_info));
629         if (ret && ret != -EEXIST)
630                 goto out;
631
632         leaf = path->nodes[0];
633         qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
634                                  struct btrfs_qgroup_info_item);
635         btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
636         btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
637         btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
638         btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
639         btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
640
641         btrfs_mark_buffer_dirty(leaf);
642
643         btrfs_release_path(path);
644
645         key.type = BTRFS_QGROUP_LIMIT_KEY;
646         ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
647                                       sizeof(*qgroup_limit));
648         if (ret && ret != -EEXIST)
649                 goto out;
650
651         leaf = path->nodes[0];
652         qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
653                                   struct btrfs_qgroup_limit_item);
654         btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
655         btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
656         btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
657         btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
658         btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
659
660         btrfs_mark_buffer_dirty(leaf);
661
662         ret = 0;
663 out:
664         btrfs_free_path(path);
665         return ret;
666 }
667
668 static int del_qgroup_item(struct btrfs_trans_handle *trans,
669                            struct btrfs_root *quota_root, u64 qgroupid)
670 {
671         int ret;
672         struct btrfs_path *path;
673         struct btrfs_key key;
674
675         path = btrfs_alloc_path();
676         if (!path)
677                 return -ENOMEM;
678
679         key.objectid = 0;
680         key.type = BTRFS_QGROUP_INFO_KEY;
681         key.offset = qgroupid;
682         ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
683         if (ret < 0)
684                 goto out;
685
686         if (ret > 0) {
687                 ret = -ENOENT;
688                 goto out;
689         }
690
691         ret = btrfs_del_item(trans, quota_root, path);
692         if (ret)
693                 goto out;
694
695         btrfs_release_path(path);
696
697         key.type = BTRFS_QGROUP_LIMIT_KEY;
698         ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
699         if (ret < 0)
700                 goto out;
701
702         if (ret > 0) {
703                 ret = -ENOENT;
704                 goto out;
705         }
706
707         ret = btrfs_del_item(trans, quota_root, path);
708
709 out:
710         btrfs_free_path(path);
711         return ret;
712 }
713
714 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
715                                     struct btrfs_root *root,
716                                     struct btrfs_qgroup *qgroup)
717 {
718         struct btrfs_path *path;
719         struct btrfs_key key;
720         struct extent_buffer *l;
721         struct btrfs_qgroup_limit_item *qgroup_limit;
722         int ret;
723         int slot;
724
725         key.objectid = 0;
726         key.type = BTRFS_QGROUP_LIMIT_KEY;
727         key.offset = qgroup->qgroupid;
728
729         path = btrfs_alloc_path();
730         if (!path)
731                 return -ENOMEM;
732
733         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
734         if (ret > 0)
735                 ret = -ENOENT;
736
737         if (ret)
738                 goto out;
739
740         l = path->nodes[0];
741         slot = path->slots[0];
742         qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
743         btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
744         btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
745         btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
746         btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
747         btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
748
749         btrfs_mark_buffer_dirty(l);
750
751 out:
752         btrfs_free_path(path);
753         return ret;
754 }
755
756 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
757                                    struct btrfs_root *root,
758                                    struct btrfs_qgroup *qgroup)
759 {
760         struct btrfs_path *path;
761         struct btrfs_key key;
762         struct extent_buffer *l;
763         struct btrfs_qgroup_info_item *qgroup_info;
764         int ret;
765         int slot;
766
767         if (btrfs_is_testing(root->fs_info))
768                 return 0;
769
770         key.objectid = 0;
771         key.type = BTRFS_QGROUP_INFO_KEY;
772         key.offset = qgroup->qgroupid;
773
774         path = btrfs_alloc_path();
775         if (!path)
776                 return -ENOMEM;
777
778         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
779         if (ret > 0)
780                 ret = -ENOENT;
781
782         if (ret)
783                 goto out;
784
785         l = path->nodes[0];
786         slot = path->slots[0];
787         qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
788         btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
789         btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
790         btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
791         btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
792         btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
793
794         btrfs_mark_buffer_dirty(l);
795
796 out:
797         btrfs_free_path(path);
798         return ret;
799 }
800
801 static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
802                                      struct btrfs_fs_info *fs_info,
803                                     struct btrfs_root *root)
804 {
805         struct btrfs_path *path;
806         struct btrfs_key key;
807         struct extent_buffer *l;
808         struct btrfs_qgroup_status_item *ptr;
809         int ret;
810         int slot;
811
812         key.objectid = 0;
813         key.type = BTRFS_QGROUP_STATUS_KEY;
814         key.offset = 0;
815
816         path = btrfs_alloc_path();
817         if (!path)
818                 return -ENOMEM;
819
820         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
821         if (ret > 0)
822                 ret = -ENOENT;
823
824         if (ret)
825                 goto out;
826
827         l = path->nodes[0];
828         slot = path->slots[0];
829         ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
830         btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
831         btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
832         btrfs_set_qgroup_status_rescan(l, ptr,
833                                 fs_info->qgroup_rescan_progress.objectid);
834
835         btrfs_mark_buffer_dirty(l);
836
837 out:
838         btrfs_free_path(path);
839         return ret;
840 }
841
842 /*
843  * called with qgroup_lock held
844  */
845 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
846                                   struct btrfs_root *root)
847 {
848         struct btrfs_path *path;
849         struct btrfs_key key;
850         struct extent_buffer *leaf = NULL;
851         int ret;
852         int nr = 0;
853
854         path = btrfs_alloc_path();
855         if (!path)
856                 return -ENOMEM;
857
858         path->leave_spinning = 1;
859
860         key.objectid = 0;
861         key.offset = 0;
862         key.type = 0;
863
864         while (1) {
865                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
866                 if (ret < 0)
867                         goto out;
868                 leaf = path->nodes[0];
869                 nr = btrfs_header_nritems(leaf);
870                 if (!nr)
871                         break;
872                 /*
873                  * delete the leaf one by one
874                  * since the whole tree is going
875                  * to be deleted.
876                  */
877                 path->slots[0] = 0;
878                 ret = btrfs_del_items(trans, root, path, 0, nr);
879                 if (ret)
880                         goto out;
881
882                 btrfs_release_path(path);
883         }
884         ret = 0;
885 out:
886         btrfs_free_path(path);
887         return ret;
888 }
889
/*
 * Enable qgroup tracking: create the quota tree, write the initial status
 * item, create a qgroup item for every existing subvolume (plus the fs
 * tree), then kick off a rescan to build consistent numbers.
 *
 * Serialized by fs_info->qgroup_ioctl_lock; enabling twice is a no-op.
 * Returns 0 on success or a negative errno; on failure all partially
 * created in-memory state (qgroup_ulist, quota_root) is torn down again.
 */
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	/* Quotas already enabled, nothing to do */
	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret =  PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	/* Write the status item; the filesystem starts out INCONSISTENT
	 * until the rescan queued below completes. */
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out_free_path;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	/* Walk the root tree's ROOT_REF items to find existing subvolumes */
	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0)
		goto out_free_path;


	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			/* ROOT_REF offset is the referenced subvolume id;
			 * create its qgroup item and rb-tree entry */
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret)
				goto out_free_path;

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0)
			goto out_free_path;
		if (ret)
			break;
	}

out_add_root:
	/* The fs tree has no ROOT_REF item, add its qgroup explicitly */
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret)
		goto out_free_path;

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out_free_path;
	}
	/* Publish the quota root; qgroup_lock guards readers of quota_root */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);
	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
1025
/*
 * Disable qgroup tracking: stop any running rescan, unpublish the quota
 * root, drop the in-memory qgroup configuration and delete the quota tree
 * on disk.
 *
 * Serialized by fs_info->qgroup_ioctl_lock; disabling when quotas are off
 * is a no-op.  Returns 0 or a negative errno.
 */
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	/* Clear the enabled bit first so a running rescan can notice,
	 * then wait for it to finish before tearing anything down */
	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	/* Empty the quota tree so its root item can be deleted */
	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, fs_info, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	/* Release the (now empty) tree's root block and the root struct */
	btrfs_tree_lock(quota_root->node);
	clean_tree_block(fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
1067
1068 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1069                          struct btrfs_qgroup *qgroup)
1070 {
1071         if (list_empty(&qgroup->dirty))
1072                 list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1073 }
1074
1075 /*
1076  * The easy accounting, we're updating qgroup relationship whose child qgroup
1077  * only has exclusive extents.
1078  *
 * In this case, all exclusive extents will also be exclusive for parent, so
1080  * excl/rfer just get added/removed.
1081  *
1082  * So is qgroup reservation space, which should also be added/removed to
1083  * parent.
1084  * Or when child tries to release reservation space, parent will underflow its
1085  * reservation (for relationship adding case).
1086  *
1087  * Caller should hold fs_info->qgroup_lock.
1088  */
1089 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
1090                                     struct ulist *tmp, u64 ref_root,
1091                                     struct btrfs_qgroup *src, int sign)
1092 {
1093         struct btrfs_qgroup *qgroup;
1094         struct btrfs_qgroup_list *glist;
1095         struct ulist_node *unode;
1096         struct ulist_iterator uiter;
1097         u64 num_bytes = src->excl;
1098         int ret = 0;
1099
1100         qgroup = find_qgroup_rb(fs_info, ref_root);
1101         if (!qgroup)
1102                 goto out;
1103
1104         qgroup->rfer += sign * num_bytes;
1105         qgroup->rfer_cmpr += sign * num_bytes;
1106
1107         WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1108         qgroup->excl += sign * num_bytes;
1109         qgroup->excl_cmpr += sign * num_bytes;
1110
1111         if (sign > 0)
1112                 qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1113         else
1114                 qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1115
1116         qgroup_dirty(fs_info, qgroup);
1117
1118         /* Get all of the parent groups that contain this qgroup */
1119         list_for_each_entry(glist, &qgroup->groups, next_group) {
1120                 ret = ulist_add(tmp, glist->group->qgroupid,
1121                                 qgroup_to_aux(glist->group), GFP_ATOMIC);
1122                 if (ret < 0)
1123                         goto out;
1124         }
1125
1126         /* Iterate all of the parents and adjust their reference counts */
1127         ULIST_ITER_INIT(&uiter);
1128         while ((unode = ulist_next(tmp, &uiter))) {
1129                 qgroup = unode_aux_to_qgroup(unode);
1130                 qgroup->rfer += sign * num_bytes;
1131                 qgroup->rfer_cmpr += sign * num_bytes;
1132                 WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1133                 qgroup->excl += sign * num_bytes;
1134                 if (sign > 0)
1135                         qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1136                 else
1137                         qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1138                 qgroup->excl_cmpr += sign * num_bytes;
1139                 qgroup_dirty(fs_info, qgroup);
1140
1141                 /* Add any parents of the parents */
1142                 list_for_each_entry(glist, &qgroup->groups, next_group) {
1143                         ret = ulist_add(tmp, glist->group->qgroupid,
1144                                         qgroup_to_aux(glist->group), GFP_ATOMIC);
1145                         if (ret < 0)
1146                                 goto out;
1147                 }
1148         }
1149         ret = 0;
1150 out:
1151         return ret;
1152 }
1153
1154
1155 /*
1156  * Quick path for updating qgroup with only excl refs.
1157  *
1158  * In that case, just update all parent will be enough.
1159  * Or we needs to do a full rescan.
1160  * Caller should also hold fs_info->qgroup_lock.
1161  *
1162  * Return 0 for quick update, return >0 for need to full rescan
1163  * and mark INCONSISTENT flag.
1164  * Return < 0 for other error.
1165  */
1166 static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1167                                    struct ulist *tmp, u64 src, u64 dst,
1168                                    int sign)
1169 {
1170         struct btrfs_qgroup *qgroup;
1171         int ret = 1;
1172         int err = 0;
1173
1174         qgroup = find_qgroup_rb(fs_info, src);
1175         if (!qgroup)
1176                 goto out;
1177         if (qgroup->excl == qgroup->rfer) {
1178                 ret = 0;
1179                 err = __qgroup_excl_accounting(fs_info, tmp, dst,
1180                                                qgroup, sign);
1181                 if (err < 0) {
1182                         ret = err;
1183                         goto out;
1184                 }
1185         }
1186 out:
1187         if (ret)
1188                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1189         return ret;
1190 }
1191
/*
 * Assign qgroup @src as a member of qgroup @dst.
 *
 * Inserts the relation item in both directions (member->parent and
 * parent->member) on disk, mirrors it in the in-memory rb-tree and then
 * tries the quick accounting update.
 *
 * Returns 0 on success, -EINVAL for bad levels or unknown qgroups,
 * -EEXIST if the relation already exists, >0 if a full rescan is now
 * required, or a negative errno on other failures.
 */
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	/* Scratch ulist for quick_update_accounting's parent walk */
	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such qgroup relation exist firstly */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		/* Roll back the first item so disk state stays symmetric */
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}
1254
/*
 * Remove the membership of qgroup @src in qgroup @dst.
 *
 * Deletes both direction relation items on disk, drops the in-memory
 * relation and runs the quick accounting update with sign -1.
 *
 * Caller must hold fs_info->qgroup_ioctl_lock (see
 * btrfs_del_qgroup_relation() and btrfs_remove_qgroup()).
 *
 * Returns 0 on success, -EINVAL when quotas are off or a qgroup is
 * unknown, -ENOENT if the relation does not exist, >0 if a full rescan
 * is now required, or a negative errno.
 */
static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;
	int err;

	/* Scratch ulist for quick_update_accounting's parent walk */
	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such qgroup relation exist firstly */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	/* Delete both directions; report the first failure */
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	ulist_free(tmp);
	return ret;
}
1304
1305 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
1306                               struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1307 {
1308         int ret = 0;
1309
1310         mutex_lock(&fs_info->qgroup_ioctl_lock);
1311         ret = __del_qgroup_relation(trans, fs_info, src, dst);
1312         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1313
1314         return ret;
1315 }
1316
1317 int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
1318                         struct btrfs_fs_info *fs_info, u64 qgroupid)
1319 {
1320         struct btrfs_root *quota_root;
1321         struct btrfs_qgroup *qgroup;
1322         int ret = 0;
1323
1324         mutex_lock(&fs_info->qgroup_ioctl_lock);
1325         quota_root = fs_info->quota_root;
1326         if (!quota_root) {
1327                 ret = -EINVAL;
1328                 goto out;
1329         }
1330         qgroup = find_qgroup_rb(fs_info, qgroupid);
1331         if (qgroup) {
1332                 ret = -EEXIST;
1333                 goto out;
1334         }
1335
1336         ret = add_qgroup_item(trans, quota_root, qgroupid);
1337         if (ret)
1338                 goto out;
1339
1340         spin_lock(&fs_info->qgroup_lock);
1341         qgroup = add_qgroup_rb(fs_info, qgroupid);
1342         spin_unlock(&fs_info->qgroup_lock);
1343
1344         if (IS_ERR(qgroup))
1345                 ret = PTR_ERR(qgroup);
1346 out:
1347         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1348         return ret;
1349 }
1350
/*
 * Delete qgroup @qgroupid: remove its item from the quota tree, drop all
 * relations to its parent groups and remove the in-memory entry.
 *
 * Returns 0 on success, -EINVAL when quotas are disabled, -ENOENT if the
 * qgroup is unknown, -EBUSY if it still has member qgroups, or a
 * negative errno.
 */
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	} else {
		/* check if there are no children of this qgroup */
		if (!list_empty(&qgroup->members)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* -ENOENT is tolerated: the item may already be gone on disk */
	ret = del_qgroup_item(trans, quota_root, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	/* Detach from every parent; each pass shortens qgroup->groups */
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, fs_info,
					   qgroupid,
					   list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
1398
/*
 * Apply the limits in @limit to qgroup @qgroupid and persist them in the
 * quota tree.  Only the fields whose BTRFS_QGROUP_LIMIT_* flag is set in
 * limit->flags are touched; a field value of -1 clears that limit.
 *
 * Returns 0 on success, -EINVAL when quotas are disabled, -ENOENT for an
 * unknown qgroup, or a negative errno from the item update (in which
 * case the INCONSISTENT flag is raised).
 */
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/* Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat the -1 as a special value
	 * which tell kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	/* Update each requested limit; clearing also drops the flag so it
	 * is not written back as active */
	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, quota_root, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
		       qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
1477
1478 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
1479                                 struct btrfs_delayed_ref_root *delayed_refs,
1480                                 struct btrfs_qgroup_extent_record *record)
1481 {
1482         struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
1483         struct rb_node *parent_node = NULL;
1484         struct btrfs_qgroup_extent_record *entry;
1485         u64 bytenr = record->bytenr;
1486
1487         assert_spin_locked(&delayed_refs->lock);
1488         trace_btrfs_qgroup_trace_extent(fs_info, record);
1489
1490         while (*p) {
1491                 parent_node = *p;
1492                 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
1493                                  node);
1494                 if (bytenr < entry->bytenr)
1495                         p = &(*p)->rb_left;
1496                 else if (bytenr > entry->bytenr)
1497                         p = &(*p)->rb_right;
1498                 else
1499                         return 1;
1500         }
1501
1502         rb_link_node(&record->node, parent_node, p);
1503         rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
1504         return 0;
1505 }
1506
1507 int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
1508                                    struct btrfs_qgroup_extent_record *qrecord)
1509 {
1510         struct ulist *old_root;
1511         u64 bytenr = qrecord->bytenr;
1512         int ret;
1513
1514         ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
1515         if (ret < 0) {
1516                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1517                 btrfs_warn(fs_info,
1518 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
1519                         ret);
1520                 return 0;
1521         }
1522
1523         /*
1524          * Here we don't need to get the lock of
1525          * trans->transaction->delayed_refs, since inserted qrecord won't
1526          * be deleted, only qrecord->node may be modified (new qrecord insert)
1527          *
1528          * So modifying qrecord->old_roots is safe here
1529          */
1530         qrecord->old_roots = old_root;
1531         return 0;
1532 }
1533
1534 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
1535                 struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
1536                 gfp_t gfp_flag)
1537 {
1538         struct btrfs_qgroup_extent_record *record;
1539         struct btrfs_delayed_ref_root *delayed_refs;
1540         int ret;
1541
1542         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
1543             || bytenr == 0 || num_bytes == 0)
1544                 return 0;
1545         if (WARN_ON(trans == NULL))
1546                 return -EINVAL;
1547         record = kmalloc(sizeof(*record), gfp_flag);
1548         if (!record)
1549                 return -ENOMEM;
1550
1551         delayed_refs = &trans->transaction->delayed_refs;
1552         record->bytenr = bytenr;
1553         record->num_bytes = num_bytes;
1554         record->old_roots = NULL;
1555
1556         spin_lock(&delayed_refs->lock);
1557         ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
1558         spin_unlock(&delayed_refs->lock);
1559         if (ret > 0) {
1560                 kfree(record);
1561                 return 0;
1562         }
1563         return btrfs_qgroup_trace_extent_post(fs_info, record);
1564 }
1565
1566 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
1567                                   struct btrfs_fs_info *fs_info,
1568                                   struct extent_buffer *eb)
1569 {
1570         int nr = btrfs_header_nritems(eb);
1571         int i, extent_type, ret;
1572         struct btrfs_key key;
1573         struct btrfs_file_extent_item *fi;
1574         u64 bytenr, num_bytes;
1575
1576         /* We can be called directly from walk_up_proc() */
1577         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1578                 return 0;
1579
1580         for (i = 0; i < nr; i++) {
1581                 btrfs_item_key_to_cpu(eb, &key, i);
1582
1583                 if (key.type != BTRFS_EXTENT_DATA_KEY)
1584                         continue;
1585
1586                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
1587                 /* filter out non qgroup-accountable extents  */
1588                 extent_type = btrfs_file_extent_type(eb, fi);
1589
1590                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1591                         continue;
1592
1593                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1594                 if (!bytenr)
1595                         continue;
1596
1597                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1598
1599                 ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
1600                                                 num_bytes, GFP_NOFS);
1601                 if (ret)
1602                         return ret;
1603         }
1604         cond_resched();
1605         return 0;
1606 }
1607
1608 /*
1609  * Walk up the tree from the bottom, freeing leaves and any interior
1610  * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
1612  * incremented. The root node will never be freed.
1613  *
1614  * At the end of this function, we should have a path which has all
1615  * slots incremented to the next position for a search. If we need to
1616  * read a new node it will be NULL and the node above it will have the
1617  * correct slot selected for a later read.
1618  *
1619  * If we increment the root nodes slot counter past the number of
1620  * elements, 1 is returned to signal completion of the search.
1621  */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	/* A level-0 root is a single leaf: nothing to walk back up */
	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		/* level == 0: a leaf is always fully consumed here */
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root -  we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				/* Drop the lock and our path reference,
				 * then continue upwards */
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	/* Root slot ran past its items: the whole tree has been visited */
	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}
1668
/*
 * Trace a whole subtree for qgroup accounting.
 *
 * Walk the tree rooted at @root_eb, queueing a qgroup dirty-extent record
 * for every tree block read (btrfs_qgroup_trace_extent()) and accounting
 * the file extent items of every leaf (btrfs_qgroup_trace_leaf_items()),
 * so the later accounting pass can update rfer/excl numbers.
 *
 * @root_gen:   generation of @root_eb, used to (re)read the buffer
 * @root_level: level of @root_eb (0 means @root_eb is itself a leaf)
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int level;
	struct extent_buffer *eb = root_eb;
	struct btrfs_path *path = NULL;

	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
	BUG_ON(root_eb == NULL);

	/* Nothing to trace when quotas are disabled */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (!extent_buffer_uptodate(root_eb)) {
		ret = btrfs_read_buffer(root_eb, root_gen);
		if (ret)
			goto out;
	}

	/* Root is already a leaf: account its items and we are done */
	if (root_level == 0) {
		ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;	/* nothing acquired yet, plain return is safe */

	/*
	 * Walk down the tree.  Missing extent blocks are filled in as
	 * we go. Metadata is accounted every time we read a new
	 * extent block.
	 *
	 * When we reach a leaf, we account for file extent items in it,
	 * walk back up the tree (adjusting slot pointers as we go)
	 * and restart the search process.
	 */
	extent_buffer_get(root_eb); /* For path */
	path->nodes[root_level] = root_eb;
	path->slots[root_level] = 0;
	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
	level = root_level;
	while (level >= 0) {
		if (path->nodes[level] == NULL) {
			int parent_slot;
			u64 child_gen;
			u64 child_bytenr;

			/*
			 * We need to get child blockptr/gen from parent before
			 * we can read it.
			 */
			eb = path->nodes[level + 1];
			parent_slot = path->slots[level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
			child_gen = btrfs_node_ptr_generation(eb, parent_slot);

			eb = read_tree_block(fs_info, child_bytenr, child_gen);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			} else if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				ret = -EIO;
				goto out;
			}

			/*
			 * The eb reference and the read lock taken below are
			 * handed over to the path; both are released by
			 * btrfs_free_path() at "out".
			 */
			path->nodes[level] = eb;
			path->slots[level] = 0;

			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;

			/* Account the tree block we just read */
			ret = btrfs_qgroup_trace_extent(trans, fs_info,
							child_bytenr,
							fs_info->nodesize,
							GFP_NOFS);
			if (ret)
				goto out;
		}

		if (level == 0) {
			/* Leaf reached: account its file extent items */
			ret = btrfs_qgroup_trace_leaf_items(trans,fs_info,
							   path->nodes[level]);
			if (ret)
				goto out;

			/* Nonzero return here means we completed our search */
			ret = adjust_slots_upwards(path, root_level);
			if (ret)
				break;

			/* Restart search with new slots */
			goto walk_down;
		}

		level--;
	}

	ret = 0;
out:
	btrfs_free_path(path);

	return ret;
}
1780
1781 #define UPDATE_NEW      0
1782 #define UPDATE_OLD      1
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 *
 * For each root id in @roots, look up its qgroup and bump either the old
 * or the new refcnt (selected by @update_old, see UPDATE_OLD/UPDATE_NEW)
 * at sequence @seq — for that qgroup and, transitively via ->groups, for
 * every group it is a member of.
 *
 * @tmp is caller-provided scratch space (reinitialized here) driving the
 * upward walk; every qgroup touched is also collected into @qgroups so
 * qgroup_update_counters() can iterate them afterwards.
 *
 * Runs with fs_info->qgroup_lock held by the caller, hence GFP_ATOMIC
 * for the ulist additions.
 *
 * Returns 0 on success or a negative errno from ulist_add().
 */
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				struct ulist *roots, struct ulist *tmp,
				struct ulist *qgroups, u64 seq, int update_old)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret = 0;

	if (!roots)
		return 0;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = unode_aux_to_qgroup(tmp_unode);
			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
			/* Propagate to all parent groups of this qgroup */
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}
1838
1839 /*
1840  * Update qgroup rfer/excl counters.
1841  * Rfer update is easy, codes can explain themselves.
1842  *
1843  * Excl update is tricky, the update is split into 2 part.
1844  * Part 1: Possible exclusive <-> sharing detect:
1845  *      |       A       |       !A      |
1846  *  -------------------------------------
1847  *  B   |       *       |       -       |
1848  *  -------------------------------------
1849  *  !B  |       +       |       **      |
1850  *  -------------------------------------
1851  *
1852  * Conditions:
1853  * A:   cur_old_roots < nr_old_roots    (not exclusive before)
1854  * !A:  cur_old_roots == nr_old_roots   (possible exclusive before)
1855  * B:   cur_new_roots < nr_new_roots    (not exclusive now)
1856  * !B:  cur_new_roots == nr_new_roots   (possible exclusive now)
1857  *
1858  * Results:
1859  * +: Possible sharing -> exclusive     -: Possible exclusive -> sharing
1860  * *: Definitely not changed.           **: Possible unchanged.
1861  *
1862  * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
1863  *
1864  * To make the logic clear, we first use condition A and B to split
1865  * combination into 4 results.
1866  *
 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
 * them only one variant may be 0.
1869  *
1870  * Lastly, check result **, since there are 2 variants maybe 0, split them
1871  * again(2x2).
1872  * But this time we don't need to consider other things, the codes and logic
1873  * is easy to understand now.
1874  */
/*
 * Apply the refcnts collected at sequence @seq (by qgroup_update_refcnt())
 * to the rfer/excl counters of every qgroup in @qgroups; see the table in
 * the comment above for the exclusive-update logic.  Qgroups whose
 * numbers changed are marked dirty for later writeout.
 */
static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
				  struct ulist *qgroups,
				  u64 nr_old_roots,
				  u64 nr_new_roots,
				  u64 num_bytes, u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		bool dirty = false;

		qg = unode_aux_to_qgroup(unode);
		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

		trace_qgroup_update_counters(fs_info, qg->qgroupid,
					     cur_old_count, cur_new_count);

		/* Rfer update part */
		/* Extent newly referenced by this qgroup */
		if (cur_old_count == 0 && cur_new_count > 0) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}
		/* Extent no longer referenced by this qgroup */
		if (cur_old_count > 0 && cur_new_count == 0) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		/* Excl update part */
		/* Exclusive/none -> shared case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count < nr_new_roots) {
			/* Exclusive -> shared */
			if (cur_old_count != 0) {
				qg->excl -= num_bytes;
				qg->excl_cmpr -= num_bytes;
				dirty = true;
			}
		}

		/* Shared -> exclusive/none case */
		if (cur_old_count < nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			/* Shared->exclusive */
			if (cur_new_count != 0) {
				qg->excl += num_bytes;
				qg->excl_cmpr += num_bytes;
				dirty = true;
			}
		}

		/* Exclusive/none -> exclusive/none case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			if (cur_old_count == 0) {
				/* None -> exclusive/none */

				if (cur_new_count != 0) {
					/* None -> exclusive */
					qg->excl += num_bytes;
					qg->excl_cmpr += num_bytes;
					dirty = true;
				}
				/* None -> none, nothing changed */
			} else {
				/* Exclusive -> exclusive/none */

				if (cur_new_count == 0) {
					/* Exclusive -> none */
					qg->excl -= num_bytes;
					qg->excl_cmpr -= num_bytes;
					dirty = true;
				}
				/* Exclusive -> exclusive, nothing changed */
			}
		}

		if (dirty)
			qgroup_dirty(fs_info, qg);
	}
	return 0;
}
1963
1964 /*
1965  * Check if the @roots potentially is a list of fs tree roots
1966  *
1967  * Return 0 for definitely not a fs/subvol tree roots ulist
1968  * Return 1 for possible fs/subvol tree roots in the list (considering an empty
1969  *          one as well)
1970  */
1971 static int maybe_fs_roots(struct ulist *roots)
1972 {
1973         struct ulist_node *unode;
1974         struct ulist_iterator uiter;
1975
1976         /* Empty one, still possible for fs roots */
1977         if (!roots || roots->nnodes == 0)
1978                 return 1;
1979
1980         ULIST_ITER_INIT(&uiter);
1981         unode = ulist_next(roots, &uiter);
1982         if (!unode)
1983                 return 1;
1984
1985         /*
1986          * If it contains fs tree roots, then it must belong to fs/subvol
1987          * trees.
1988          * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
1989          */
1990         return is_fstree(unode->val);
1991 }
1992
/*
 * Account one extent change for qgroups.
 *
 * Updates rfer/excl of all qgroups affected by @bytenr/@num_bytes moving
 * from the roots in @old_roots to the roots in @new_roots.
 *
 * Consumes (frees) both @old_roots and @new_roots, on every exit path,
 * including errors and early returns after the quota-enabled check.
 *
 * Returns 0 on success or a negative errno.
 */
int
btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info,
			    u64 bytenr, u64 num_bytes,
			    struct ulist *old_roots, struct ulist *new_roots)
{
	struct ulist *qgroups = NULL;
	struct ulist *tmp = NULL;
	u64 seq;
	u64 nr_new_roots = 0;
	u64 nr_old_roots = 0;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (new_roots) {
		if (!maybe_fs_roots(new_roots))
			goto out_free;
		nr_new_roots = new_roots->nnodes;
	}
	if (old_roots) {
		if (!maybe_fs_roots(old_roots))
			goto out_free;
		nr_old_roots = old_roots->nnodes;
	}

	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
	if (nr_old_roots == 0 && nr_new_roots == 0)
		goto out_free;

	BUG_ON(!fs_info->quota_root);

	trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes,
					  nr_old_roots, nr_new_roots);

	/* Allocated before taking qgroup_lock, so GFP_NOFS is fine here */
	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups) {
		ret = -ENOMEM;
		goto out_free;
	}
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		/*
		 * Rescan has not passed this bytenr yet: skip accounting
		 * here — presumably the rescan worker will pick it up when
		 * it reaches this extent. NOTE(review): confirm against the
		 * rescan worker's progress handling.
		 */
		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			ret = 0;
			goto out_free;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	spin_lock(&fs_info->qgroup_lock);
	seq = fs_info->qgroup_seq;

	/* Update old refcnts using old_roots */
	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
				   UPDATE_OLD);
	if (ret < 0)
		goto out;

	/* Update new refcnts using new_roots */
	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
				   UPDATE_NEW);
	if (ret < 0)
		goto out;

	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
			       num_bytes, seq);

	/*
	 * Bump qgroup_seq to avoid seq overlap
	 */
	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
out:
	spin_unlock(&fs_info->qgroup_lock);
out_free:
	ulist_free(tmp);
	ulist_free(qgroups);
	ulist_free(old_roots);
	ulist_free(new_roots);
	return ret;
}
2081
/*
 * Account all qgroup extent records queued on the transaction's
 * dirty_extent_root rbtree, draining and freeing the tree as we go.
 *
 * For each record the current ("new") roots are resolved with SEQ_LAST;
 * old roots are resolved from the commit root if they were not recorded
 * when the record was inserted.  Both root lists are handed to
 * btrfs_qgroup_account_extent(), which takes ownership and frees them.
 *
 * After the first error, remaining records are still dequeued and freed
 * but no longer accounted (guarded by "if (!ret)").
 *
 * Returns 0 on success or the first error encountered.
 */
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct ulist *new_roots = NULL;
	struct rb_node *node;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;
	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);

		trace_btrfs_qgroup_account_extents(fs_info, record);

		if (!ret) {
			/*
			 * Old roots should be searched when inserting qgroup
			 * extent record
			 */
			if (WARN_ON(!record->old_roots)) {
				/* Search commit root to find old_roots */
				ret = btrfs_find_all_roots(NULL, fs_info,
						record->bytenr, 0,
						&record->old_roots, false);
				if (ret < 0)
					goto cleanup;
			}

			/*
			 * Use SEQ_LAST as time_seq to do special search, which
			 * doesn't lock tree or delayed_refs and search current
			 * root. It's safe inside commit_transaction().
			 */
			ret = btrfs_find_all_roots(trans, fs_info,
				record->bytenr, SEQ_LAST, &new_roots, false);
			if (ret < 0)
				goto cleanup;
			/* Drop the root being snapshotted from both lists */
			if (qgroup_to_skip) {
				ulist_del(new_roots, qgroup_to_skip, 0);
				ulist_del(record->old_roots, qgroup_to_skip,
					  0);
			}
			/* Ownership of both ulists passes to the callee */
			ret = btrfs_qgroup_account_extent(trans, fs_info,
					record->bytenr, record->num_bytes,
					record->old_roots, new_roots);
			record->old_roots = NULL;
			new_roots = NULL;
		}
cleanup:
		ulist_free(record->old_roots);
		ulist_free(new_roots);
		new_roots = NULL;
		rb_erase(node, &delayed_refs->dirty_extent_root);
		kfree(record);

	}
	return ret;
}
2144
/*
 * Called from commit_transaction. Writes all changed qgroups to disk.
 *
 * Drains fs_info->dirty_qgroups, writing an info item and a limit item
 * for each dirty qgroup, then refreshes the on-disk status item.  The
 * qgroup spinlock is dropped around the (potentially blocking) tree
 * updates and re-taken to pick the next list entry.  Item-update
 * failures do not abort; they only mark the qgroup state inconsistent.
 *
 * Returns the result of the final status item update (errors from the
 * per-qgroup items are reflected solely in the INCONSISTENT flag).
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;

	/* No quota tree means qgroups were never enabled */
	if (!quota_root)
		return ret;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		/* Drop the spinlock: the item updates below can block */
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		ret = update_qgroup_limit_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans, fs_info, quota_root);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	return ret;
}
2186
/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.
 *
 * @srcid:    id of the snapshotted subvolume, or 0 when creating a fresh
 *            (empty) subvolume
 * @objectid: id of the newly created subvolume
 * @inherit:  optional user-supplied inheritance description; the qgroup
 *            ids it references follow the struct in memory
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	u32 level_size = 0;
	u64 nums;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out;

	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	/* Validate the user-supplied qgroup id list up front */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);

			/*
			 * Zero out invalid groups so we can ignore
			 * them later.  The upper 16 bits of a qgroupid
			 * encode its level; only groups of a higher
			 * level than the new subvolume may be inherited.
			 */
			if (!srcgroup ||
			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
				*i_qgroups = 0ULL;

			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	if (srcid) {
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;

		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
			goto out;
		}

		/*
		 * Size of the new root node; used below to account the
		 * one tree block that differs between source and snapshot.
		 */
		level_size = fs_info->nodesize;
	}

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
			if (*i_qgroups == 0)
				continue;
			ret = add_qgroup_relation_item(trans, quota_root,
						       objectid, *i_qgroups);
			if (ret && ret != -EEXIST)
				goto out;
			ret = add_qgroup_relation_item(trans, quota_root,
						       *i_qgroups, objectid);
			if (ret && ret != -EEXIST)
				goto out;
		}
		ret = 0;
	}


	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;

		ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
		if (ret) {
			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info,
				   "unable to update quota limit for %llu",
				   dstgroup->qgroupid);
			goto unlock;
		}
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make sure
		 * our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;

		/* inherit the limit info */
		dstgroup->lim_flags = srcgroup->lim_flags;
		dstgroup->max_rfer = srcgroup->max_rfer;
		dstgroup->max_excl = srcgroup->max_excl;
		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
		dstgroup->rsv_excl = srcgroup->rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	/* Mirror the relation items added above in the in-memory rbtree */
	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		if (*i_qgroups) {
			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
			if (ret)
				goto unlock;
		}
		++i_qgroups;
	}

	/* Copy rfer numbers for the requested (src, dst) qgroup pairs */
	for (i = 0; i <  inherit->num_ref_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
	}
	/* Copy excl numbers for the requested (src, dst) qgroup pairs */
	for (i = 0; i <  inherit->num_excl_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
2387
2388 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
2389 {
2390         if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2391             qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
2392                 return false;
2393
2394         if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2395             qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
2396                 return false;
2397
2398         return true;
2399 }
2400
/*
 * Try to reserve @num_bytes of @type for @root's qgroup and for every
 * group it is a member of (walked transitively via the ->groups lists).
 *
 * Two passes over fs_info->qgroup_ulist under qgroup_lock: the first
 * collects the affected qgroups and checks their limits (skipped when
 * @enforce ends up false), the second records the reservation in all of
 * them.  Limit checks are bypassed for CAP_SYS_RESOURCE callers when
 * the quota-override flag is set.
 *
 * Returns 0 on success, -EDQUOT when a limit would be exceeded, or a
 * negative errno from ulist_add().
 */
static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
			  enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	/* Only fs/subvol trees are accounted by qgroups */
	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
	    capable(CAP_SYS_RESOURCE))
		enforce = false;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
			ret = -EDQUOT;
			goto out;
		}

		/* Queue the parent groups so their limits get checked too */
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = unode_aux_to_qgroup(unode);

		trace_qgroup_update_reserve(fs_info, qg, num_bytes, type);
		qgroup_rsv_add(fs_info, qg, num_bytes, type);
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}
2478
2479 /*
2480  * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
2481  * qgroup).
2482  *
2483  * Will handle all higher level qgroup too.
2484  *
2485  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
2486  * This special case is only used for META_PERTRANS type.
2487  */
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	/* Only subvolume (fs) trees carry qgroup reservations */
	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	/* The (u64)-1 "free everything" request is only valid for META_PERTRANS */
	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
		WARN(1, "%s: Invalid type to free", __func__);
		return;
	}
	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	if (num_bytes == (u64)-1)
		/*
		 * We're freeing all pertrans rsv, get reserved value from
		 * level 0 qgroup as real num_bytes to free.
		 */
		num_bytes = qgroup->rsv.values[type];

	/*
	 * Walk from the level 0 qgroup through all of its ancestors
	 * (qgroup_ulist doubles as the BFS worklist) and release the
	 * reservation from each one.
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes, type);
		qgroup_rsv_release(fs_info, qg, num_bytes, type);

		/* Queue every parent qgroup for the same release */
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
2552
2553 /*
2554  * returns < 0 on error, 0 when more leafs are to be scanned.
2555  * returns 1 when done.
2556  */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans)
{
	struct btrfs_key found;
	struct extent_buffer *scratch_leaf = NULL;
	struct ulist *roots = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	u64 num_bytes;
	int slot;
	int ret;

	/* qgroup_rescan_lock protects qgroup_rescan_progress */
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	btrfs_debug(fs_info,
		"current progress key (%llu %u %llu), search_slot ret %d",
		fs_info->qgroup_rescan_progress.objectid,
		fs_info->qgroup_rescan_progress.type,
		fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	/* Advance progress past the last item of this leaf */
	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	/*
	 * Clone the leaf so the path and its tree locks can be dropped
	 * while we account each extent item of the snapshot below.
	 */
	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
	if (!scratch_leaf) {
		ret = -ENOMEM;
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		goto out;
	}
	extent_buffer_get(scratch_leaf);
	btrfs_tree_read_lock(scratch_leaf);
	btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		/* Metadata extents are nodesize; data extents store size in offset */
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->nodesize;
		else
			num_bytes = found.offset;

		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots, false);
		if (ret < 0)
			goto out;
		/* For rescan, just pass old_roots as NULL */
		ret = btrfs_qgroup_account_extent(trans, fs_info,
				found.objectid, num_bytes, NULL, roots);
		if (ret < 0)
			goto out;
	}
out:
	if (scratch_leaf) {
		btrfs_tree_read_unlock_blocking(scratch_leaf);
		free_extent_buffer(scratch_leaf);
	}
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}
2642
/*
 * Background work function driving a qgroup rescan: scans the extent tree
 * leaf by leaf, one transaction per leaf, then persists the final status.
 */
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	int err = -ENOMEM;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	err = 0;
	/*
	 * Loop until qgroup_rescan_leaf() returns non-zero (> 0 means the
	 * scan is done, < 0 is an error) or the filesystem is closing.
	 */
	while (!err && !btrfs_fs_closing(fs_info)) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
			/* Quota was disabled while we were rescanning */
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans);
		}
		if (err > 0)
			btrfs_commit_transaction(trans);
		else
			btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	/* A paused scan (fs closing) keeps the RESCAN flag so it can resume */
	if (!btrfs_fs_closing(fs_info))
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * only update status, since the previous part has already updated the
	 * qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		btrfs_err(fs_info,
			  "fail to start transaction for status update: %d",
			  err);
		goto done;
	}
	ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
	if (ret < 0) {
		err = ret;
		btrfs_err(fs_info, "fail to update qgroup status: %d", err);
	}
	btrfs_end_transaction(trans);

	if (btrfs_fs_closing(fs_info)) {
		btrfs_info(fs_info, "qgroup scan paused");
	} else if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

done:
	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_rescan_running = false;
	mutex_unlock(&fs_info->qgroup_rescan_lock);
	/* Wake anyone blocked in btrfs_qgroup_wait_for_completion() */
	complete_all(&fs_info->qgroup_rescan_completion);
}
2723
2724 /*
2725  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
2726  * memory required for the rescan context.
2727  */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	/*
	 * Resume case (init_flags == 0): a rescan must already be flagged
	 * and quota must be on, otherwise there is nothing to resume.
	 */
	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
		ret = -EINVAL;
		goto err;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		/* Fresh start: refuse if already running or quota is off */
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = -EINPROGRESS;
		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
			ret = -EINVAL;

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			goto err;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
	init_completion(&fs_info->qgroup_rescan_completion);
	fs_info->qgroup_rescan_running = true;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_helper,
			btrfs_qgroup_rescan_worker, NULL, NULL);

	/*
	 * NOTE: the err label sits inside this branch, which is dead on the
	 * success path (ret == 0 here); it is only reached via the gotos above.
	 */
	if (ret) {
err:
		btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
		return ret;
	}

	return 0;
}
2781
2782 static void
2783 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
2784 {
2785         struct rb_node *n;
2786         struct btrfs_qgroup *qgroup;
2787
2788         spin_lock(&fs_info->qgroup_lock);
2789         /* clear all current qgroup tracking information */
2790         for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
2791                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
2792                 qgroup->rfer = 0;
2793                 qgroup->rfer_cmpr = 0;
2794                 qgroup->excl = 0;
2795                 qgroup->excl_cmpr = 0;
2796         }
2797         spin_unlock(&fs_info->qgroup_lock);
2798 }
2799
/*
 * Kick off a fresh qgroup rescan: mark it running, commit the current
 * transaction so all delayed refs are accounted, zero all tracking and
 * queue the rescan worker.
 */
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		/* Undo the RESCAN flag set by qgroup_rescan_init() */
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}
2839
2840 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
2841                                      bool interruptible)
2842 {
2843         int running;
2844         int ret = 0;
2845
2846         mutex_lock(&fs_info->qgroup_rescan_lock);
2847         spin_lock(&fs_info->qgroup_lock);
2848         running = fs_info->qgroup_rescan_running;
2849         spin_unlock(&fs_info->qgroup_lock);
2850         mutex_unlock(&fs_info->qgroup_rescan_lock);
2851
2852         if (!running)
2853                 return 0;
2854
2855         if (interruptible)
2856                 ret = wait_for_completion_interruptible(
2857                                         &fs_info->qgroup_rescan_completion);
2858         else
2859                 wait_for_completion(&fs_info->qgroup_rescan_completion);
2860
2861         return ret;
2862 }
2863
2864 /*
2865  * this is only called from open_ctree where we're still single threaded, thus
2866  * locking is omitted here.
2867  */
2868 void
2869 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
2870 {
2871         if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2872                 btrfs_queue_work(fs_info->qgroup_rescan_workers,
2873                                  &fs_info->qgroup_rescan_work);
2874 }
2875
2876 /*
2877  * Reserve qgroup space for range [start, start + len).
2878  *
2879  * This function will either reserve space from related qgroups or doing
2880  * nothing if the range is already reserved.
2881  *
2882  * Return 0 for successful reserve
2883  * Return <0 for error (including -EQUOT)
2884  *
2885  * NOTE: this function may sleep for memory allocation.
2886  *       if btrfs_qgroup_reserve_data() is called multiple times with
2887  *       same @reserved, caller must ensure when error happens it's OK
2888  *       to free *ALL* reserved space.
2889  */
int btrfs_qgroup_reserve_data(struct inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct extent_changeset *reserved;
	u64 orig_reserved;
	u64 to_reserve;
	int ret;

	/* No-op when quota is off, on non-fs trees, or for an empty range */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->objectid) || len == 0)
		return 0;

	/* @reserved parameter is mandatory for qgroup */
	if (WARN_ON(!reserved_ret))
		return -EINVAL;
	if (!*reserved_ret) {
		*reserved_ret = extent_changeset_alloc();
		if (!*reserved_ret)
			return -ENOMEM;
	}
	reserved = *reserved_ret;
	/* Record already reserved space */
	orig_reserved = reserved->bytes_changed;
	/* Mark the range reserved; @reserved records exactly which ranges changed */
	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
			start + len -1, EXTENT_QGROUP_RESERVED, reserved);

	/* Newly reserved space */
	to_reserve = reserved->bytes_changed - orig_reserved;
	trace_btrfs_qgroup_reserve_data(inode, start, len,
					to_reserve, QGROUP_RESERVE);
	if (ret < 0)
		goto cleanup;
	/* Charge only the newly reserved bytes against the qgroups */
	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
	if (ret < 0)
		goto cleanup;

	return ret;

cleanup:
	/* cleanup *ALL* already reserved ranges */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(&reserved->range_changed, &uiter)))
		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
	extent_changeset_release(reserved);
	return ret;
}
2941
2942 /* Free ranges specified by @reserved, normally in error path */
2943 static int qgroup_free_reserved_data(struct inode *inode,
2944                         struct extent_changeset *reserved, u64 start, u64 len)
2945 {
2946         struct btrfs_root *root = BTRFS_I(inode)->root;
2947         struct ulist_node *unode;
2948         struct ulist_iterator uiter;
2949         struct extent_changeset changeset;
2950         int freed = 0;
2951         int ret;
2952
2953         extent_changeset_init(&changeset);
2954         len = round_up(start + len, root->fs_info->sectorsize);
2955         start = round_down(start, root->fs_info->sectorsize);
2956
2957         ULIST_ITER_INIT(&uiter);
2958         while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
2959                 u64 range_start = unode->val;
2960                 /* unode->aux is the inclusive end */
2961                 u64 range_len = unode->aux - range_start + 1;
2962                 u64 free_start;
2963                 u64 free_len;
2964
2965                 extent_changeset_release(&changeset);
2966
2967                 /* Only free range in range [start, start + len) */
2968                 if (range_start >= start + len ||
2969                     range_start + range_len <= start)
2970                         continue;
2971                 free_start = max(range_start, start);
2972                 free_len = min(start + len, range_start + range_len) -
2973                            free_start;
2974                 /*
2975                  * TODO: To also modify reserved->ranges_reserved to reflect
2976                  * the modification.
2977                  *
2978                  * However as long as we free qgroup reserved according to
2979                  * EXTENT_QGROUP_RESERVED, we won't double free.
2980                  * So not need to rush.
2981                  */
2982                 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree,
2983                                 free_start, free_start + free_len - 1,
2984                                 EXTENT_QGROUP_RESERVED, &changeset);
2985                 if (ret < 0)
2986                         goto out;
2987                 freed += changeset.bytes_changed;
2988         }
2989         btrfs_qgroup_free_refroot(root->fs_info, root->objectid, freed,
2990                                   BTRFS_QGROUP_RSV_DATA);
2991         ret = freed;
2992 out:
2993         extent_changeset_release(&changeset);
2994         return ret;
2995 }
2996
/*
 * Common implementation of btrfs_qgroup_release_data() (@free == 0) and
 * btrfs_qgroup_free_data() (@free == 1): clear EXTENT_QGROUP_RESERVED on
 * the range and, in the free case, return the bytes to the qgroups.
 */
static int __btrfs_qgroup_release_data(struct inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len,
			int free)
{
	struct extent_changeset changeset;
	int trace_op = QGROUP_RELEASE;
	int ret;

	/* In release case, we shouldn't have @reserved */
	WARN_ON(!free && reserved);
	if (free && reserved)
		return qgroup_free_reserved_data(inode, reserved, start, len);
	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
			start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
	if (ret < 0)
		goto out;

	if (free)
		trace_op = QGROUP_FREE;
	trace_btrfs_qgroup_release_data(inode, start, len,
					changeset.bytes_changed, trace_op);
	/* Only the free case returns the reserved bytes to the qgroups */
	if (free)
		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
				BTRFS_I(inode)->root->objectid,
				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
	/* On success, return how many bytes were actually cleared */
	ret = changeset.bytes_changed;
out:
	extent_changeset_release(&changeset);
	return ret;
}
3028
3029 /*
3030  * Free a reserved space range from io_tree and related qgroups
3031  *
3032  * Should be called when a range of pages get invalidated before reaching disk.
3033  * Or for error cleanup case.
3034  * if @reserved is given, only reserved range in [@start, @start + @len) will
3035  * be freed.
3036  *
3037  * For data written to disk, use btrfs_qgroup_release_data().
3038  *
3039  * NOTE: This function may sleep for memory allocation.
3040  */
int btrfs_qgroup_free_data(struct inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	/* free == 1: clear the reserved bits AND return bytes to the qgroups */
	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
}
3046
3047 /*
3048  * Release a reserved space range from io_tree only.
3049  *
3050  * Should be called when a range of pages get written to disk and corresponding
3051  * FILE_EXTENT is inserted into corresponding root.
3052  *
3053  * Since new qgroup accounting framework will only update qgroup numbers at
3054  * commit_transaction() time, its reserved space shouldn't be freed from
3055  * related qgroups.
3056  *
3057  * But we should release the range from io_tree, to allow further write to be
3058  * COWed.
3059  *
3060  * NOTE: This function may sleep for memory allocation.
3061  */
int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
{
	/* free == 0: clear the reserved bits but keep the qgroup reservation */
	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
}
3066
3067 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3068                               enum btrfs_qgroup_rsv_type type)
3069 {
3070         if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3071             type != BTRFS_QGROUP_RSV_META_PERTRANS)
3072                 return;
3073         if (num_bytes == 0)
3074                 return;
3075
3076         spin_lock(&root->qgroup_meta_rsv_lock);
3077         if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
3078                 root->qgroup_meta_rsv_prealloc += num_bytes;
3079         else
3080                 root->qgroup_meta_rsv_pertrans += num_bytes;
3081         spin_unlock(&root->qgroup_meta_rsv_lock);
3082 }
3083
3084 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3085                              enum btrfs_qgroup_rsv_type type)
3086 {
3087         if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3088             type != BTRFS_QGROUP_RSV_META_PERTRANS)
3089                 return 0;
3090         if (num_bytes == 0)
3091                 return 0;
3092
3093         spin_lock(&root->qgroup_meta_rsv_lock);
3094         if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
3095                 num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
3096                                   num_bytes);
3097                 root->qgroup_meta_rsv_prealloc -= num_bytes;
3098         } else {
3099                 num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
3100                                   num_bytes);
3101                 root->qgroup_meta_rsv_pertrans -= num_bytes;
3102         }
3103         spin_unlock(&root->qgroup_meta_rsv_lock);
3104         return num_bytes;
3105 }
3106
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/* No-op when quota is off, on non-fs trees, or for zero bytes */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->objectid) || num_bytes == 0)
		return 0;

	/* Metadata reservations must be nodesize aligned */
	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
	ret = qgroup_reserve(root, num_bytes, enforce, type);
	if (ret < 0)
		return ret;
	/*
	 * Record what we have reserved into root.
	 *
	 * To avoid quota disabled->enabled underflow.
	 * In that case, we may try to free space we haven't reserved
	 * (since quota was disabled), so record what we reserved into root.
	 * And ensure later release won't underflow this number.
	 */
	add_root_meta_rsv(root, num_bytes, type);
	return ret;
}
3133
3134 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
3135 {
3136         struct btrfs_fs_info *fs_info = root->fs_info;
3137
3138         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3139             !is_fstree(root->objectid))
3140                 return;
3141
3142         /* TODO: Update trace point to handle such free */
3143         trace_qgroup_meta_free_all_pertrans(root);
3144         /* Special value -1 means to free all reserved space */
3145         btrfs_qgroup_free_refroot(fs_info, root->objectid, (u64)-1,
3146                                   BTRFS_QGROUP_RSV_META_PERTRANS);
3147 }
3148
/*
 * Free @num_bytes of metadata reservation of @type for @root, clamped to
 * what the root really tracked as reserved.
 */
void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->objectid))
		return;

	/*
	 * reservation for META_PREALLOC can happen before quota is enabled,
	 * which can lead to underflow.
	 * Here ensure we will only free what we really have reserved.
	 */
	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
	btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes, type);
}
3168
/*
 * Move @num_bytes of reservation from META_PREALLOC to META_PERTRANS for
 * the qgroup of @ref_root and all of its ancestor qgroups.
 */
static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
				int num_bytes)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (num_bytes == 0)
		return;
	if (!quota_root)
		return;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;
	/* Walk the qgroup and all its ancestors via the shared worklist */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
		       qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		/* Release PREALLOC and add the same amount as PERTRANS */
		qgroup_rsv_release(fs_info, qg, num_bytes,
				BTRFS_QGROUP_RSV_META_PREALLOC);
		qgroup_rsv_add(fs_info, qg, num_bytes,
				BTRFS_QGROUP_RSV_META_PERTRANS);
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
out:
	spin_unlock(&fs_info->qgroup_lock);
}
3214
3215 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
3216 {
3217         struct btrfs_fs_info *fs_info = root->fs_info;
3218
3219         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3220             !is_fstree(root->objectid))
3221                 return;
3222         /* Same as btrfs_qgroup_free_meta_prealloc() */
3223         num_bytes = sub_root_meta_rsv(root, num_bytes,
3224                                       BTRFS_QGROUP_RSV_META_PREALLOC);
3225         trace_qgroup_meta_convert(root, num_bytes);
3226         qgroup_convert_meta(fs_info, root->objectid, num_bytes);
3227 }
3228
3229 /*
3230  * Check qgroup reserved space leaking, normally at destroy inode
3231  * time
3232  */
3233 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
3234 {
3235         struct extent_changeset changeset;
3236         struct ulist_node *unode;
3237         struct ulist_iterator iter;
3238         int ret;
3239
3240         extent_changeset_init(&changeset);
3241         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
3242                         EXTENT_QGROUP_RESERVED, &changeset);
3243
3244         WARN_ON(ret < 0);
3245         if (WARN_ON(changeset.bytes_changed)) {
3246                 ULIST_ITER_INIT(&iter);
3247                 while ((unode = ulist_next(&changeset.range_changed, &iter))) {
3248                         btrfs_warn(BTRFS_I(inode)->root->fs_info,
3249                                 "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
3250                                 inode->i_ino, unode->val, unode->aux);