btrfs: remove unused parameter from adjust_slots_upwards
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
        u64 qgroupid;

        /*
         * state
         */
        u64 rfer;       /* referenced */
        u64 rfer_cmpr;  /* referenced compressed */
        u64 excl;       /* exclusive */
        u64 excl_cmpr;  /* exclusive compressed */

        /*
         * limits
         */
        u64 lim_flags;  /* which limits are set */
        u64 max_rfer;
        u64 max_excl;
        u64 rsv_rfer;
        u64 rsv_excl;

        /*
         * reservation tracking
         */
        u64 reserved;

        /*
         * lists
         */
        struct list_head groups;  /* groups this group is member of */
        struct list_head members; /* groups that are members of this group */
        struct list_head dirty;   /* dirty groups */
        struct rb_node node;      /* tree of qgroups */

        /*
         * temp variables for accounting operations
         * Refer to qgroup_shared_accounting() for details.
         */
        u64 old_refcnt;
        u64 new_refcnt;
};

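/*
 * The old_refcnt/new_refcnt fields above double as per-round scratch space.
 * Each accounting round picks a sequence number @seq larger than any refcnt
 * left over from previous rounds.  The helpers below lazily reset a counter
 * to @seq on its first update in the current round, so the effective count
 * is always (refcnt - seq), and a qgroup never touched in this round reads
 * back as 0.
 *
 * Worked example (illustrative values only): with seq = 100 and an old
 * old_refcnt of 3 from an earlier round, update(qg, 100, 1) first snaps
 * old_refcnt to 100 and then adds 1; get(qg, 100) returns 101 - 100 = 1.
 */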
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->old_refcnt < seq)
                qg->old_refcnt = seq;
        qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->new_refcnt < seq)
                qg->new_refcnt = seq;
        qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->old_refcnt < seq)
                return 0;
        return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->new_refcnt < seq)
                return 0;
        return qg->new_refcnt - seq;
}

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
        struct list_head next_group;
        struct list_head next_member;
        struct btrfs_qgroup *group;
        struct btrfs_qgroup *member;
};
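/*
 * Each parent/member relation is represented by exactly one such glue node:
 * next_group is linked into member->groups (the groups this qgroup belongs
 * to) and next_member into group->members (the members of that group), so a
 * relation can be walked from either end and torn down by unlinking the node
 * from both lists (see add_relation_rb() and __del_qgroup_rb()).
 */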
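/*
 * A ulist node can carry a u64 of auxiliary data; these two helpers simply
 * round-trip a struct btrfs_qgroup pointer through that field via uintptr_t.
 */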
static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
        return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
        return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
                   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

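/*
 * Note: find_qgroup_rb() and add_qgroup_rb() below both put the node's
 * qgroupid on the left-hand side of the comparison, so the tree ends up
 * ordered with larger qgroupids in the left subtree.  That is the mirror
 * image of the usual rbtree convention, but lookup and insert agree on it,
 * so the ordering is consistent.
 */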
/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
                                           u64 qgroupid)
{
        struct rb_node *n = fs_info->qgroup_tree.rb_node;
        struct btrfs_qgroup *qgroup;

        while (n) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                if (qgroup->qgroupid < qgroupid)
                        n = n->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        n = n->rb_right;
                else
                        return qgroup;
        }
        return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
                                          u64 qgroupid)
{
        struct rb_node **p = &fs_info->qgroup_tree.rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_qgroup *qgroup;

        while (*p) {
                parent = *p;
                qgroup = rb_entry(parent, struct btrfs_qgroup, node);

                if (qgroup->qgroupid < qgroupid)
                        p = &(*p)->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        p = &(*p)->rb_right;
                else
                        return qgroup;
        }

        qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
        if (!qgroup)
                return ERR_PTR(-ENOMEM);

        qgroup->qgroupid = qgroupid;
        INIT_LIST_HEAD(&qgroup->groups);
        INIT_LIST_HEAD(&qgroup->members);
        INIT_LIST_HEAD(&qgroup->dirty);

        rb_link_node(&qgroup->node, parent, p);
        rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

        return qgroup;
}

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
        struct btrfs_qgroup_list *list;

        list_del(&qgroup->dirty);
        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }

        while (!list_empty(&qgroup->members)) {
                list = list_first_entry(&qgroup->members,
                                        struct btrfs_qgroup_list, next_member);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }
        kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

        if (!qgroup)
                return -ENOENT;

        rb_erase(&qgroup->node, &fs_info->qgroup_tree);
        __del_qgroup_rb(qgroup);
        return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list = kzalloc(sizeof(*list), GFP_ATOMIC);
        if (!list)
                return -ENOMEM;

        list->group = parent;
        list->member = member;
        list_add_tail(&list->next_group, &member->groups);
        list_add_tail(&list->next_member, &parent->members);

        return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        list_del(&list->next_group);
                        list_del(&list->next_member);
                        kfree(list);
                        return 0;
                }
        }
        return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
                               u64 rfer, u64 excl)
{
        struct btrfs_qgroup *qgroup;

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup)
                return -EINVAL;
        if (qgroup->rfer != rfer || qgroup->excl != excl)
                return -EINVAL;
        return 0;
}
#endif

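/*
 * On-disk layout of the quota tree, as read back below (sketch derived from
 * this file): the status item lives at key (0, BTRFS_QGROUP_STATUS_KEY, 0);
 * per-qgroup info and limit items at (0, BTRFS_QGROUP_INFO_KEY, qgroupid)
 * and (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid); each relation is stored twice,
 * as (src, BTRFS_QGROUP_RELATION_KEY, dst) and with the two ids swapped.
 */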
/*
 * The full config is read in one go; it is only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_path *path = NULL;
        struct extent_buffer *l;
        int slot;
        int ret = 0;
        u64 flags = 0;
        u64 rescan_progress = 0;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        /* default this to quota off, in case no status key is found */
        fs_info->qgroup_flags = 0;

        /*
         * pass 1: read status, all qgroup infos and limits
         */
        key.objectid = 0;
        key.type = 0;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
        if (ret)
                goto out;

        while (1) {
                struct btrfs_qgroup *qgroup;

                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
                        struct btrfs_qgroup_status_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_status_item);

                        if (btrfs_qgroup_status_version(l, ptr) !=
                            BTRFS_QGROUP_STATUS_VERSION) {
                                btrfs_err(fs_info,
                                 "old qgroup version, quota disabled");
                                goto out;
                        }
                        if (btrfs_qgroup_status_generation(l, ptr) !=
                            fs_info->generation) {
                                flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                                btrfs_err(fs_info,
                                        "qgroup generation mismatch, marked as inconsistent");
                        }
                        fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
                                                                          ptr);
                        rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
                        goto next1;
                }

                if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
                    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
                        goto next1;

                qgroup = find_qgroup_rb(fs_info, found_key.offset);
                if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
                    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
                        btrfs_err(fs_info, "inconsistent qgroup config");
                        flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                }
                if (!qgroup) {
                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out;
                        }
                }
                switch (found_key.type) {
                case BTRFS_QGROUP_INFO_KEY: {
                        struct btrfs_qgroup_info_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_info_item);
                        qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
                        qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
                        qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
                        qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
                        /* generation currently unused */
                        break;
                }
                case BTRFS_QGROUP_LIMIT_KEY: {
                        struct btrfs_qgroup_limit_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_limit_item);
                        qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
                        qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
                        qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
                        qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
                        qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
                        break;
                }
                }
next1:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
        btrfs_release_path(path);

        /*
         * pass 2: read all qgroup relations
         */
        key.objectid = 0;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
        if (ret)
                goto out;
        while (1) {
                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
                        goto next2;

                if (found_key.objectid > found_key.offset) {
                        /* parent <- member, not needed to build config */
                        /* FIXME should we omit the key completely? */
                        goto next2;
                }

                ret = add_relation_rb(fs_info, found_key.objectid,
                                      found_key.offset);
                if (ret == -ENOENT) {
                        btrfs_warn(fs_info,
                                "orphan qgroup relation 0x%llx->0x%llx",
                                found_key.objectid, found_key.offset);
                        ret = 0;        /* ignore the error */
                }
                if (ret)
                        goto out;
next2:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
out:
        fs_info->qgroup_flags |= flags;
        if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
                clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
                 ret >= 0)
                ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
        btrfs_free_path(path);

        if (ret < 0) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
        }

        return ret < 0 ? ret : 0;
}

/*
 * This is called from close_ctree(), open_ctree() or btrfs_quota_disable();
 * the first two are single-threaded paths. For the third, quota_root has
 * already been set to NULL with qgroup_lock held, so it is safe to clean up
 * the in-memory structures without holding qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct rb_node *n;
        struct btrfs_qgroup *qgroup;

        while ((n = rb_first(&fs_info->qgroup_tree))) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                rb_erase(n, &fs_info->qgroup_tree);
                __del_qgroup_rb(qgroup);
        }
        /*
         * We call btrfs_free_qgroup_config() both when unmounting the
         * filesystem and when disabling quota, so set qgroup_ulist to
         * NULL here to avoid a double free.
         */
        ulist_free(fs_info->qgroup_ulist);
        fs_info->qgroup_ulist = NULL;
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *quota_root,
                                    u64 src, u64 dst)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_free_path(path);
        return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *quota_root,
                                    u64 src, u64 dst)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
out:
        btrfs_free_path(path);
        return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_qgroup_info_item *qgroup_info;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        if (btrfs_is_testing(quota_root->fs_info))
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;

        /*
         * Avoid a transaction abort by catching -EEXIST here. In that
         * case, we proceed by re-initializing the existing structure
         * on disk.
         */

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_info));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
                                 struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

        btrfs_mark_buffer_dirty(leaf);

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_limit));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
                                  struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
        if (ret)
                goto out;

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_qgroup *qgroup)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_LIMIT_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
        btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
        btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
        btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
        btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_qgroup *qgroup)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_info_item *qgroup_info;
        int ret;
        int slot;

        if (btrfs_is_testing(root->fs_info))
                return 0;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
        btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
        btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
        btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info,
                                     struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_status_item *ptr;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
        btrfs_set_qgroup_status_rescan(l, ptr,
                                fs_info->qgroup_rescan_progress.objectid);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *leaf = NULL;
        int ret;
        int nr = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;

        key.objectid = 0;
        key.offset = 0;
        key.type = 0;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0)
                        goto out;
                leaf = path->nodes[0];
                nr = btrfs_header_nritems(leaf);
                if (!nr)
                        break;
                /*
                 * delete the leaf one by one
                 * since the whole tree is going
                 * to be deleted.
                 */
                path->slots[0] = 0;
                ret = btrfs_del_items(trans, root, path, 0, nr);
                if (ret)
                        goto out;

                btrfs_release_path(path);
        }
        ret = 0;
out:
        set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags);
        btrfs_free_path(path);
        return ret;
}

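/*
 * Quota enable/disable entry points. For illustration only: from userspace
 * this pair is typically reached via the BTRFS_IOC_QUOTA_CTL ioctl, roughly:
 *
 *      struct btrfs_ioctl_quota_ctl_args args = {
 *              .cmd = BTRFS_QUOTA_CTL_ENABLE, (or BTRFS_QUOTA_CTL_DISABLE)
 *      };
 *      ioctl(fs_fd, BTRFS_IOC_QUOTA_CTL, &args);
 *
 * which is what "btrfs quota enable/disable <mnt>" does.
 */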
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path = NULL;
        struct btrfs_qgroup_status_item *ptr;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_qgroup *qgroup = NULL;
        int ret = 0;
        int slot;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (fs_info->quota_root) {
                set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
                goto out;
        }

        fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * initially create the quota tree
         */
        quota_root = btrfs_create_tree(trans, fs_info,
                                       BTRFS_QUOTA_TREE_OBJECTID);
        if (IS_ERR(quota_root)) {
                ret = PTR_ERR(quota_root);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out_free_root;
        }

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*ptr));
        if (ret)
                goto out_free_path;

        leaf = path->nodes[0];
        ptr = btrfs_item_ptr(leaf, path->slots[0],
                                 struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
        btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
        fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
                                BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

        btrfs_mark_buffer_dirty(leaf);

        key.objectid = 0;
        key.type = BTRFS_ROOT_REF_KEY;
        key.offset = 0;

        btrfs_release_path(path);
        ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
        if (ret > 0)
                goto out_add_root;
        if (ret < 0)
                goto out_free_path;

        while (1) {
                slot = path->slots[0];
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.type == BTRFS_ROOT_REF_KEY) {
                        ret = add_qgroup_item(trans, quota_root,
                                              found_key.offset);
                        if (ret)
                                goto out_free_path;

                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out_free_path;
                        }
                }
                ret = btrfs_next_item(tree_root, path);
                if (ret < 0)
                        goto out_free_path;
                if (ret)
                        break;
        }

out_add_root:
        btrfs_release_path(path);
        ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
        if (ret)
                goto out_free_path;

        qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
        if (IS_ERR(qgroup)) {
                ret = PTR_ERR(qgroup);
                goto out_free_path;
        }
        spin_lock(&fs_info->qgroup_lock);
        fs_info->quota_root = quota_root;
        set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
        spin_unlock(&fs_info->qgroup_lock);
out_free_path:
        btrfs_free_path(path);
out_free_root:
        if (ret) {
                free_extent_buffer(quota_root->node);
                free_extent_buffer(quota_root->commit_root);
                kfree(quota_root);
        }
out:
        if (ret) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
        }
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_quota_disable(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *quota_root;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!fs_info->quota_root)
                goto out;
        clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags);
        btrfs_qgroup_wait_for_completion(fs_info, false);
        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
        fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
        spin_unlock(&fs_info->qgroup_lock);

        btrfs_free_qgroup_config(fs_info);

        ret = btrfs_clean_quota_tree(trans, quota_root);
        if (ret)
                goto out;

        ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
        if (ret)
                goto out;

        list_del(&quota_root->dirty_list);

        btrfs_tree_lock(quota_root->node);
        clean_tree_block(fs_info, quota_root->node);
        btrfs_tree_unlock(quota_root->node);
        btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

        free_extent_buffer(quota_root->node);
        free_extent_buffer(quota_root->commit_root);
        kfree(quota_root);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
                         struct btrfs_qgroup *qgroup)
{
        if (list_empty(&qgroup->dirty))
                list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

static void report_reserved_underflow(struct btrfs_fs_info *fs_info,
                                      struct btrfs_qgroup *qgroup,
                                      u64 num_bytes)
{
        btrfs_warn(fs_info,
                "qgroup %llu reserved space underflow, have: %llu, to free: %llu",
                qgroup->qgroupid, qgroup->reserved, num_bytes);
        qgroup->reserved = 0;
}

/*
 * The easy accounting: if we are adding or removing the only ref for an
 * extent, then this qgroup and all of the parent qgroups get their
 * reference and exclusive counts adjusted.
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                                    struct ulist *tmp, u64 ref_root,
                                    u64 num_bytes, int sign)
{
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *glist;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        int ret = 0;

        qgroup = find_qgroup_rb(fs_info, ref_root);
        if (!qgroup)
                goto out;

        qgroup->rfer += sign * num_bytes;
        qgroup->rfer_cmpr += sign * num_bytes;

        WARN_ON(sign < 0 && qgroup->excl < num_bytes);
        qgroup->excl += sign * num_bytes;
        qgroup->excl_cmpr += sign * num_bytes;
        if (sign > 0) {
                if (WARN_ON(qgroup->reserved < num_bytes))
                        report_reserved_underflow(fs_info, qgroup, num_bytes);
                else
                        qgroup->reserved -= num_bytes;
        }

        qgroup_dirty(fs_info, qgroup);

        /* Get all of the parent groups that contain this qgroup */
        list_for_each_entry(glist, &qgroup->groups, next_group) {
                ret = ulist_add(tmp, glist->group->qgroupid,
                                qgroup_to_aux(glist->group), GFP_ATOMIC);
                if (ret < 0)
                        goto out;
        }

        /* Iterate all of the parents and adjust their reference counts */
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(tmp, &uiter))) {
                qgroup = unode_aux_to_qgroup(unode);
                qgroup->rfer += sign * num_bytes;
                qgroup->rfer_cmpr += sign * num_bytes;
                WARN_ON(sign < 0 && qgroup->excl < num_bytes);
                qgroup->excl += sign * num_bytes;
                if (sign > 0) {
                        if (WARN_ON(qgroup->reserved < num_bytes))
                                report_reserved_underflow(fs_info, qgroup,
                                                          num_bytes);
                        else
                                qgroup->reserved -= num_bytes;
                }
                qgroup->excl_cmpr += sign * num_bytes;
                qgroup_dirty(fs_info, qgroup);

                /* Add any parents of the parents */
                list_for_each_entry(glist, &qgroup->groups, next_group) {
                        ret = ulist_add(tmp, glist->group->qgroupid,
                                        qgroup_to_aux(glist->group), GFP_ATOMIC);
                        if (ret < 0)
                                goto out;
                }
        }
        ret = 0;
out:
        return ret;
}

/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, >0 if a full rescan is needed (in which case
 * the INCONSISTENT flag is set), and <0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
                                   struct ulist *tmp, u64 src, u64 dst,
                                   int sign)
{
        struct btrfs_qgroup *qgroup;
        int ret = 1;
        int err = 0;

        qgroup = find_qgroup_rb(fs_info, src);
        if (!qgroup)
                goto out;
        if (qgroup->excl == qgroup->rfer) {
                ret = 0;
                err = __qgroup_excl_accounting(fs_info, tmp, dst,
                                               qgroup->excl, sign);
                if (err < 0) {
                        ret = err;
                        goto out;
                }
        }
out:
        if (ret)
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        return ret;
}

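/*
 * Add a parent<->member relation between two qgroups. Qgroup ids encode a
 * level in their top 16 bits (see btrfs_qgroup_level()), and @src must sit
 * at a lower level than @dst. For illustration, "btrfs qgroup assign 0/257
 * 1/100 <mnt>" ends up here with src = 0/257 and dst = 1/100 via the
 * BTRFS_IOC_QGROUP_ASSIGN ioctl.
 */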
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;

        /* Check the level of src and dst first */
        if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
                return -EINVAL;

        tmp = ulist_alloc(GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* first check whether such a qgroup relation already exists */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        ret = add_qgroup_relation_item(trans, quota_root, src, dst);
        if (ret)
                goto out;

        ret = add_qgroup_relation_item(trans, quota_root, dst, src);
        if (ret) {
                del_qgroup_relation_item(trans, quota_root, src, dst);
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        ret = add_relation_rb(fs_info, src, dst);
        if (ret < 0) {
                spin_unlock(&fs_info->qgroup_lock);
                goto out;
        }
        ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        ulist_free(tmp);
        return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;
        int err;

        tmp = ulist_alloc(GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* first check whether such a qgroup relation already exists */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent)
                        goto exist;
        }
        ret = -ENOENT;
        goto out;
exist:
        ret = del_qgroup_relation_item(trans, quota_root, src, dst);
        err = del_qgroup_relation_item(trans, quota_root, dst, src);
        if (err && !ret)
                ret = err;

        spin_lock(&fs_info->qgroup_lock);
        del_relation_rb(fs_info, src, dst);
        ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        ulist_free(tmp);
        return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        ret = __del_qgroup_relation(trans, fs_info, src, dst);
        mutex_unlock(&fs_info->qgroup_ioctl_lock);

        return ret;
}

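/*
 * Create or remove a qgroup. For illustration: "btrfs qgroup create 1/100
 * <mnt>" reaches btrfs_create_qgroup() through the BTRFS_IOC_QGROUP_CREATE
 * ioctl; the same ioctl with create == 0 removes a qgroup instead.
 */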
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (qgroup) {
                ret = -EEXIST;
                goto out;
        }

        ret = add_qgroup_item(trans, quota_root, qgroupid);
        if (ret)
                goto out;

        spin_lock(&fs_info->qgroup_lock);
        qgroup = add_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);

        if (IS_ERR(qgroup))
                ret = PTR_ERR(qgroup);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *list;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        } else {
                /* check if there are no children of this qgroup */
                if (!list_empty(&qgroup->members)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        ret = del_qgroup_item(trans, quota_root, qgroupid);

        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                ret = __del_qgroup_relation(trans, fs_info,
                                           qgroupid,
                                           list->group->qgroupid);
                if (ret)
                        goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        del_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

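/*
 * Apply the limits in @limit to a qgroup. A value of -1 (CLEAR_VALUE below)
 * clears the corresponding limit. For illustration, "btrfs qgroup limit 1G
 * 0/257 <mnt>" sets max_rfer = 1GiB on qgroup 0/257 via the
 * BTRFS_IOC_QGROUP_LIMIT ioctl.
 */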
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info, u64 qgroupid,
                       struct btrfs_qgroup_limit *limit)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;
        /*
         * Sometimes we would want to clear the limit on this qgroup.
         * To meet this requirement, we treat -1 as a special value
         * which tells the kernel to clear the limit on this qgroup.
         */
        const u64 CLEAR_VALUE = -1;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
                if (limit->max_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        qgroup->max_rfer = 0;
                } else {
                        qgroup->max_rfer = limit->max_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
                if (limit->max_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        qgroup->max_excl = 0;
                } else {
                        qgroup->max_excl = limit->max_excl;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
                if (limit->rsv_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        qgroup->rsv_rfer = 0;
                } else {
                        qgroup->rsv_rfer = limit->rsv_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
                if (limit->rsv_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        qgroup->rsv_excl = 0;
                } else {
                        qgroup->rsv_excl = limit->rsv_excl;
                }
        }
        qgroup->lim_flags |= limit->flags;

        spin_unlock(&fs_info->qgroup_lock);

        ret = update_qgroup_limit_item(trans, quota_root, qgroup);
        if (ret) {
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                btrfs_info(fs_info, "unable to update quota limit for %llu",
                       qgroupid);
        }

out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_qgroup_extent_record *record;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *node;
        u64 qgroup_to_skip;
        int ret = 0;

        delayed_refs = &trans->transaction->delayed_refs;
        qgroup_to_skip = delayed_refs->qgroup_to_skip;

        /*
         * No need to lock, since this function will only be called from
         * btrfs_commit_transaction().
         */
        node = rb_first(&delayed_refs->dirty_extent_root);
        while (node) {
                record = rb_entry(node, struct btrfs_qgroup_extent_record,
                                  node);
                ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0,
                                           &record->old_roots);
                if (ret < 0)
                        break;
                if (qgroup_to_skip)
                        ulist_del(record->old_roots, qgroup_to_skip, 0);
                node = rb_next(node);
        }
        return ret;
}

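/*
 * Insert @record into the per-transaction dirty extent rbtree, keyed by
 * bytenr. Returns 1 if an entry for that bytenr already exists, in which
 * case the caller is responsible for freeing @record (see
 * btrfs_qgroup_trace_extent() below).
 */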
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
                                struct btrfs_delayed_ref_root *delayed_refs,
                                struct btrfs_qgroup_extent_record *record)
{
        struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_qgroup_extent_record *entry;
        u64 bytenr = record->bytenr;

        assert_spin_locked(&delayed_refs->lock);
        trace_btrfs_qgroup_trace_extent(fs_info, record);

        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
                                 node);
                if (bytenr < entry->bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->bytenr)
                        p = &(*p)->rb_right;
                else
                        return 1;
        }

        rb_link_node(&record->node, parent_node, p);
        rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
        return 0;
}

int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
                struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
                gfp_t gfp_flag)
{
        struct btrfs_qgroup_extent_record *record;
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
            bytenr == 0 || num_bytes == 0)
                return 0;
        if (WARN_ON(trans == NULL))
                return -EINVAL;
        record = kmalloc(sizeof(*record), gfp_flag);
        if (!record)
                return -ENOMEM;

        delayed_refs = &trans->transaction->delayed_refs;
        record->bytenr = bytenr;
        record->num_bytes = num_bytes;
        record->old_roots = NULL;

        spin_lock(&delayed_refs->lock);
        ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
        spin_unlock(&delayed_refs->lock);
        if (ret > 0)
                kfree(record);
        return 0;
}

1537 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
1538                                   struct btrfs_fs_info *fs_info,
1539                                   struct extent_buffer *eb)
1540 {
1541         int nr = btrfs_header_nritems(eb);
1542         int i, extent_type, ret;
1543         struct btrfs_key key;
1544         struct btrfs_file_extent_item *fi;
1545         u64 bytenr, num_bytes;
1546
1547         /* We can be called directly from walk_up_proc() */
1548         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1549                 return 0;
1550
1551         for (i = 0; i < nr; i++) {
1552                 btrfs_item_key_to_cpu(eb, &key, i);
1553
1554                 if (key.type != BTRFS_EXTENT_DATA_KEY)
1555                         continue;
1556
1557                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
1558                 /* Filter out non-qgroup-accountable extents */
1559                 extent_type = btrfs_file_extent_type(eb, fi);
1560
1561                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1562                         continue;
1563
1564                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1565                 if (!bytenr)
1566                         continue;
1567
1568                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1569
1570                 ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
1571                                                 num_bytes, GFP_NOFS);
1572                 if (ret)
1573                         return ret;
1574         }
1575         return 0;
1576 }
1577
1578 /*
1579  * Walk up the tree from the bottom, freeing leaves and any interior
1580  * nodes which have had all slots visited. If a node (leaf or
1581  * interior) is freed, the node above it will have its slot
1582  * incremented. The root node will never be freed.
1583  *
1584  * At the end of this function, we should have a path which has all
1585  * slots incremented to the next position for a search. If we need to
1586  * read a new node it will be NULL and the node above it will have the
1587  * correct slot selected for a later read.
1588  *
1589  * If we increment the root node's slot counter past the number of
1590  * elements, 1 is returned to signal completion of the search.
1591  */
1592 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
1593 {
1594         int level = 0;
1595         int nr, slot;
1596         struct extent_buffer *eb;
1597
1598         if (root_level == 0)
1599                 return 1;
1600
1601         while (level <= root_level) {
1602                 eb = path->nodes[level];
1603                 nr = btrfs_header_nritems(eb);
1604                 path->slots[level]++;
1605                 slot = path->slots[level];
1606                 if (slot >= nr || level == 0) {
1607                         /*
1608                          * Don't free the root - we will detect this
1609                          * condition after our loop and return a
1610                          * positive value for the caller to stop walking the tree.
1611                          */
1612                         if (level != root_level) {
1613                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
1614                                 path->locks[level] = 0;
1615
1616                                 free_extent_buffer(eb);
1617                                 path->nodes[level] = NULL;
1618                                 path->slots[level] = 0;
1619                         }
1620                 } else {
1621                         /*
1622                          * We have a valid slot to walk back down
1623                          * from. Stop here so caller can process these
1624                          * new nodes.
1625                          */
1626                         break;
1627                 }
1628
1629                 level++;
1630         }
1631
1632         eb = path->nodes[root_level];
1633         if (path->slots[root_level] >= btrfs_header_nritems(eb))
1634                 return 1;
1635
1636         return 0;
1637 }
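
/*
 * Illustrative walk-through (hypothetical tree shape, not tied to real
 * data): take root_level == 1 with a root node holding 3 items.  After
 * the leaf at root slot 0 has been processed, adjust_slots_upwards()
 * unlocks and frees that leaf, sets path->nodes[0] = NULL, bumps
 * path->slots[1] to 1 and returns 0, so the caller walks back down and
 * reads the next leaf.  Once path->slots[1] reaches 3 (>= nritems of the
 * root), the function returns 1 and the search is complete.
 */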
1638
1639 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
1640                                struct btrfs_root *root,
1641                                struct extent_buffer *root_eb,
1642                                u64 root_gen, int root_level)
1643 {
1644         struct btrfs_fs_info *fs_info = root->fs_info;
1645         int ret = 0;
1646         int level;
1647         struct extent_buffer *eb = root_eb;
1648         struct btrfs_path *path = NULL;
1649
1650         BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
1651         BUG_ON(root_eb == NULL);
1652
1653         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1654                 return 0;
1655
1656         if (!extent_buffer_uptodate(root_eb)) {
1657                 ret = btrfs_read_buffer(root_eb, root_gen);
1658                 if (ret)
1659                         goto out;
1660         }
1661
1662         if (root_level == 0) {
1663                 ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
1664                 goto out;
1665         }
1666
1667         path = btrfs_alloc_path();
1668         if (!path)
1669                 return -ENOMEM;
1670
1671         /*
1672          * Walk down the tree.  Missing extent blocks are filled in as
1673          * we go. Metadata is accounted every time we read a new
1674          * extent block.
1675          *
1676          * When we reach a leaf, we account for file extent items in it,
1677          * walk back up the tree (adjusting slot pointers as we go)
1678          * and restart the search process.
1679          */
1680         extent_buffer_get(root_eb); /* For path */
1681         path->nodes[root_level] = root_eb;
1682         path->slots[root_level] = 0;
1683         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
1684 walk_down:
1685         level = root_level;
1686         while (level >= 0) {
1687                 if (path->nodes[level] == NULL) {
1688                         int parent_slot;
1689                         u64 child_gen;
1690                         u64 child_bytenr;
1691
1692                         /*
1693                          * We need to get child blockptr/gen from parent before
1694                          * we can read it.
1695                          */
1696                         eb = path->nodes[level + 1];
1697                         parent_slot = path->slots[level + 1];
1698                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
1699                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
1700
1701                         eb = read_tree_block(fs_info, child_bytenr, child_gen);
1702                         if (IS_ERR(eb)) {
1703                                 ret = PTR_ERR(eb);
1704                                 goto out;
1705                         } else if (!extent_buffer_uptodate(eb)) {
1706                                 free_extent_buffer(eb);
1707                                 ret = -EIO;
1708                                 goto out;
1709                         }
1710
1711                         path->nodes[level] = eb;
1712                         path->slots[level] = 0;
1713
1714                         btrfs_tree_read_lock(eb);
1715                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1716                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
1717
1718                         ret = btrfs_qgroup_trace_extent(trans, fs_info,
1719                                                         child_bytenr,
1720                                                         fs_info->nodesize,
1721                                                         GFP_NOFS);
1722                         if (ret)
1723                                 goto out;
1724                 }
1725
1726                 if (level == 0) {
1727                         ret = btrfs_qgroup_trace_leaf_items(trans, fs_info,
1728                                                             path->nodes[level]);
1729                         if (ret)
1730                                 goto out;
1731
1732                         /* Nonzero return here means we completed our search */
1733                         ret = adjust_slots_upwards(path, root_level);
1734                         if (ret)
1735                                 break;
1736
1737                         /* Restart search with new slots */
1738                         goto walk_down;
1739                 }
1740
1741                 level--;
1742         }
1743
1744         ret = 0;
1745 out:
1746         btrfs_free_path(path);
1747
1748         return ret;
1749 }
1750
1751 #define UPDATE_NEW      0
1752 #define UPDATE_OLD      1
1753 /*
1754  * Walk all of the roots that point to the bytenr and adjust their refcnts.
1755  */
1756 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
1757                                 struct ulist *roots, struct ulist *tmp,
1758                                 struct ulist *qgroups, u64 seq, int update_old)
1759 {
1760         struct ulist_node *unode;
1761         struct ulist_iterator uiter;
1762         struct ulist_node *tmp_unode;
1763         struct ulist_iterator tmp_uiter;
1764         struct btrfs_qgroup *qg;
1765         int ret = 0;
1766
1767         if (!roots)
1768                 return 0;
1769         ULIST_ITER_INIT(&uiter);
1770         while ((unode = ulist_next(roots, &uiter))) {
1771                 qg = find_qgroup_rb(fs_info, unode->val);
1772                 if (!qg)
1773                         continue;
1774
1775                 ulist_reinit(tmp);
1776                 ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
1777                                 GFP_ATOMIC);
1778                 if (ret < 0)
1779                         return ret;
1780                 ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
1781                 if (ret < 0)
1782                         return ret;
1783                 ULIST_ITER_INIT(&tmp_uiter);
1784                 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1785                         struct btrfs_qgroup_list *glist;
1786
1787                         qg = unode_aux_to_qgroup(tmp_unode);
1788                         if (update_old)
1789                                 btrfs_qgroup_update_old_refcnt(qg, seq, 1);
1790                         else
1791                                 btrfs_qgroup_update_new_refcnt(qg, seq, 1);
1792                         list_for_each_entry(glist, &qg->groups, next_group) {
1793                                 ret = ulist_add(qgroups, glist->group->qgroupid,
1794                                                 qgroup_to_aux(glist->group),
1795                                                 GFP_ATOMIC);
1796                                 if (ret < 0)
1797                                         return ret;
1798                                 ret = ulist_add(tmp, glist->group->qgroupid,
1799                                                 qgroup_to_aux(glist->group),
1800                                                 GFP_ATOMIC);
1801                                 if (ret < 0)
1802                                         return ret;
1803                         }
1804                 }
1805         }
1806         return 0;
1807 }
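
/*
 * Example of the bookkeeping above (illustrative root ids): if an extent
 * was referenced by roots 257 and 258 before the operation, the UPDATE_OLD
 * pass bumps old_refcnt once for qgroup 0/257, once for 0/258 and, for
 * each of those roots, once for every ancestor reached through the
 * ->groups lists.  qgroup_update_counters() then compares the per-qgroup
 * counts against nr_old_roots/nr_new_roots to classify the change.
 */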
1808
1809 /*
1810  * Update qgroup rfer/excl counters.
1811  * The rfer update is straightforward, the code explains itself.
1812  *
1813  * The excl update is tricky, the update is split into 2 parts.
1814  * Part 1: Possible exclusive <-> sharing detection:
1815  *      |       A       |       !A      |
1816  *  -------------------------------------
1817  *  B   |       *       |       -       |
1818  *  -------------------------------------
1819  *  !B  |       +       |       **      |
1820  *  -------------------------------------
1821  *
1822  * Conditions:
1823  * A:   cur_old_roots < nr_old_roots    (not exclusive before)
1824  * !A:  cur_old_roots == nr_old_roots   (possibly exclusive before)
1825  * B:   cur_new_roots < nr_new_roots    (not exclusive now)
1826  * !B:  cur_new_roots == nr_new_roots   (possibly exclusive now)
1827  *
1828  * Results:
1829  * +: Possibly sharing -> exclusive     -: Possibly exclusive -> sharing
1830  * *: Definitely not changed.           **: Possibly unchanged.
1831  *
1832  * For the !A and !B conditions, the exception is the cur_old/new_roots == 0
1833  * case.
1834  *
1835  * To make the logic clear, we first use conditions A and B to split the
1836  * combinations into 4 results.
1837  *
1838  * Then, for the "+" and "-" results, check the old/new_roots == 0 case;
1839  * there, only one of the two variables may be 0.
1840  *
1841  * Lastly, check result **: since both variables may be 0 there, split it
1842  * again (2x2).  This time no other conditions need to be considered, and
1843  * the code and logic are easy to follow.
1844  */
1845 static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
1846                                   struct ulist *qgroups,
1847                                   u64 nr_old_roots,
1848                                   u64 nr_new_roots,
1849                                   u64 num_bytes, u64 seq)
1850 {
1851         struct ulist_node *unode;
1852         struct ulist_iterator uiter;
1853         struct btrfs_qgroup *qg;
1854         u64 cur_new_count, cur_old_count;
1855
1856         ULIST_ITER_INIT(&uiter);
1857         while ((unode = ulist_next(qgroups, &uiter))) {
1858                 bool dirty = false;
1859
1860                 qg = unode_aux_to_qgroup(unode);
1861                 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
1862                 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
1863
1864                 trace_qgroup_update_counters(fs_info, qg->qgroupid,
1865                                              cur_old_count, cur_new_count);
1866
1867                 /* Rfer update part */
1868                 if (cur_old_count == 0 && cur_new_count > 0) {
1869                         qg->rfer += num_bytes;
1870                         qg->rfer_cmpr += num_bytes;
1871                         dirty = true;
1872                 }
1873                 if (cur_old_count > 0 && cur_new_count == 0) {
1874                         qg->rfer -= num_bytes;
1875                         qg->rfer_cmpr -= num_bytes;
1876                         dirty = true;
1877                 }
1878
1879                 /* Excl update part */
1880                 /* Exclusive/none -> shared case */
1881                 if (cur_old_count == nr_old_roots &&
1882                     cur_new_count < nr_new_roots) {
1883                         /* Exclusive -> shared */
1884                         if (cur_old_count != 0) {
1885                                 qg->excl -= num_bytes;
1886                                 qg->excl_cmpr -= num_bytes;
1887                                 dirty = true;
1888                         }
1889                 }
1890
1891                 /* Shared -> exclusive/none case */
1892                 if (cur_old_count < nr_old_roots &&
1893                     cur_new_count == nr_new_roots) {
1894                         /* Shared->exclusive */
1895                         if (cur_new_count != 0) {
1896                                 qg->excl += num_bytes;
1897                                 qg->excl_cmpr += num_bytes;
1898                                 dirty = true;
1899                         }
1900                 }
1901
1902                 /* Exclusive/none -> exclusive/none case */
1903                 if (cur_old_count == nr_old_roots &&
1904                     cur_new_count == nr_new_roots) {
1905                         if (cur_old_count == 0) {
1906                                 /* None -> exclusive/none */
1907
1908                                 if (cur_new_count != 0) {
1909                                         /* None -> exclusive */
1910                                         qg->excl += num_bytes;
1911                                         qg->excl_cmpr += num_bytes;
1912                                         dirty = true;
1913                                 }
1914                                 /* None -> none, nothing changed */
1915                         } else {
1916                                 /* Exclusive -> exclusive/none */
1917
1918                                 if (cur_new_count == 0) {
1919                                         /* Exclusive -> none */
1920                                         qg->excl -= num_bytes;
1921                                         qg->excl_cmpr -= num_bytes;
1922                                         dirty = true;
1923                                 }
1924                                 /* Exclusive -> exclusive, nothing changed */
1925                         }
1926                 }
1927
1928                 if (dirty)
1929                         qgroup_dirty(fs_info, qg);
1930         }
1931         return 0;
1932 }
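
/*
 * Worked example for the table above (illustrative numbers): a 16KiB
 * extent gets a second referencing subvolume, so nr_old_roots == 1 and
 * nr_new_roots == 2.  For the qgroup of the original subvolume,
 * cur_old_count == 1 and cur_new_count == 1:
 *
 *  - rfer/rfer_cmpr are unchanged (the extent is still referenced);
 *  - cur_old_count == nr_old_roots && cur_new_count < nr_new_roots is the
 *    "-" case, so excl/excl_cmpr drop by 16KiB.
 *
 * For the qgroup of the new subvolume, cur_old_count == 0 and
 * cur_new_count == 1, so rfer/rfer_cmpr grow by 16KiB while excl is
 * untouched, since neither count equals its nr_*_roots.
 */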
1933
1934 int
1935 btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
1936                             struct btrfs_fs_info *fs_info,
1937                             u64 bytenr, u64 num_bytes,
1938                             struct ulist *old_roots, struct ulist *new_roots)
1939 {
1940         struct ulist *qgroups = NULL;
1941         struct ulist *tmp = NULL;
1942         u64 seq;
1943         u64 nr_new_roots = 0;
1944         u64 nr_old_roots = 0;
1945         int ret = 0;
1946
1947         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1948                 return 0;
1949
1950         if (new_roots)
1951                 nr_new_roots = new_roots->nnodes;
1952         if (old_roots)
1953                 nr_old_roots = old_roots->nnodes;
1954
1955         BUG_ON(!fs_info->quota_root);
1956
1957         trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes,
1958                                           nr_old_roots, nr_new_roots);
1959
1960         qgroups = ulist_alloc(GFP_NOFS);
1961         if (!qgroups) {
1962                 ret = -ENOMEM;
1963                 goto out_free;
1964         }
1965         tmp = ulist_alloc(GFP_NOFS);
1966         if (!tmp) {
1967                 ret = -ENOMEM;
1968                 goto out_free;
1969         }
1970
1971         mutex_lock(&fs_info->qgroup_rescan_lock);
1972         if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
1973                 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
1974                         mutex_unlock(&fs_info->qgroup_rescan_lock);
1975                         ret = 0;
1976                         goto out_free;
1977                 }
1978         }
1979         mutex_unlock(&fs_info->qgroup_rescan_lock);
1980
1981         spin_lock(&fs_info->qgroup_lock);
1982         seq = fs_info->qgroup_seq;
1983
1984         /* Update old refcnts using old_roots */
1985         ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
1986                                    UPDATE_OLD);
1987         if (ret < 0)
1988                 goto out;
1989
1990         /* Update new refcnts using new_roots */
1991         ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
1992                                    UPDATE_NEW);
1993         if (ret < 0)
1994                 goto out;
1995
1996         qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
1997                                num_bytes, seq);
1998
1999         /*
2000          * Bump qgroup_seq to avoid seq overlap
2001          */
2002         fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2003 out:
2004         spin_unlock(&fs_info->qgroup_lock);
2005 out_free:
2006         ulist_free(tmp);
2007         ulist_free(qgroups);
2008         ulist_free(old_roots);
2009         ulist_free(new_roots);
2010         return ret;
2011 }
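
/*
 * Why the seq bump above suffices (illustrative numbers): with
 * nr_old_roots == 2 and nr_new_roots == 3, any single qgroup's
 * old/new_refcnt was raised past seq by at most max(2, 3) == 3 during
 * this run, so advancing qgroup_seq by 3 + 1 guarantees the next run
 * starts with a seq larger than every refcnt left behind and stale
 * counts can never leak into it.
 */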
2012
2013 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
2014                                  struct btrfs_fs_info *fs_info)
2015 {
2016         struct btrfs_qgroup_extent_record *record;
2017         struct btrfs_delayed_ref_root *delayed_refs;
2018         struct ulist *new_roots = NULL;
2019         struct rb_node *node;
2020         u64 qgroup_to_skip;
2021         int ret = 0;
2022
2023         delayed_refs = &trans->transaction->delayed_refs;
2024         qgroup_to_skip = delayed_refs->qgroup_to_skip;
2025         while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2026                 record = rb_entry(node, struct btrfs_qgroup_extent_record,
2027                                   node);
2028
2029                 trace_btrfs_qgroup_account_extents(fs_info, record);
2030
2031                 if (!ret) {
2032                         /*
2033                          * Use (u64)-1 as time_seq to do a special search which
2034                          * doesn't lock the tree or delayed_refs and searches the
2035                          * current root. It's safe inside commit_transaction().
2036                          */
2037                         ret = btrfs_find_all_roots(trans, fs_info,
2038                                         record->bytenr, (u64)-1, &new_roots);
2039                         if (ret < 0)
2040                                 goto cleanup;
2041                         if (qgroup_to_skip)
2042                                 ulist_del(new_roots, qgroup_to_skip, 0);
2043                         ret = btrfs_qgroup_account_extent(trans, fs_info,
2044                                         record->bytenr, record->num_bytes,
2045                                         record->old_roots, new_roots);
2046                         record->old_roots = NULL;
2047                         new_roots = NULL;
2048                 }
2049 cleanup:
2050                 ulist_free(record->old_roots);
2051                 ulist_free(new_roots);
2052                 new_roots = NULL;
2053                 rb_erase(node, &delayed_refs->dirty_extent_root);
2054                 kfree(record);
2056         }
2057         return ret;
2058 }
2059
2060 /*
2061  * Called from commit_transaction(). Writes all changed qgroups to disk.
2062  */
2063 int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
2064                       struct btrfs_fs_info *fs_info)
2065 {
2066         struct btrfs_root *quota_root = fs_info->quota_root;
2067         int ret = 0;
2068         int start_rescan_worker = 0;
2069
2070         if (!quota_root)
2071                 goto out;
2072
2073         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
2074             test_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
2075                 start_rescan_worker = 1;
2076
2077         if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
2078                 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2079         if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
2080                 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2081
2082         spin_lock(&fs_info->qgroup_lock);
2083         while (!list_empty(&fs_info->dirty_qgroups)) {
2084                 struct btrfs_qgroup *qgroup;
2085                 qgroup = list_first_entry(&fs_info->dirty_qgroups,
2086                                           struct btrfs_qgroup, dirty);
2087                 list_del_init(&qgroup->dirty);
2088                 spin_unlock(&fs_info->qgroup_lock);
2089                 ret = update_qgroup_info_item(trans, quota_root, qgroup);
2090                 if (ret)
2091                         fs_info->qgroup_flags |=
2092                                         BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2093                 ret = update_qgroup_limit_item(trans, quota_root, qgroup);
2094                 if (ret)
2095                         fs_info->qgroup_flags |=
2096                                         BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2097                 spin_lock(&fs_info->qgroup_lock);
2098         }
2099         if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2100                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2101         else
2102                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2103         spin_unlock(&fs_info->qgroup_lock);
2104
2105         ret = update_qgroup_status_item(trans, fs_info, quota_root);
2106         if (ret)
2107                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2108
2109         if (!ret && start_rescan_worker) {
2110                 ret = qgroup_rescan_init(fs_info, 0, 1);
2111                 if (!ret) {
2112                         qgroup_rescan_zero_tracking(fs_info);
2113                         btrfs_queue_work(fs_info->qgroup_rescan_workers,
2114                                          &fs_info->qgroup_rescan_work);
2115                 }
2116                 ret = 0;
2117         }
2118
2119 out:
2120
2121         return ret;
2122 }
2123
2124 /*
2125  * Copy the accounting information between qgroups. This is necessary
2126  * when a snapshot or a subvolume is created. Throwing an error will
2127  * cause a transaction abort so we take extra care here to only error
2128  * when a readonly fs is a reasonable outcome.
2129  */
2130 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
2131                          struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
2132                          struct btrfs_qgroup_inherit *inherit)
2133 {
2134         int ret = 0;
2135         int i;
2136         u64 *i_qgroups;
2137         struct btrfs_root *quota_root = fs_info->quota_root;
2138         struct btrfs_qgroup *srcgroup;
2139         struct btrfs_qgroup *dstgroup;
2140         u32 level_size = 0;
2141         u64 nums;
2142
2143         mutex_lock(&fs_info->qgroup_ioctl_lock);
2144         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2145                 goto out;
2146
2147         if (!quota_root) {
2148                 ret = -EINVAL;
2149                 goto out;
2150         }
2151
2152         if (inherit) {
2153                 i_qgroups = (u64 *)(inherit + 1);
2154                 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2155                        2 * inherit->num_excl_copies;
2156                 for (i = 0; i < nums; ++i) {
2157                         srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2158
2159                         /*
2160                          * Zero out invalid groups so we can ignore
2161                          * them later.
2162                          */
2163                         if (!srcgroup ||
2164                             ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
2165                                 *i_qgroups = 0ULL;
2166
2167                         ++i_qgroups;
2168                 }
2169         }
2170
2171         /*
2172          * create a tracking group for the subvol itself
2173          */
2174         ret = add_qgroup_item(trans, quota_root, objectid);
2175         if (ret)
2176                 goto out;
2177
2178         if (srcid) {
2179                 struct btrfs_root *srcroot;
2180                 struct btrfs_key srckey;
2181
2182                 srckey.objectid = srcid;
2183                 srckey.type = BTRFS_ROOT_ITEM_KEY;
2184                 srckey.offset = (u64)-1;
2185                 srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
2186                 if (IS_ERR(srcroot)) {
2187                         ret = PTR_ERR(srcroot);
2188                         goto out;
2189                 }
2190
2191                 level_size = fs_info->nodesize;
2192         }
2193
2194         /*
2195          * add qgroup to all inherited groups
2196          */
2197         if (inherit) {
2198                 i_qgroups = (u64 *)(inherit + 1);
2199                 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
2200                         if (*i_qgroups == 0)
2201                                 continue;
2202                         ret = add_qgroup_relation_item(trans, quota_root,
2203                                                        objectid, *i_qgroups);
2204                         if (ret && ret != -EEXIST)
2205                                 goto out;
2206                         ret = add_qgroup_relation_item(trans, quota_root,
2207                                                        *i_qgroups, objectid);
2208                         if (ret && ret != -EEXIST)
2209                                 goto out;
2210                 }
2211                 ret = 0;
2212         }
2213
2214
2216
2217         dstgroup = add_qgroup_rb(fs_info, objectid);
2218         if (IS_ERR(dstgroup)) {
2219                 ret = PTR_ERR(dstgroup);
2220                 goto unlock;
2221         }
2222
2223         if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2224                 dstgroup->lim_flags = inherit->lim.flags;
2225                 dstgroup->max_rfer = inherit->lim.max_rfer;
2226                 dstgroup->max_excl = inherit->lim.max_excl;
2227                 dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2228                 dstgroup->rsv_excl = inherit->lim.rsv_excl;
2229
2230                 ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
2231                 if (ret) {
2232                         fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2233                         btrfs_info(fs_info,
2234                                    "unable to update quota limit for %llu",
2235                                    dstgroup->qgroupid);
2236                         goto unlock;
2237                 }
2238         }
2239
2240         if (srcid) {
2241                 srcgroup = find_qgroup_rb(fs_info, srcid);
2242                 if (!srcgroup)
2243                         goto unlock;
2244
2245                 /*
2246                  * We call inherit after we clone the root in order to make sure
2247                  * our counts don't go crazy, so at this point the only
2248                  * difference between the two roots should be the root node.
2249                  */
2250                 dstgroup->rfer = srcgroup->rfer;
2251                 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2252                 dstgroup->excl = level_size;
2253                 dstgroup->excl_cmpr = level_size;
2254                 srcgroup->excl = level_size;
2255                 srcgroup->excl_cmpr = level_size;
2256
2257                 /* inherit the limit info */
2258                 dstgroup->lim_flags = srcgroup->lim_flags;
2259                 dstgroup->max_rfer = srcgroup->max_rfer;
2260                 dstgroup->max_excl = srcgroup->max_excl;
2261                 dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2262                 dstgroup->rsv_excl = srcgroup->rsv_excl;
2263
2264                 qgroup_dirty(fs_info, dstgroup);
2265                 qgroup_dirty(fs_info, srcgroup);
2266         }
2267
2268         if (!inherit)
2269                 goto unlock;
2270
2271         i_qgroups = (u64 *)(inherit + 1);
2272         for (i = 0; i < inherit->num_qgroups; ++i) {
2273                 if (*i_qgroups) {
2274                         ret = add_relation_rb(fs_info, objectid, *i_qgroups);
2275                         if (ret)
2276                                 goto unlock;
2277                 }
2278                 ++i_qgroups;
2279         }
2280
2281         for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
2282                 struct btrfs_qgroup *src;
2283                 struct btrfs_qgroup *dst;
2284
2285                 if (!i_qgroups[0] || !i_qgroups[1])
2286                         continue;
2287
2288                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2289                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2290
2291                 if (!src || !dst) {
2292                         ret = -EINVAL;
2293                         goto unlock;
2294                 }
2295
2296                 dst->rfer = src->rfer - level_size;
2297                 dst->rfer_cmpr = src->rfer_cmpr - level_size;
2298         }
2299         for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
2300                 struct btrfs_qgroup *src;
2301                 struct btrfs_qgroup *dst;
2302
2303                 if (!i_qgroups[0] || !i_qgroups[1])
2304                         continue;
2305
2306                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2307                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2308
2309                 if (!src || !dst) {
2310                         ret = -EINVAL;
2311                         goto unlock;
2312                 }
2313
2314                 dst->excl = src->excl + level_size;
2315                 dst->excl_cmpr = src->excl_cmpr + level_size;
2316         }
2317
2318 unlock:
2319         spin_unlock(&fs_info->qgroup_lock);
2320 out:
2321         mutex_unlock(&fs_info->qgroup_ioctl_lock);
2322         return ret;
2323 }
2324
2325 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
2326 {
2327         if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2328             qg->reserved + (s64)qg->rfer + num_bytes > qg->max_rfer)
2329                 return false;
2330
2331         if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2332             qg->reserved + (s64)qg->excl + num_bytes > qg->max_excl)
2333                 return false;
2334
2335         return true;
2336 }
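
/*
 * Example (illustrative numbers): with BTRFS_QGROUP_LIMIT_MAX_RFER set,
 * max_rfer == 1MiB, rfer == 768KiB and reserved == 128KiB, a request for
 * another 256KiB fails the first check (128K + 768K + 256K > 1M), so an
 * enforcing qgroup_reserve() below returns -EDQUOT.
 */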
2337
2338 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
2339 {
2340         struct btrfs_root *quota_root;
2341         struct btrfs_qgroup *qgroup;
2342         struct btrfs_fs_info *fs_info = root->fs_info;
2343         u64 ref_root = root->root_key.objectid;
2344         int ret = 0;
2345         struct ulist_node *unode;
2346         struct ulist_iterator uiter;
2347
2348         if (!is_fstree(ref_root))
2349                 return 0;
2350
2351         if (num_bytes == 0)
2352                 return 0;
2353
2354         spin_lock(&fs_info->qgroup_lock);
2355         quota_root = fs_info->quota_root;
2356         if (!quota_root)
2357                 goto out;
2358
2359         qgroup = find_qgroup_rb(fs_info, ref_root);
2360         if (!qgroup)
2361                 goto out;
2362
2363         /*
2364          * As a first step, check all affected qgroups to see whether any
2365          * limit would be exceeded.
2366          */
2367         ulist_reinit(fs_info->qgroup_ulist);
2368         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2369                         (uintptr_t)qgroup, GFP_ATOMIC);
2370         if (ret < 0)
2371                 goto out;
2372         ULIST_ITER_INIT(&uiter);
2373         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2374                 struct btrfs_qgroup *qg;
2375                 struct btrfs_qgroup_list *glist;
2376
2377                 qg = unode_aux_to_qgroup(unode);
2378
2379                 if (enforce && !qgroup_check_limits(qg, num_bytes)) {
2380                         ret = -EDQUOT;
2381                         goto out;
2382                 }
2383
2384                 list_for_each_entry(glist, &qg->groups, next_group) {
2385                         ret = ulist_add(fs_info->qgroup_ulist,
2386                                         glist->group->qgroupid,
2387                                         (uintptr_t)glist->group, GFP_ATOMIC);
2388                         if (ret < 0)
2389                                 goto out;
2390                 }
2391         }
2392         ret = 0;
2393         /*
2394          * no limits exceeded, now record the reservation into all qgroups
2395          */
2396         ULIST_ITER_INIT(&uiter);
2397         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2398                 struct btrfs_qgroup *qg;
2399
2400                 qg = unode_aux_to_qgroup(unode);
2401
2402                 qg->reserved += num_bytes;
2403         }
2404
2405 out:
2406         spin_unlock(&fs_info->qgroup_lock);
2407         return ret;
2408 }
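
/*
 * Note on the hierarchy (illustrative qgroup ids): if qgroup 0/257 is a
 * member of 1/100, a reservation against subvolume 257 is limit-checked
 * and recorded in both 0/257 and 1/100, because the first loop in
 * qgroup_reserve() pulls every ancestor into fs_info->qgroup_ulist
 * through the ->groups lists before any counter is touched.
 */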
2409
2410 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
2411                                u64 ref_root, u64 num_bytes)
2412 {
2413         struct btrfs_root *quota_root;
2414         struct btrfs_qgroup *qgroup;
2415         struct ulist_node *unode;
2416         struct ulist_iterator uiter;
2417         int ret = 0;
2418
2419         if (!is_fstree(ref_root))
2420                 return;
2421
2422         if (num_bytes == 0)
2423                 return;
2424
2425         spin_lock(&fs_info->qgroup_lock);
2426
2427         quota_root = fs_info->quota_root;
2428         if (!quota_root)
2429                 goto out;
2430
2431         qgroup = find_qgroup_rb(fs_info, ref_root);
2432         if (!qgroup)
2433                 goto out;
2434
2435         ulist_reinit(fs_info->qgroup_ulist);
2436         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2437                         (uintptr_t)qgroup, GFP_ATOMIC);
2438         if (ret < 0)
2439                 goto out;
2440         ULIST_ITER_INIT(&uiter);
2441         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2442                 struct btrfs_qgroup *qg;
2443                 struct btrfs_qgroup_list *glist;
2444
2445                 qg = unode_aux_to_qgroup(unode);
2446
2447                 if (WARN_ON(qg->reserved < num_bytes))
2448                         report_reserved_underflow(fs_info, qg, num_bytes);
2449                 else
2450                         qg->reserved -= num_bytes;
2451
2452                 list_for_each_entry(glist, &qg->groups, next_group) {
2453                         ret = ulist_add(fs_info->qgroup_ulist,
2454                                         glist->group->qgroupid,
2455                                         (uintptr_t)glist->group, GFP_ATOMIC);
2456                         if (ret < 0)
2457                                 goto out;
2458                 }
2459         }
2460
2461 out:
2462         spin_unlock(&fs_info->qgroup_lock);
2463 }
2464
2465 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
2466 {
2467         if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
2468                 return;
2469         btrfs_err(trans->fs_info,
2470                 "qgroups not uptodate in trans handle %p:  list is%s empty, seq is %#x.%x",
2471                 trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
2472                 (u32)(trans->delayed_ref_elem.seq >> 32),
2473                 (u32)trans->delayed_ref_elem.seq);
2474         BUG();
2475 }
2476
2477 /*
2478  * returns < 0 on error, 0 when more leaves are to be scanned.
2479  * returns 1 when done.
2480  */
2481 static int
2482 qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2483                    struct btrfs_trans_handle *trans)
2484 {
2485         struct btrfs_key found;
2486         struct extent_buffer *scratch_leaf = NULL;
2487         struct ulist *roots = NULL;
2488         struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2489         u64 num_bytes;
2490         int slot;
2491         int ret;
2492
2493         mutex_lock(&fs_info->qgroup_rescan_lock);
2494         ret = btrfs_search_slot_for_read(fs_info->extent_root,
2495                                          &fs_info->qgroup_rescan_progress,
2496                                          path, 1, 0);
2497
2498         btrfs_debug(fs_info,
2499                 "current progress key (%llu %u %llu), search_slot ret %d",
2500                 fs_info->qgroup_rescan_progress.objectid,
2501                 fs_info->qgroup_rescan_progress.type,
2502                 fs_info->qgroup_rescan_progress.offset, ret);
2503
2504         if (ret) {
2505                 /*
2506                  * The rescan is about to end, we will not be scanning any
2507                  * further blocks. We cannot unset the RESCAN flag here, because
2508                  * we want to commit the transaction if everything went well.
2509                  * To make the live accounting work in this phase, we set our
2510                  * scan progress pointer such that every real extent objectid
2511                  * will be smaller.
2512                  */
2513                 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
2514                 btrfs_release_path(path);
2515                 mutex_unlock(&fs_info->qgroup_rescan_lock);
2516                 return ret;
2517         }
2518
2519         btrfs_item_key_to_cpu(path->nodes[0], &found,
2520                               btrfs_header_nritems(path->nodes[0]) - 1);
2521         fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
2522
2523         btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2524         scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
2525         if (!scratch_leaf) {
2526                 ret = -ENOMEM;
2527                 mutex_unlock(&fs_info->qgroup_rescan_lock);
2528                 goto out;
2529         }
2530         extent_buffer_get(scratch_leaf);
2531         btrfs_tree_read_lock(scratch_leaf);
2532         btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
2533         slot = path->slots[0];
2534         btrfs_release_path(path);
2535         mutex_unlock(&fs_info->qgroup_rescan_lock);
2536
2537         for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
2538                 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
2539                 if (found.type != BTRFS_EXTENT_ITEM_KEY &&
2540                     found.type != BTRFS_METADATA_ITEM_KEY)
2541                         continue;
2542                 if (found.type == BTRFS_METADATA_ITEM_KEY)
2543                         num_bytes = fs_info->nodesize;
2544                 else
2545                         num_bytes = found.offset;
2546
2547                 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
2548                                            &roots);
2549                 if (ret < 0)
2550                         goto out;
2551                 /* For rescan, just pass old_roots as NULL */
2552                 ret = btrfs_qgroup_account_extent(trans, fs_info,
2553                                 found.objectid, num_bytes, NULL, roots);
2554                 if (ret < 0)
2555                         goto out;
2556         }
2557 out:
2558         if (scratch_leaf) {
2559                 btrfs_tree_read_unlock_blocking(scratch_leaf);
2560                 free_extent_buffer(scratch_leaf);
2561         }
2562         btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2563
2564         return ret;
2565 }
2566
2567 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
2568 {
2569         struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
2570                                                      qgroup_rescan_work);
2571         struct btrfs_path *path;
2572         struct btrfs_trans_handle *trans = NULL;
2573         int err = -ENOMEM;
2574         int ret = 0;
2575
2576         path = btrfs_alloc_path();
2577         if (!path)
2578                 goto out;
2579
2580         err = 0;
2581         while (!err && !btrfs_fs_closing(fs_info)) {
2582                 trans = btrfs_start_transaction(fs_info->fs_root, 0);
2583                 if (IS_ERR(trans)) {
2584                         err = PTR_ERR(trans);
2585                         break;
2586                 }
2587                 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
2588                         err = -EINTR;
2589                 } else {
2590                         err = qgroup_rescan_leaf(fs_info, path, trans);
2591                 }
2592                 if (err > 0)
2593                         btrfs_commit_transaction(trans);
2594                 else
2595                         btrfs_end_transaction(trans);
2596         }
2597
2598 out:
2599         btrfs_free_path(path);
2600
2601         mutex_lock(&fs_info->qgroup_rescan_lock);
2602         if (!btrfs_fs_closing(fs_info))
2603                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2604
2605         if (err > 0 &&
2606             fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
2607                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2608         } else if (err < 0) {
2609                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2610         }
2611         mutex_unlock(&fs_info->qgroup_rescan_lock);
2612
2613         /*
2614          * Only update the status item, since the previous part has already
2615          * updated the qgroup info.
2616          */
2617         trans = btrfs_start_transaction(fs_info->quota_root, 1);
2618         if (IS_ERR(trans)) {
2619                 err = PTR_ERR(trans);
2620                 btrfs_err(fs_info,
2621                           "failed to start transaction for status update: %d",
2622                           err);
2623                 goto done;
2624         }
2625         ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
2626         if (ret < 0) {
2627                 err = ret;
2628                 btrfs_err(fs_info, "failed to update qgroup status: %d", err);
2629         }
2630         btrfs_end_transaction(trans);
2631
2632         if (btrfs_fs_closing(fs_info)) {
2633                 btrfs_info(fs_info, "qgroup scan paused");
2634         } else if (err >= 0) {
2635                 btrfs_info(fs_info, "qgroup scan completed%s",
2636                         err > 0 ? " (inconsistency flag cleared)" : "");
2637         } else {
2638                 btrfs_err(fs_info, "qgroup scan failed with %d", err);
2639         }
2640
2641 done:
2642         mutex_lock(&fs_info->qgroup_rescan_lock);
2643         fs_info->qgroup_rescan_running = false;
2644         mutex_unlock(&fs_info->qgroup_rescan_lock);
2645         complete_all(&fs_info->qgroup_rescan_completion);
2646 }
2647
2648 /*
2649  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
2650  * memory required for the rescan context.
2651  */
2652 static int
2653 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
2654                    int init_flags)
2655 {
2656         int ret = 0;
2657
2658         if (!init_flags &&
2659             (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
2660              !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
2661                 ret = -EINVAL;
2662                 goto err;
2663         }
2664
2665         mutex_lock(&fs_info->qgroup_rescan_lock);
2666         spin_lock(&fs_info->qgroup_lock);
2667
2668         if (init_flags) {
2669                 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2670                         ret = -EINPROGRESS;
2671                 else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
2672                         ret = -EINVAL;
2673
2674                 if (ret) {
2675                         spin_unlock(&fs_info->qgroup_lock);
2676                         mutex_unlock(&fs_info->qgroup_rescan_lock);
2677                         goto err;
2678                 }
2679                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2680         }
2681
2682         memset(&fs_info->qgroup_rescan_progress, 0,
2683                 sizeof(fs_info->qgroup_rescan_progress));
2684         fs_info->qgroup_rescan_progress.objectid = progress_objectid;
2685         init_completion(&fs_info->qgroup_rescan_completion);
2686         fs_info->qgroup_rescan_running = true;
2687
2688         spin_unlock(&fs_info->qgroup_lock);
2689         mutex_unlock(&fs_info->qgroup_rescan_lock);
2690
2691         memset(&fs_info->qgroup_rescan_work, 0,
2692                sizeof(fs_info->qgroup_rescan_work));
2693         btrfs_init_work(&fs_info->qgroup_rescan_work,
2694                         btrfs_qgroup_rescan_helper,
2695                         btrfs_qgroup_rescan_worker, NULL, NULL);
2696
2697         if (ret) {
2698 err:
2699                 btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
2700                 return ret;
2701         }
2702
2703         return 0;
2704 }
2705
2706 static void
2707 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
2708 {
2709         struct rb_node *n;
2710         struct btrfs_qgroup *qgroup;
2711
2712         spin_lock(&fs_info->qgroup_lock);
2713         /* clear all current qgroup tracking information */
2714         for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
2715                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
2716                 qgroup->rfer = 0;
2717                 qgroup->rfer_cmpr = 0;
2718                 qgroup->excl = 0;
2719                 qgroup->excl_cmpr = 0;
2720         }
2721         spin_unlock(&fs_info->qgroup_lock);
2722 }
2723
2724 int
2725 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
2726 {
2727         int ret = 0;
2728         struct btrfs_trans_handle *trans;
2729
2730         ret = qgroup_rescan_init(fs_info, 0, 1);
2731         if (ret)
2732                 return ret;
2733
2734         /*
2735          * We have set the rescan_progress to 0, which means no more
2736          * delayed refs will be accounted by btrfs_qgroup_account_ref.
2737  * However, btrfs_qgroup_account_ref may be running right after its call
2738          * to btrfs_find_all_roots, in which case it would still do the
2739          * accounting.
2740          * To solve this, we're committing the transaction, which will
2741          * ensure we run all delayed refs and only after that, we are
2742          * going to clear all tracking information for a clean start.
2743          */
2744
2745         trans = btrfs_join_transaction(fs_info->fs_root);
2746         if (IS_ERR(trans)) {
2747                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2748                 return PTR_ERR(trans);
2749         }
2750         ret = btrfs_commit_transaction(trans);
2751         if (ret) {
2752                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2753                 return ret;
2754         }
2755
2756         qgroup_rescan_zero_tracking(fs_info);
2757
2758         btrfs_queue_work(fs_info->qgroup_rescan_workers,
2759                          &fs_info->qgroup_rescan_work);
2760
2761         return 0;
2762 }
2763
2764 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
2765                                      bool interruptible)
2766 {
2767         int running;
2768         int ret = 0;
2769
2770         mutex_lock(&fs_info->qgroup_rescan_lock);
2771         spin_lock(&fs_info->qgroup_lock);
2772         running = fs_info->qgroup_rescan_running;
2773         spin_unlock(&fs_info->qgroup_lock);
2774         mutex_unlock(&fs_info->qgroup_rescan_lock);
2775
2776         if (!running)
2777                 return 0;
2778
2779         if (interruptible)
2780                 ret = wait_for_completion_interruptible(
2781                                         &fs_info->qgroup_rescan_completion);
2782         else
2783                 wait_for_completion(&fs_info->qgroup_rescan_completion);
2784
2785         return ret;
2786 }
2787
2788 /*
2789  * This is only called from open_ctree() where we're still single-threaded,
2790  * thus locking is omitted here.
2791  */
2792 void
2793 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
2794 {
2795         if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2796                 btrfs_queue_work(fs_info->qgroup_rescan_workers,
2797                                  &fs_info->qgroup_rescan_work);
2798 }
2799
2800 /*
2801  * Reserve qgroup space for range [start, start + len).
2802  *
2803  * This function will either reserve space from the related qgroups or do
2804  * nothing if the range is already reserved.
2805  *
2806  * Return 0 for a successful reservation.
2807  * Return <0 for error (including -EDQUOT).
2808  *
2809  * NOTE: this function may sleep for memory allocation.
2810  */
2811 int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
2812 {
2813         struct btrfs_root *root = BTRFS_I(inode)->root;
2814         struct extent_changeset changeset;
2815         struct ulist_node *unode;
2816         struct ulist_iterator uiter;
2817         int ret;
2818
2819         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
2820             !is_fstree(root->objectid) || len == 0)
2821                 return 0;
2822
2823         changeset.bytes_changed = 0;
2824         ulist_init(&changeset.range_changed);
2825         ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2826                         start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
2827         trace_btrfs_qgroup_reserve_data(inode, start, len,
2828                                         changeset.bytes_changed,
2829                                         QGROUP_RESERVE);
2830         if (ret < 0)
2831                 goto cleanup;
2832         ret = qgroup_reserve(root, changeset.bytes_changed, true);
2833         if (ret < 0)
2834                 goto cleanup;
2835
2836         ulist_release(&changeset.range_changed);
2837         return ret;
2838
2839 cleanup:
2840         /* cleanup already reserved ranges */
2841         ULIST_ITER_INIT(&uiter);
2842         while ((unode = ulist_next(&changeset.range_changed, &uiter)))
2843                 clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
2844                                  unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
2845                                  GFP_NOFS);
2846         ulist_release(&changeset.range_changed);
2847         return ret;
2848 }
2849
2850 static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
2851                                        int free)
2852 {
2853         struct extent_changeset changeset;
2854         int trace_op = QGROUP_RELEASE;
2855         int ret;
2856
2857         changeset.bytes_changed = 0;
2858         ulist_init(&changeset.range_changed);
2859         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2860                         start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
2861         if (ret < 0)
2862                 goto out;
2863
2864         if (free) {
2865                 btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
2866                                 BTRFS_I(inode)->root->objectid,
2867                                 changeset.bytes_changed);
2868                 trace_op = QGROUP_FREE;
2869         }
2870         trace_btrfs_qgroup_release_data(inode, start, len,
2871                                         changeset.bytes_changed, trace_op);
2872 out:
2873         ulist_release(&changeset.range_changed);
2874         return ret;
2875 }
2876
2877 /*
2878  * Free a reserved space range from io_tree and related qgroups
2879  *
2880  * Should be called when a range of pages gets invalidated before reaching
2881  * disk, or for the error cleanup case.
2882  *
2883  * For data written to disk, use btrfs_qgroup_release_data().
2884  *
2885  * NOTE: This function may sleep for memory allocation.
2886  */
2887 int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len)
2888 {
2889         return __btrfs_qgroup_release_data(inode, start, len, 1);
2890 }
2891
2892 /*
2893  * Release a reserved space range from io_tree only.
2894  *
2895  * Should be called when a range of pages gets written to disk and the
2896  * corresponding FILE_EXTENT item is inserted into the corresponding root.
2897  *
2898  * Since the new qgroup accounting framework only updates qgroup numbers at
2899  * commit_transaction() time, the reserved space shouldn't be freed from the
2900  * related qgroups.
2901  *
2902  * But we should release the range from the io_tree, to allow further writes
2903  * to be COWed.
2904  *
2905  * NOTE: This function may sleep for memory allocation.
2906  */
2907 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
2908 {
2909         return __btrfs_qgroup_release_data(inode, start, len, 0);
2910 }
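
/*
 * Typical data reservation life cycle, a sketch based on the helpers
 * above (the trigger points are illustrative, not a fixed call chain):
 *
 *	btrfs_qgroup_reserve_data()	<- buffered write reserves the range
 *		...
 *	btrfs_qgroup_release_data()	<- data reached disk; clear the
 *					   io_tree bits but keep the qgroup
 *					   reservation until the commit
 *					   accounts the new extent
 * or, on error/invalidation:
 *	btrfs_qgroup_free_data()	<- return the space to the qgroups
 */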
2911
2912 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
2913                               bool enforce)
2914 {
2915         struct btrfs_fs_info *fs_info = root->fs_info;
2916         int ret;
2917
2918         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
2919             !is_fstree(root->objectid) || num_bytes == 0)
2920                 return 0;
2921
2922         BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
2923         ret = qgroup_reserve(root, num_bytes, enforce);
2924         if (ret < 0)
2925                 return ret;
2926         atomic_add(num_bytes, &root->qgroup_meta_rsv);
2927         return ret;
2928 }
2929
2930 void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
2931 {
2932         struct btrfs_fs_info *fs_info = root->fs_info;
2933         int reserved;
2934
2935         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
2936             !is_fstree(root->objectid))
2937                 return;
2938
2939         reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
2940         if (reserved == 0)
2941                 return;
2942         btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved);
2943 }
2944
2945 void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
2946 {
2947         struct btrfs_fs_info *fs_info = root->fs_info;
2948
2949         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
2950             !is_fstree(root->objectid))
2951                 return;
2952
2953         BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
2954         WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
2955         atomic_sub(num_bytes, &root->qgroup_meta_rsv);
2956         btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes);
2957 }
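
/*
 * Metadata reservations are made in nodesize units, as the BUG_ON()s in
 * btrfs_qgroup_reserve_meta() and btrfs_qgroup_free_meta() enforce.  A
 * hypothetical balanced pair (the sizing is illustrative only):
 *
 *	int bytes = 3 * fs_info->nodesize;	// e.g. a 3-level tree walk
 *	int ret;
 *
 *	ret = btrfs_qgroup_reserve_meta(root, bytes, true);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	btrfs_qgroup_free_meta(root, bytes);
 */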
2958
2959 /*
2960  * Check for leaked qgroup reserved space, normally at inode destruction
2961  * time.
2962  */
2963 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
2964 {
2965         struct extent_changeset changeset;
2966         struct ulist_node *unode;
2967         struct ulist_iterator iter;
2968         int ret;
2969
2970         changeset.bytes_changed = 0;
2971         ulist_init(&changeset.range_changed);
2972         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
2973                         EXTENT_QGROUP_RESERVED, &changeset);
2974
2975         WARN_ON(ret < 0);
2976         if (WARN_ON(changeset.bytes_changed)) {
2977                 ULIST_ITER_INIT(&iter);
2978                 while ((unode = ulist_next(&changeset.range_changed, &iter))) {
2979                         btrfs_warn(BTRFS_I(inode)->root->fs_info,
2980                                 "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
2981                                 inode->i_ino, unode->val, unode->aux);
2982                 }
2983                 btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
2984                                 BTRFS_I(inode)->root->objectid,
2985                                 changeset.bytes_changed);
2987         }
2988         ulist_release(&changeset.range_changed);
2989 }