2 * Implementation of the diskquota system for the LINUX operating system. QUOTA
3 * is implemented using the BSD system call interface as the means of
4 * communication with the user level. This file contains the generic routines
5 * called by the different filesystems on allocation of an inode or block.
6 * These routines take care of the administration needed to have a consistent
7 * diskquota tracking system. The ideas of both user and group quotas are based
8 * on the Melbourne quota system as used on BSD derived systems. The internal
9 * implementation is based on one of the several variants of the LINUX
10 * inode-subsystem with added complexity of the diskquota system.
12 * Author: Marco van Wieringen <mvw@planets.elm.net>
14 * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
16 * Revised list management to avoid races
17 * -- Bill Hawes, <whawes@star.net>, 9/98
19 * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
20 * As a consequence, the locking was moved from dquot_decr_...(),
21 * dquot_incr_...() to calling functions.
22 * invalidate_dquots() now writes modified dquots.
23 * Serialized quota_off() and quota_on() for mount point.
24 * Fixed a few bugs in grow_dquots().
25 * Fixed deadlock in write_dquot() - we no longer account quotas on
27 * remove_dquot_ref() moved to inode.c - it now traverses through inodes
28 * add_dquot_ref() restarts after blocking
29 * Added check for bogus uid and fixed check for group in quotactl.
30 * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
32 * Used struct list_head instead of own list struct
33 * Invalidation of referenced dquots is no longer possible
34 * Improved free_dquots list management
35 * Quota and i_blocks are now updated in one place to avoid races
36 * Warnings are now delayed so we won't block in critical section
37 * Write updated not to require dquot lock
38 * Jan Kara, <jack@suse.cz>, 9/2000
40 * Added dynamic quota structure allocation
41 * Jan Kara <jack@suse.cz> 12/2000
43 * Rewritten quota interface. Implemented new quota format and
44 * formats registering.
45 * Jan Kara, <jack@suse.cz>, 2001,2002
48 * Jan Kara, <jack@suse.cz>, 10/2002
50 * Added journalled quota support, fix lock inversion problems
51 * Jan Kara, <jack@suse.cz>, 2003,2004
53 * (C) Copyright 1994 - 1997 Marco van Wieringen
56 #include <linux/errno.h>
57 #include <linux/kernel.h>
59 #include <linux/mount.h>
61 #include <linux/time.h>
62 #include <linux/types.h>
63 #include <linux/string.h>
64 #include <linux/fcntl.h>
65 #include <linux/stat.h>
66 #include <linux/tty.h>
67 #include <linux/file.h>
68 #include <linux/slab.h>
69 #include <linux/sysctl.h>
70 #include <linux/init.h>
71 #include <linux/module.h>
72 #include <linux/proc_fs.h>
73 #include <linux/security.h>
74 #include <linux/sched.h>
75 #include <linux/cred.h>
76 #include <linux/kmod.h>
77 #include <linux/namei.h>
78 #include <linux/capability.h>
79 #include <linux/quotaops.h>
80 #include "../internal.h" /* ugh */
82 #include <linux/uaccess.h>
85 * There are five quota SMP locks:
86 * * dq_list_lock protects all lists with quotas and quota formats.
87 * * dquot->dq_dqb_lock protects data from dq_dqb
88 * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
89 * consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
90 * dquot_transfer() can stabilize amount it transfers
91 * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
92 * pointers in the inode
93 * * dq_state_lock protects modifications of quota state (on quotaon and
94 * quotaoff) and readers who care about latest values take it as well.
96 * The spinlock ordering is hence:
97 * dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
98 * dq_list_lock > dq_state_lock
100 * Note that some things (eg. sb pointer, type, id) don't change during
101 * the life of the dquot structure and so need not be protected by a lock
103 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
104 * Reading such a pointer needs srcu_read_lock(&dquot_srcu), and
105 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
106 * inode and before dropping dquot references to avoid use of dquots after
107 * they are freed. dq_data_lock is used to serialize the pointer setting and
108 * clearing operations.
109 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
110 * inode is a quota file). Functions adding pointers from inode to dquots have
111 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
112 * have to do all pointer modifications before dropping dq_data_lock. This makes
113 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
114 * then drops all pointers to dquots from an inode.
116 * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
117 * memory (or space for it is being allocated) on the first dqget(), when it is
118 * being written out, and when it is being released on the last dqput(). The
119 * allocation and release operations are serialized by the dq_lock and by
120 * checking the use count in dquot_release().
122 * Lock ordering (including related VFS locks) is the following:
123 * s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
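/*
 * Illustrative sketch (not part of the original code) of the dquot_srcu
 * read-side pattern described above: code walking the per-inode dquot
 * pointers does roughly
 *
 *	int idx = srcu_read_lock(&dquot_srcu);
 *	spin_lock(&inode->i_lock);
 *	...use i_dquot(inode)[cnt]...
 *	spin_unlock(&inode->i_lock);
 *	srcu_read_unlock(&dquot_srcu, idx);
 *
 * while code clearing the pointers (e.g. drop_dquot_ref()) calls
 * synchronize_srcu(&dquot_srcu) before dropping its dquot references.
 */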
126 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
127 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
128 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
129 EXPORT_SYMBOL(dq_data_lock);
130 DEFINE_STATIC_SRCU(dquot_srcu);
132 static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
134 void __quota_error(struct super_block *sb, const char *func,
135 const char *fmt, ...)
137 if (printk_ratelimit()) {
139 struct va_format vaf;
146 printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
147 sb->s_id, func, &vaf);
152 EXPORT_SYMBOL(__quota_error);
154 #if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
155 static char *quotatypes[] = INITQFNAMES;
157 static struct quota_format_type *quota_formats; /* List of registered formats */
158 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
160 /* SLAB cache for dquot structures */
161 static struct kmem_cache *dquot_cachep;
163 int register_quota_format(struct quota_format_type *fmt)
165 spin_lock(&dq_list_lock);
166 fmt->qf_next = quota_formats;
168 spin_unlock(&dq_list_lock);
171 EXPORT_SYMBOL(register_quota_format);
173 void unregister_quota_format(struct quota_format_type *fmt)
175 struct quota_format_type **actqf;
177 spin_lock(&dq_list_lock);
178 for (actqf = &quota_formats; *actqf && *actqf != fmt;
179 actqf = &(*actqf)->qf_next)
182 *actqf = (*actqf)->qf_next;
183 spin_unlock(&dq_list_lock);
185 EXPORT_SYMBOL(unregister_quota_format);
187 static struct quota_format_type *find_quota_format(int id)
189 struct quota_format_type *actqf;
191 spin_lock(&dq_list_lock);
192 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
193 actqf = actqf->qf_next)
195 if (!actqf || !try_module_get(actqf->qf_owner)) {
198 spin_unlock(&dq_list_lock);
200 for (qm = 0; module_names[qm].qm_fmt_id &&
201 module_names[qm].qm_fmt_id != id; qm++)
203 if (!module_names[qm].qm_fmt_id ||
204 request_module(module_names[qm].qm_mod_name))
207 spin_lock(&dq_list_lock);
208 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
209 actqf = actqf->qf_next)
211 if (actqf && !try_module_get(actqf->qf_owner))
214 spin_unlock(&dq_list_lock);
218 static void put_quota_format(struct quota_format_type *fmt)
220 module_put(fmt->qf_owner);
224 * Dquot List Management:
225 * The quota code uses three lists for dquot management: the inuse_list,
226 * free_dquots, and dquot_hash[] array. A single dquot structure may be
227 * on all three lists, depending on its current state.
229 * All dquots are placed at the end of inuse_list when first created, and this
230 * list is used for the invalidate operation, which must look at every dquot.
232 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
233 * and this list is searched whenever we need an available dquot. Dquots are
234 * removed from the list as soon as they are used again, and
235 * dqstats.free_dquots gives the number of dquots on the list. When
236 * a dquot is invalidated it is completely released from memory.
238 * Dquots with a specific identity (device, type and id) are placed on
239 * one of the dquot_hash[] hash chains. This provides an efficient search
240 * mechanism to locate a specific dquot.
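/*
 * Rough life cycle sketch (illustration only, derived from the helpers
 * below): get_empty_dquot() allocates a dquot, dqget() links it into
 * inuse_list and the hash via put_inuse()/insert_dquot_hash(), the final
 * dqput() appends it to free_dquots with put_dquot_last(), a later dqget()
 * takes it back off with remove_free_dquot(), and the shrinker or
 * invalidate_dquots() finally unhashes it and calls do_destroy_dquot().
 */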
243 static LIST_HEAD(inuse_list);
244 static LIST_HEAD(free_dquots);
245 static unsigned int dq_hash_bits, dq_hash_mask;
246 static struct hlist_head *dquot_hash;
248 struct dqstats dqstats;
249 EXPORT_SYMBOL(dqstats);
251 static qsize_t inode_get_rsv_space(struct inode *inode);
252 static qsize_t __inode_get_rsv_space(struct inode *inode);
253 static int __dquot_initialize(struct inode *inode, int type);
255 static inline unsigned int
256 hashfn(const struct super_block *sb, struct kqid qid)
258 unsigned int id = from_kqid(&init_user_ns, qid);
262 tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
263 return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
267 * Following list functions expect dq_list_lock to be held
269 static inline void insert_dquot_hash(struct dquot *dquot)
271 struct hlist_head *head;
272 head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
273 hlist_add_head(&dquot->dq_hash, head);
276 static inline void remove_dquot_hash(struct dquot *dquot)
278 hlist_del_init(&dquot->dq_hash);
281 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
284 struct hlist_node *node;
287 hlist_for_each (node, dquot_hash+hashent) {
288 dquot = hlist_entry(node, struct dquot, dq_hash);
289 if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
295 /* Add a dquot to the tail of the free list */
296 static inline void put_dquot_last(struct dquot *dquot)
298 list_add_tail(&dquot->dq_free, &free_dquots);
299 dqstats_inc(DQST_FREE_DQUOTS);
302 static inline void remove_free_dquot(struct dquot *dquot)
304 if (list_empty(&dquot->dq_free))
306 list_del_init(&dquot->dq_free);
307 dqstats_dec(DQST_FREE_DQUOTS);
310 static inline void put_inuse(struct dquot *dquot)
312 /* We add to the back of the inuse list so we don't have to restart
313 * traversing the list when we block */
314 list_add_tail(&dquot->dq_inuse, &inuse_list);
315 dqstats_inc(DQST_ALLOC_DQUOTS);
318 static inline void remove_inuse(struct dquot *dquot)
320 dqstats_dec(DQST_ALLOC_DQUOTS);
321 list_del(&dquot->dq_inuse);
324 * End of list functions needing dq_list_lock
327 static void wait_on_dquot(struct dquot *dquot)
329 mutex_lock(&dquot->dq_lock);
330 mutex_unlock(&dquot->dq_lock);
333 static inline int dquot_dirty(struct dquot *dquot)
335 return test_bit(DQ_MOD_B, &dquot->dq_flags);
338 static inline int mark_dquot_dirty(struct dquot *dquot)
340 return dquot->dq_sb->dq_op->mark_dirty(dquot);
343 /* Mark dquot dirty in an atomic manner, and return its old dirty flag state */
344 int dquot_mark_dquot_dirty(struct dquot *dquot)
348 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
351 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
352 return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
354 /* If quota is dirty already, we don't have to acquire dq_list_lock */
355 if (test_bit(DQ_MOD_B, &dquot->dq_flags))
358 spin_lock(&dq_list_lock);
359 if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
360 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
361 info[dquot->dq_id.type].dqi_dirty_list);
364 spin_unlock(&dq_list_lock);
367 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
369 /* Dirtify all the dquots - this can block when journalling */
370 static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
375 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
377 /* Even in case of error we have to continue */
378 ret = mark_dquot_dirty(dquot[cnt]);
385 static inline void dqput_all(struct dquot **dquot)
389 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
393 static inline int clear_dquot_dirty(struct dquot *dquot)
395 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
396 return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);
398 spin_lock(&dq_list_lock);
399 if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
400 spin_unlock(&dq_list_lock);
403 list_del_init(&dquot->dq_dirty);
404 spin_unlock(&dq_list_lock);
408 void mark_info_dirty(struct super_block *sb, int type)
410 spin_lock(&dq_data_lock);
411 sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
412 spin_unlock(&dq_data_lock);
414 EXPORT_SYMBOL(mark_info_dirty);
417 * Read dquot from disk and alloc space for it
420 int dquot_acquire(struct dquot *dquot)
422 int ret = 0, ret2 = 0;
423 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
425 mutex_lock(&dquot->dq_lock);
426 if (!test_bit(DQ_READ_B, &dquot->dq_flags))
427 ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
430 /* Make sure flags update is visible after dquot has been filled */
431 smp_mb__before_atomic();
432 set_bit(DQ_READ_B, &dquot->dq_flags);
433 /* Instantiate dquot if needed */
434 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
435 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
436 /* Write the info if needed */
437 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
438 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
439 dquot->dq_sb, dquot->dq_id.type);
449 * Make sure flags update is visible after on-disk struct has been
450 * allocated. Paired with smp_rmb() in dqget().
452 smp_mb__before_atomic();
453 set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
455 mutex_unlock(&dquot->dq_lock);
458 EXPORT_SYMBOL(dquot_acquire);
461 * Write dquot to disk
463 int dquot_commit(struct dquot *dquot)
466 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
468 mutex_lock(&dquot->dq_lock);
469 if (!clear_dquot_dirty(dquot))
471 /* A dquot can be inactive only if there was an error during read/init,
472 * so we'd better not write it */
473 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
474 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
478 mutex_unlock(&dquot->dq_lock);
481 EXPORT_SYMBOL(dquot_commit);
486 int dquot_release(struct dquot *dquot)
488 int ret = 0, ret2 = 0;
489 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
491 mutex_lock(&dquot->dq_lock);
492 /* Check whether we are not racing with some other dqget() */
493 if (atomic_read(&dquot->dq_count) > 1)
495 if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
496 ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
498 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
499 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
500 dquot->dq_sb, dquot->dq_id.type);
505 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
507 mutex_unlock(&dquot->dq_lock);
510 EXPORT_SYMBOL(dquot_release);
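/*
 * Summary sketch (not from the original source) of the on-disk life cycle
 * implemented by the three helpers above:
 *
 *	dqget() -> ->acquire_dquot() (dquot_acquire): reads the dquot and
 *		sets DQ_READ_B, then DQ_ACTIVE_B
 *	dirtying -> ->write_dquot() (dquot_commit): flushes dq_dqb to disk
 *	last dqput() -> ->release_dquot() (dquot_release): frees the on-disk
 *		structure and clears DQ_ACTIVE_B
 */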
512 void dquot_destroy(struct dquot *dquot)
514 kmem_cache_free(dquot_cachep, dquot);
516 EXPORT_SYMBOL(dquot_destroy);
518 static inline void do_destroy_dquot(struct dquot *dquot)
520 dquot->dq_sb->dq_op->destroy_dquot(dquot);
523 /* Invalidate all dquots on the list. Note that this function is called after
524 * quota is disabled and pointers from inodes removed so there cannot be new
525 * quota users. There can still be some users of quotas due to inodes being
526 * just deleted or pruned by prune_icache() (those are not attached to any
527 * list) or parallel quotactl call. We have to wait for such users.
529 static void invalidate_dquots(struct super_block *sb, int type)
531 struct dquot *dquot, *tmp;
534 spin_lock(&dq_list_lock);
535 list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
536 if (dquot->dq_sb != sb)
538 if (dquot->dq_id.type != type)
540 /* Wait for dquot users */
541 if (atomic_read(&dquot->dq_count)) {
543 spin_unlock(&dq_list_lock);
545 * Once dqput() wakes us up, we know it's time to free
547 * IMPORTANT: we rely on the fact that there is always
548 * at most one process waiting for dquot to free.
549 * Otherwise dq_count would be > 1 and we would never
552 wait_event(dquot_ref_wq,
553 atomic_read(&dquot->dq_count) == 1);
555 /* At this moment the dquot need not exist (it could have been
556 * reclaimed by prune_dqcache()). Hence we must
561 * Quota now has no users and it has been written on last
564 remove_dquot_hash(dquot);
565 remove_free_dquot(dquot);
567 do_destroy_dquot(dquot);
569 spin_unlock(&dq_list_lock);
572 /* Call callback for every active dquot on given filesystem */
573 int dquot_scan_active(struct super_block *sb,
574 int (*fn)(struct dquot *dquot, unsigned long priv),
577 struct dquot *dquot, *old_dquot = NULL;
580 WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
582 spin_lock(&dq_list_lock);
583 list_for_each_entry(dquot, &inuse_list, dq_inuse) {
584 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
586 if (dquot->dq_sb != sb)
588 /* Now we have an active dquot so we can just increase the use count */
589 atomic_inc(&dquot->dq_count);
590 spin_unlock(&dq_list_lock);
591 dqstats_inc(DQST_LOOKUPS);
595 * ->release_dquot() can be racing with us. Our reference
596 * protects us from new calls to it so just wait for any
597 * outstanding call and recheck the DQ_ACTIVE_B after that.
599 wait_on_dquot(dquot);
600 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
601 ret = fn(dquot, priv);
605 spin_lock(&dq_list_lock);
606 /* We are safe to continue now because our dquot could not
607 * be moved out of the inuse list while we hold the reference */
609 spin_unlock(&dq_list_lock);
614 EXPORT_SYMBOL(dquot_scan_active);
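/*
 * Hypothetical usage sketch (myfs_sync_dquot is an invented name, not part
 * of this file): a filesystem wanting to visit every active dquot, e.g. to
 * push the limits into its own metadata, could do
 *
 *	static int myfs_sync_dquot(struct dquot *dquot, unsigned long priv)
 *	{
 *		// inspect dquot->dq_dqb under dquot->dq_dqb_lock
 *		return 0;	// returning non-zero aborts the scan
 *	}
 *	...
 *	err = dquot_scan_active(sb, myfs_sync_dquot, 0);
 *
 * The caller must hold s_umount, as the WARN_ON_ONCE() above checks.
 */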
616 /* Write all dquot structures to quota files */
617 int dquot_writeback_dquots(struct super_block *sb, int type)
619 struct list_head *dirty;
621 struct quota_info *dqopt = sb_dqopt(sb);
625 WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
627 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
628 if (type != -1 && cnt != type)
630 if (!sb_has_quota_active(sb, cnt))
632 spin_lock(&dq_list_lock);
633 dirty = &dqopt->info[cnt].dqi_dirty_list;
634 while (!list_empty(dirty)) {
635 dquot = list_first_entry(dirty, struct dquot,
638 WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
640 /* Now we have an active dquot to which someone is
641 * holding a reference so we can safely just increase
644 spin_unlock(&dq_list_lock);
645 dqstats_inc(DQST_LOOKUPS);
646 err = sb->dq_op->write_dquot(dquot);
649 * Clear dirty bit anyway to avoid infinite
652 clear_dquot_dirty(dquot);
657 spin_lock(&dq_list_lock);
659 spin_unlock(&dq_list_lock);
662 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
663 if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
664 && info_dirty(&dqopt->info[cnt]))
665 sb->dq_op->write_info(sb, cnt);
666 dqstats_inc(DQST_SYNCS);
670 EXPORT_SYMBOL(dquot_writeback_dquots);
672 /* Write all dquot structures to disk and make them visible from userspace */
673 int dquot_quota_sync(struct super_block *sb, int type)
675 struct quota_info *dqopt = sb_dqopt(sb);
679 ret = dquot_writeback_dquots(sb, type);
682 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
685 /* This is not very clever (nor fast) but currently I don't know of
686 * any other simple way of getting quota data to disk and we must get
687 * it there for it to be visible to userspace... */
688 if (sb->s_op->sync_fs)
689 sb->s_op->sync_fs(sb, 1);
690 sync_blockdev(sb->s_bdev);
693 * Now when everything is written we can discard the pagecache so
694 * that userspace sees the changes.
696 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
697 if (type != -1 && cnt != type)
699 if (!sb_has_quota_active(sb, cnt))
701 inode_lock(dqopt->files[cnt]);
702 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
703 inode_unlock(dqopt->files[cnt]);
708 EXPORT_SYMBOL(dquot_quota_sync);
711 dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
713 struct list_head *head;
715 unsigned long freed = 0;
717 spin_lock(&dq_list_lock);
718 head = free_dquots.prev;
719 while (head != &free_dquots && sc->nr_to_scan) {
720 dquot = list_entry(head, struct dquot, dq_free);
721 remove_dquot_hash(dquot);
722 remove_free_dquot(dquot);
724 do_destroy_dquot(dquot);
727 head = free_dquots.prev;
729 spin_unlock(&dq_list_lock);
734 dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
736 return vfs_pressure_ratio(
737 percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
740 static struct shrinker dqcache_shrinker = {
741 .count_objects = dqcache_shrink_count,
742 .scan_objects = dqcache_shrink_scan,
743 .seeks = DEFAULT_SEEKS,
747 * Put reference to dquot
749 void dqput(struct dquot *dquot)
755 #ifdef CONFIG_QUOTA_DEBUG
756 if (!atomic_read(&dquot->dq_count)) {
757 quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
758 quotatypes[dquot->dq_id.type],
759 from_kqid(&init_user_ns, dquot->dq_id));
763 dqstats_inc(DQST_DROPS);
765 spin_lock(&dq_list_lock);
766 if (atomic_read(&dquot->dq_count) > 1) {
767 /* We have more than one user... nothing to do */
768 atomic_dec(&dquot->dq_count);
769 /* Releasing dquot during quotaoff phase? */
770 if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
771 atomic_read(&dquot->dq_count) == 1)
772 wake_up(&dquot_ref_wq);
773 spin_unlock(&dq_list_lock);
776 /* Need to release dquot? */
777 if (dquot_dirty(dquot)) {
778 spin_unlock(&dq_list_lock);
779 /* Commit dquot before releasing */
780 ret = dquot->dq_sb->dq_op->write_dquot(dquot);
782 quota_error(dquot->dq_sb, "Can't write quota structure"
783 " (error %d). Quota may get out of sync!",
786 * We clear dirty bit anyway, so that we avoid
789 clear_dquot_dirty(dquot);
793 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
794 spin_unlock(&dq_list_lock);
795 dquot->dq_sb->dq_op->release_dquot(dquot);
798 atomic_dec(&dquot->dq_count);
799 #ifdef CONFIG_QUOTA_DEBUG
801 BUG_ON(!list_empty(&dquot->dq_free));
803 put_dquot_last(dquot);
804 spin_unlock(&dq_list_lock);
806 EXPORT_SYMBOL(dqput);
808 struct dquot *dquot_alloc(struct super_block *sb, int type)
810 return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
812 EXPORT_SYMBOL(dquot_alloc);
814 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
818 dquot = sb->dq_op->alloc_dquot(sb, type);
822 mutex_init(&dquot->dq_lock);
823 INIT_LIST_HEAD(&dquot->dq_free);
824 INIT_LIST_HEAD(&dquot->dq_inuse);
825 INIT_HLIST_NODE(&dquot->dq_hash);
826 INIT_LIST_HEAD(&dquot->dq_dirty);
828 dquot->dq_id = make_kqid_invalid(type);
829 atomic_set(&dquot->dq_count, 1);
830 spin_lock_init(&dquot->dq_dqb_lock);
836 * Get reference to dquot
838 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
839 * destroying our dquot by:
840 * a) checking for quota flags under dq_list_lock and
841 * b) getting a reference to dquot before we release dq_list_lock
843 struct dquot *dqget(struct super_block *sb, struct kqid qid)
845 unsigned int hashent = hashfn(sb, qid);
846 struct dquot *dquot, *empty = NULL;
848 if (!qid_has_mapping(sb->s_user_ns, qid))
849 return ERR_PTR(-EINVAL);
851 if (!sb_has_quota_active(sb, qid.type))
852 return ERR_PTR(-ESRCH);
854 spin_lock(&dq_list_lock);
855 spin_lock(&dq_state_lock);
856 if (!sb_has_quota_active(sb, qid.type)) {
857 spin_unlock(&dq_state_lock);
858 spin_unlock(&dq_list_lock);
859 dquot = ERR_PTR(-ESRCH);
862 spin_unlock(&dq_state_lock);
864 dquot = find_dquot(hashent, sb, qid);
867 spin_unlock(&dq_list_lock);
868 empty = get_empty_dquot(sb, qid.type);
870 schedule(); /* Try to wait for a moment... */
876 /* all dquots go on the inuse_list */
878 /* hash it first so it can be found */
879 insert_dquot_hash(dquot);
880 spin_unlock(&dq_list_lock);
881 dqstats_inc(DQST_LOOKUPS);
883 if (!atomic_read(&dquot->dq_count))
884 remove_free_dquot(dquot);
885 atomic_inc(&dquot->dq_count);
886 spin_unlock(&dq_list_lock);
887 dqstats_inc(DQST_CACHE_HITS);
888 dqstats_inc(DQST_LOOKUPS);
890 /* Wait for dq_lock - after this we know that either dquot_release() is
891 * already finished or it will be canceled due to dq_count > 1 test */
892 wait_on_dquot(dquot);
893 /* Read the dquot / allocate space in quota file */
894 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
897 err = sb->dq_op->acquire_dquot(dquot);
900 dquot = ERR_PTR(err);
905 * Make sure following reads see filled structure - paired with
906 * smp_mb__before_atomic() in dquot_acquire().
909 #ifdef CONFIG_QUOTA_DEBUG
910 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
914 do_destroy_dquot(empty);
918 EXPORT_SYMBOL(dqget);
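/*
 * Hedged usage sketch (illustrative only): callers pair dqget() with
 * dqput() and must handle the ERR_PTR() return, e.g.
 *
 *	struct dquot *dquot;
 *
 *	dquot = dqget(sb, make_kqid_uid(inode->i_uid));
 *	if (IS_ERR(dquot)) {
 *		if (PTR_ERR(dquot) != -ESRCH)	// -ESRCH: quota not active
 *			return PTR_ERR(dquot);
 *	} else {
 *		// ...use dquot...
 *		dqput(dquot);
 *	}
 *
 * This mirrors what dquot_transfer() below does for the uid/gid case.
 */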
920 static inline struct dquot **i_dquot(struct inode *inode)
922 return inode->i_sb->s_op->get_dquots(inode);
925 static int dqinit_needed(struct inode *inode, int type)
927 struct dquot * const *dquots;
930 if (IS_NOQUOTA(inode))
933 dquots = i_dquot(inode);
935 return !dquots[type];
936 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
942 /* This routine is guarded by s_umount semaphore */
943 static void add_dquot_ref(struct super_block *sb, int type)
945 struct inode *inode, *old_inode = NULL;
946 #ifdef CONFIG_QUOTA_DEBUG
950 spin_lock(&sb->s_inode_list_lock);
951 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
952 spin_lock(&inode->i_lock);
953 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
954 !atomic_read(&inode->i_writecount) ||
955 !dqinit_needed(inode, type)) {
956 spin_unlock(&inode->i_lock);
960 spin_unlock(&inode->i_lock);
961 spin_unlock(&sb->s_inode_list_lock);
963 #ifdef CONFIG_QUOTA_DEBUG
964 if (unlikely(inode_get_rsv_space(inode) > 0))
968 __dquot_initialize(inode, type);
971 * We hold a reference to 'inode' so it couldn't have been
972 * removed from s_inodes list while we dropped the
973 * s_inode_list_lock. We cannot iput the inode now as we can be
974 * holding the last reference and we cannot iput it under
975 * s_inode_list_lock. So we keep the reference and iput it
979 spin_lock(&sb->s_inode_list_lock);
981 spin_unlock(&sb->s_inode_list_lock);
984 #ifdef CONFIG_QUOTA_DEBUG
986 quota_error(sb, "Writes happened before quota was turned on "
987 "thus quota information is probably inconsistent. "
988 "Please run quotacheck(8)");
994 * Remove references to dquots from inode and add dquot to list for freeing
995 * if we have the last reference to dquot
997 static void remove_inode_dquot_ref(struct inode *inode, int type,
998 struct list_head *tofree_head)
1000 struct dquot **dquots = i_dquot(inode);
1001 struct dquot *dquot = dquots[type];
1006 dquots[type] = NULL;
1007 if (list_empty(&dquot->dq_free)) {
1009 * The inode still has reference to dquot so it can't be in the
1012 spin_lock(&dq_list_lock);
1013 list_add(&dquot->dq_free, tofree_head);
1014 spin_unlock(&dq_list_lock);
1017 * Dquot is already in a list to put so we won't drop the last
1025 * Free list of dquots
1026 * Dquots are removed from inodes and no new references can be taken so we are
1027 * the only ones holding a reference
1029 static void put_dquot_list(struct list_head *tofree_head)
1031 struct list_head *act_head;
1032 struct dquot *dquot;
1034 act_head = tofree_head->next;
1035 while (act_head != tofree_head) {
1036 dquot = list_entry(act_head, struct dquot, dq_free);
1037 act_head = act_head->next;
1038 /* Remove dquot from the list so we won't have problems... */
1039 list_del_init(&dquot->dq_free);
1044 static void remove_dquot_ref(struct super_block *sb, int type,
1045 struct list_head *tofree_head)
1047 struct inode *inode;
1050 spin_lock(&sb->s_inode_list_lock);
1051 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1053 * We also have to scan I_NEW inodes because they can already
1054 * have their quota pointers initialized. Luckily, we need to touch
1055 * only the quota pointers and these have separate locking
1058 spin_lock(&dq_data_lock);
1059 if (!IS_NOQUOTA(inode)) {
1060 if (unlikely(inode_get_rsv_space(inode) > 0))
1062 remove_inode_dquot_ref(inode, type, tofree_head);
1064 spin_unlock(&dq_data_lock);
1066 spin_unlock(&sb->s_inode_list_lock);
1067 #ifdef CONFIG_QUOTA_DEBUG
1069 printk(KERN_WARNING "VFS (%s): Writes happened after quota"
1070 " was disabled thus quota information is probably "
1071 "inconsistent. Please run quotacheck(8).\n", sb->s_id);
1076 /* Gather all references from inodes and drop them */
1077 static void drop_dquot_ref(struct super_block *sb, int type)
1079 LIST_HEAD(tofree_head);
1082 remove_dquot_ref(sb, type, &tofree_head);
1083 synchronize_srcu(&dquot_srcu);
1084 put_dquot_list(&tofree_head);
1089 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1091 if (dquot->dq_dqb.dqb_rsvspace >= number)
1092 dquot->dq_dqb.dqb_rsvspace -= number;
1095 dquot->dq_dqb.dqb_rsvspace = 0;
1097 if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1098 dquot->dq_dqb.dqb_bsoftlimit)
1099 dquot->dq_dqb.dqb_btime = (time64_t) 0;
1100 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1103 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1105 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1106 dquot->dq_dqb.dqb_curinodes >= number)
1107 dquot->dq_dqb.dqb_curinodes -= number;
1109 dquot->dq_dqb.dqb_curinodes = 0;
1110 if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1111 dquot->dq_dqb.dqb_itime = (time64_t) 0;
1112 clear_bit(DQ_INODES_B, &dquot->dq_flags);
1115 static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1117 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1118 dquot->dq_dqb.dqb_curspace >= number)
1119 dquot->dq_dqb.dqb_curspace -= number;
1121 dquot->dq_dqb.dqb_curspace = 0;
1122 if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1123 dquot->dq_dqb.dqb_bsoftlimit)
1124 dquot->dq_dqb.dqb_btime = (time64_t) 0;
1125 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1129 struct super_block *w_sb;
1130 struct kqid w_dq_id;
1134 static int warning_issued(struct dquot *dquot, const int warntype)
1136 int flag = (warntype == QUOTA_NL_BHARDWARN ||
1137 warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1138 ((warntype == QUOTA_NL_IHARDWARN ||
1139 warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1143 return test_and_set_bit(flag, &dquot->dq_flags);
1146 #ifdef CONFIG_PRINT_QUOTA_WARNING
1147 static int flag_print_warnings = 1;
1149 static int need_print_warning(struct dquot_warn *warn)
1151 if (!flag_print_warnings)
1154 switch (warn->w_dq_id.type) {
1156 return uid_eq(current_fsuid(), warn->w_dq_id.uid);
1158 return in_group_p(warn->w_dq_id.gid);
1165 /* Print a warning to the user who exceeded quota */
1166 static void print_warning(struct dquot_warn *warn)
1169 struct tty_struct *tty;
1170 int warntype = warn->w_type;
1172 if (warntype == QUOTA_NL_IHARDBELOW ||
1173 warntype == QUOTA_NL_ISOFTBELOW ||
1174 warntype == QUOTA_NL_BHARDBELOW ||
1175 warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
1178 tty = get_current_tty();
1181 tty_write_message(tty, warn->w_sb->s_id);
1182 if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
1183 tty_write_message(tty, ": warning, ");
1185 tty_write_message(tty, ": write failed, ");
1186 tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
1188 case QUOTA_NL_IHARDWARN:
1189 msg = " file limit reached.\r\n";
1191 case QUOTA_NL_ISOFTLONGWARN:
1192 msg = " file quota exceeded too long.\r\n";
1194 case QUOTA_NL_ISOFTWARN:
1195 msg = " file quota exceeded.\r\n";
1197 case QUOTA_NL_BHARDWARN:
1198 msg = " block limit reached.\r\n";
1200 case QUOTA_NL_BSOFTLONGWARN:
1201 msg = " block quota exceeded too long.\r\n";
1203 case QUOTA_NL_BSOFTWARN:
1204 msg = " block quota exceeded.\r\n";
1207 tty_write_message(tty, msg);
1212 static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
1215 if (warning_issued(dquot, warntype))
1217 warn->w_type = warntype;
1218 warn->w_sb = dquot->dq_sb;
1219 warn->w_dq_id = dquot->dq_id;
1223 * Write warnings to the console and send warning messages over netlink.
1225 * Note that this function can call into tty and networking code.
1227 static void flush_warnings(struct dquot_warn *warn)
1231 for (i = 0; i < MAXQUOTAS; i++) {
1232 if (warn[i].w_type == QUOTA_NL_NOWARN)
1234 #ifdef CONFIG_PRINT_QUOTA_WARNING
1235 print_warning(&warn[i]);
1237 quota_send_warning(warn[i].w_dq_id,
1238 warn[i].w_sb->s_dev, warn[i].w_type);
1242 static int ignore_hardlimit(struct dquot *dquot)
1244 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
1246 return capable(CAP_SYS_RESOURCE) &&
1247 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1248 !(info->dqi_flags & DQF_ROOT_SQUASH));
1251 static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
1252 struct dquot_warn *warn)
1257 spin_lock(&dquot->dq_dqb_lock);
1258 newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
1259 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
1260 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1263 if (dquot->dq_dqb.dqb_ihardlimit &&
1264 newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1265 !ignore_hardlimit(dquot)) {
1266 prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
1271 if (dquot->dq_dqb.dqb_isoftlimit &&
1272 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1273 dquot->dq_dqb.dqb_itime &&
1274 ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
1275 !ignore_hardlimit(dquot)) {
1276 prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
1281 if (dquot->dq_dqb.dqb_isoftlimit &&
1282 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1283 dquot->dq_dqb.dqb_itime == 0) {
1284 prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
1285 dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
1286 sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
1289 dquot->dq_dqb.dqb_curinodes = newinodes;
1292 spin_unlock(&dquot->dq_dqb_lock);
1296 static int dquot_add_space(struct dquot *dquot, qsize_t space,
1297 qsize_t rsv_space, unsigned int flags,
1298 struct dquot_warn *warn)
1301 struct super_block *sb = dquot->dq_sb;
1304 spin_lock(&dquot->dq_dqb_lock);
1305 if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
1306 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1309 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1310 + space + rsv_space;
1312 if (dquot->dq_dqb.dqb_bhardlimit &&
1313 tspace > dquot->dq_dqb.dqb_bhardlimit &&
1314 !ignore_hardlimit(dquot)) {
1315 if (flags & DQUOT_SPACE_WARN)
1316 prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
1321 if (dquot->dq_dqb.dqb_bsoftlimit &&
1322 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1323 dquot->dq_dqb.dqb_btime &&
1324 ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
1325 !ignore_hardlimit(dquot)) {
1326 if (flags & DQUOT_SPACE_WARN)
1327 prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
1332 if (dquot->dq_dqb.dqb_bsoftlimit &&
1333 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1334 dquot->dq_dqb.dqb_btime == 0) {
1335 if (flags & DQUOT_SPACE_WARN) {
1336 prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
1337 dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
1338 sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
1341 * We don't allow preallocation to exceed softlimit so exceeding will
1350 * We have to be careful and go through warning generation & grace time
1351 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
1354 if (flags & DQUOT_SPACE_NOFAIL)
1357 dquot->dq_dqb.dqb_rsvspace += rsv_space;
1358 dquot->dq_dqb.dqb_curspace += space;
1360 spin_unlock(&dquot->dq_dqb_lock);
1364 static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1368 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1369 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1370 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
1371 return QUOTA_NL_NOWARN;
1373 newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1374 if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1375 return QUOTA_NL_ISOFTBELOW;
1376 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1377 newinodes < dquot->dq_dqb.dqb_ihardlimit)
1378 return QUOTA_NL_IHARDBELOW;
1379 return QUOTA_NL_NOWARN;
1382 static int info_bdq_free(struct dquot *dquot, qsize_t space)
1386 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
1388 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1389 tspace <= dquot->dq_dqb.dqb_bsoftlimit)
1390 return QUOTA_NL_NOWARN;
1392 if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1393 return QUOTA_NL_BSOFTBELOW;
1394 if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
1395 tspace - space < dquot->dq_dqb.dqb_bhardlimit)
1396 return QUOTA_NL_BHARDBELOW;
1397 return QUOTA_NL_NOWARN;
1400 static int dquot_active(const struct inode *inode)
1402 struct super_block *sb = inode->i_sb;
1404 if (IS_NOQUOTA(inode))
1406 return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1410 * Initialize quota pointers in inode
1412 * It is better to call this function outside of any transaction as it
1413 * might need a lot of space in journal for dquot structure allocation.
1415 static int __dquot_initialize(struct inode *inode, int type)
1417 int cnt, init_needed = 0;
1418 struct dquot **dquots, *got[MAXQUOTAS] = {};
1419 struct super_block *sb = inode->i_sb;
1423 if (!dquot_active(inode))
1426 dquots = i_dquot(inode);
1428 /* First get references to structures we might need. */
1429 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1433 struct dquot *dquot;
1435 if (type != -1 && cnt != type)
1438 * The i_dquot should have been initialized in most cases,
1439 * we check it without locking here to avoid unnecessary
1440 * dqget()/dqput() calls.
1445 if (!sb_has_quota_active(sb, cnt))
1452 qid = make_kqid_uid(inode->i_uid);
1455 qid = make_kqid_gid(inode->i_gid);
1458 rc = inode->i_sb->dq_op->get_projid(inode, &projid);
1461 qid = make_kqid_projid(projid);
1464 dquot = dqget(sb, qid);
1465 if (IS_ERR(dquot)) {
1466 /* We raced with somebody turning quotas off... */
1467 if (PTR_ERR(dquot) != -ESRCH) {
1468 ret = PTR_ERR(dquot);
1476 /* All required i_dquot pointers have been initialized */
1480 spin_lock(&dq_data_lock);
1481 if (IS_NOQUOTA(inode))
1483 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1484 if (type != -1 && cnt != type)
1486 /* Avoid races with quotaoff() */
1487 if (!sb_has_quota_active(sb, cnt))
1489 /* We could race with quotaon or dqget() could have failed */
1493 dquots[cnt] = got[cnt];
1496 * Make quota reservation system happy if someone
1497 * did a write before quota was turned on
1499 rsv = inode_get_rsv_space(inode);
1500 if (unlikely(rsv)) {
1501 spin_lock(&inode->i_lock);
1502 /* Get reservation again under proper lock */
1503 rsv = __inode_get_rsv_space(inode);
1504 spin_lock(&dquots[cnt]->dq_dqb_lock);
1505 dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
1506 spin_unlock(&dquots[cnt]->dq_dqb_lock);
1507 spin_unlock(&inode->i_lock);
1512 spin_unlock(&dq_data_lock);
1514 /* Drop unused references */
1520 int dquot_initialize(struct inode *inode)
1522 return __dquot_initialize(inode, -1);
1524 EXPORT_SYMBOL(dquot_initialize);
1526 bool dquot_initialize_needed(struct inode *inode)
1528 struct dquot **dquots;
1531 if (!dquot_active(inode))
1534 dquots = i_dquot(inode);
1535 for (i = 0; i < MAXQUOTAS; i++)
1536 if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
1540 EXPORT_SYMBOL(dquot_initialize_needed);
1543 * Release all quotas referenced by inode.
1545 * This function can only be called on inode free or when converting
1546 * a file to a quota file; there are no other users of the i_dquot in
1547 * either case, so we needn't call synchronize_srcu() after
1550 static void __dquot_drop(struct inode *inode)
1553 struct dquot **dquots = i_dquot(inode);
1554 struct dquot *put[MAXQUOTAS];
1556 spin_lock(&dq_data_lock);
1557 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1558 put[cnt] = dquots[cnt];
1561 spin_unlock(&dq_data_lock);
1565 void dquot_drop(struct inode *inode)
1567 struct dquot * const *dquots;
1570 if (IS_NOQUOTA(inode))
1574 * Test before calling to rule out calls from proc and such
1575 * where we are not allowed to block. Note that this is
1576 * actually a reliable test even without the lock - the caller
1577 * must ensure that nobody can come after the DQUOT_DROP and
1578 * add quota pointers back anyway.
1580 dquots = i_dquot(inode);
1581 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1586 if (cnt < MAXQUOTAS)
1587 __dquot_drop(inode);
1589 EXPORT_SYMBOL(dquot_drop);
1592 * inode_reserved_space is managed internally by quota, and protected by
1593 * i_lock similar to i_blocks+i_bytes.
1595 static qsize_t *inode_reserved_space(struct inode * inode)
1597 /* The filesystem must explicitly define its own method in order to use
1598 * the quota reservation interface */
1599 BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1600 return inode->i_sb->dq_op->get_reserved_space(inode);
1603 static qsize_t __inode_get_rsv_space(struct inode *inode)
1605 if (!inode->i_sb->dq_op->get_reserved_space)
1607 return *inode_reserved_space(inode);
1610 static qsize_t inode_get_rsv_space(struct inode *inode)
1614 if (!inode->i_sb->dq_op->get_reserved_space)
1616 spin_lock(&inode->i_lock);
1617 ret = __inode_get_rsv_space(inode);
1618 spin_unlock(&inode->i_lock);
1623 * This function updates the i_blocks+i_bytes fields and quota information
1624 * (together with appropriate checks).
1626 * NOTE: We absolutely rely on the fact that caller dirties the inode
1627 * (usually helpers in quotaops.h care about this) and holds a handle for
1628 * the current transaction so that dquot write and inode write go into the
1633 * This operation can block, but only after everything is updated
1635 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1637 int cnt, ret = 0, index;
1638 struct dquot_warn warn[MAXQUOTAS];
1639 int reserve = flags & DQUOT_SPACE_RESERVE;
1640 struct dquot **dquots;
1642 if (!dquot_active(inode)) {
1644 spin_lock(&inode->i_lock);
1645 *inode_reserved_space(inode) += number;
1646 spin_unlock(&inode->i_lock);
1648 inode_add_bytes(inode, number);
1653 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1654 warn[cnt].w_type = QUOTA_NL_NOWARN;
1656 dquots = i_dquot(inode);
1657 index = srcu_read_lock(&dquot_srcu);
1658 spin_lock(&inode->i_lock);
1659 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1662 if (flags & DQUOT_SPACE_RESERVE) {
1663 ret = dquot_add_space(dquots[cnt], 0, number, flags,
1666 ret = dquot_add_space(dquots[cnt], number, 0, flags,
1670 /* Back out changes we already did */
1671 for (cnt--; cnt >= 0; cnt--) {
1674 spin_lock(&dquots[cnt]->dq_dqb_lock);
1675 if (flags & DQUOT_SPACE_RESERVE) {
1676 dquots[cnt]->dq_dqb.dqb_rsvspace -=
1679 dquots[cnt]->dq_dqb.dqb_curspace -=
1682 spin_unlock(&dquots[cnt]->dq_dqb_lock);
1684 spin_unlock(&inode->i_lock);
1685 goto out_flush_warn;
1689 *inode_reserved_space(inode) += number;
1691 __inode_add_bytes(inode, number);
1692 spin_unlock(&inode->i_lock);
1695 goto out_flush_warn;
1696 mark_all_dquot_dirty(dquots);
1698 srcu_read_unlock(&dquot_srcu, index);
1699 flush_warnings(warn);
1703 EXPORT_SYMBOL(__dquot_alloc_space);
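/*
 * Usage sketch (assumed typical pattern, not taken from this file):
 * filesystems normally call the inline wrappers from <linux/quotaops.h>
 * instead of __dquot_alloc_space() directly, e.g. when allocating blocks:
 *
 *	ret = dquot_alloc_block(inode, nr);	// charge quota, may return -EDQUOT
 *	if (ret)
 *		return ret;
 *	...
 *	if (allocation_failed)
 *		dquot_free_block(inode, nr);	// undo the charge
 *
 * Those wrappers end up here with DQUOT_SPACE_WARN set and the byte count
 * scaled by the filesystem block size.
 */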
1706 * This operation can block, but only after everything is updated
1708 int dquot_alloc_inode(struct inode *inode)
1710 int cnt, ret = 0, index;
1711 struct dquot_warn warn[MAXQUOTAS];
1712 struct dquot * const *dquots;
1714 if (!dquot_active(inode))
1716 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1717 warn[cnt].w_type = QUOTA_NL_NOWARN;
1719 dquots = i_dquot(inode);
1720 index = srcu_read_lock(&dquot_srcu);
1721 spin_lock(&inode->i_lock);
1722 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1725 ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
1727 for (cnt--; cnt >= 0; cnt--) {
1730 /* Back out changes we already did */
1731 spin_lock(&dquots[cnt]->dq_dqb_lock);
1732 dquots[cnt]->dq_dqb.dqb_curinodes--;
1733 spin_unlock(&dquots[cnt]->dq_dqb_lock);
1740 spin_unlock(&inode->i_lock);
1742 mark_all_dquot_dirty(dquots);
1743 srcu_read_unlock(&dquot_srcu, index);
1744 flush_warnings(warn);
1747 EXPORT_SYMBOL(dquot_alloc_inode);
1750 * Convert in-memory reserved quotas to real consumed quotas
1752 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1754 struct dquot **dquots;
1757 if (!dquot_active(inode)) {
1758 spin_lock(&inode->i_lock);
1759 *inode_reserved_space(inode) -= number;
1760 __inode_add_bytes(inode, number);
1761 spin_unlock(&inode->i_lock);
1765 dquots = i_dquot(inode);
1766 index = srcu_read_lock(&dquot_srcu);
1767 spin_lock(&inode->i_lock);
1768 /* Claim reserved quotas to allocated quotas */
1769 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1771 struct dquot *dquot = dquots[cnt];
1773 spin_lock(&dquot->dq_dqb_lock);
1774 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
1775 number = dquot->dq_dqb.dqb_rsvspace;
1776 dquot->dq_dqb.dqb_curspace += number;
1777 dquot->dq_dqb.dqb_rsvspace -= number;
1778 spin_unlock(&dquot->dq_dqb_lock);
1781 /* Update inode bytes */
1782 *inode_reserved_space(inode) -= number;
1783 __inode_add_bytes(inode, number);
1784 spin_unlock(&inode->i_lock);
1785 mark_all_dquot_dirty(dquots);
1786 srcu_read_unlock(&dquot_srcu, index);
1789 EXPORT_SYMBOL(dquot_claim_space_nodirty);
1792 * Convert allocated space back to in-memory reserved quotas
1794 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1796 struct dquot **dquots;
1799 if (!dquot_active(inode)) {
1800 spin_lock(&inode->i_lock);
1801 *inode_reserved_space(inode) += number;
1802 __inode_sub_bytes(inode, number);
1803 spin_unlock(&inode->i_lock);
1807 dquots = i_dquot(inode);
1808 index = srcu_read_lock(&dquot_srcu);
1809 spin_lock(&inode->i_lock);
1810 /* Return allocated space back to reserved quotas */
1811 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1813 struct dquot *dquot = dquots[cnt];
1815 spin_lock(&dquot->dq_dqb_lock);
1816 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
1817 number = dquot->dq_dqb.dqb_curspace;
1818 dquot->dq_dqb.dqb_rsvspace += number;
1819 dquot->dq_dqb.dqb_curspace -= number;
1820 spin_unlock(&dquot->dq_dqb_lock);
1823 /* Update inode bytes */
1824 *inode_reserved_space(inode) += number;
1825 __inode_sub_bytes(inode, number);
1826 spin_unlock(&inode->i_lock);
1827 mark_all_dquot_dirty(dquots);
1828 srcu_read_unlock(&dquot_srcu, index);
1831 EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
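/*
 * Illustrative flow (a sketch assuming a filesystem with delayed
 * allocation): space is only reserved at write time and converted to a
 * real allocation when blocks are mapped, or given back if the mapping is
 * undone:
 *
 *	dquot_reserve_block(inode, nr);		// buffered write path
 *	...
 *	dquot_claim_block(inode, nr);		// blocks really allocated
 *	// or, when the allocation is backed out again:
 *	dquot_reclaim_block(inode, nr);
 *
 * These quotaops.h wrappers call the *_nodirty helpers above and mark the
 * inode dirty where needed.
 */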
1834 * This operation can block, but only after everything is updated
1836 void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1839 struct dquot_warn warn[MAXQUOTAS];
1840 struct dquot **dquots;
1841 int reserve = flags & DQUOT_SPACE_RESERVE, index;
1843 if (!dquot_active(inode)) {
1845 spin_lock(&inode->i_lock);
1846 *inode_reserved_space(inode) -= number;
1847 spin_unlock(&inode->i_lock);
1849 inode_sub_bytes(inode, number);
1854 dquots = i_dquot(inode);
1855 index = srcu_read_lock(&dquot_srcu);
1856 spin_lock(&inode->i_lock);
1857 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1860 warn[cnt].w_type = QUOTA_NL_NOWARN;
1863 spin_lock(&dquots[cnt]->dq_dqb_lock);
1864 wtype = info_bdq_free(dquots[cnt], number);
1865 if (wtype != QUOTA_NL_NOWARN)
1866 prepare_warning(&warn[cnt], dquots[cnt], wtype);
1868 dquot_free_reserved_space(dquots[cnt], number);
1870 dquot_decr_space(dquots[cnt], number);
1871 spin_unlock(&dquots[cnt]->dq_dqb_lock);
1874 *inode_reserved_space(inode) -= number;
1876 __inode_sub_bytes(inode, number);
1877 spin_unlock(&inode->i_lock);
1881 mark_all_dquot_dirty(dquots);
1883 srcu_read_unlock(&dquot_srcu, index);
1884 flush_warnings(warn);
1886 EXPORT_SYMBOL(__dquot_free_space);
1889 * This operation can block, but only after everything is updated
1891 void dquot_free_inode(struct inode *inode)
1894 struct dquot_warn warn[MAXQUOTAS];
1895 struct dquot * const *dquots;
1898 if (!dquot_active(inode))
1901 dquots = i_dquot(inode);
1902 index = srcu_read_lock(&dquot_srcu);
1903 spin_lock(&inode->i_lock);
1904 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1907 warn[cnt].w_type = QUOTA_NL_NOWARN;
1910 spin_lock(&dquots[cnt]->dq_dqb_lock);
1911 wtype = info_idq_free(dquots[cnt], 1);
1912 if (wtype != QUOTA_NL_NOWARN)
1913 prepare_warning(&warn[cnt], dquots[cnt], wtype);
1914 dquot_decr_inodes(dquots[cnt], 1);
1915 spin_unlock(&dquots[cnt]->dq_dqb_lock);
1917 spin_unlock(&inode->i_lock);
1918 mark_all_dquot_dirty(dquots);
1919 srcu_read_unlock(&dquot_srcu, index);
1920 flush_warnings(warn);
1922 EXPORT_SYMBOL(dquot_free_inode);
1925 * Transfer the number of inodes and blocks from one diskquota to another.
1926 * On success, dquot references in transfer_to are consumed and references
1927 * to original dquots that need to be released are placed there. On failure,
1928 * references are kept untouched.
1930 * This operation can block, but only after everything is updated
1931 * A transaction must be started when entering this function.
1933 * We are holding reference on transfer_from & transfer_to, no need to
1934 * protect them by srcu_read_lock().
1936 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1939 qsize_t rsv_space = 0;
1940 qsize_t inode_usage = 1;
1941 struct dquot *transfer_from[MAXQUOTAS] = {};
1943 char is_valid[MAXQUOTAS] = {};
1944 struct dquot_warn warn_to[MAXQUOTAS];
1945 struct dquot_warn warn_from_inodes[MAXQUOTAS];
1946 struct dquot_warn warn_from_space[MAXQUOTAS];
1948 if (IS_NOQUOTA(inode))
1951 if (inode->i_sb->dq_op->get_inode_usage) {
1952 ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
1957 /* Initialize the arrays */
1958 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1959 warn_to[cnt].w_type = QUOTA_NL_NOWARN;
1960 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
1961 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
1964 spin_lock(&dq_data_lock);
1965 spin_lock(&inode->i_lock);
1966 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
1967 spin_unlock(&inode->i_lock);
1968 spin_unlock(&dq_data_lock);
1971 cur_space = __inode_get_bytes(inode);
1972 rsv_space = __inode_get_rsv_space(inode);
1974 * Build the transfer_from list, check limits, and update usage in
1975 * the target structures.
1977 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1979 * Skip changes for same uid or gid or for turned off quota-type.
1981 if (!transfer_to[cnt])
1983 /* Avoid races with quotaoff() */
1984 if (!sb_has_quota_active(inode->i_sb, cnt))
1987 transfer_from[cnt] = i_dquot(inode)[cnt];
1988 ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
1992 ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 0,
1995 spin_lock(&transfer_to[cnt]->dq_dqb_lock);
1996 dquot_decr_inodes(transfer_to[cnt], inode_usage);
1997 spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2002 /* Decrease usage for source structures and update quota pointers */
2003 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2006 /* Due to IO error we might not have transfer_from[] structure */
2007 if (transfer_from[cnt]) {
2010 spin_lock(&transfer_from[cnt]->dq_dqb_lock);
2011 wtype = info_idq_free(transfer_from[cnt], inode_usage);
2012 if (wtype != QUOTA_NL_NOWARN)
2013 prepare_warning(&warn_from_inodes[cnt],
2014 transfer_from[cnt], wtype);
2015 wtype = info_bdq_free(transfer_from[cnt],
2016 cur_space + rsv_space);
2017 if (wtype != QUOTA_NL_NOWARN)
2018 prepare_warning(&warn_from_space[cnt],
2019 transfer_from[cnt], wtype);
2020 dquot_decr_inodes(transfer_from[cnt], inode_usage);
2021 dquot_decr_space(transfer_from[cnt], cur_space);
2022 dquot_free_reserved_space(transfer_from[cnt],
2024 spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
2026 i_dquot(inode)[cnt] = transfer_to[cnt];
2028 spin_unlock(&inode->i_lock);
2029 spin_unlock(&dq_data_lock);
2031 mark_all_dquot_dirty(transfer_from);
2032 mark_all_dquot_dirty(transfer_to);
2033 flush_warnings(warn_to);
2034 flush_warnings(warn_from_inodes);
2035 flush_warnings(warn_from_space);
2036 /* Pass back references to put */
2037 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2039 transfer_to[cnt] = transfer_from[cnt];
2042 /* Back out changes we already did */
2043 for (cnt--; cnt >= 0; cnt--) {
2046 spin_lock(&transfer_to[cnt]->dq_dqb_lock);
2047 dquot_decr_inodes(transfer_to[cnt], inode_usage);
2048 dquot_decr_space(transfer_to[cnt], cur_space);
2049 dquot_free_reserved_space(transfer_to[cnt], rsv_space);
2050 spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2052 spin_unlock(&inode->i_lock);
2053 spin_unlock(&dq_data_lock);
2054 flush_warnings(warn_to);
2057 EXPORT_SYMBOL(__dquot_transfer);
2059 /* Wrapper for transferring ownership of an inode for uid/gid only
2060 * Called from FSXXX_setattr()
2062 int dquot_transfer(struct inode *inode, struct iattr *iattr)
2064 struct dquot *transfer_to[MAXQUOTAS] = {};
2065 struct dquot *dquot;
2066 struct super_block *sb = inode->i_sb;
2069 if (!dquot_active(inode))
2072 if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){
2073 dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
2074 if (IS_ERR(dquot)) {
2075 if (PTR_ERR(dquot) != -ESRCH) {
2076 ret = PTR_ERR(dquot);
2081 transfer_to[USRQUOTA] = dquot;
2083 if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)){
2084 dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
2085 if (IS_ERR(dquot)) {
2086 if (PTR_ERR(dquot) != -ESRCH) {
2087 ret = PTR_ERR(dquot);
2092 transfer_to[GRPQUOTA] = dquot;
2094 ret = __dquot_transfer(inode, transfer_to);
2096 dqput_all(transfer_to);
2099 EXPORT_SYMBOL(dquot_transfer);
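/*
 * Hedged usage sketch (myfs_setattr is a hypothetical caller, not part of
 * this file): the usual ->setattr pattern for ownership changes is
 *
 *	if (is_quota_modification(inode, attr)) {
 *		error = dquot_initialize(inode);
 *		if (error)
 *			return error;
 *	}
 *	if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
 *	    (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
 *		error = dquot_transfer(inode, attr);
 *		if (error)
 *			return error;
 *	}
 */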
2102 * Write info of quota file to disk
2104 int dquot_commit_info(struct super_block *sb, int type)
2106 struct quota_info *dqopt = sb_dqopt(sb);
2108 return dqopt->ops[type]->write_file_info(sb, type);
2110 EXPORT_SYMBOL(dquot_commit_info);
2112 int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2114 struct quota_info *dqopt = sb_dqopt(sb);
2116 if (!sb_has_quota_active(sb, qid->type))
2118 if (!dqopt->ops[qid->type]->get_next_id)
2120 return dqopt->ops[qid->type]->get_next_id(sb, qid);
2122 EXPORT_SYMBOL(dquot_get_next_id);
2125 * Definitions of diskquota operations.
2127 const struct dquot_operations dquot_operations = {
2128 .write_dquot = dquot_commit,
2129 .acquire_dquot = dquot_acquire,
2130 .release_dquot = dquot_release,
2131 .mark_dirty = dquot_mark_dquot_dirty,
2132 .write_info = dquot_commit_info,
2133 .alloc_dquot = dquot_alloc,
2134 .destroy_dquot = dquot_destroy,
2135 .get_next_id = dquot_get_next_id,
2137 EXPORT_SYMBOL(dquot_operations);
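/*
 * Hedged example (illustrative, not from this file): a filesystem that is
 * happy with the generic behaviour points its superblock at this table
 * during mount, e.g.
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 *	// sb->s_qcop must also point at a struct quotactl_ops table
 *
 * Filesystems with journalled quota instead supply their own table whose
 * write_dquot/acquire_dquot/release_dquot wrap these helpers in a
 * transaction.
 */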
2140 * Generic helper for ->open on filesystems supporting disk quotas.
2142 int dquot_file_open(struct inode *inode, struct file *file)
2146 error = generic_file_open(inode, file);
2147 if (!error && (file->f_mode & FMODE_WRITE))
2148 error = dquot_initialize(inode);
2151 EXPORT_SYMBOL(dquot_file_open);
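/*
 * Illustrative hookup (myfs_file_operations is a hypothetical name):
 * filesystems using the generic quota code typically set
 *
 *	const struct file_operations myfs_file_operations = {
 *		...
 *		.open	= dquot_file_open,
 *	};
 *
 * so that quota pointers are initialized before the first write through
 * the file descriptor.
 */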
2154 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
2156 int dquot_disable(struct super_block *sb, int type, unsigned int flags)
2159 struct quota_info *dqopt = sb_dqopt(sb);
2160 struct inode *toputinode[MAXQUOTAS];
2162 /* s_umount should be held in exclusive mode */
2163 if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
2164 up_read(&sb->s_umount);
2166 /* Cannot turn off usage accounting without turning off limits, or
2167 * suspend quotas and simultaneously turn quotas off. */
2168 if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
2169 || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
2170 DQUOT_USAGE_ENABLED)))
2174 * Skip everything if there's nothing to do. We have to do this because
2175 * sometimes we are called when fill_super() failed and calling
2176 * sync_fs() in such cases does no good.
2178 if (!sb_any_quota_loaded(sb))
2181 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2182 toputinode[cnt] = NULL;
2183 if (type != -1 && cnt != type)
2185 if (!sb_has_quota_loaded(sb, cnt))
2188 if (flags & DQUOT_SUSPENDED) {
2189 spin_lock(&dq_state_lock);
2191 dquot_state_flag(DQUOT_SUSPENDED, cnt);
2192 spin_unlock(&dq_state_lock);
2194 spin_lock(&dq_state_lock);
2195 dqopt->flags &= ~dquot_state_flag(flags, cnt);
2196 /* Turning off suspended quotas? */
2197 if (!sb_has_quota_loaded(sb, cnt) &&
2198 sb_has_quota_suspended(sb, cnt)) {
2199 dqopt->flags &= ~dquot_state_flag(
2200 DQUOT_SUSPENDED, cnt);
2201 spin_unlock(&dq_state_lock);
2202 iput(dqopt->files[cnt]);
2203 dqopt->files[cnt] = NULL;
2206 spin_unlock(&dq_state_lock);
2209 /* We still have to keep quota loaded? */
2210 if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
2213 /* Note: these are blocking operations */
2214 drop_dquot_ref(sb, cnt);
2215 invalidate_dquots(sb, cnt);
2217 * Now all dquots should be invalidated, all writes done so we
2218 * should be the only users of the info. No locks needed.
2220 if (info_dirty(&dqopt->info[cnt]))
2221 sb->dq_op->write_info(sb, cnt);
2222 if (dqopt->ops[cnt]->free_file_info)
2223 dqopt->ops[cnt]->free_file_info(sb, cnt);
2224 put_quota_format(dqopt->info[cnt].dqi_format);
2226 toputinode[cnt] = dqopt->files[cnt];
2227 if (!sb_has_quota_loaded(sb, cnt))
2228 dqopt->files[cnt] = NULL;
2229 dqopt->info[cnt].dqi_flags = 0;
2230 dqopt->info[cnt].dqi_igrace = 0;
2231 dqopt->info[cnt].dqi_bgrace = 0;
2232 dqopt->ops[cnt] = NULL;
2235 /* Skip syncing and setting flags if quota files are hidden */
2236 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2239 /* Sync the superblock so that buffers with quota data are written to
2240 * disk (and so userspace sees correct data afterwards). */
2241 if (sb->s_op->sync_fs)
2242 sb->s_op->sync_fs(sb, 1);
2243 sync_blockdev(sb->s_bdev);
2244 /* Now the quota files are just ordinary files and we can set the
2245 * inode flags back. Moreover we discard the pagecache so that
2246 * userspace sees the writes we did bypassing the pagecache. We
2247 * must also discard the blockdev buffers so that we see the
2248 * changes done by userspace on the next quotaon() */
2249 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2250 /* This can happen when suspending quotas on remount-ro... */
2251 if (toputinode[cnt] && !sb_has_quota_loaded(sb, cnt)) {
2252 inode_lock(toputinode[cnt]);
2253 toputinode[cnt]->i_flags &= ~S_NOQUOTA;
2254 truncate_inode_pages(&toputinode[cnt]->i_data, 0);
2255 inode_unlock(toputinode[cnt]);
2256 mark_inode_dirty_sync(toputinode[cnt]);
2259 invalidate_bdev(sb->s_bdev);
2261 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2262 if (toputinode[cnt]) {
2263 /* On remount RO, we keep the inode pointer so that we
2264 * can reenable quota on the subsequent remount RW. We
2265 * have to check 'flags' variable and not use sb_has_
2266 * function because another quotaon / quotaoff could
2267 * change global state before we got here. We refuse
2268 * to suspend quotas when there is pending delete on
2269 * the quota file... */
2270 if (!(flags & DQUOT_SUSPENDED))
2271 iput(toputinode[cnt]);
2272 else if (!toputinode[cnt]->i_nlink)
2277 EXPORT_SYMBOL(dquot_disable);
int dquot_quota_off(struct super_block *sb, int type)
{
	return dquot_disable(sb, type,
			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);

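/*
 * Illustrative sketch: on unmount, a filesystem that keeps visible quota
 * files typically turns quotas off for every type from its ->put_super
 * path, along the lines of (hypothetical examplefs helper):
 *
 *	static void examplefs_quota_off_umount(struct super_block *sb)
 *	{
 *		int type;
 *
 *		for (type = 0; type < MAXQUOTAS; type++)
 *			dquot_quota_off(sb, type);
 *	}
 */
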
/*
 *	Turn quotas on on a device
 */

/*
 * Helper function to turn quotas on when we already have the inode of
 * quota file and no quota information is loaded.
 */
static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	if (!fmt)
		return -ESRCH;
	if (!S_ISREG(inode->i_mode)) {
		error = -EACCES;
		goto out_fmt;
	}
	if (IS_RDONLY(inode)) {
		error = -EROFS;
		goto out_fmt;
	}
	if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}

	error = -EIO;
	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		goto out_file_flags;
	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_file_init;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_file_init;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	add_dquot_ref(sb, type);

	return 0;
out_file_init:
	dqopt->files[type] = NULL;
	iput(inode);
out_file_flags:
	inode_lock(inode);
	inode->i_flags &= ~S_NOQUOTA;
	inode_unlock(inode);
out_fmt:
	put_quota_format(fmt);
	return error;
}

/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode;
	int ret = 0, cnt;
	unsigned int flags;

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		inode = dqopt->files[cnt];
		dqopt->files[cnt] = NULL;
		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		flags = dquot_generic_flag(flags, cnt);
		ret = vfs_load_quota_inode(inode, cnt,
				dqopt->info[cnt].dqi_fmt_id, flags);
		iput(inode);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);

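/*
 * Illustrative sketch: a filesystem remounting read-write after quotas were
 * suspended on a read-only remount just reenables all suspended types from
 * its remount path:
 *
 *	dquot_resume(sb, -1);
 *
 * ext4 and other filesystems do the equivalent in their ->remount_fs
 * handling.
 */
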
int dquot_quota_on(struct super_block *sb, int type, int format_id,
		   const struct path *path)
{
	int error = security_quota_on(path->dentry);

	if (error)
		return error;
	/* Quota file not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		error = -EXDEV;
	else
		error = vfs_load_quota_inode(d_inode(path->dentry), type,
					     format_id, DQUOT_USAGE_ENABLED |
					     DQUOT_LIMITS_ENABLED);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on);

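/*
 * Illustrative sketch (hypothetical examplefs, mirroring what ext2-style
 * filesystems with visible quota files do): the generic helpers above are
 * usually exported through the filesystem's quotactl operations:
 *
 *	static const struct quotactl_ops examplefs_quotactl_ops = {
 *		.quota_on	= dquot_quota_on,
 *		.quota_off	= dquot_quota_off,
 *		.quota_sync	= dquot_quota_sync,
 *		.get_state	= dquot_get_state,
 *		.set_info	= dquot_set_dqinfo,
 *		.get_dqblk	= dquot_get_dqblk,
 *		.set_dqblk	= dquot_set_dqblk,
 *	};
 *
 * and then sb->s_qcop = &examplefs_quotactl_ops; at mount time.
 */
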
/*
 * More powerful function for turning on quotas allowing setting
 * of individual quota flags
 */
int dquot_enable(struct inode *inode, int type, int format_id,
		 unsigned int flags)
{
	struct super_block *sb = inode->i_sb;

	/* Just unsuspend quotas? */
	BUG_ON(flags & DQUOT_SUSPENDED);
	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	if (!flags)
		return 0;
	/* Just updating flags needed? */
	if (sb_has_quota_loaded(sb, type)) {
		if (flags & DQUOT_USAGE_ENABLED &&
		    sb_has_quota_usage_enabled(sb, type))
			return -EBUSY;
		if (flags & DQUOT_LIMITS_ENABLED &&
		    sb_has_quota_limits_enabled(sb, type))
			return -EBUSY;
		spin_lock(&dq_state_lock);
		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
		spin_unlock(&dq_state_lock);
		return 0;
	}

	return vfs_load_quota_inode(inode, type, format_id, flags);
}
EXPORT_SYMBOL(dquot_enable);

/*
 * This function is used when filesystem needs to initialize quotas
 * during mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
		int format_id, int type)
{
	struct dentry *dentry;
	int error;

	dentry = lookup_one_len_unlocked(qf_name, sb->s_root, strlen(qf_name));
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	if (d_really_is_negative(dentry)) {
		error = -ENOENT;
		goto out;
	}
	error = security_quota_on(dentry);
	if (!error)
		error = vfs_load_quota_inode(d_inode(dentry), type, format_id,
				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
out:
	dput(dentry);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);

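/*
 * Illustrative sketch (hypothetical examplefs helper and file name): a
 * filesystem that learns the quota file name from mount options can enable
 * quotas before the mount completes, roughly:
 *
 *	if (examplefs_wants_usrquota(sb))
 *		err = dquot_quota_on_mount(sb, "aquota.user",
 *					   QFMT_VFS_V1, USRQUOTA);
 *
 * ext4's journalled-quota mount options use this helper in a similar way
 * during orphan processing at mount time.
 */
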
static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type))
			return -EINVAL;
		ret = dquot_enable(dqopt->files[type], type,
				   dqopt->info[type].dqi_fmt_id,
				   DQUOT_LIMITS_ENABLED);
		if (ret < 0)
			goto out_err;
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	/* Error code translation for better compatibility with XFS */
	if (ret == -EBUSY)
		ret = -EEXIST;
	return ret;
}

static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle
	 * quota infrastructure can do this but filesystems don't expect
	 * userspace to be able to do it.
	 */
	if (flags &
		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Backout enforcement disabling we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_enable(dqopt->files[type], type,
				     dqopt->info[type].dqi_fmt_id,
				     DQUOT_LIMITS_ENABLED);
	}
	return ret;
}

/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	memset(di, 0, sizeof(*di));
	spin_lock(&dquot->dq_dqb_lock);
	di->d_spc_hardlimit = dm->dqb_bhardlimit;
	di->d_spc_softlimit = dm->dqb_bsoftlimit;
	di->d_ino_hardlimit = dm->dqb_ihardlimit;
	di->d_ino_softlimit = dm->dqb_isoftlimit;
	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
	di->d_ino_count = dm->dqb_curinodes;
	di->d_spc_timer = dm->dqb_btime;
	di->d_ino_timer = dm->dqb_itime;
	spin_unlock(&dquot->dq_dqb_lock);
}

int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);

int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
			 struct qc_dqblk *di)
{
	struct dquot *dquot;
	int err;

	if (!sb->dq_op->get_next_id)
		return -ENOSYS;
	err = sb->dq_op->get_next_id(sb, qid);
	if (err < 0)
		return err;
	dquot = dqget(sb, *qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_next_dqblk);

#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)

/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dquot->dq_dqb_lock);
	if (di->d_fieldmask & QC_SPACE) {
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace + dm->dqb_rsvspace < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	mark_dquot_dirty(dquot);

	return 0;
}

int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;
	int rc;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot)) {
		rc = PTR_ERR(dquot);
		goto out;
	}
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
out:
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);

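/*
 * Illustrative sketch (hypothetical values): a caller setting only the block
 * limits for a given id fills just those fields and marks them in
 * d_fieldmask; fields not named in the mask are left untouched by
 * do_set_dqblk() above:
 *
 *	struct qc_dqblk di = {
 *		.d_fieldmask	 = QC_SPC_SOFT | QC_SPC_HARD,
 *		.d_spc_softlimit = 1024 * 1024 * 1024,
 *		.d_spc_hardlimit = 2ULL * 1024 * 1024 * 1024,
 *	};
 *
 *	err = dquot_set_dqblk(sb, make_kqid_uid(KUIDT_INIT(1000)), &di);
 */
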
/* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
	struct mem_dqinfo *mi;
	struct qc_type_state *tstate;
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	memset(state, 0, sizeof(*state));
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!sb_has_quota_active(sb, type))
			continue;
		tstate = state->s_state + type;
		mi = sb_dqopt(sb)->info + type;
		tstate->flags = QCI_ACCT_ENABLED;
		spin_lock(&dq_data_lock);
		if (mi->dqi_flags & DQF_SYS_FILE)
			tstate->flags |= QCI_SYSFILE;
		if (mi->dqi_flags & DQF_ROOT_SQUASH)
			tstate->flags |= QCI_ROOT_SQUASH;
		if (sb_has_quota_limits_enabled(sb, type))
			tstate->flags |= QCI_LIMITS_ENFORCED;
		tstate->spc_timelimit = mi->dqi_bgrace;
		tstate->ino_timelimit = mi->dqi_igrace;
		tstate->ino = dqopt->files[type]->i_ino;
		tstate->blocks = dqopt->files[type]->i_blocks;
		tstate->nextents = 1;	/* We don't know... */
		spin_unlock(&dq_data_lock);
	}
	return 0;
}
EXPORT_SYMBOL(dquot_get_state);

/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
	struct mem_dqinfo *mi;
	int err = 0;

	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
		return -EINVAL;
	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	mi = sb_dqopt(sb)->info + type;
	if (ii->i_fieldmask & QC_FLAGS) {
		if ((ii->i_flags & QCI_ROOT_SQUASH &&
		     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
			return -EINVAL;
	}
	spin_lock(&dq_data_lock);
	if (ii->i_fieldmask & QC_SPC_TIMER)
		mi->dqi_bgrace = ii->i_spc_timelimit;
	if (ii->i_fieldmask & QC_INO_TIMER)
		mi->dqi_igrace = ii->i_ino_timelimit;
	if (ii->i_fieldmask & QC_FLAGS) {
		if (ii->i_flags & QCI_ROOT_SQUASH)
			mi->dqi_flags |= DQF_ROOT_SQUASH;
		else
			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
	}
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	sb->dq_op->write_info(sb, type);
	return err;
}
EXPORT_SYMBOL(dquot_set_dqinfo);

const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable	= dquot_quota_enable,
	.quota_disable	= dquot_quota_disable,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);

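/*
 * Illustrative sketch: filesystems whose quota files are hidden system
 * inodes (DQUOT_QUOTA_SYS_FILE) point s_qcop at the table above and only
 * toggle enforcement via quotactl, e.g. (hypothetical examplefs):
 *
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;
 *
 * ext4 with the quota feature enabled uses this table in the same way.
 */
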
static int do_proc_dqstats(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int type = (int *)table->data - dqstats.stat;

	/* Update global table */
	dqstats.stat[type] =
			percpu_counter_sum_positive(&dqstats.counter[type]);
	return proc_dointvec(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },
};

static struct ctl_table fs_table[] = {
	{
		.procname	= "quota",
		.mode		= 0555,
		.child		= fs_dqstats_table,
	},
	{ },
};

static struct ctl_table sys_table[] = {
	{
		.procname	= "fs",
		.mode		= 0555,
		.child		= fs_table,
	},
	{ },
};

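/*
 * Note (illustrative): chaining sys_table -> fs_table -> fs_dqstats_table
 * above makes the statistics show up under /proc/sys/fs/quota/, e.g.
 * /proc/sys/fs/quota/allocated_dquots and /proc/sys/fs/quota/free_dquots.
 */
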
static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_table(sys_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			NULL);

	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = 0;
	do {
		dq_hash_bits++;
	} while (nr_hash >> dq_hash_bits);
	dq_hash_bits--;

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	register_shrinker(&dqcache_shrinker);

	return 0;
}
fs_initcall(dquot_init);