quota: Inline dquot_[re]claim_reserved_space() into callsite
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 53a17496c5c536a9410cba73c16d70c5efdbba88..411142a2f074786c02b81308efca5427469edc0e 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
  * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
  * then drops all pointers to dquots from an inode.
  *
- * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
- * from inodes (dquot_alloc_space() and such don't check the dq_lock).
- * Currently dquot is locked only when it is being read to memory (or space for
- * it is being allocated) on the first dqget() and when it is being released on
- * the last dqput(). The allocation and release oparations are serialized by
- * the dq_lock and by checking the use count in dquot_release().  Write
- * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
- * spinlock to internal buffers before writing.
+ * Each dquot has its dq_lock mutex.  Dquot is locked when it is being read to
+ * memory (or space for it is being allocated) on the first dqget(), when it is
+ * being written out, and when it is being released on the last dqput(). The
+ * allocation and release operations are serialized by the dq_lock and by
+ * checking the use count in dquot_release().
  *
  * Lock ordering (including related VFS locks) is the following:
- *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex
+ *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
  */
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
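Concretely, under the new scheme a writer takes the per-dquot mutex first and leaves quota-file I/O locking to the quota format's methods, which take dqio_sem internally. A minimal sketch of that nesting, mirroring dquot_commit() below (the helper name is hypothetical, not part of this patch):

/* Sketch only: the real dquot_commit() additionally clears the dirty bit
 * and checks DQ_ACTIVE_B before writing. */
static int example_write_one_dquot(struct dquot *dquot)
{
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
	int ret;

	mutex_lock(&dquot->dq_lock);	/* dq_lock is taken first ... */
	/* ... the format's commit_dqblk() takes dqio_sem internally */
	ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}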
@@ -129,6 +126,8 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
 EXPORT_SYMBOL(dq_data_lock);
 DEFINE_STATIC_SRCU(dquot_srcu);
 
+static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
+
 void __quota_error(struct super_block *sb, const char *func,
                   const char *fmt, ...)
 {
@@ -342,6 +341,12 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
 {
        int ret = 1;
 
+       if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+               return 0;
+
+       if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
+               return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
+
        /* If quota is dirty already, we don't have to acquire dq_list_lock */
        if (test_bit(DQ_MOD_B, &dquot->dq_flags))
                return 1;
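With DQUOT_NOLIST_DIRTY set, the generic dirty list stays empty and DQ_MOD_B alone tracks dirtiness, so a filesystem that journals its quota updates writes modified dquots itself via dq_op->write_dquot(). A minimal filesystem-side sketch (the helper name is hypothetical, not from this patch):

/* Sketch: write_dquot() ends up in dquot_commit(), which tests and
 * clears DQ_MOD_B itself. */
static int example_fs_write_dquot_if_dirty(struct dquot *dquot)
{
	if (!test_bit(DQ_MOD_B, &dquot->dq_flags))
		return 0;
	return dquot->dq_sb->dq_op->write_dquot(dquot);
}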
@@ -381,18 +386,26 @@ static inline void dqput_all(struct dquot **dquot)
                dqput(dquot[cnt]);
 }
 
-/* This function needs dq_list_lock */
 static inline int clear_dquot_dirty(struct dquot *dquot)
 {
-       if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
+       if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
+               return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);
+
+       spin_lock(&dq_list_lock);
+       if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
+               spin_unlock(&dq_list_lock);
                return 0;
+       }
        list_del_init(&dquot->dq_dirty);
+       spin_unlock(&dq_list_lock);
        return 1;
 }
 
 void mark_info_dirty(struct super_block *sb, int type)
 {
-       set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
+       spin_lock(&dq_data_lock);
+       sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
+       spin_unlock(&dq_data_lock);
 }
 EXPORT_SYMBOL(mark_info_dirty);
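Because dqi_flags is now updated with a plain read-modify-write, every writer of that field has to hold dq_data_lock. The matching clear on write-out would follow the same convention; a sketch of the pattern, assuming the quota format clears the flag from its write_file_info method:

/* Sketch only: hypothetical helper showing the dq_data_lock convention. */
static void example_clear_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}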
 
@@ -406,7 +419,6 @@ int dquot_acquire(struct dquot *dquot)
        struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
        mutex_lock(&dquot->dq_lock);
-       mutex_lock(&dqopt->dqio_mutex);
        if (!test_bit(DQ_READ_B, &dquot->dq_flags))
                ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
        if (ret < 0)
@@ -436,7 +448,6 @@ int dquot_acquire(struct dquot *dquot)
        smp_mb__before_atomic();
        set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_iolock:
-       mutex_unlock(&dqopt->dqio_mutex);
        mutex_unlock(&dquot->dq_lock);
        return ret;
 }
@@ -450,21 +461,17 @@ int dquot_commit(struct dquot *dquot)
        int ret = 0;
        struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
-       mutex_lock(&dqopt->dqio_mutex);
-       spin_lock(&dq_list_lock);
-       if (!clear_dquot_dirty(dquot)) {
-               spin_unlock(&dq_list_lock);
-               goto out_sem;
-       }
-       spin_unlock(&dq_list_lock);
+       mutex_lock(&dquot->dq_lock);
+       if (!clear_dquot_dirty(dquot))
+               goto out_lock;
        /* Inactive dquot can be only if there was error during read/init
         * => we have better not writing it */
        if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
                ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
        else
                ret = -EIO;
-out_sem:
-       mutex_unlock(&dqopt->dqio_mutex);
+out_lock:
+       mutex_unlock(&dquot->dq_lock);
        return ret;
 }
 EXPORT_SYMBOL(dquot_commit);
@@ -481,7 +488,6 @@ int dquot_release(struct dquot *dquot)
        /* Check whether we are not racing with some other dqget() */
        if (atomic_read(&dquot->dq_count) > 1)
                goto out_dqlock;
-       mutex_lock(&dqopt->dqio_mutex);
        if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
                ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
                /* Write the info */
@@ -493,7 +499,6 @@ int dquot_release(struct dquot *dquot)
                        ret = ret2;
        }
        clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
-       mutex_unlock(&dqopt->dqio_mutex);
 out_dqlock:
        mutex_unlock(&dquot->dq_lock);
        return ret;
@@ -530,22 +535,18 @@ restart:
                        continue;
                /* Wait for dquot users */
                if (atomic_read(&dquot->dq_count)) {
-                       DEFINE_WAIT(wait);
-
                        dqgrab(dquot);
-                       prepare_to_wait(&dquot->dq_wait_unused, &wait,
-                                       TASK_UNINTERRUPTIBLE);
                        spin_unlock(&dq_list_lock);
-                       /* Once dqput() wakes us up, we know it's time to free
+                       /*
+                        * Once dqput() wakes us up, we know it's time to free
                         * the dquot.
                         * IMPORTANT: we rely on the fact that there is always
                         * at most one process waiting for dquot to free.
                         * Otherwise dq_count would be > 1 and we would never
                         * wake up.
                         */
-                       if (atomic_read(&dquot->dq_count) > 1)
-                               schedule();
-                       finish_wait(&dquot->dq_wait_unused, &wait);
+                       wait_event(dquot_ref_wq,
+                                  atomic_read(&dquot->dq_count) == 1);
                        dqput(dquot);
                        /* At this moment dquot() need not exist (it could be
                         * reclaimed by prune_dqcache(). Hence we must
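Condensed, the handshake with the new global wait queue is: the quotaoff path grabs its own reference and sleeps until every other reference is gone, while a holder's dqput() drops its reference and issues the wake-up once only the quotaoff reference remains. Both halves as they appear in this diff:

/* quotaoff side (above): */
wait_event(dquot_ref_wq, atomic_read(&dquot->dq_count) == 1);

/* dqput() side (further down), after dropping one reference: */
if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
    atomic_read(&dquot->dq_count) == 1)
	wake_up(&dquot_ref_wq);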
@@ -629,11 +630,9 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
                while (!list_empty(dirty)) {
                        dquot = list_first_entry(dirty, struct dquot,
                                                 dq_dirty);
-                       /* Dirty and inactive can be only bad dquot... */
-                       if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
-                               clear_dquot_dirty(dquot);
-                               continue;
-                       }
+
+                       WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
+
                        /* Now we have active dquot from which someone is
                         * holding reference so we can safely just increase
                         * use count */
@@ -759,12 +758,12 @@ we_slept:
                /* Releasing dquot during quotaoff phase? */
                if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
                    atomic_read(&dquot->dq_count) == 1)
-                       wake_up(&dquot->dq_wait_unused);
+                       wake_up(&dquot_ref_wq);
                spin_unlock(&dq_list_lock);
                return;
        }
        /* Need to release dquot? */
-       if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
+       if (dquot_dirty(dquot)) {
                spin_unlock(&dq_list_lock);
                /* Commit dquot before releasing */
                ret = dquot->dq_sb->dq_op->write_dquot(dquot);
@@ -776,14 +775,10 @@ we_slept:
                         * We clear dirty bit anyway, so that we avoid
                         * infinite loop here
                         */
-                       spin_lock(&dq_list_lock);
                        clear_dquot_dirty(dquot);
-                       spin_unlock(&dq_list_lock);
                }
                goto we_slept;
        }
-       /* Clear flag in case dquot was inactive (something bad happened) */
-       clear_dquot_dirty(dquot);
        if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
                spin_unlock(&dq_list_lock);
                dquot->dq_sb->dq_op->release_dquot(dquot);
@@ -818,7 +813,6 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
        INIT_LIST_HEAD(&dquot->dq_inuse);
        INIT_HLIST_NODE(&dquot->dq_hash);
        INIT_LIST_HEAD(&dquot->dq_dirty);
-       init_waitqueue_head(&dquot->dq_wait_unused);
        dquot->dq_sb = sb;
        dquot->dq_id = make_kqid_invalid(type);
        atomic_set(&dquot->dq_count, 1);
@@ -1094,27 +1088,6 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
        dquot->dq_dqb.dqb_rsvspace += number;
 }
 
-/*
- * Claim reserved quota space
- */
-static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
-{
-       if (dquot->dq_dqb.dqb_rsvspace < number) {
-               WARN_ON_ONCE(1);
-               number = dquot->dq_dqb.dqb_rsvspace;
-       }
-       dquot->dq_dqb.dqb_curspace += number;
-       dquot->dq_dqb.dqb_rsvspace -= number;
-}
-
-static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number)
-{
-       if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
-               number = dquot->dq_dqb.dqb_curspace;
-       dquot->dq_dqb.dqb_rsvspace += number;
-       dquot->dq_dqb.dqb_curspace -= number;
-}
-
 static inline
 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
 {
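The removed helpers were the per-dquot half of the reserve/claim/reclaim cycle exposed through the wrappers in include/linux/quotaops.h. A minimal sketch of how a delayed-allocation filesystem drives that cycle (the function itself is hypothetical):

#include <linux/quotaops.h>

/* Sketch: reserve at write time, claim when the block is really
 * allocated, back out the reservation on failure. */
static int example_delalloc_one_block(struct inode *inode)
{
	int err;

	err = dquot_reserve_block(inode, 1);	/* grows dqb_rsvspace */
	if (err)
		return err;
	err = dquot_claim_block(inode, 1);	/* rsvspace -> curspace */
	if (err)
		dquot_release_reservation_block(inode, 1);
	return err;
}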
@@ -1589,40 +1562,6 @@ static qsize_t *inode_reserved_space(struct inode * inode)
        return inode->i_sb->dq_op->get_reserved_space(inode);
 }
 
-void inode_add_rsv_space(struct inode *inode, qsize_t number)
-{
-       spin_lock(&inode->i_lock);
-       *inode_reserved_space(inode) += number;
-       spin_unlock(&inode->i_lock);
-}
-EXPORT_SYMBOL(inode_add_rsv_space);
-
-void inode_claim_rsv_space(struct inode *inode, qsize_t number)
-{
-       spin_lock(&inode->i_lock);
-       *inode_reserved_space(inode) -= number;
-       __inode_add_bytes(inode, number);
-       spin_unlock(&inode->i_lock);
-}
-EXPORT_SYMBOL(inode_claim_rsv_space);
-
-void inode_reclaim_rsv_space(struct inode *inode, qsize_t number)
-{
-       spin_lock(&inode->i_lock);
-       *inode_reserved_space(inode) += number;
-       __inode_sub_bytes(inode, number);
-       spin_unlock(&inode->i_lock);
-}
-EXPORT_SYMBOL(inode_reclaim_rsv_space);
-
-void inode_sub_rsv_space(struct inode *inode, qsize_t number)
-{
-       spin_lock(&inode->i_lock);
-       *inode_reserved_space(inode) -= number;
-       spin_unlock(&inode->i_lock);
-}
-EXPORT_SYMBOL(inode_sub_rsv_space);
-
 static qsize_t inode_get_rsv_space(struct inode *inode)
 {
        qsize_t ret;
@@ -1635,23 +1574,6 @@ static qsize_t inode_get_rsv_space(struct inode *inode)
        return ret;
 }
 
-static void inode_incr_space(struct inode *inode, qsize_t number,
-                               int reserve)
-{
-       if (reserve)
-               inode_add_rsv_space(inode, number);
-       else
-               inode_add_bytes(inode, number);
-}
-
-static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
-{
-       if (reserve)
-               inode_sub_rsv_space(inode, number);
-       else
-               inode_sub_bytes(inode, number);
-}
-
 /*
  * This functions updates i_blocks+i_bytes fields and quota information
  * (together with appropriate checks).
@@ -1673,7 +1595,13 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
        struct dquot **dquots;
 
        if (!dquot_active(inode)) {
-               inode_incr_space(inode, number, reserve);
+               if (reserve) {
+                       spin_lock(&inode->i_lock);
+                       *inode_reserved_space(inode) += number;
+                       spin_unlock(&inode->i_lock);
+               } else {
+                       inode_add_bytes(inode, number);
+               }
                goto out;
        }
 
@@ -1701,7 +1629,13 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
                else
                        dquot_incr_space(dquots[cnt], number);
        }
-       inode_incr_space(inode, number, reserve);
+       if (reserve) {
+               spin_lock(&inode->i_lock);
+               *inode_reserved_space(inode) += number;
+               spin_unlock(&inode->i_lock);
+       } else {
+               inode_add_bytes(inode, number);
+       }
        spin_unlock(&dq_data_lock);
 
        if (reserve)
@@ -1765,7 +1699,10 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
        int cnt, index;
 
        if (!dquot_active(inode)) {
-               inode_claim_rsv_space(inode, number);
+               spin_lock(&inode->i_lock);
+               *inode_reserved_space(inode) -= number;
+               __inode_add_bytes(inode, number);
+               spin_unlock(&inode->i_lock);
                return 0;
        }
 
@@ -1774,11 +1711,20 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
        spin_lock(&dq_data_lock);
        /* Claim reserved quotas to allocated quotas */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (dquots[cnt])
-                       dquot_claim_reserved_space(dquots[cnt], number);
+               if (dquots[cnt]) {
+                       struct dquot *dquot = dquots[cnt];
+
+                       if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
+                               number = dquot->dq_dqb.dqb_rsvspace;
+                       dquot->dq_dqb.dqb_curspace += number;
+                       dquot->dq_dqb.dqb_rsvspace -= number;
+               }
        }
        /* Update inode bytes */
-       inode_claim_rsv_space(inode, number);
+       spin_lock(&inode->i_lock);
+       *inode_reserved_space(inode) -= number;
+       __inode_add_bytes(inode, number);
+       spin_unlock(&inode->i_lock);
        spin_unlock(&dq_data_lock);
        mark_all_dquot_dirty(dquots);
        srcu_read_unlock(&dquot_srcu, index);
@@ -1795,7 +1741,10 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
        int cnt, index;
 
        if (!dquot_active(inode)) {
-               inode_reclaim_rsv_space(inode, number);
+               spin_lock(&inode->i_lock);
+               *inode_reserved_space(inode) += number;
+               __inode_sub_bytes(inode, number);
+               spin_unlock(&inode->i_lock);
                return;
        }
 
@@ -1804,11 +1753,20 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
        spin_lock(&dq_data_lock);
        /* Return claimed (allocated) quotas back to reserved quotas */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (dquots[cnt])
-                       dquot_reclaim_reserved_space(dquots[cnt], number);
+               if (dquots[cnt]) {
+                       struct dquot *dquot = dquots[cnt];
+
+                       if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
+                               number = dquot->dq_dqb.dqb_curspace;
+                       dquot->dq_dqb.dqb_rsvspace += number;
+                       dquot->dq_dqb.dqb_curspace -= number;
+               }
        }
        /* Update inode bytes */
-       inode_reclaim_rsv_space(inode, number);
+       spin_lock(&inode->i_lock);
+       *inode_reserved_space(inode) += number;
+       __inode_sub_bytes(inode, number);
+       spin_unlock(&inode->i_lock);
        spin_unlock(&dq_data_lock);
        mark_all_dquot_dirty(dquots);
        srcu_read_unlock(&dquot_srcu, index);
@@ -1827,7 +1785,13 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
        int reserve = flags & DQUOT_SPACE_RESERVE, index;
 
        if (!dquot_active(inode)) {
-               inode_decr_space(inode, number, reserve);
+               if (reserve) {
+                       spin_lock(&inode->i_lock);
+                       *inode_reserved_space(inode) -= number;
+                       spin_unlock(&inode->i_lock);
+               } else {
+                       inode_sub_bytes(inode, number);
+               }
                return;
        }
 
@@ -1848,7 +1812,13 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
                else
                        dquot_decr_space(dquots[cnt], number);
        }
-       inode_decr_space(inode, number, reserve);
+       if (reserve) {
+               spin_lock(&inode->i_lock);
+               *inode_reserved_space(inode) -= number;
+               spin_unlock(&inode->i_lock);
+       } else {
+               inode_sub_bytes(inode, number);
+       }
        spin_unlock(&dq_data_lock);
 
        if (reserve)
@@ -2057,29 +2027,21 @@ EXPORT_SYMBOL(dquot_transfer);
  */
 int dquot_commit_info(struct super_block *sb, int type)
 {
-       int ret;
        struct quota_info *dqopt = sb_dqopt(sb);
 
-       mutex_lock(&dqopt->dqio_mutex);
-       ret = dqopt->ops[type]->write_file_info(sb, type);
-       mutex_unlock(&dqopt->dqio_mutex);
-       return ret;
+       return dqopt->ops[type]->write_file_info(sb, type);
 }
 EXPORT_SYMBOL(dquot_commit_info);
 
 int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
 {
        struct quota_info *dqopt = sb_dqopt(sb);
-       int err;
 
        if (!sb_has_quota_active(sb, qid->type))
                return -ESRCH;
        if (!dqopt->ops[qid->type]->get_next_id)
                return -ENOSYS;
-       mutex_lock(&dqopt->dqio_mutex);
-       err = dqopt->ops[qid->type]->get_next_id(sb, qid);
-       mutex_unlock(&dqopt->dqio_mutex);
-       return err;
+       return dqopt->ops[qid->type]->get_next_id(sb, qid);
 }
 EXPORT_SYMBOL(dquot_get_next_id);
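With dqio_mutex gone from these wrappers, serializing quota-file I/O is left to the quota format driver itself. Assuming the format guards its methods with the dqio_sem mentioned in the lock-ordering comment above, a read-side method would look roughly like this (sketch, hypothetical name):

/* Sketch: the format method takes dqio_sem itself instead of relying
 * on the caller holding dqio_mutex. */
static int example_fmt_read_file_info(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int ret;

	down_read(&dqopt->dqio_sem);
	ret = 0;	/* ... read and parse the on-disk info block ... */
	up_read(&dqopt->dqio_sem);
	return ret;
}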
 
@@ -2328,15 +2290,14 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
        dqopt->info[type].dqi_format = fmt;
        dqopt->info[type].dqi_fmt_id = format_id;
        INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
-       mutex_lock(&dqopt->dqio_mutex);
        error = dqopt->ops[type]->read_file_info(sb, type);
-       if (error < 0) {
-               mutex_unlock(&dqopt->dqio_mutex);
+       if (error < 0)
                goto out_file_init;
-       }
-       if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
+       if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
+               spin_lock(&dq_data_lock);
                dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
-       mutex_unlock(&dqopt->dqio_mutex);
+               spin_unlock(&dq_data_lock);
+       }
        spin_lock(&dq_state_lock);
        dqopt->flags |= dquot_state_flag(flags, type);
        spin_unlock(&dq_state_lock);