quota: Allow disabling tracking of dirty dquots in a list
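
With DQUOT_NOLIST_DIRTY set in sb_dqopt(sb)->flags, dquot_mark_dquot_dirty()
and clear_dquot_dirty() in the diff below only flip the DQ_MOD_B bit and never
take dq_list_lock or touch the per-type dirty list. This is intended for
filesystems that journal quota updates and therefore write dquots out as part
of their own transaction commit, since dquot_writeback_dquots() can no longer
find such dquots through dqi_dirty_list. Below is a minimal sketch of how a
filesystem might opt in, assuming the flag is simply set in quota_info before
quotas are enabled; the example_fs_* helper and its call site are illustrative
only and not part of this diff:

#include <linux/quota.h>
#include <linux/quotaops.h>

/*
 * Hypothetical helper, not from this patch: a filesystem that journals its
 * quota updates can opt out of dirty-list tracking by setting
 * DQUOT_NOLIST_DIRTY before turning quotas on.  With the flag set,
 * dquot_mark_dquot_dirty() degenerates to test_and_set_bit(DQ_MOD_B) and
 * never takes dq_list_lock.
 */
static int example_fs_enable_journalled_quota(struct super_block *sb,
					      struct inode *qf_inode,
					      int type, int format_id)
{
	sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	return dquot_enable(qf_inode, type, format_id,
			    DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
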
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 562f5978488ff1d944affa6d381799eddf5ba578..b867578e62c08ae97ee766eabb675dea4bbf5000 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -126,6 +126,8 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
 EXPORT_SYMBOL(dq_data_lock);
 DEFINE_STATIC_SRCU(dquot_srcu);
 
+static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
+
 void __quota_error(struct super_block *sb, const char *func,
                   const char *fmt, ...)
 {
@@ -339,6 +341,12 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
 {
        int ret = 1;
 
+       if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+               return 0;
+
+       if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
+               return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
+
        /* If quota is dirty already, we don't have to acquire dq_list_lock */
        if (test_bit(DQ_MOD_B, &dquot->dq_flags))
                return 1;
@@ -378,18 +386,26 @@ static inline void dqput_all(struct dquot **dquot)
                dqput(dquot[cnt]);
 }
 
-/* This function needs dq_list_lock */
 static inline int clear_dquot_dirty(struct dquot *dquot)
 {
-       if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
+       if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
+               return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);
+
+       spin_lock(&dq_list_lock);
+       if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
+               spin_unlock(&dq_list_lock);
                return 0;
+       }
        list_del_init(&dquot->dq_dirty);
+       spin_unlock(&dq_list_lock);
        return 1;
 }
 
 void mark_info_dirty(struct super_block *sb, int type)
 {
-       set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
+       spin_lock(&dq_data_lock);
+       sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
+       spin_unlock(&dq_data_lock);
 }
 EXPORT_SYMBOL(mark_info_dirty);
 
@@ -415,10 +431,8 @@ int dquot_acquire(struct dquot *dquot)
                ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
                /* Write the info if needed */
                if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
-                       down_write(&dqopt->dqio_sem);
                        ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
                                        dquot->dq_sb, dquot->dq_id.type);
-                       up_write(&dqopt->dqio_sem);
                }
                if (ret < 0)
                        goto out_iolock;
@@ -448,12 +462,8 @@ int dquot_commit(struct dquot *dquot)
        struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
        mutex_lock(&dquot->dq_lock);
-       spin_lock(&dq_list_lock);
-       if (!clear_dquot_dirty(dquot)) {
-               spin_unlock(&dq_list_lock);
+       if (!clear_dquot_dirty(dquot))
                goto out_lock;
-       }
-       spin_unlock(&dq_list_lock);
        /* Inactive dquot can be only if there was error during read/init
         * => we have better not writing it */
        if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
@@ -478,7 +488,6 @@ int dquot_release(struct dquot *dquot)
        /* Check whether we are not racing with some other dqget() */
        if (atomic_read(&dquot->dq_count) > 1)
                goto out_dqlock;
-       down_write(&dqopt->dqio_sem);
        if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
                ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
                /* Write the info */
@@ -490,7 +499,6 @@ int dquot_release(struct dquot *dquot)
                        ret = ret2;
        }
        clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
-       up_write(&dqopt->dqio_sem);
 out_dqlock:
        mutex_unlock(&dquot->dq_lock);
        return ret;
@@ -527,22 +535,18 @@ restart:
                        continue;
                /* Wait for dquot users */
                if (atomic_read(&dquot->dq_count)) {
-                       DEFINE_WAIT(wait);
-
                        dqgrab(dquot);
-                       prepare_to_wait(&dquot->dq_wait_unused, &wait,
-                                       TASK_UNINTERRUPTIBLE);
                        spin_unlock(&dq_list_lock);
-                       /* Once dqput() wakes us up, we know it's time to free
+                       /*
+                        * Once dqput() wakes us up, we know it's time to free
                         * the dquot.
                         * IMPORTANT: we rely on the fact that there is always
                         * at most one process waiting for dquot to free.
                         * Otherwise dq_count would be > 1 and we would never
                         * wake up.
                         */
-                       if (atomic_read(&dquot->dq_count) > 1)
-                               schedule();
-                       finish_wait(&dquot->dq_wait_unused, &wait);
+                       wait_event(dquot_ref_wq,
+                                  atomic_read(&dquot->dq_count) == 1);
                        dqput(dquot);
                        /* At this moment dquot() need not exist (it could be
                         * reclaimed by prune_dqcache(). Hence we must
@@ -626,11 +630,9 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
                while (!list_empty(dirty)) {
                        dquot = list_first_entry(dirty, struct dquot,
                                                 dq_dirty);
-                       /* Dirty and inactive can be only bad dquot... */
-                       if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
-                               clear_dquot_dirty(dquot);
-                               continue;
-                       }
+
+                       WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
+
                        /* Now we have active dquot from which someone is
                         * holding reference so we can safely just increase
                         * use count */
@@ -756,12 +758,12 @@ we_slept:
                /* Releasing dquot during quotaoff phase? */
                if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
                    atomic_read(&dquot->dq_count) == 1)
-                       wake_up(&dquot->dq_wait_unused);
+                       wake_up(&dquot_ref_wq);
                spin_unlock(&dq_list_lock);
                return;
        }
        /* Need to release dquot? */
-       if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
+       if (dquot_dirty(dquot)) {
                spin_unlock(&dq_list_lock);
                /* Commit dquot before releasing */
                ret = dquot->dq_sb->dq_op->write_dquot(dquot);
@@ -773,14 +775,10 @@ we_slept:
                         * We clear dirty bit anyway, so that we avoid
                         * infinite loop here
                         */
-                       spin_lock(&dq_list_lock);
                        clear_dquot_dirty(dquot);
-                       spin_unlock(&dq_list_lock);
                }
                goto we_slept;
        }
-       /* Clear flag in case dquot was inactive (something bad happened) */
-       clear_dquot_dirty(dquot);
        if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
                spin_unlock(&dq_list_lock);
                dquot->dq_sb->dq_op->release_dquot(dquot);
@@ -815,7 +813,6 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
        INIT_LIST_HEAD(&dquot->dq_inuse);
        INIT_HLIST_NODE(&dquot->dq_hash);
        INIT_LIST_HEAD(&dquot->dq_dirty);
-       init_waitqueue_head(&dquot->dq_wait_unused);
        dquot->dq_sb = sb;
        dquot->dq_id = make_kqid_invalid(type);
        atomic_set(&dquot->dq_count, 1);
@@ -2054,29 +2051,21 @@ EXPORT_SYMBOL(dquot_transfer);
  */
 int dquot_commit_info(struct super_block *sb, int type)
 {
-       int ret;
        struct quota_info *dqopt = sb_dqopt(sb);
 
-       down_write(&dqopt->dqio_sem);
-       ret = dqopt->ops[type]->write_file_info(sb, type);
-       up_write(&dqopt->dqio_sem);
-       return ret;
+       return dqopt->ops[type]->write_file_info(sb, type);
 }
 EXPORT_SYMBOL(dquot_commit_info);
 
 int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
 {
        struct quota_info *dqopt = sb_dqopt(sb);
-       int err;
 
        if (!sb_has_quota_active(sb, qid->type))
                return -ESRCH;
        if (!dqopt->ops[qid->type]->get_next_id)
                return -ENOSYS;
-       down_read(&dqopt->dqio_sem);
-       err = dqopt->ops[qid->type]->get_next_id(sb, qid);
-       up_read(&dqopt->dqio_sem);
-       return err;
+       return dqopt->ops[qid->type]->get_next_id(sb, qid);
 }
 EXPORT_SYMBOL(dquot_get_next_id);
 
@@ -2325,15 +2314,14 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
        dqopt->info[type].dqi_format = fmt;
        dqopt->info[type].dqi_fmt_id = format_id;
        INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
-       down_read(&dqopt->dqio_sem);
        error = dqopt->ops[type]->read_file_info(sb, type);
-       if (error < 0) {
-               up_read(&dqopt->dqio_sem);
+       if (error < 0)
                goto out_file_init;
-       }
-       if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
+       if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
+               spin_lock(&dq_data_lock);
                dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
-       up_read(&dqopt->dqio_sem);
+               spin_unlock(&dq_data_lock);
+       }
        spin_lock(&dq_state_lock);
        dqopt->flags |= dquot_state_flag(flags, type);
        spin_unlock(&dq_state_lock);