Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index d0ff0b8cf30988a23ad144f92f8b7543790bbe67..e3fe9918faafb710446f5bd73445eeca5ce5b0d7 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -274,6 +274,7 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
        int ret;
 
        BUG_ON(inode->i_state & I_SYNC);
+       WARN_ON(inode->i_state & I_NEW);
 
        /* Set I_SYNC, reset I_DIRTY */
        dirty = inode->i_state & I_DIRTY;
@@ -298,6 +299,7 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
        }
 
        spin_lock(&inode_lock);
+       WARN_ON(inode->i_state & I_NEW);
        inode->i_state &= ~I_SYNC;
        if (!(inode->i_state & I_FREEING)) {
                if (!(inode->i_state & I_DIRTY) &&
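
The two WARN_ON() additions above assert that writeback never operates on an inode still marked I_NEW, i.e. one the filesystem is still setting up. For context, a minimal sketch of the I_NEW window these asserts guard against, using the standard iget_locked()/unlock_new_inode() pairing; my_fs_read_inode() is a hypothetical fs-specific fill-in step, not a real API:

	struct inode *inode = iget_locked(sb, ino);

	if (inode && (inode->i_state & I_NEW)) {
		/* Half-constructed: nobody may touch it until I_NEW clears. */
		my_fs_read_inode(inode);	/* hypothetical setup step */
		unlock_new_inode(inode);	/* clears I_NEW, wakes waiters */
	}
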
@@ -421,9 +423,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
  * If we're a pdflush thread, then implement pdflush collision avoidance
  * against the entire list.
  *
- * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
- * that it can be located for waiting on in __writeback_single_inode().
- *
  * If `bdi' is non-zero then we're being asked to writeback a specific queue.
  * This function assumes that the blockdev superblock's inodes are backed by
  * a variety of queues, so all inodes are searched.  For other superblocks,
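
The pdflush collision avoidance mentioned here is the writeback_acquire()/writeback_release() pair visible later in this diff; in fs-writeback.c of this vintage it is roughly a test-and-set on a per-backing-device bit (paraphrased for reference, not part of this patch):

	int writeback_acquire(struct backing_dev_info *bdi)
	{
		/* Only one pdflush thread may work a queue at a time. */
		return !test_and_set_bit(BDI_pdflush, &bdi->state);
	}

	void writeback_release(struct backing_dev_info *bdi)
	{
		clear_bit(BDI_pdflush, &bdi->state);
	}
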
@@ -443,6 +442,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
                                struct writeback_control *wbc)
 {
        const unsigned long start = jiffies;    /* livelock avoidance */
+       int sync = wbc->sync_mode == WB_SYNC_ALL;
 
        spin_lock(&inode_lock);
        if (!wbc->for_kupdate || list_empty(&sb->s_io))
@@ -472,6 +472,11 @@ void generic_sync_sb_inodes(struct super_block *sb,
                        break;
                }
 
+               if (inode->i_state & I_NEW) {
+                       requeue_io(inode);
+                       continue;
+               }
+
                if (wbc->nonblocking && bdi_write_congested(bdi)) {
                        wbc->encountered_congestion = 1;
                        if (!sb_is_blkdev_sb(sb))
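
requeue_io() itself is not shown in the hunk; in this era's fs-writeback.c it simply parks the inode on s_more_io so a later pass retries it once the I_NEW window has closed (paraphrased for reference):

	static void requeue_io(struct inode *inode)
	{
		list_move(&inode->i_list, &inode->i_sb->s_more_io);
	}
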
@@ -499,10 +504,6 @@ void generic_sync_sb_inodes(struct super_block *sb,
                __iget(inode);
                pages_skipped = wbc->pages_skipped;
                __writeback_single_inode(inode, wbc);
-               if (wbc->sync_mode == WB_SYNC_HOLD) {
-                       inode->dirtied_when = jiffies;
-                       list_move(&inode->i_list, &sb->s_dirty);
-               }
                if (current_is_pdflush())
                        writeback_release(bdi);
                if (wbc->pages_skipped != pages_skipped) {
@@ -523,7 +524,49 @@ void generic_sync_sb_inodes(struct super_block *sb,
                if (!list_empty(&sb->s_more_io))
                        wbc->more_io = 1;
        }
-       spin_unlock(&inode_lock);
+
+       if (sync) {
+               struct inode *inode, *old_inode = NULL;
+
+               /*
+                * Data integrity sync: we must wait for all pages under
+                * writeback, because some pages may have been dirtied before
+                * our sync call but had writeout started before we reached
+                * them.  Such an inode may no longer be on the dirty list,
+                * yet we still have to wait for that writeout to complete.
+                */
+               list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+                       struct address_space *mapping;
+
+                       if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
+                               continue;
+                       mapping = inode->i_mapping;
+                       if (mapping->nrpages == 0)
+                               continue;
+                       __iget(inode);
+                       spin_unlock(&inode_lock);
+                       /*
+                        * We hold a reference to 'inode' so it couldn't have
+                        * been removed from the s_inodes list while we dropped
+                        * inode_lock.  We cannot iput the inode now because we
+                        * may be holding the last reference, and we cannot
+                        * iput it under inode_lock.  So we keep the reference
+                        * and iput it later.
+                        */
+                       iput(old_inode);
+                       old_inode = inode;
+
+                       filemap_fdatawait(mapping);
+
+                       cond_resched();
+
+                       spin_lock(&inode_lock);
+               }
+               spin_unlock(&inode_lock);
+               iput(old_inode);
+       } else
+               spin_unlock(&inode_lock);
+
        return;         /* Leave any unwritten inodes on s_io */
 }
 EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
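
With the wait pass folded in, a single WB_SYNC_ALL call now both starts writeout and waits on every mapping, so a data-integrity caller no longer needs the WB_SYNC_HOLD parking trick. An illustrative invocation (not taken from this patch):

	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
		.nr_to_write	= LONG_MAX,	/* limit is moot under WB_SYNC_ALL */
	};

	generic_sync_sb_inodes(sb, &wbc);	/* writes out, then waits */
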
@@ -588,8 +631,7 @@ restart:
 
 /*
  * writeback and wait upon the filesystem's dirty inodes.  The caller will
- * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
- * used to park the written inodes on sb->s_dirty for the wait pass.
+ * do this in two passes - one to write, and one to wait.
  *
  * A finite limit is set on the number of pages which will be written,
  * to prevent livelock of sys_sync().
@@ -600,30 +642,21 @@ restart:
 void sync_inodes_sb(struct super_block *sb, int wait)
 {
        struct writeback_control wbc = {
-               .sync_mode      = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
+               .sync_mode      = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
        };
-       unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
-       unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
 
-       wbc.nr_to_write = nr_dirty + nr_unstable +
-                       (inodes_stat.nr_inodes - inodes_stat.nr_unused) +
-                       nr_dirty + nr_unstable;
-       wbc.nr_to_write += wbc.nr_to_write / 2;         /* Bit more for luck */
-       sync_sb_inodes(sb, &wbc);
-}
+       if (!wait) {
+               unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
+               unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
 
-/*
- * Rather lame livelock avoidance.
- */
-static void set_sb_syncing(int val)
-{
-       struct super_block *sb;
-       spin_lock(&sb_lock);
-       list_for_each_entry_reverse(sb, &super_blocks, s_list)
-               sb->s_syncing = val;
-       spin_unlock(&sb_lock);
+               wbc.nr_to_write = nr_dirty + nr_unstable +
+                       (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+       } else
+               wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */
+
+       sync_sb_inodes(sb, &wbc);
 }
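
For the non-wait pass, nr_to_write is budgeted from global dirty-page accounting plus one page per in-use inode; for example, with 10,000 dirty pages, 500 unstable NFS pages and 2,000 in-use inodes (invented numbers), the pass writes at most 12,500 pages before returning. Callers wanting data integrity are expected to make two passes, e.g.:

	sync_inodes_sb(sb, 0);	/* pass 1: start writeout (WB_SYNC_NONE) */
	sync_inodes_sb(sb, 1);	/* pass 2: wait on it (WB_SYNC_ALL) */
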
 
 /**
@@ -652,9 +685,6 @@ static void __sync_inodes(int wait)
        spin_lock(&sb_lock);
 restart:
        list_for_each_entry(sb, &super_blocks, s_list) {
-               if (sb->s_syncing)
-                       continue;
-               sb->s_syncing = 1;
                sb->s_count++;
                spin_unlock(&sb_lock);
                down_read(&sb->s_umount);
@@ -672,13 +702,10 @@ restart:
 
 void sync_inodes(int wait)
 {
-       set_sb_syncing(0);
        __sync_inodes(0);
 
-       if (wait) {
-               set_sb_syncing(0);
+       if (wait)
                __sync_inodes(1);
-       }
 }
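
sync_inodes() keeps the same two-pass shape at the system-wide level: __sync_inodes(0) starts writeout on every superblock, then __sync_inodes(1) waits. The s_syncing flag can go because the wait pass added to generic_sync_sb_inodes() now provides the data-integrity guarantee on its own. Roughly how fs/sync.c of this vintage drives it (paraphrased; quota and emergency-sync details elided):

	static void do_sync(unsigned long wait)
	{
		wakeup_pdflush(0);	/* kick background writeback */
		sync_inodes(0);		/* all mappings, inodes, blockdevs */
		sync_supers();		/* write the super blocks */
		sync_filesystems(0);	/* start syncing the filesystems */
		sync_filesystems(wait);	/* waitingly sync the filesystems */
		sync_inodes(wait);	/* mappings, inodes, blockdevs again */
	}
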
 
 /**