Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6
[sfrench/cifs-2.6.git] / fs/buffer.c
index 7db24b9e54490b82f8201cda70d25afb6424d38a..02ebb1f1d3b0e2fa70298d21dbb1b88d4471a65d 100644
@@ -24,7 +24,6 @@
 #include <linux/mm.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/capability.h>
 #include <linux/blkdev.h>
 #include <linux/file.h>
@@ -357,7 +356,7 @@ static void free_more_memory(void)
        for_each_online_pgdat(pgdat) {
                zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
                if (*zones)
-                       try_to_free_pages(zones, GFP_NOFS);
+                       try_to_free_pages(zones, 0, GFP_NOFS);
        }
 }
 
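The new middle argument to try_to_free_pages() is the allocation order of the shortfall; free_more_memory() passes 0 because buffer heads only need order-0 pages. A sketch of the assumed prototype after this change (see mm/vmscan.c; not authoritative):

	unsigned long try_to_free_pages(struct zone **zones, int order,
					gfp_t gfp_mask);
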
@@ -676,6 +675,39 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 }
 EXPORT_SYMBOL(mark_buffer_dirty_inode);
 
+/*
+ * Mark the page dirty, tag it as dirty in the radix tree, and mark the
+ * inode dirty.
+ *
+ * If warn is true, emit a warning if the page is not uptodate and has
+ * not been truncated.
+ */
+static int __set_page_dirty(struct page *page,
+               struct address_space *mapping, int warn)
+{
+       if (unlikely(!mapping))
+               return !TestSetPageDirty(page);
+
+       if (TestSetPageDirty(page))
+               return 0;
+
+       write_lock_irq(&mapping->tree_lock);
+       if (page->mapping) {    /* Race with truncate? */
+               WARN_ON_ONCE(warn && !PageUptodate(page));
+
+               if (mapping_cap_account_dirty(mapping)) {
+                       __inc_zone_page_state(page, NR_FILE_DIRTY);
+                       task_io_account_write(PAGE_CACHE_SIZE);
+               }
+               radix_tree_tag_set(&mapping->page_tree,
+                               page_index(page), PAGECACHE_TAG_DIRTY);
+       }
+       write_unlock_irq(&mapping->tree_lock);
+       __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+
+       return 1;
+}
+
 /*
  * Add a page to the dirty page list.
  *
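The new helper factors out the tail of __set_page_dirty_buffers() (see the hunk below) so mark_buffer_dirty() can share it. The return convention follows TestSetPageDirty(): only the call that actually transitions the page from clean to dirty returns 1, so two racing callers cannot both account the same page. A minimal userspace model of that idiom, using C11 atomics in place of the kernel's page-flag bitops (illustrative only):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag page_dirty = ATOMIC_FLAG_INIT;

	/* Return 1 iff this call made the page dirty, like !TestSetPageDirty(). */
	static int set_page_dirty_model(void)
	{
		/* test-and-set returns the previous value */
		return !atomic_flag_test_and_set(&page_dirty);
	}

	int main(void)
	{
		printf("%d\n", set_page_dirty_model());	/* 1: we dirtied it */
		printf("%d\n", set_page_dirty_model());	/* 0: already dirty */
		return 0;
	}
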
@@ -703,7 +735,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  */
 int __set_page_dirty_buffers(struct page *page)
 {
-       struct address_space * const mapping = page_mapping(page);
+       struct address_space *mapping = page_mapping(page);
 
        if (unlikely(!mapping))
                return !TestSetPageDirty(page);
@@ -720,21 +752,7 @@ int __set_page_dirty_buffers(struct page *page)
        }
        spin_unlock(&mapping->private_lock);
 
-       if (TestSetPageDirty(page))
-               return 0;
-
-       write_lock_irq(&mapping->tree_lock);
-       if (page->mapping) {    /* Race with truncate? */
-               if (mapping_cap_account_dirty(mapping)) {
-                       __inc_zone_page_state(page, NR_FILE_DIRTY);
-                       task_io_account_write(PAGE_CACHE_SIZE);
-               }
-               radix_tree_tag_set(&mapping->page_tree,
-                               page_index(page), PAGECACHE_TAG_DIRTY);
-       }
-       write_unlock_irq(&mapping->tree_lock);
-       __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-       return 1;
+       return __set_page_dirty(page, mapping, 1);
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
 
@@ -982,7 +1000,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        struct page *page;
        struct buffer_head *bh;
 
-       page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+       page = find_or_create_page(inode->i_mapping, index,
+               (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
        if (!page)
                return NULL;
 
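Replacing plain GFP_NOFS with a derived mask keeps whatever flags the block device's mapping already requires, strips __GFP_FS so the allocation cannot recurse into filesystem reclaim, and adds __GFP_MOVABLE so the page may come from the movable zone. The mask arithmetic is ordinary bit manipulation; a self-contained demonstration with illustrative flag values (the real masks live in include/linux/gfp.h):

	#include <stdio.h>

	#define __GFP_FS	0x80u		/* illustrative values only */
	#define __GFP_MOVABLE	0x100000u

	int main(void)
	{
		unsigned int mapping_gfp = 0x1d2u;	/* pretend mapping_gfp_mask() result */
		unsigned int gfp = (mapping_gfp & ~__GFP_FS) | __GFP_MOVABLE;

		printf("%#x -> %#x\n", mapping_gfp, gfp);	/* 0x1d2 -> 0x100152 */
		return 0;
	}
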
@@ -1026,11 +1045,6 @@ failed:
 /*
  * Create buffers for the specified block device block's page.  If
  * that page was dirty, the buffers are set dirty also.
- *
- * Except that's a bug.  Attaching dirty buffers to a dirty
- * blockdev's page can result in filesystem corruption, because
- * some of those buffers may be aliases of filesystem data.
- * grow_dev_page() will go BUG() if this happens.
  */
 static int
 grow_buffers(struct block_device *bdev, sector_t block, int size)
@@ -1137,8 +1151,9 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  */
 void fastcall mark_buffer_dirty(struct buffer_head *bh)
 {
+       WARN_ON_ONCE(!buffer_uptodate(bh));
        if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
-               __set_page_dirty_nobuffers(bh->b_page);
+               __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
 }
 
 /*
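Note the asymmetry: __set_page_dirty_buffers() passes warn=1 while mark_buffer_dirty() passes warn=0, because a buffer may legitimately be dirtied on a page that is not fully uptodate; the invariant checked here is on the buffer instead. WARN_ON_ONCE() reports only the first violation. A userspace sketch of that latch, assuming GCC statement expressions (a model only, not the kernel macro):

	#include <stdio.h>

	#define WARN_ON_ONCE(cond) ({					\
		static int warned;					\
		int ret = !!(cond);					\
		if (ret && !warned) {					\
			warned = 1;					\
			fprintf(stderr, "WARNING: %s\n", #cond);	\
		}							\
		ret;							\
	})

	int main(void)
	{
		int i;

		for (i = 0; i < 3; i++)
			WARN_ON_ONCE(i >= 0);	/* prints only once */
		return 0;
	}
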
@@ -1727,6 +1742,7 @@ recover:
        } while ((bh = bh->b_this_page) != head);
        SetPageError(page);
        BUG_ON(PageWriteback(page));
+       mapping_set_error(page->mapping, err);
        set_page_writeback(page);
        do {
                struct buffer_head *next = bh->b_this_page;
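The added mapping_set_error() records the writeback failure on the mapping so a later fsync() or msync() can report it to userspace. Assuming the helper as defined in include/linux/pagemap.h around this kernel version, it is roughly:

	static inline void mapping_set_error(struct address_space *mapping, int error)
	{
		if (error) {
			if (error == -ENOSPC)
				set_bit(AS_ENOSPC, &mapping->flags);
			else
				set_bit(AS_EIO, &mapping->flags);
		}
	}
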
@@ -1846,13 +1862,8 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                if (block_start >= to)
                        break;
                if (buffer_new(bh)) {
-                       void *kaddr;
-
                        clear_buffer_new(bh);
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       memset(kaddr+block_start, 0, bh->b_size);
-                       flush_dcache_page(page);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       zero_user_page(page, block_start, bh->b_size, KM_USER0);
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
                }
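This hunk and the similar ones below replace the open-coded kmap_atomic()/memset()/flush_dcache_page()/kunmap_atomic() sequence with zero_user_page(). Assuming the helper as introduced in include/linux/highmem.h shortly before this merge, it wraps exactly that sequence:

	static inline void zero_user_page(struct page *page, unsigned offset,
					  unsigned size, enum km_type km)
	{
		void *kaddr;

		BUG_ON(offset + size > PAGE_SIZE);

		kaddr = kmap_atomic(page, km);
		memset(kaddr + offset, 0, size);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, km);
	}
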
@@ -1940,10 +1951,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                                        SetPageError(page);
                        }
                        if (!buffer_mapped(bh)) {
-                               void *kaddr = kmap_atomic(page, KM_USER0);
-                               memset(kaddr + i * blocksize, 0, blocksize);
-                               flush_dcache_page(page);
-                               kunmap_atomic(kaddr, KM_USER0);
+                               zero_user_page(page, i * blocksize, blocksize,
+                                               KM_USER0);
                                if (!err)
                                        set_buffer_uptodate(bh);
                                continue;
@@ -2086,7 +2095,6 @@ int cont_prepare_write(struct page *page, unsigned offset,
        long status;
        unsigned zerofrom;
        unsigned blocksize = 1 << inode->i_blkbits;
-       void *kaddr;
 
        while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
                status = -ENOMEM;
@@ -2108,10 +2116,8 @@ int cont_prepare_write(struct page *page, unsigned offset,
                                                PAGE_CACHE_SIZE, get_block);
                if (status)
                        goto out_unmap;
-               kaddr = kmap_atomic(new_page, KM_USER0);
-               memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
-               flush_dcache_page(new_page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user_page(new_page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
+                               KM_USER0);
                generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
                unlock_page(new_page);
                page_cache_release(new_page);
@@ -2138,10 +2144,7 @@ int cont_prepare_write(struct page *page, unsigned offset,
        if (status)
                goto out1;
        if (zerofrom < offset) {
-               kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr+zerofrom, 0, offset-zerofrom);
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
                __block_commit_write(inode, page, zerofrom, offset);
        }
        return 0;
@@ -2191,6 +2194,52 @@ int generic_commit_write(struct file *file, struct page *page,
        return 0;
 }
 
+/*
+ * block_page_mkwrite() is not allowed to change the file size, as it is
+ * called from a page fault handler when a page is first dirtied. Hence we
+ * must be careful to check for EOF conditions here. We set the page up
+ * correctly for a written page, which means we get ENOSPC checking when
+ * writing into holes, and correct delalloc and unwritten extent mapping on
+ * filesystems that support these features.
+ *
+ * We are not allowed to take the i_mutex here, so we have to play games to
+ * protect against truncate races, as the page could now be beyond EOF.
+ * Because vmtruncate() writes the inode size before removing pages, once we
+ * have the page lock we can safely determine whether the page is beyond EOF.
+ * If it is not beyond EOF, then the page is guaranteed safe against
+ * truncation until we unlock the page.
+ */
+int
+block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+                  get_block_t get_block)
+{
+       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       unsigned long end;
+       loff_t size;
+       int ret = -EINVAL;
+
+       lock_page(page);
+       size = i_size_read(inode);
+       if ((page->mapping != inode->i_mapping) ||
+           ((page->index << PAGE_CACHE_SHIFT) > size)) {
+               /* page got truncated out from underneath us */
+               goto out_unlock;
+       }
+
+       /* page is wholly or partially inside EOF */
+       if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
+               end = size & ~PAGE_CACHE_MASK;
+       else
+               end = PAGE_CACHE_SIZE;
+
+       ret = block_prepare_write(page, 0, end, get_block);
+       if (!ret)
+               ret = block_commit_write(page, 0, end);
+
+out_unlock:
+       unlock_page(page);
+       return ret;
+}
 
 /*
  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
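A filesystem hooks block_page_mkwrite() into its vm_operations_struct so the first write fault on a shared mapping reserves or allocates blocks. A sketch of the expected wiring, where myfs_page_mkwrite and myfs_get_block are hypothetical names (XFS wires this up the same way elsewhere in this merge):

	static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
	{
		return block_page_mkwrite(vma, page, myfs_get_block);
	}

	static struct vm_operations_struct myfs_file_vm_ops = {
		.fault		= filemap_fault,
		.page_mkwrite	= myfs_page_mkwrite,
	};

Worked example of the end calculation: with 4096-byte pages and i_size = 10000, a fault on page index 2 gives (2 + 1) << 12 = 12288 > 10000, so end = 10000 & ~PAGE_CACHE_MASK = 1808, and only the part of the page inside EOF is prepared and committed.
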
@@ -2340,10 +2389,7 @@ failed:
         * Error recovery is pretty slack.  Clear the page and mark it dirty
         * so we'll later zero out any blocks which _were_ allocated.
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr, 0, PAGE_CACHE_SIZE);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
        SetPageUptodate(page);
        set_page_dirty(page);
        return ret;
@@ -2382,7 +2428,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
-       void *kaddr;
        int ret;
 
        /* Is the page fully inside i_size? */
@@ -2413,10 +2458,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
 out:
        ret = mpage_writepage(page, get_block, wbc);
        if (ret == -EAGAIN)
@@ -2437,7 +2479,6 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
        unsigned to;
        struct page *page;
        const struct address_space_operations *a_ops = mapping->a_ops;
-       char *kaddr;
        int ret = 0;
 
        if ((offset & (blocksize - 1)) == 0)
@@ -2451,10 +2492,8 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
        to = (offset + blocksize) & ~(blocksize - 1);
        ret = a_ops->prepare_write(NULL, page, offset, to);
        if (ret == 0) {
-               kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
+                               KM_USER0);
                /*
                 * It would be more correct to call aops->commit_write()
                 * here, but this is more efficient.
@@ -2480,7 +2519,6 @@ int block_truncate_page(struct address_space *mapping,
        struct inode *inode = mapping->host;
        struct page *page;
        struct buffer_head *bh;
-       void *kaddr;
        int err;
 
        blocksize = 1 << inode->i_blkbits;
@@ -2534,11 +2572,7 @@ int block_truncate_page(struct address_space *mapping,
                        goto unlock;
        }
 
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, length);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
-
+       zero_user_page(page, offset, length, KM_USER0);
        mark_buffer_dirty(bh);
        err = 0;
 
@@ -2559,7 +2593,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
-       void *kaddr;
 
        /* Is the page fully inside i_size? */
        if (page->index < end_index)
@@ -2585,10 +2618,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
        return __block_write_full_page(inode, page, get_block, wbc);
 }
 
@@ -2930,8 +2960,9 @@ static void recalc_bh_state(void)
        
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-       struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
+       struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
        if (ret) {
+               INIT_LIST_HEAD(&ret->b_assoc_buffers);
                get_cpu_var(bh_accounting).nr++;
                recalc_bh_state();
                put_cpu_var(bh_accounting);
@@ -2950,17 +2981,6 @@ void free_buffer_head(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(free_buffer_head);
 
-static void
-init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
-{
-       if (flags & SLAB_CTOR_CONSTRUCTOR) {
-               struct buffer_head * bh = (struct buffer_head *)data;
-
-               memset(bh, 0, sizeof(*bh));
-               INIT_LIST_HEAD(&bh->b_assoc_buffers);
-       }
-}
-
 static void buffer_exit_cpu(int cpu)
 {
        int i;
@@ -2978,7 +2998,7 @@ static void buffer_exit_cpu(int cpu)
 static int buffer_cpu_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
 {
-       if (action == CPU_DEAD)
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                buffer_exit_cpu((unsigned long)hcpu);
        return NOTIFY_OK;
 }
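CPU_DEAD_FROZEN is the variant of CPU_DEAD delivered while tasks are frozen for suspend/resume; ignoring it would leak the per-CPU buffer-head LRU entries across a suspend-time CPU unplug. Assuming the definitions in include/linux/notifier.h of this era, the frozen variants are the base action with a flag ORed in:

	#define CPU_TASKS_FROZEN	0x0010

	#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
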
@@ -2987,12 +3007,8 @@ void __init buffer_init(void)
 {
        int nrpages;
 
-       bh_cachep = kmem_cache_create("buffer_head",
-                                       sizeof(struct buffer_head), 0,
-                                       (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-                                       SLAB_MEM_SPREAD),
-                                       init_buffer_head,
-                                       NULL);
+       bh_cachep = KMEM_CACHE(buffer_head,
+                       SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL
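KMEM_CACHE() derives the cache name, object size, and alignment from the struct itself, which is less error-prone than spelling them out by hand. Assuming its definition in include/linux/slab.h at this point (kmem_cache_create() still took constructor and destructor arguments):

	#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,	\
			sizeof(struct __struct), __alignof__(struct __struct),	\
			(__flags), NULL, NULL)
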
@@ -3007,6 +3023,7 @@ EXPORT_SYMBOL(__brelse);
 EXPORT_SYMBOL(__wait_on_buffer);
 EXPORT_SYMBOL(block_commit_write);
 EXPORT_SYMBOL(block_prepare_write);
+EXPORT_SYMBOL(block_page_mkwrite);
 EXPORT_SYMBOL(block_read_full_page);
 EXPORT_SYMBOL(block_sync_page);
 EXPORT_SYMBOL(block_truncate_page);