fs: fix kernel-doc notation warnings
diff --git a/fs/buffer.c b/fs/buffer.c
index 7249e014819e1a0432621d1d2576fd259f27632d..7ba58386beee459779da2056d662572496ff2096 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -67,14 +67,14 @@ static int sync_buffer(void *word)
        return 0;
 }
 
-void fastcall __lock_buffer(struct buffer_head *bh)
+void __lock_buffer(struct buffer_head *bh)
 {
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
 
-void fastcall unlock_buffer(struct buffer_head *bh)
+void unlock_buffer(struct buffer_head *bh)
 {
        smp_mb__before_clear_bit();
        clear_buffer_locked(bh);
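
The fastcall removals here (and in the mark_buffer_dirty() hunk below) are part of a
tree-wide cleanup: on i386 the annotation expanded to a regparm(3) calling-convention
attribute, and with the kernel now built with register parameter passing by default it
had become a no-op; other architectures already defined it empty. Roughly, the old
definition being dropped (quoted from memory, so treat as approximate):

    /* include/asm-i386/linkage.h, pre-removal (approximate): */
    #define fastcall        __attribute__((regparm(3)))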
@@ -627,8 +627,7 @@ repeat:
 }
 
 /**
- * sync_mapping_buffers - write out and wait upon a mapping's "associated"
- *                        buffers
+ * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
  * @mapping: the mapping which wants those buffers written
  *
  * Starts I/O against the buffers at mapping->private_list, and waits upon
@@ -678,7 +677,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
-       if (list_empty(&bh->b_assoc_buffers)) {
+       if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
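
The switch from list_empty(&bh->b_assoc_buffers) to testing bh->b_assoc_map matters
because fsync_buffers_list() (next hunk) temporarily moves buffers onto a local tmp
list: b_assoc_buffers can be non-empty even though the buffer is no longer on
mapping->private_list. The invariant, as far as these hunks show, becomes: b_assoc_map
is non-NULL exactly while the buffer is tracked on some mapping's private_list. A
sketch of the check this enables (the b_assoc_map assignment inside the locked region
is assumed; it sits below the lines shown here):

    if (!bh->b_assoc_map) {                 /* not currently tracked anywhere */
            spin_lock(&buffer_mapping->private_lock);
            list_move_tail(&bh->b_assoc_buffers, &mapping->private_list);
            bh->b_assoc_map = mapping;      /* assumed: set under the lock */
            spin_unlock(&buffer_mapping->private_lock);
    }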
@@ -794,6 +793,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
        struct buffer_head *bh;
        struct list_head tmp;
+       struct address_space *mapping;
        int err = 0, err2;
 
        INIT_LIST_HEAD(&tmp);
@@ -801,9 +801,14 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
+               mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
+               /* Avoid race with mark_buffer_dirty_inode() which does
+                * a lockless check and we rely on seeing the dirty bit */
+               smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
+                       bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
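
The smp_mb() orders the list manipulation against the lockless check in
mark_buffer_dirty_inode(); on the other side, the implied full barrier of the atomic
test-and-set in mark_buffer_dirty() is assumed. This is the classic store-buffer
pattern:

    /* Sketch of the assumed barrier pairing:
     *
     *   fsync_buffers_list()                 mark_buffer_dirty_inode()
     *   --------------------                 -------------------------
     *   __remove_assoc_queue(bh);            test_set_buffer_dirty(bh);
     *     (clears bh->b_assoc_map)             (atomic RMW, full barrier)
     *   smp_mb();
     *   if (buffer_dirty(bh)) ...            if (!bh->b_assoc_map) relink;
     *
     * At least one side must observe the other's store: either fsync sees
     * the dirty bit and keeps tracking the buffer, or the dirtier sees
     * b_assoc_map == NULL and relinks the buffer itself.
     */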
@@ -822,8 +827,17 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 
        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
-               list_del_init(&bh->b_assoc_buffers);
                get_bh(bh);
+               mapping = bh->b_assoc_map;
+               __remove_assoc_queue(bh);
+               /* Avoid race with mark_buffer_dirty_inode() which does
+                * a lockless check and we rely on seeing the dirty bit */
+               smp_mb();
+               if (buffer_dirty(bh)) {
+                       list_add(&bh->b_assoc_buffers,
+                                &mapping->private_list);
+                       bh->b_assoc_map = mapping;
+               }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
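
Re-checking buffer_dirty() and relinking in the wait loop closes the window where a
buffer is redirtied while parked on the local tmp list. With the !b_assoc_map test
above, mark_buffer_dirty_inode() sees b_assoc_map still set (it was reasserted when
the buffer moved to tmp) and deliberately does not relink, so fsync_buffers_list()
itself must put the buffer back on the mapping's private_list, or its dirty state
would stop being tracked:

    /* Timeline being fixed (sketch):
     *   fsync moves bh to tmp (b_assoc_map kept set)
     *   writer redirties bh; sees b_assoc_map != NULL, does not relink
     *   old code: list_del_init() dropped bh from tracking entirely
     *   new code: still dirty => list_add back onto mapping->private_list
     */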
@@ -1164,7 +1178,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
  * mapping->tree_lock and the global inode_lock.
  */
-void fastcall mark_buffer_dirty(struct buffer_head *bh)
+void mark_buffer_dirty(struct buffer_head *bh)
 {
        WARN_ON_ONCE(!buffer_uptodate(bh));
        if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
@@ -1195,7 +1209,7 @@ void __brelse(struct buffer_head * buf)
 void __bforget(struct buffer_head *bh)
 {
        clear_buffer_dirty(bh);
-       if (!list_empty(&bh->b_assoc_buffers)) {
+       if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;
 
                spin_lock(&buffer_mapping->private_lock);
@@ -1436,6 +1450,7 @@ void invalidate_bh_lrus(void)
 {
        on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
 }
+EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
 void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
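
Exporting invalidate_bh_lrus() (GPL-only) lets modular block drivers flush the
per-CPU buffer_head LRUs so that no CPU keeps a stale bh reference pinning a page the
driver is about to free; if memory serves this was wanted for the rewritten ramdisk
driver, but take that motivation as an assumption. A hypothetical modular caller:

    /* my_drop_dev_cache() is hypothetical, for illustration only. */
    static void my_drop_dev_cache(struct block_device *bdev)
    {
            invalidate_bdev(bdev);  /* drop clean pagecache for the device */
            invalidate_bh_lrus();   /* per-CPU LRUs no longer cache stale bhs */
    }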
@@ -1798,7 +1813,7 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
                                        start = max(from, block_start);
                                        size = min(to, block_end) - start;
 
-                                       zero_user_page(page, start, size, KM_USER0);
+                                       zero_user(page, start, size);
                                        set_buffer_uptodate(bh);
                                }
 
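This and the later hunks replace zero_user_page() with the new zeroing helpers, which
pick the kmap_atomic slot internally instead of taking a KM_USER0 argument. Note the
differing conventions: zero_user() takes a start offset and a length, while the
segment variants take exclusive end offsets, which is why the
PAGE_CACHE_SIZE - offset computations disappear in the zero_user_segment() hunks
below. Assumed signatures (a sketch of the new header, not quoted from it):

    void zero_user(struct page *page, unsigned start, unsigned size);
    void zero_user_segment(struct page *page, unsigned start, unsigned end);
    void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
                            unsigned start2, unsigned end2);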
@@ -1861,19 +1876,10 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                                        mark_buffer_dirty(bh);
                                        continue;
                                }
-                               if (block_end > to || block_start < from) {
-                                       void *kaddr;
-
-                                       kaddr = kmap_atomic(page, KM_USER0);
-                                       if (block_end > to)
-                                               memset(kaddr+to, 0,
-                                                       block_end-to);
-                                       if (block_start < from)
-                                               memset(kaddr+block_start,
-                                                       0, from-block_start);
-                                       flush_dcache_page(page);
-                                       kunmap_atomic(kaddr, KM_USER0);
-                               }
+                               if (block_end > to || block_start < from)
+                                       zero_user_segments(page,
+                                               to, block_end,
+                                               block_start, from);
                                continue;
                        }
                }
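
zero_user_segments() collapses the open-coded kmap/memset sequence into a single
mapping of the page, zeroing the tail [to, block_end) and the head [block_start, from)
in one call. The guard if (block_end > to || block_start < from) can stay as-is
because the helper is assumed to skip empty ranges itself. A plausible shape for it:

    static inline void zero_user_segments(struct page *page,
                    unsigned start1, unsigned end1,
                    unsigned start2, unsigned end2)
    {
            void *kaddr = kmap_atomic(page, KM_USER0);

            if (end1 > start1)              /* empty ranges are no-ops */
                    memset(kaddr + start1, 0, end1 - start1);
            if (end2 > start2)
                    memset(kaddr + start2, 0, end2 - start2);
            kunmap_atomic(kaddr, KM_USER0);
            flush_dcache_page(page);
    }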
@@ -2104,8 +2110,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                                        SetPageError(page);
                        }
                        if (!buffer_mapped(bh)) {
-                               zero_user_page(page, i * blocksize, blocksize,
-                                               KM_USER0);
+                               zero_user(page, i * blocksize, blocksize);
                                if (!err)
                                        set_buffer_uptodate(bh);
                                continue;
@@ -2218,7 +2223,7 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
                                                &page, &fsdata);
                if (err)
                        goto out;
-               zero_user_page(page, zerofrom, len, KM_USER0);
+               zero_user(page, zerofrom, len);
                err = pagecache_write_end(file, mapping, curpos, len, len,
                                                page, fsdata);
                if (err < 0)
@@ -2245,7 +2250,7 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
                                                &page, &fsdata);
                if (err)
                        goto out;
-               zero_user_page(page, zerofrom, len, KM_USER0);
+               zero_user(page, zerofrom, len);
                err = pagecache_write_end(file, mapping, curpos, len, len,
                                                page, fsdata);
                if (err < 0)
@@ -2422,7 +2427,6 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
        unsigned block_in_page;
        unsigned block_start, block_end;
        sector_t block_in_file;
-       char *kaddr;
        int nr_reads = 0;
        int ret = 0;
        int is_mapped_to_disk = 1;
@@ -2493,13 +2497,8 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
                        continue;
                }
                if (buffer_new(bh) || !buffer_mapped(bh)) {
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       if (block_start < from)
-                               memset(kaddr+block_start, 0, from-block_start);
-                       if (block_end > to)
-                               memset(kaddr + to, 0, block_end - to);
-                       flush_dcache_page(page);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       zero_user_segments(page, block_start, from,
+                                                       to, block_end);
                        continue;
                }
                if (buffer_uptodate(bh))
@@ -2636,7 +2635,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 out:
        ret = mpage_writepage(page, get_block, wbc);
        if (ret == -EAGAIN)
@@ -2709,7 +2708,7 @@ has_buffers:
                if (page_has_buffers(page))
                        goto has_buffers;
        }
-       zero_user_page(page, offset, length, KM_USER0);
+       zero_user(page, offset, length);
        set_page_dirty(page);
        err = 0;
 
@@ -2785,7 +2784,7 @@ int block_truncate_page(struct address_space *mapping,
                        goto unlock;
        }
 
-       zero_user_page(page, offset, length, KM_USER0);
+       zero_user(page, offset, length);
        mark_buffer_dirty(bh);
        err = 0;
 
@@ -2831,7 +2830,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
        return __block_write_full_page(inode, page, get_block, wbc);
 }
 
@@ -3037,7 +3036,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
        do {
                struct buffer_head *next = bh->b_this_page;
 
-               if (!list_empty(&bh->b_assoc_buffers))
+               if (bh->b_assoc_map)
                        __remove_assoc_queue(bh);
                bh = next;
        } while (bh != head);
@@ -3169,7 +3168,7 @@ static void recalc_bh_state(void)
        
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-       struct buffer_head *ret = kmem_cache_zalloc(bh_cachep,
+       struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
                                set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
        if (ret) {
                INIT_LIST_HEAD(&ret->b_assoc_buffers);
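
Dropping kmem_cache_zalloc() in favour of plain kmem_cache_alloc() relies on the new
init_buffer_head() constructor in the final hunk: with a slab constructor, each object
is memset once when its backing slab page is populated. The subtlety (stated here as
an assumption about the slab contract) is that the constructor does not run again on
recycled objects, so this is only equivalent to zeroing if buffer heads are always
freed fully detached, with b_assoc_map cleared and b_assoc_buffers reinitialised:

    /* Assumed slab-constructor semantics (sketch): */
    bh = kmem_cache_alloc(bh_cachep, GFP_NOFS); /* ctor ran when page was made */
    kmem_cache_free(bh_cachep, bh);             /* must leave bh "as constructed" */
    bh = kmem_cache_alloc(bh_cachep, GFP_NOFS); /* may recycle bh; ctor NOT re-run */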
@@ -3213,12 +3212,68 @@ static int buffer_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
+/**
+ * bh_uptodate_or_lock - Test whether the buffer is uptodate
+ * @bh: struct buffer_head
+ *
+ * Return true if the buffer is up-to-date and false,
+ * with the buffer locked, if not.
+ */
+int bh_uptodate_or_lock(struct buffer_head *bh)
+{
+       if (!buffer_uptodate(bh)) {
+               lock_buffer(bh);
+               if (!buffer_uptodate(bh))
+                       return 0;
+               unlock_buffer(bh);
+       }
+       return 1;
+}
+EXPORT_SYMBOL(bh_uptodate_or_lock);
+
+/**
+ * bh_submit_read - Submit a locked buffer for reading
+ * @bh: struct buffer_head
+ *
+ * Returns zero on success and -EIO on error.
+ */
+int bh_submit_read(struct buffer_head *bh)
+{
+       BUG_ON(!buffer_locked(bh));
+
+       if (buffer_uptodate(bh)) {
+               unlock_buffer(bh);
+               return 0;
+       }
+
+       get_bh(bh);
+       bh->b_end_io = end_buffer_read_sync;
+       submit_bh(READ, bh);
+       wait_on_buffer(bh);
+       if (buffer_uptodate(bh))
+               return 0;
+       return -EIO;
+}
+EXPORT_SYMBOL(bh_submit_read);
+
+static void
+init_buffer_head(struct kmem_cache *cachep, void *data)
+{
+       struct buffer_head *bh = data;
+
+       memset(bh, 0, sizeof(*bh));
+       INIT_LIST_HEAD(&bh->b_assoc_buffers);
+}
+
 void __init buffer_init(void)
 {
        int nrpages;
 
-       bh_cachep = KMEM_CACHE(buffer_head,
-                       SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
+       bh_cachep = kmem_cache_create("buffer_head",
+                       sizeof(struct buffer_head), 0,
+                               (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+                               SLAB_MEM_SPREAD),
+                               init_buffer_head);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL
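
The new bh_uptodate_or_lock()/bh_submit_read() pair is designed to be chained: the
first returns with the buffer locked exactly when a read is needed, and the second
consumes that lock (end_buffer_read_sync() unlocks the buffer and drops the extra
reference taken by get_bh()). A hedged sketch of the intended caller pattern, with
read_meta_block() being a hypothetical name:

    static int read_meta_block(struct buffer_head *bh)
    {
            if (bh_uptodate_or_lock(bh))
                    return 0;               /* uptodate; buffer left unlocked */
            /* returned locked and not uptodate: issue and wait for the read */
            return bh_submit_read(bh);      /* 0 on success, -EIO on failure */
    }

Finally, buffer_init() stops using the KMEM_CACHE() convenience macro because that
macro passes a NULL constructor; attaching init_buffer_head() requires the open-coded
kmem_cache_create() call. For reference, the macro being replaced is roughly:

    #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,      \
                    sizeof(struct __struct), __alignof__(struct __struct),  \
                    (__flags), NULL)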