header cleaning: don't include smp_lock.h when not used
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52         bh->b_end_io = handler;
53         bh->b_private = private;
54 }
55
56 static int sync_buffer(void *word)
57 {
58         struct block_device *bd;
59         struct buffer_head *bh
60                 = container_of(word, struct buffer_head, b_state);
61
62         smp_mb();
63         bd = bh->b_bdev;
64         if (bd)
65                 blk_run_address_space(bd->bd_inode->i_mapping);
66         io_schedule();
67         return 0;
68 }
69
70 void fastcall __lock_buffer(struct buffer_head *bh)
71 {
72         wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73                                                         TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76
77 void fastcall unlock_buffer(struct buffer_head *bh)
78 {
79         smp_mb__before_clear_bit();
80         clear_buffer_locked(bh);
81         smp_mb__after_clear_bit();
82         wake_up_bit(&bh->b_state, BH_Lock);
83 }
84
85 /*
86  * Block until a buffer comes unlocked.  This doesn't stop it
87  * from becoming locked again - you have to lock it yourself
88  * if you want to preserve its state.
89  */
90 void __wait_on_buffer(struct buffer_head * bh)
91 {
92         wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
93 }
94
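/*
 * Example (minimal sketch): __wait_on_buffer() only waits; to inspect a
 * buffer in a stable state, take the lock yourself:
 */
#if 0	/* illustrative only */
static int example_probe_uptodate(struct buffer_head *bh)
{
	int uptodate;

	lock_buffer(bh);		/* wait AND acquire BH_Lock */
	uptodate = buffer_uptodate(bh);	/* state is stable under the lock */
	unlock_buffer(bh);
	return uptodate;
}
#endif
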
95 static void
96 __clear_page_buffers(struct page *page)
97 {
98         ClearPagePrivate(page);
99         set_page_private(page, 0);
100         page_cache_release(page);
101 }
102
103 static void buffer_io_error(struct buffer_head *bh)
104 {
105         char b[BDEVNAME_SIZE];
106
107         printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108                         bdevname(bh->b_bdev, b),
109                         (unsigned long long)bh->b_blocknr);
110 }
111
112 /*
113  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
114  * unlock the buffer. This is what ll_rw_block uses too.
115  */
116 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
117 {
118         if (uptodate) {
119                 set_buffer_uptodate(bh);
120         } else {
121                 /* This happens due to failed READA (read-ahead) attempts. */
122                 clear_buffer_uptodate(bh);
123         }
124         unlock_buffer(bh);
125         put_bh(bh);
126 }
127
128 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
129 {
130         char b[BDEVNAME_SIZE];
131
132         if (uptodate) {
133                 set_buffer_uptodate(bh);
134         } else {
135                 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
136                         buffer_io_error(bh);
137                         printk(KERN_WARNING "lost page write due to "
138                                         "I/O error on %s\n",
139                                        bdevname(bh->b_bdev, b));
140                 }
141                 set_buffer_write_io_error(bh);
142                 clear_buffer_uptodate(bh);
143         }
144         unlock_buffer(bh);
145         put_bh(bh);
146 }
147
148 /*
149  * Write out and wait upon all the dirty data associated with a block
150  * device via its mapping.  Does not take the superblock lock.
151  */
152 int sync_blockdev(struct block_device *bdev)
153 {
154         int ret = 0;
155
156         if (bdev)
157                 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
158         return ret;
159 }
160 EXPORT_SYMBOL(sync_blockdev);
161
162 /*
163  * Write out and wait upon all dirty data associated with this
164  * device.   Filesystem data as well as the underlying block
165  * device.  Takes the superblock lock.
166  */
167 int fsync_bdev(struct block_device *bdev)
168 {
169         struct super_block *sb = get_super(bdev);
170         if (sb) {
171                 int res = fsync_super(sb);
172                 drop_super(sb);
173                 return res;
174         }
175         return sync_blockdev(bdev);
176 }
177
178 /**
179  * freeze_bdev  --  lock a filesystem and force it into a consistent state
180  * @bdev:       blockdevice to lock
181  *
182  * This takes the block device bd_mount_sem to make sure no new mounts
183  * happen on bdev until thaw_bdev() is called.
184  * If a superblock is found on this device, we take the s_umount semaphore
185  * on it to make sure nobody unmounts until the snapshot creation is done.
186  */
187 struct super_block *freeze_bdev(struct block_device *bdev)
188 {
189         struct super_block *sb;
190
191         down(&bdev->bd_mount_sem);
192         sb = get_super(bdev);
193         if (sb && !(sb->s_flags & MS_RDONLY)) {
194                 sb->s_frozen = SB_FREEZE_WRITE;
195                 smp_wmb();
196
197                 __fsync_super(sb);
198
199                 sb->s_frozen = SB_FREEZE_TRANS;
200                 smp_wmb();
201
202                 sync_blockdev(sb->s_bdev);
203
204                 if (sb->s_op->write_super_lockfs)
205                         sb->s_op->write_super_lockfs(sb);
206         }
207
208         sync_blockdev(bdev);
209         return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
210 }
211 EXPORT_SYMBOL(freeze_bdev);
212
213 /**
214  * thaw_bdev  -- unlock filesystem
215  * @bdev:       blockdevice to unlock
216  * @sb:         associated superblock
217  *
218  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
219  */
220 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
221 {
222         if (sb) {
223                 BUG_ON(sb->s_bdev != bdev);
224
225                 if (sb->s_op->unlockfs)
226                         sb->s_op->unlockfs(sb);
227                 sb->s_frozen = SB_UNFROZEN;
228                 smp_wmb();
229                 wake_up(&sb->s_wait_unfrozen);
230                 drop_super(sb);
231         }
232
233         up(&bdev->bd_mount_sem);
234 }
235 EXPORT_SYMBOL(thaw_bdev);
236
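/*
 * Example (minimal sketch) of the freeze/thaw pairing a snapshot
 * implementation would use; example_take_snapshot() is a hypothetical
 * placeholder:
 */
#if 0	/* illustrative only */
static void example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* fs now consistent, writes blocked */
	example_take_snapshot(bdev);	/* hypothetical snapshot step */
	thaw_bdev(bdev, sb);		/* writes resume */
}
#endif
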
237 /*
238  * Various filesystems appear to want __find_get_block to be non-blocking.
239  * But it's the page lock which protects the buffers.  To get around this,
240  * we get exclusion from try_to_free_buffers with the blockdev mapping's
241  * private_lock.
242  *
243  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
244  * may be quite high.  This code could TryLock the page, and if that
245  * succeeds, there is no need to take private_lock. (But if
246  * private_lock is contended then so is mapping->tree_lock).
247  */
248 static struct buffer_head *
249 __find_get_block_slow(struct block_device *bdev, sector_t block)
250 {
251         struct inode *bd_inode = bdev->bd_inode;
252         struct address_space *bd_mapping = bd_inode->i_mapping;
253         struct buffer_head *ret = NULL;
254         pgoff_t index;
255         struct buffer_head *bh;
256         struct buffer_head *head;
257         struct page *page;
258         int all_mapped = 1;
259
260         index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
261         page = find_get_page(bd_mapping, index);
262         if (!page)
263                 goto out;
264
265         spin_lock(&bd_mapping->private_lock);
266         if (!page_has_buffers(page))
267                 goto out_unlock;
268         head = page_buffers(page);
269         bh = head;
270         do {
271                 if (bh->b_blocknr == block) {
272                         ret = bh;
273                         get_bh(bh);
274                         goto out_unlock;
275                 }
276                 if (!buffer_mapped(bh))
277                         all_mapped = 0;
278                 bh = bh->b_this_page;
279         } while (bh != head);
280
281         /* we might be here because some of the buffers on this page are
282          * not mapped.  This is due to various races between
283          * file io on the block device and getblk.  It gets dealt with
284          * elsewhere, don't buffer_error if we had some unmapped buffers
285          */
286         if (all_mapped) {
287                 printk("__find_get_block_slow() failed. "
288                         "block=%llu, b_blocknr=%llu\n",
289                         (unsigned long long)block,
290                         (unsigned long long)bh->b_blocknr);
291                 printk("b_state=0x%08lx, b_size=%zu\n",
292                         bh->b_state, bh->b_size);
293                 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
294         }
295 out_unlock:
296         spin_unlock(&bd_mapping->private_lock);
297         page_cache_release(page);
298 out:
299         return ret;
300 }
301
302 /* If invalidate_buffers() trashes dirty buffers, it means some kind
303    of fs corruption is going on. Trashing dirty data always implies losing
304    information that the user asked to be stored on the physical layer.
305
306    Thus invalidate_buffers in general usage is not allowed to trash
307    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
308    be preserved.  These buffers are simply skipped.
309
310    We also skip buffers which are still in use.  For example this can
311    happen if a userspace program is reading the block device.
312
313    NOTE: if the user removed a removable-media disk while there was still
314    dirty data not yet synced to it (due to a bug in the device driver or
315    to an error by the user), then by not destroying the dirty buffers we
316    could corrupt the next medium inserted as well; a parameter is thus
317    necessary to handle this case in the safest way possible (trying
318    not to corrupt the newly inserted disk with data belonging to
319    the old, now corrupted one). Also, for a ramdisk the natural way
320    to release its memory is to destroy the dirty buffers.
321
322    These are two special cases. Normal usage implies that the device
323    driver issues a sync on the device (without waiting for I/O
324    completion) and then an invalidate_buffers call that doesn't trash
325    dirty buffers.
326
327    For handling cache coherency with the blkdev pagecache the 'update'
328    case has been introduced. It is needed to re-read from disk any pinned
329    buffer. NOTE: re-reading from disk is destructive so we can do it only
330    when we assume nobody is changing the buffercache under our I/O and when
331    we think the disk contains more recent information than the buffercache.
332    The update == 1 pass marks the buffers we need to update; the update == 2
333    pass does the actual I/O. */
334 void invalidate_bdev(struct block_device *bdev)
335 {
336         struct address_space *mapping = bdev->bd_inode->i_mapping;
337
338         if (mapping->nrpages == 0)
339                 return;
340
341         invalidate_bh_lrus();
342         invalidate_mapping_pages(mapping, 0, -1);
343 }
344
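/*
 * Example (minimal sketch) of the "normal usage" described above:
 * write out dirty data first, then invalidate the now-clean cache,
 * e.g. on a media change:
 */
#if 0	/* illustrative only */
static void example_media_change(struct block_device *bdev)
{
	sync_blockdev(bdev);	/* write out and wait upon dirty data */
	invalidate_bdev(bdev);	/* drop clean cached buffers and pages */
}
#endif
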
345 /*
346  * Kick pdflush then try to free up some ZONE_NORMAL memory.
347  */
348 static void free_more_memory(void)
349 {
350         struct zone **zones;
351         pg_data_t *pgdat;
352
353         wakeup_pdflush(1024);
354         yield();
355
356         for_each_online_pgdat(pgdat) {
357                 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
358                 if (*zones)
359                         try_to_free_pages(zones, GFP_NOFS);
360         }
361 }
362
363 /*
364  * I/O completion handler for block_read_full_page() - pages
365  * which come unlocked at the end of I/O.
366  */
367 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
368 {
369         unsigned long flags;
370         struct buffer_head *first;
371         struct buffer_head *tmp;
372         struct page *page;
373         int page_uptodate = 1;
374
375         BUG_ON(!buffer_async_read(bh));
376
377         page = bh->b_page;
378         if (uptodate) {
379                 set_buffer_uptodate(bh);
380         } else {
381                 clear_buffer_uptodate(bh);
382                 if (printk_ratelimit())
383                         buffer_io_error(bh);
384                 SetPageError(page);
385         }
386
387         /*
388          * Be _very_ careful from here on. Bad things can happen if
389          * two buffer heads end IO at almost the same time and both
390          * decide that the page is now completely done.
391          */
392         first = page_buffers(page);
393         local_irq_save(flags);
394         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
395         clear_buffer_async_read(bh);
396         unlock_buffer(bh);
397         tmp = bh;
398         do {
399                 if (!buffer_uptodate(tmp))
400                         page_uptodate = 0;
401                 if (buffer_async_read(tmp)) {
402                         BUG_ON(!buffer_locked(tmp));
403                         goto still_busy;
404                 }
405                 tmp = tmp->b_this_page;
406         } while (tmp != bh);
407         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
408         local_irq_restore(flags);
409
410         /*
411          * If none of the buffers had errors and they are all
412          * uptodate then we can set the page uptodate.
413          */
414         if (page_uptodate && !PageError(page))
415                 SetPageUptodate(page);
416         unlock_page(page);
417         return;
418
419 still_busy:
420         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
421         local_irq_restore(flags);
422         return;
423 }
424
425 /*
426  * Completion handler for block_write_full_page() - pages which are unlocked
427  * during I/O, and which have PageWriteback cleared upon I/O completion.
428  */
429 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
430 {
431         char b[BDEVNAME_SIZE];
432         unsigned long flags;
433         struct buffer_head *first;
434         struct buffer_head *tmp;
435         struct page *page;
436
437         BUG_ON(!buffer_async_write(bh));
438
439         page = bh->b_page;
440         if (uptodate) {
441                 set_buffer_uptodate(bh);
442         } else {
443                 if (printk_ratelimit()) {
444                         buffer_io_error(bh);
445                         printk(KERN_WARNING "lost page write due to "
446                                         "I/O error on %s\n",
447                                bdevname(bh->b_bdev, b));
448                 }
449                 set_bit(AS_EIO, &page->mapping->flags);
450                 set_buffer_write_io_error(bh);
451                 clear_buffer_uptodate(bh);
452                 SetPageError(page);
453         }
454
455         first = page_buffers(page);
456         local_irq_save(flags);
457         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
458
459         clear_buffer_async_write(bh);
460         unlock_buffer(bh);
461         tmp = bh->b_this_page;
462         while (tmp != bh) {
463                 if (buffer_async_write(tmp)) {
464                         BUG_ON(!buffer_locked(tmp));
465                         goto still_busy;
466                 }
467                 tmp = tmp->b_this_page;
468         }
469         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
470         local_irq_restore(flags);
471         end_page_writeback(page);
472         return;
473
474 still_busy:
475         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
476         local_irq_restore(flags);
477         return;
478 }
479
480 /*
481  * If a page's buffers are under async read-in (end_buffer_async_read
482  * completion) then there is a possibility that another thread of
483  * control could lock one of the buffers after it has completed
484  * but while some of the other buffers have not completed.  This
485  * locked buffer would confuse end_buffer_async_read() into not unlocking
486  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
487  * that this buffer is not under async I/O.
488  *
489  * The page comes unlocked when it has no locked buffer_async buffers
490  * left.
491  *
492  * PageLocked prevents anyone starting new async I/O reads any of
493  * the buffers.
494  *
495  * PageWriteback is used to prevent simultaneous writeout of the same
496  * page.
497  *
498  * PageLocked prevents anyone from starting writeback of a page which is
499  * under read I/O (PageWriteback is only ever set against a locked page).
500  */
501 static void mark_buffer_async_read(struct buffer_head *bh)
502 {
503         bh->b_end_io = end_buffer_async_read;
504         set_buffer_async_read(bh);
505 }
506
507 void mark_buffer_async_write(struct buffer_head *bh)
508 {
509         bh->b_end_io = end_buffer_async_write;
510         set_buffer_async_write(bh);
511 }
512 EXPORT_SYMBOL(mark_buffer_async_write);
513
514
515 /*
516  * fs/buffer.c contains helper functions for buffer-backed address space's
517  * fsync functions.  A common requirement for buffer-based filesystems is
518  * that certain data from the backing blockdev needs to be written out for
519  * a successful fsync().  For example, ext2 indirect blocks need to be
520  * written back and waited upon before fsync() returns.
521  *
522  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
523  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
524  * management of a list of dependent buffers at ->i_mapping->private_list.
525  *
526  * Locking is a little subtle: try_to_free_buffers() will remove buffers
527  * from their controlling inode's queue when they are being freed.  But
528  * try_to_free_buffers() will be operating against the *blockdev* mapping
529  * at the time, not against the S_ISREG file which depends on those buffers.
530  * So the locking for private_list is via the private_lock in the address_space
531  * which backs the buffers.  Which is different from the address_space 
532  * against which the buffers are listed.  So for a particular address_space,
533  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
534  * mapping->private_list will always be protected by the backing blockdev's
535  * ->private_lock.
536  *
537  * Which introduces a requirement: all buffers on an address_space's
538  * ->private_list must be from the same address_space: the blockdev's.
539  *
540  * address_spaces which do not place buffers at ->private_list via these
541  * utility functions are free to use private_lock and private_list for
542  * whatever they want.  The only requirement is that list_empty(private_list)
543  * be true at clear_inode() time.
544  *
545  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
546  * filesystems should do that.  invalidate_inode_buffers() should just go
547  * BUG_ON(!list_empty).
548  *
549  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
550  * take an address_space, not an inode.  And it should be called
551  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
552  * queued up.
553  *
554  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
555  * list if it is already on a list.  Because if the buffer is on a list,
556  * it *must* already be on the right one.  If not, the filesystem is being
557  * silly.  This will save a ton of locking.  But first we have to ensure
558  * that buffers are taken *off* the old inode's list when they are freed
559  * (presumably in truncate).  That requires careful auditing of all
560  * filesystems (do it inside bforget()).  It could also be done by bringing
561  * b_inode back.
562  */
563
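/*
 * Example (minimal sketch): how an ext2-style filesystem ties these
 * helpers together.  myfs_fsync() is a hypothetical ->fsync method;
 * dependent metadata buffers are assumed to have been queued with
 * mark_buffer_dirty_inode() as they were dirtied:
 */
#if 0	/* illustrative only */
static int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* write out and wait upon everything on ->private_list */
	return sync_mapping_buffers(inode->i_mapping);
}
#endif
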
564 /*
565  * The buffer's backing address_space's private_lock must be held
566  */
567 static inline void __remove_assoc_queue(struct buffer_head *bh)
568 {
569         list_del_init(&bh->b_assoc_buffers);
570         WARN_ON(!bh->b_assoc_map);
571         if (buffer_write_io_error(bh))
572                 set_bit(AS_EIO, &bh->b_assoc_map->flags);
573         bh->b_assoc_map = NULL;
574 }
575
576 int inode_has_buffers(struct inode *inode)
577 {
578         return !list_empty(&inode->i_data.private_list);
579 }
580
581 /*
582  * osync is designed to support O_SYNC io.  It waits synchronously for
583  * all already-submitted IO to complete, but does not queue any new
584  * writes to the disk.
585  *
586  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
587  * you dirty the buffers, and then use osync_inode_buffers to wait for
588  * completion.  Any other dirty buffers which are not yet queued for
589  * write will not be flushed to disk by the osync.
590  */
591 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
592 {
593         struct buffer_head *bh;
594         struct list_head *p;
595         int err = 0;
596
597         spin_lock(lock);
598 repeat:
599         list_for_each_prev(p, list) {
600                 bh = BH_ENTRY(p);
601                 if (buffer_locked(bh)) {
602                         get_bh(bh);
603                         spin_unlock(lock);
604                         wait_on_buffer(bh);
605                         if (!buffer_uptodate(bh))
606                                 err = -EIO;
607                         brelse(bh);
608                         spin_lock(lock);
609                         goto repeat;
610                 }
611         }
612         spin_unlock(lock);
613         return err;
614 }
615
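/*
 * Example (minimal sketch) of the O_SYNC pattern described above:
 * queue each write as the buffer is dirtied, then wait on just those
 * buffers (a real caller would queue many buffers before waiting):
 */
#if 0	/* illustrative only */
static int example_osync_write(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);	/* queue the write immediately */
	wait_on_buffer(bh);		/* later: wait for completion */
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif
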
616 /**
617  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
618  *                        buffers
619  * @mapping: the mapping which wants those buffers written
620  *
621  * Starts I/O against the buffers at mapping->private_list, and waits upon
622  * that I/O.
623  *
624  * Basically, this is a convenience function for fsync().
625  * @mapping is a file or directory which needs those buffers to be written for
626  * a successful fsync().
627  */
628 int sync_mapping_buffers(struct address_space *mapping)
629 {
630         struct address_space *buffer_mapping = mapping->assoc_mapping;
631
632         if (buffer_mapping == NULL || list_empty(&mapping->private_list))
633                 return 0;
634
635         return fsync_buffers_list(&buffer_mapping->private_lock,
636                                         &mapping->private_list);
637 }
638 EXPORT_SYMBOL(sync_mapping_buffers);
639
640 /*
641  * Called when we've recently written block `bblock', and it is known that
642  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
643  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
644  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
645  */
646 void write_boundary_block(struct block_device *bdev,
647                         sector_t bblock, unsigned blocksize)
648 {
649         struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
650         if (bh) {
651                 if (buffer_dirty(bh))
652                         ll_rw_block(WRITE, 1, &bh);
653                 put_bh(bh);
654         }
655 }
656
657 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
658 {
659         struct address_space *mapping = inode->i_mapping;
660         struct address_space *buffer_mapping = bh->b_page->mapping;
661
662         mark_buffer_dirty(bh);
663         if (!mapping->assoc_mapping) {
664                 mapping->assoc_mapping = buffer_mapping;
665         } else {
666                 BUG_ON(mapping->assoc_mapping != buffer_mapping);
667         }
668         if (list_empty(&bh->b_assoc_buffers)) {
669                 spin_lock(&buffer_mapping->private_lock);
670                 list_move_tail(&bh->b_assoc_buffers,
671                                 &mapping->private_list);
672                 bh->b_assoc_map = mapping;
673                 spin_unlock(&buffer_mapping->private_lock);
674         }
675 }
676 EXPORT_SYMBOL(mark_buffer_dirty_inode);
677
678 /*
679  * Add a page to the dirty page list.
680  *
681  * It is a sad fact of life that this function is called from several places
682  * deeply under spinlocking.  It may not sleep.
683  *
684  * If the page has buffers, the uptodate buffers are set dirty, to preserve
685  * dirty-state coherency between the page and the buffers.  If the page does
686  * not have buffers then when they are later attached they will all be set
687  * dirty.
688  *
689  * The buffers are dirtied before the page is dirtied.  There's a small race
690  * window in which a writepage caller may see the page cleanness but not the
691  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
692  * before the buffers, a concurrent writepage caller could clear the page dirty
693  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
694  * page on the dirty page list.
695  *
696  * We use private_lock to lock against try_to_free_buffers while using the
697  * page's buffer list.  Also use this to protect against clean buffers being
698  * added to the page after it was set dirty.
699  *
700  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
701  * address_space though.
702  */
703 int __set_page_dirty_buffers(struct page *page)
704 {
705         struct address_space * const mapping = page_mapping(page);
706
707         if (unlikely(!mapping))
708                 return !TestSetPageDirty(page);
709
710         spin_lock(&mapping->private_lock);
711         if (page_has_buffers(page)) {
712                 struct buffer_head *head = page_buffers(page);
713                 struct buffer_head *bh = head;
714
715                 do {
716                         set_buffer_dirty(bh);
717                         bh = bh->b_this_page;
718                 } while (bh != head);
719         }
720         spin_unlock(&mapping->private_lock);
721
722         if (TestSetPageDirty(page))
723                 return 0;
724
725         write_lock_irq(&mapping->tree_lock);
726         if (page->mapping) {    /* Race with truncate? */
727                 if (mapping_cap_account_dirty(mapping)) {
728                         __inc_zone_page_state(page, NR_FILE_DIRTY);
729                         task_io_account_write(PAGE_CACHE_SIZE);
730                 }
731                 radix_tree_tag_set(&mapping->page_tree,
732                                 page_index(page), PAGECACHE_TAG_DIRTY);
733         }
734         write_unlock_irq(&mapping->tree_lock);
735         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
736         return 1;
737 }
738 EXPORT_SYMBOL(__set_page_dirty_buffers);
739
740 /*
741  * Write out and wait upon a list of buffers.
742  *
743  * We have conflicting pressures: we want to make sure that all
744  * initially dirty buffers get waited on, but that any subsequently
745  * dirtied buffers don't.  After all, we don't want fsync to last
746  * forever if somebody is actively writing to the file.
747  *
748  * Do this in two main stages: first we copy dirty buffers to a
749  * temporary inode list, queueing the writes as we go.  Then we clean
750  * up, waiting for those writes to complete.
751  * 
752  * During this second stage, any subsequent updates to the file may end
753  * up refiling the buffer on the original inode's dirty list again, so
754  * there is a chance we will end up with a buffer queued for write but
755  * not yet completed on that list.  So, as a final cleanup we go through
756  * the osync code to catch these locked, dirty buffers without requeuing
757  * any newly dirty buffers for write.
758  */
759 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
760 {
761         struct buffer_head *bh;
762         struct list_head tmp;
763         int err = 0, err2;
764
765         INIT_LIST_HEAD(&tmp);
766
767         spin_lock(lock);
768         while (!list_empty(list)) {
769                 bh = BH_ENTRY(list->next);
770                 __remove_assoc_queue(bh);
771                 if (buffer_dirty(bh) || buffer_locked(bh)) {
772                         list_add(&bh->b_assoc_buffers, &tmp);
773                         if (buffer_dirty(bh)) {
774                                 get_bh(bh);
775                                 spin_unlock(lock);
776                                 /*
777                                  * Ensure any pending I/O completes so that
778                                  * ll_rw_block() actually writes the current
779                                  * contents - it is a noop if I/O is still in
780                                  * flight on potentially older contents.
781                                  */
782                                 ll_rw_block(SWRITE, 1, &bh);
783                                 brelse(bh);
784                                 spin_lock(lock);
785                         }
786                 }
787         }
788
789         while (!list_empty(&tmp)) {
790                 bh = BH_ENTRY(tmp.prev);
791                 list_del_init(&bh->b_assoc_buffers);
792                 get_bh(bh);
793                 spin_unlock(lock);
794                 wait_on_buffer(bh);
795                 if (!buffer_uptodate(bh))
796                         err = -EIO;
797                 brelse(bh);
798                 spin_lock(lock);
799         }
800         
801         spin_unlock(lock);
802         err2 = osync_buffers_list(lock, list);
803         if (err)
804                 return err;
805         else
806                 return err2;
807 }
808
809 /*
810  * Invalidate any and all dirty buffers on a given inode.  We are
811  * probably unmounting the fs, but that doesn't mean we have already
812  * done a sync().  Just drop the buffers from the inode list.
813  *
814  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
815  * assumes that all the buffers are against the blockdev.  Not true
816  * for reiserfs.
817  */
818 void invalidate_inode_buffers(struct inode *inode)
819 {
820         if (inode_has_buffers(inode)) {
821                 struct address_space *mapping = &inode->i_data;
822                 struct list_head *list = &mapping->private_list;
823                 struct address_space *buffer_mapping = mapping->assoc_mapping;
824
825                 spin_lock(&buffer_mapping->private_lock);
826                 while (!list_empty(list))
827                         __remove_assoc_queue(BH_ENTRY(list->next));
828                 spin_unlock(&buffer_mapping->private_lock);
829         }
830 }
831
832 /*
833  * Remove any clean buffers from the inode's buffer list.  This is called
834  * when we're trying to free the inode itself.  Those buffers can pin it.
835  *
836  * Returns true if all buffers were removed.
837  */
838 int remove_inode_buffers(struct inode *inode)
839 {
840         int ret = 1;
841
842         if (inode_has_buffers(inode)) {
843                 struct address_space *mapping = &inode->i_data;
844                 struct list_head *list = &mapping->private_list;
845                 struct address_space *buffer_mapping = mapping->assoc_mapping;
846
847                 spin_lock(&buffer_mapping->private_lock);
848                 while (!list_empty(list)) {
849                         struct buffer_head *bh = BH_ENTRY(list->next);
850                         if (buffer_dirty(bh)) {
851                                 ret = 0;
852                                 break;
853                         }
854                         __remove_assoc_queue(bh);
855                 }
856                 spin_unlock(&buffer_mapping->private_lock);
857         }
858         return ret;
859 }
860
861 /*
862  * Create the appropriate buffers when given a page for the data area and
863  * the size of each buffer.. Use the bh->b_this_page linked list to
864  * follow the buffers created.  Return NULL if unable to create more
865  * buffers.
866  *
867  * The retry flag is used to differentiate async IO (paging, swapping),
868  * which is not allowed to fail, from ordinary buffer allocations.
869  */
870 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
871                 int retry)
872 {
873         struct buffer_head *bh, *head;
874         long offset;
875
876 try_again:
877         head = NULL;
878         offset = PAGE_SIZE;
879         while ((offset -= size) >= 0) {
880                 bh = alloc_buffer_head(GFP_NOFS);
881                 if (!bh)
882                         goto no_grow;
883
884                 bh->b_bdev = NULL;
885                 bh->b_this_page = head;
886                 bh->b_blocknr = -1;
887                 head = bh;
888
889                 bh->b_state = 0;
890                 atomic_set(&bh->b_count, 0);
891                 bh->b_private = NULL;
892                 bh->b_size = size;
893
894                 /* Link the buffer to its page */
895                 set_bh_page(bh, page, offset);
896
897                 init_buffer(bh, NULL, NULL);
898         }
899         return head;
900 /*
901  * In case anything failed, we just free everything we got.
902  */
903 no_grow:
904         if (head) {
905                 do {
906                         bh = head;
907                         head = head->b_this_page;
908                         free_buffer_head(bh);
909                 } while (head);
910         }
911
912         /*
913          * Return failure for non-async IO requests.  Async IO requests
914          * are not allowed to fail, so we have to wait until buffer heads
915          * become available.  But we don't want tasks sleeping with 
916          * partially complete buffers, so all were released above.
917          */
918         if (!retry)
919                 return NULL;
920
921         /* We're _really_ low on memory. Now we just
922          * wait for old buffer heads to become free due to
923          * finishing IO.  Since this is an async request and
924          * the reserve list is empty, we're sure there are 
925          * async buffer heads in use.
926          */
927         free_more_memory();
928         goto try_again;
929 }
930 EXPORT_SYMBOL_GPL(alloc_page_buffers);
931
932 static inline void
933 link_dev_buffers(struct page *page, struct buffer_head *head)
934 {
935         struct buffer_head *bh, *tail;
936
937         bh = head;
938         do {
939                 tail = bh;
940                 bh = bh->b_this_page;
941         } while (bh);
942         tail->b_this_page = head;
943         attach_page_buffers(page, head);
944 }
945
946 /*
947  * Initialise the state of a blockdev page's buffers.
948  */ 
949 static void
950 init_page_buffers(struct page *page, struct block_device *bdev,
951                         sector_t block, int size)
952 {
953         struct buffer_head *head = page_buffers(page);
954         struct buffer_head *bh = head;
955         int uptodate = PageUptodate(page);
956
957         do {
958                 if (!buffer_mapped(bh)) {
959                         init_buffer(bh, NULL, NULL);
960                         bh->b_bdev = bdev;
961                         bh->b_blocknr = block;
962                         if (uptodate)
963                                 set_buffer_uptodate(bh);
964                         set_buffer_mapped(bh);
965                 }
966                 block++;
967                 bh = bh->b_this_page;
968         } while (bh != head);
969 }
970
971 /*
972  * Create the page-cache page that contains the requested block.
973  *
974  * This is used purely for blockdev mappings.
975  */
976 static struct page *
977 grow_dev_page(struct block_device *bdev, sector_t block,
978                 pgoff_t index, int size)
979 {
980         struct inode *inode = bdev->bd_inode;
981         struct page *page;
982         struct buffer_head *bh;
983
984         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
985         if (!page)
986                 return NULL;
987
988         BUG_ON(!PageLocked(page));
989
990         if (page_has_buffers(page)) {
991                 bh = page_buffers(page);
992                 if (bh->b_size == size) {
993                         init_page_buffers(page, bdev, block, size);
994                         return page;
995                 }
996                 if (!try_to_free_buffers(page))
997                         goto failed;
998         }
999
1000         /*
1001          * Allocate some buffers for this page
1002          */
1003         bh = alloc_page_buffers(page, size, 0);
1004         if (!bh)
1005                 goto failed;
1006
1007         /*
1008          * Link the page to the buffers and initialise them.  Take the
1009          * lock to be atomic wrt __find_get_block(), which does not
1010          * run under the page lock.
1011          */
1012         spin_lock(&inode->i_mapping->private_lock);
1013         link_dev_buffers(page, bh);
1014         init_page_buffers(page, bdev, block, size);
1015         spin_unlock(&inode->i_mapping->private_lock);
1016         return page;
1017
1018 failed:
1019         BUG();
1020         unlock_page(page);
1021         page_cache_release(page);
1022         return NULL;
1023 }
1024
1025 /*
1026  * Create buffers for the specified block device block's page.  If
1027  * that page was dirty, the buffers are set dirty also.
1028  *
1029  * Except that's a bug.  Attaching dirty buffers to a dirty
1030  * blockdev's page can result in filesystem corruption, because
1031  * some of those buffers may be aliases of filesystem data.
1032  * grow_dev_page() will go BUG() if this happens.
1033  */
1034 static int
1035 grow_buffers(struct block_device *bdev, sector_t block, int size)
1036 {
1037         struct page *page;
1038         pgoff_t index;
1039         int sizebits;
1040
1041         sizebits = -1;
1042         do {
1043                 sizebits++;
1044         } while ((size << sizebits) < PAGE_SIZE);
1045
1046         index = block >> sizebits;
1047
1048         /*
1049          * Check for a block which wants to lie outside our maximum possible
1050          * pagecache index.  (this comparison is done using sector_t types).
1051          */
1052         if (unlikely(index != block >> sizebits)) {
1053                 char b[BDEVNAME_SIZE];
1054
1055                 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1056                         "device %s\n",
1057                         __FUNCTION__, (unsigned long long)block,
1058                         bdevname(bdev, b));
1059                 return -EIO;
1060         }
1061         block = index << sizebits;
1062         /* Create a page with the proper size buffers.. */
1063         page = grow_dev_page(bdev, block, index, size);
1064         if (!page)
1065                 return 0;
1066         unlock_page(page);
1067         page_cache_release(page);
1068         return 1;
1069 }
1070
1071 static struct buffer_head *
1072 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1073 {
1074         /* Size must be multiple of hard sectorsize */
1075         if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1076                         (size < 512 || size > PAGE_SIZE))) {
1077                 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1078                                         size);
1079                 printk(KERN_ERR "hardsect size: %d\n",
1080                                         bdev_hardsect_size(bdev));
1081
1082                 dump_stack();
1083                 return NULL;
1084         }
1085
1086         for (;;) {
1087                 struct buffer_head * bh;
1088                 int ret;
1089
1090                 bh = __find_get_block(bdev, block, size);
1091                 if (bh)
1092                         return bh;
1093
1094                 ret = grow_buffers(bdev, block, size);
1095                 if (ret < 0)
1096                         return NULL;
1097                 if (ret == 0)
1098                         free_more_memory();
1099         }
1100 }
1101
1102 /*
1103  * The relationship between dirty buffers and dirty pages:
1104  *
1105  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1106  * the page is tagged dirty in its radix tree.
1107  *
1108  * At all times, the dirtiness of the buffers represents the dirtiness of
1109  * subsections of the page.  If the page has buffers, the page dirty bit is
1110  * merely a hint about the true dirty state.
1111  *
1112  * When a page is set dirty in its entirety, all its buffers are marked dirty
1113  * (if the page has buffers).
1114  *
1115  * When a buffer is marked dirty, its page is dirtied, but the page's other
1116  * buffers are not.
1117  *
1118  * Also.  When blockdev buffers are explicitly read with bread(), they
1119  * individually become uptodate.  But their backing page remains not
1120  * uptodate - even if all of its buffers are uptodate.  A subsequent
1121  * block_read_full_page() against that page will discover all the uptodate
1122  * buffers, will set the page uptodate and will perform no I/O.
1123  */
1124
1125 /**
1126  * mark_buffer_dirty - mark a buffer_head as needing writeout
1127  * @bh: the buffer_head to mark dirty
1128  *
1129  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1130  * backing page dirty, then tag the page as dirty in its address_space's radix
1131  * tree and then attach the address_space's inode to its superblock's dirty
1132  * inode list.
1133  *
1134  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1135  * mapping->tree_lock and the global inode_lock.
1136  */
1137 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1138 {
1139         if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1140                 __set_page_dirty_nobuffers(bh->b_page);
1141 }
1142
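/*
 * Example (minimal sketch): the classic metadata update cycle built on
 * mark_buffer_dirty().  sb_bread() is the superblock-blocksize wrapper
 * around __bread(); @blocknr is an arbitrary metadata block:
 */
#if 0	/* illustrative only */
static int example_update_metadata(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return -EIO;
	/* ... modify bh->b_data ... */
	mark_buffer_dirty(bh);	/* writeback will pick the block up */
	brelse(bh);
	return 0;
}
#endif
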
1143 /*
1144  * Decrement a buffer_head's reference count.  If all buffers against a page
1145  * have zero reference count, are clean and unlocked, and if the page is clean
1146  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1147  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1148  * a page but it ends up not being freed, and buffers may later be reattached).
1149  */
1150 void __brelse(struct buffer_head * buf)
1151 {
1152         if (atomic_read(&buf->b_count)) {
1153                 put_bh(buf);
1154                 return;
1155         }
1156         printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1157         WARN_ON(1);
1158 }
1159
1160 /*
1161  * bforget() is like brelse(), except it discards any
1162  * potentially dirty data.
1163  */
1164 void __bforget(struct buffer_head *bh)
1165 {
1166         clear_buffer_dirty(bh);
1167         if (!list_empty(&bh->b_assoc_buffers)) {
1168                 struct address_space *buffer_mapping = bh->b_page->mapping;
1169
1170                 spin_lock(&buffer_mapping->private_lock);
1171                 list_del_init(&bh->b_assoc_buffers);
1172                 bh->b_assoc_map = NULL;
1173                 spin_unlock(&buffer_mapping->private_lock);
1174         }
1175         __brelse(bh);
1176 }
1177
1178 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1179 {
1180         lock_buffer(bh);
1181         if (buffer_uptodate(bh)) {
1182                 unlock_buffer(bh);
1183                 return bh;
1184         } else {
1185                 get_bh(bh);
1186                 bh->b_end_io = end_buffer_read_sync;
1187                 submit_bh(READ, bh);
1188                 wait_on_buffer(bh);
1189                 if (buffer_uptodate(bh))
1190                         return bh;
1191         }
1192         brelse(bh);
1193         return NULL;
1194 }
1195
1196 /*
1197  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1198  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1199  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1200  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1201  * CPU's LRUs at the same time.
1202  *
1203  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1204  * sb_find_get_block().
1205  *
1206  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1207  * a local interrupt disable for that.
1208  */
1209
1210 #define BH_LRU_SIZE     8
1211
1212 struct bh_lru {
1213         struct buffer_head *bhs[BH_LRU_SIZE];
1214 };
1215
1216 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1217
1218 #ifdef CONFIG_SMP
1219 #define bh_lru_lock()   local_irq_disable()
1220 #define bh_lru_unlock() local_irq_enable()
1221 #else
1222 #define bh_lru_lock()   preempt_disable()
1223 #define bh_lru_unlock() preempt_enable()
1224 #endif
1225
1226 static inline void check_irqs_on(void)
1227 {
1228 #ifdef irqs_disabled
1229         BUG_ON(irqs_disabled());
1230 #endif
1231 }
1232
1233 /*
1234  * The LRU management algorithm is dopey-but-simple.  Sorry.
1235  */
1236 static void bh_lru_install(struct buffer_head *bh)
1237 {
1238         struct buffer_head *evictee = NULL;
1239         struct bh_lru *lru;
1240
1241         check_irqs_on();
1242         bh_lru_lock();
1243         lru = &__get_cpu_var(bh_lrus);
1244         if (lru->bhs[0] != bh) {
1245                 struct buffer_head *bhs[BH_LRU_SIZE];
1246                 int in;
1247                 int out = 0;
1248
1249                 get_bh(bh);
1250                 bhs[out++] = bh;
1251                 for (in = 0; in < BH_LRU_SIZE; in++) {
1252                         struct buffer_head *bh2 = lru->bhs[in];
1253
1254                         if (bh2 == bh) {
1255                                 __brelse(bh2);
1256                         } else {
1257                                 if (out >= BH_LRU_SIZE) {
1258                                         BUG_ON(evictee != NULL);
1259                                         evictee = bh2;
1260                                 } else {
1261                                         bhs[out++] = bh2;
1262                                 }
1263                         }
1264                 }
1265                 while (out < BH_LRU_SIZE)
1266                         bhs[out++] = NULL;
1267                 memcpy(lru->bhs, bhs, sizeof(bhs));
1268         }
1269         bh_lru_unlock();
1270
1271         if (evictee)
1272                 __brelse(evictee);
1273 }
1274
1275 /*
1276  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1277  */
1278 static struct buffer_head *
1279 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1280 {
1281         struct buffer_head *ret = NULL;
1282         struct bh_lru *lru;
1283         unsigned int i;
1284
1285         check_irqs_on();
1286         bh_lru_lock();
1287         lru = &__get_cpu_var(bh_lrus);
1288         for (i = 0; i < BH_LRU_SIZE; i++) {
1289                 struct buffer_head *bh = lru->bhs[i];
1290
1291                 if (bh && bh->b_bdev == bdev &&
1292                                 bh->b_blocknr == block && bh->b_size == size) {
1293                         if (i) {
1294                                 while (i) {
1295                                         lru->bhs[i] = lru->bhs[i - 1];
1296                                         i--;
1297                                 }
1298                                 lru->bhs[0] = bh;
1299                         }
1300                         get_bh(bh);
1301                         ret = bh;
1302                         break;
1303                 }
1304         }
1305         bh_lru_unlock();
1306         return ret;
1307 }
1308
1309 /*
1310  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1311  * it in the LRU and mark it as accessed.  If it is not present then return
1312  * NULL.
1313  */
1314 struct buffer_head *
1315 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1316 {
1317         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1318
1319         if (bh == NULL) {
1320                 bh = __find_get_block_slow(bdev, block);
1321                 if (bh)
1322                         bh_lru_install(bh);
1323         }
1324         if (bh)
1325                 touch_buffer(bh);
1326         return bh;
1327 }
1328 EXPORT_SYMBOL(__find_get_block);
1329
1330 /*
1331  * __getblk will locate (and, if necessary, create) the buffer_head
1332  * which corresponds to the passed block_device, block and size. The
1333  * returned buffer has its reference count incremented.
1334  *
1335  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1336  * illegal block number, __getblk() will happily return a buffer_head
1337  * which represents the non-existent block.  Very weird.
1338  *
1339  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1340  * attempt is failing.  FIXME, perhaps?
1341  */
1342 struct buffer_head *
1343 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1344 {
1345         struct buffer_head *bh = __find_get_block(bdev, block, size);
1346
1347         might_sleep();
1348         if (bh == NULL)
1349                 bh = __getblk_slow(bdev, block, size);
1350         return bh;
1351 }
1352 EXPORT_SYMBOL(__getblk);
1353
1354 /*
1355  * Do async read-ahead on a buffer..
1356  */
1357 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1358 {
1359         struct buffer_head *bh = __getblk(bdev, block, size);
1360         if (likely(bh)) {
1361                 ll_rw_block(READA, 1, &bh);
1362                 brelse(bh);
1363         }
1364 }
1365 EXPORT_SYMBOL(__breadahead);
1366
1367 /**
1368  *  __bread() - reads a specified block and returns the bh
1369  *  @bdev: the block_device to read from
1370  *  @block: number of block
1371  *  @size: size (in bytes) to read
1372  * 
1373  *  Reads a specified block, and returns the buffer head that contains it.
1374  *  It returns NULL if the block was unreadable.
1375  */
1376 struct buffer_head *
1377 __bread(struct block_device *bdev, sector_t block, unsigned size)
1378 {
1379         struct buffer_head *bh = __getblk(bdev, block, size);
1380
1381         if (likely(bh) && !buffer_uptodate(bh))
1382                 bh = __bread_slow(bh);
1383         return bh;
1384 }
1385 EXPORT_SYMBOL(__bread);
1386
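/*
 * Example (minimal sketch): a synchronous read of one block.  The block
 * number and the 4096-byte size are arbitrary (the size must be a
 * multiple of the device's hard sector size, within 512..PAGE_SIZE):
 */
#if 0	/* illustrative only */
static int example_read_block(struct block_device *bdev)
{
	struct buffer_head *bh = __bread(bdev, 0, 4096);

	if (!bh)
		return -EIO;	/* the block was unreadable */
	/* bh->b_data now holds the block contents */
	brelse(bh);
	return 0;
}
#endif
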
1387 /*
1388  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1389  * This doesn't race because it runs in each cpu either in irq
1390  * or with preempt disabled.
1391  */
1392 static void invalidate_bh_lru(void *arg)
1393 {
1394         struct bh_lru *b = &get_cpu_var(bh_lrus);
1395         int i;
1396
1397         for (i = 0; i < BH_LRU_SIZE; i++) {
1398                 brelse(b->bhs[i]);
1399                 b->bhs[i] = NULL;
1400         }
1401         put_cpu_var(bh_lrus);
1402 }
1403         
1404 void invalidate_bh_lrus(void)
1405 {
1406         on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1407 }
1408
1409 void set_bh_page(struct buffer_head *bh,
1410                 struct page *page, unsigned long offset)
1411 {
1412         bh->b_page = page;
1413         BUG_ON(offset >= PAGE_SIZE);
1414         if (PageHighMem(page))
1415                 /*
1416                  * This catches illegal uses and preserves the offset:
1417                  */
1418                 bh->b_data = (char *)(0 + offset);
1419         else
1420                 bh->b_data = page_address(page) + offset;
1421 }
1422 EXPORT_SYMBOL(set_bh_page);
1423
1424 /*
1425  * Called when truncating a buffer on a page completely.
1426  */
1427 static void discard_buffer(struct buffer_head * bh)
1428 {
1429         lock_buffer(bh);
1430         clear_buffer_dirty(bh);
1431         bh->b_bdev = NULL;
1432         clear_buffer_mapped(bh);
1433         clear_buffer_req(bh);
1434         clear_buffer_new(bh);
1435         clear_buffer_delay(bh);
1436         clear_buffer_unwritten(bh);
1437         unlock_buffer(bh);
1438 }
1439
1440 /**
1441  * block_invalidatepage - invalidate part or all of a buffer-backed page
1442  *
1443  * @page: the page which is affected
1444  * @offset: the index of the truncation point
1445  *
1446  * block_invalidatepage() is called when all or part of the page has become
1447  * invalidated by a truncate operation.
1448  *
1449  * block_invalidatepage() does not have to release all buffers, but it must
1450  * ensure that no dirty buffer is left outside @offset and that no I/O
1451  * is underway against any of the blocks which are outside the truncation
1452  * point.  Because the caller is about to free (and possibly reuse) those
1453  * blocks on-disk.
1454  */
1455 void block_invalidatepage(struct page *page, unsigned long offset)
1456 {
1457         struct buffer_head *head, *bh, *next;
1458         unsigned int curr_off = 0;
1459
1460         BUG_ON(!PageLocked(page));
1461         if (!page_has_buffers(page))
1462                 goto out;
1463
1464         head = page_buffers(page);
1465         bh = head;
1466         do {
1467                 unsigned int next_off = curr_off + bh->b_size;
1468                 next = bh->b_this_page;
1469
1470                 /*
1471                  * is this block fully invalidated?
1472                  */
1473                 if (offset <= curr_off)
1474                         discard_buffer(bh);
1475                 curr_off = next_off;
1476                 bh = next;
1477         } while (bh != head);
1478
1479         /*
1480          * We release buffers only if the entire page is being invalidated.
1481          * The get_block cached value has been unconditionally invalidated,
1482          * so real IO is not possible anymore.
1483          */
1484         if (offset == 0)
1485                 try_to_release_page(page, 0);
1486 out:
1487         return;
1488 }
1489 EXPORT_SYMBOL(block_invalidatepage);
1490
1491 /*
1492  * We attach and possibly dirty the buffers atomically wrt
1493  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1494  * is already excluded via the page lock.
1495  */
1496 void create_empty_buffers(struct page *page,
1497                         unsigned long blocksize, unsigned long b_state)
1498 {
1499         struct buffer_head *bh, *head, *tail;
1500
1501         head = alloc_page_buffers(page, blocksize, 1);
1502         bh = head;
1503         do {
1504                 bh->b_state |= b_state;
1505                 tail = bh;
1506                 bh = bh->b_this_page;
1507         } while (bh);
1508         tail->b_this_page = head;
1509
1510         spin_lock(&page->mapping->private_lock);
1511         if (PageUptodate(page) || PageDirty(page)) {
1512                 bh = head;
1513                 do {
1514                         if (PageDirty(page))
1515                                 set_buffer_dirty(bh);
1516                         if (PageUptodate(page))
1517                                 set_buffer_uptodate(bh);
1518                         bh = bh->b_this_page;
1519                 } while (bh != head);
1520         }
1521         attach_page_buffers(page, head);
1522         spin_unlock(&page->mapping->private_lock);
1523 }
1524 EXPORT_SYMBOL(create_empty_buffers);
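
/*
 * Illustrative sketch, not part of this file: the typical caller pattern
 * (used throughout this file) attaches buffers on demand before walking
 * the ring via b_this_page.  foo_page_buffers() is hypothetical.
 */
#if 0
static struct buffer_head *foo_page_buffers(struct inode *inode,
                struct page *page)
{
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);
        return page_buffers(page);
}
#endif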
1525
1526 /*
1527  * We are taking a block for data and we don't want any output from any
1528  * buffer-cache aliases starting from the return of this function until
1529  * the moment when something explicitly marks the buffer dirty (hopefully
1530  * that won't happen until we free that block ;-)
1531  * We don't even need to mark it not-uptodate - nobody can expect
1532  * anything from a newly allocated buffer anyway. We used to use
1533  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1534  * don't want to mark the alias unmapped, for example - it would confuse
1535  * anyone who might pick it up with bread() afterwards...
1536  *
1537  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1538  * be writeout I/O going on against recently-freed buffers.  We don't
1539  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1540  * only if we really need to.  That happens here.
1541  */
1542 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1543 {
1544         struct buffer_head *old_bh;
1545
1546         might_sleep();
1547
1548         old_bh = __find_get_block_slow(bdev, block);
1549         if (old_bh) {
1550                 clear_buffer_dirty(old_bh);
1551                 wait_on_buffer(old_bh);
1552                 clear_buffer_req(old_bh);
1553                 __brelse(old_bh);
1554         }
1555 }
1556 EXPORT_SYMBOL(unmap_underlying_metadata);
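
/*
 * Illustrative sketch, not part of this file: the canonical caller pattern
 * (see __block_write_full_page() above) kills any stale blockdev alias
 * right after get_block() allocates a new block.  foo_map_new_block() is
 * hypothetical.
 */
#if 0
static int foo_map_new_block(struct inode *inode, sector_t block,
                struct buffer_head *bh, get_block_t *get_block)
{
        int err = get_block(inode, block, bh, 1);

        if (!err && buffer_new(bh))
                unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
        return err;
}
#endif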
1557
1558 /*
1559  * NOTE! All mapped/uptodate combinations are valid:
1560  *
1561  *      Mapped  Uptodate        Meaning
1562  *
1563  *      No      No              "unknown" - must do get_block()
1564  *      No      Yes             "hole" - zero-filled
1565  *      Yes     No              "allocated" - allocated on disk, not read in
1566  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1567  *
1568  * "Dirty" is valid only with the last case (mapped+uptodate).
1569  */
1570
1571 /*
1572  * While block_write_full_page is writing back the dirty buffers under
1573  * the page lock, whoever dirtied the buffers may decide to clean them
1574  * again at any time.  We handle that by only looking at the buffer
1575  * state inside lock_buffer().
1576  *
1577  * If block_write_full_page() is called for regular writeback
1578  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1579  * locked buffer.  This can only happen if someone has written the buffer
1580  * directly, with submit_bh().  At the address_space level PageWriteback
1581  * prevents this contention from occurring.
1582  */
1583 static int __block_write_full_page(struct inode *inode, struct page *page,
1584                         get_block_t *get_block, struct writeback_control *wbc)
1585 {
1586         int err;
1587         sector_t block;
1588         sector_t last_block;
1589         struct buffer_head *bh, *head;
1590         const unsigned blocksize = 1 << inode->i_blkbits;
1591         int nr_underway = 0;
1592
1593         BUG_ON(!PageLocked(page));
1594
1595         last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1596
1597         if (!page_has_buffers(page)) {
1598                 create_empty_buffers(page, blocksize,
1599                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1600         }
1601
1602         /*
1603          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1604          * here, and the (potentially unmapped) buffers may become dirty at
1605          * any time.  If a buffer becomes dirty here after we've inspected it
1606          * then we just miss that fact, and the page stays dirty.
1607          *
1608          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1609          * handle that here by just cleaning them.
1610          */
1611
1612         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1613         head = page_buffers(page);
1614         bh = head;
1615
1616         /*
1617          * Get all the dirty buffers mapped to disk addresses and
1618          * handle any aliases from the underlying blockdev's mapping.
1619          */
1620         do {
1621                 if (block > last_block) {
1622                         /*
1623                          * mapped buffers outside i_size will occur, because
1624                          * this page can be outside i_size when there is a
1625                          * truncate in progress.
1626                          */
1627                         /*
1628                          * The buffer was zeroed by block_write_full_page()
1629                          */
1630                         clear_buffer_dirty(bh);
1631                         set_buffer_uptodate(bh);
1632                 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1633                         WARN_ON(bh->b_size != blocksize);
1634                         err = get_block(inode, block, bh, 1);
1635                         if (err)
1636                                 goto recover;
1637                         if (buffer_new(bh)) {
1638                                 /* blockdev mappings never come here */
1639                                 clear_buffer_new(bh);
1640                                 unmap_underlying_metadata(bh->b_bdev,
1641                                                         bh->b_blocknr);
1642                         }
1643                 }
1644                 bh = bh->b_this_page;
1645                 block++;
1646         } while (bh != head);
1647
1648         do {
1649                 if (!buffer_mapped(bh))
1650                         continue;
1651                 /*
1652                  * If it's a fully non-blocking write attempt and we cannot
1653                  * lock the buffer then redirty the page.  Note that this can
1654                  * potentially cause a busy-wait loop from pdflush and kswapd
1655                  * activity, but those code paths have their own higher-level
1656                  * throttling.
1657                  */
1658                 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1659                         lock_buffer(bh);
1660                 } else if (test_set_buffer_locked(bh)) {
1661                         redirty_page_for_writepage(wbc, page);
1662                         continue;
1663                 }
1664                 if (test_clear_buffer_dirty(bh)) {
1665                         mark_buffer_async_write(bh);
1666                 } else {
1667                         unlock_buffer(bh);
1668                 }
1669         } while ((bh = bh->b_this_page) != head);
1670
1671         /*
1672          * The page and its buffers are protected by PageWriteback(), so we can
1673          * drop the bh refcounts early.
1674          */
1675         BUG_ON(PageWriteback(page));
1676         set_page_writeback(page);
1677
1678         do {
1679                 struct buffer_head *next = bh->b_this_page;
1680                 if (buffer_async_write(bh)) {
1681                         submit_bh(WRITE, bh);
1682                         nr_underway++;
1683                 }
1684                 bh = next;
1685         } while (bh != head);
1686         unlock_page(page);
1687
1688         err = 0;
1689 done:
1690         if (nr_underway == 0) {
1691                 /*
1692                  * The page was marked dirty, but the buffers were
1693                  * clean.  Someone wrote them back by hand with
1694                  * ll_rw_block/submit_bh.  A rare case.
1695                  */
1696                 end_page_writeback(page);
1697
1698                 /*
1699                  * The page and buffer_heads can be released at any time from
1700                  * here on.
1701                  */
1702                 wbc->pages_skipped++;   /* We didn't write this page */
1703         }
1704         return err;
1705
1706 recover:
1707         /*
1708          * ENOSPC, or some other error.  We may already have added some
1709          * blocks to the file, so we need to write these out to avoid
1710          * exposing stale data.
1711          * The page is currently locked and not marked for writeback
1712          */
1713         bh = head;
1714         /* Recovery: lock and submit the mapped buffers */
1715         do {
1716                 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1717                         lock_buffer(bh);
1718                         mark_buffer_async_write(bh);
1719                 } else {
1720                         /*
1721                          * The buffer may have been set dirty during
1722                          * attachment to a dirty page.
1723                          */
1724                         clear_buffer_dirty(bh);
1725                 }
1726         } while ((bh = bh->b_this_page) != head);
1727         SetPageError(page);
1728         BUG_ON(PageWriteback(page));
1729         mapping_set_error(page->mapping, err);
1730         set_page_writeback(page);
1731         do {
1732                 struct buffer_head *next = bh->b_this_page;
1733                 if (buffer_async_write(bh)) {
1734                         clear_buffer_dirty(bh);
1735                         submit_bh(WRITE, bh);
1736                         nr_underway++;
1737                 }
1738                 bh = next;
1739         } while (bh != head);
1740         unlock_page(page);
1741         goto done;
1742 }
1743
1744 static int __block_prepare_write(struct inode *inode, struct page *page,
1745                 unsigned from, unsigned to, get_block_t *get_block)
1746 {
1747         unsigned block_start, block_end;
1748         sector_t block;
1749         int err = 0;
1750         unsigned blocksize, bbits;
1751         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1752
1753         BUG_ON(!PageLocked(page));
1754         BUG_ON(from > PAGE_CACHE_SIZE);
1755         BUG_ON(to > PAGE_CACHE_SIZE);
1756         BUG_ON(from > to);
1757
1758         blocksize = 1 << inode->i_blkbits;
1759         if (!page_has_buffers(page))
1760                 create_empty_buffers(page, blocksize, 0);
1761         head = page_buffers(page);
1762
1763         bbits = inode->i_blkbits;
1764         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1765
1766         for(bh = head, block_start = 0; bh != head || !block_start;
1767             block++, block_start=block_end, bh = bh->b_this_page) {
1768                 block_end = block_start + blocksize;
1769                 if (block_end <= from || block_start >= to) {
1770                         if (PageUptodate(page)) {
1771                                 if (!buffer_uptodate(bh))
1772                                         set_buffer_uptodate(bh);
1773                         }
1774                         continue;
1775                 }
1776                 if (buffer_new(bh))
1777                         clear_buffer_new(bh);
1778                 if (!buffer_mapped(bh)) {
1779                         WARN_ON(bh->b_size != blocksize);
1780                         err = get_block(inode, block, bh, 1);
1781                         if (err)
1782                                 break;
1783                         if (buffer_new(bh)) {
1784                                 unmap_underlying_metadata(bh->b_bdev,
1785                                                         bh->b_blocknr);
1786                                 if (PageUptodate(page)) {
1787                                         set_buffer_uptodate(bh);
1788                                         continue;
1789                                 }
1790                                 if (block_end > to || block_start < from) {
1791                                         void *kaddr;
1792
1793                                         kaddr = kmap_atomic(page, KM_USER0);
1794                                         if (block_end > to)
1795                                                 memset(kaddr+to, 0,
1796                                                         block_end-to);
1797                                         if (block_start < from)
1798                                                 memset(kaddr+block_start,
1799                                                         0, from-block_start);
1800                                         flush_dcache_page(page);
1801                                         kunmap_atomic(kaddr, KM_USER0);
1802                                 }
1803                                 continue;
1804                         }
1805                 }
1806                 if (PageUptodate(page)) {
1807                         if (!buffer_uptodate(bh))
1808                                 set_buffer_uptodate(bh);
1809                         continue; 
1810                 }
1811                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1812                     !buffer_unwritten(bh) &&
1813                      (block_start < from || block_end > to)) {
1814                         ll_rw_block(READ, 1, &bh);
1815                         *wait_bh++=bh;
1816                 }
1817         }
1818         /*
1819          * If we issued read requests - let them complete.
1820          */
1821         while(wait_bh > wait) {
1822                 wait_on_buffer(*--wait_bh);
1823                 if (!buffer_uptodate(*wait_bh))
1824                         err = -EIO;
1825         }
1826         if (!err) {
1827                 bh = head;
1828                 do {
1829                         if (buffer_new(bh))
1830                                 clear_buffer_new(bh);
1831                 } while ((bh = bh->b_this_page) != head);
1832                 return 0;
1833         }
1834         /* Error case: */
1835         /*
1836          * Zero out any newly allocated blocks to avoid exposing stale
1837          * data.  If BH_New is set, we know that the block was newly
1838          * allocated in the above loop.
1839          */
1840         bh = head;
1841         block_start = 0;
1842         do {
1843                 block_end = block_start+blocksize;
1844                 if (block_end <= from)
1845                         goto next_bh;
1846                 if (block_start >= to)
1847                         break;
1848                 if (buffer_new(bh)) {
1849                         void *kaddr;
1850
1851                         clear_buffer_new(bh);
1852                         kaddr = kmap_atomic(page, KM_USER0);
1853                         memset(kaddr+block_start, 0, bh->b_size);
1854                         flush_dcache_page(page);
1855                         kunmap_atomic(kaddr, KM_USER0);
1856                         set_buffer_uptodate(bh);
1857                         mark_buffer_dirty(bh);
1858                 }
1859 next_bh:
1860                 block_start = block_end;
1861                 bh = bh->b_this_page;
1862         } while (bh != head);
1863         return err;
1864 }
1865
1866 static int __block_commit_write(struct inode *inode, struct page *page,
1867                 unsigned from, unsigned to)
1868 {
1869         unsigned block_start, block_end;
1870         int partial = 0;
1871         unsigned blocksize;
1872         struct buffer_head *bh, *head;
1873
1874         blocksize = 1 << inode->i_blkbits;
1875
1876         for(bh = head = page_buffers(page), block_start = 0;
1877             bh != head || !block_start;
1878             block_start=block_end, bh = bh->b_this_page) {
1879                 block_end = block_start + blocksize;
1880                 if (block_end <= from || block_start >= to) {
1881                         if (!buffer_uptodate(bh))
1882                                 partial = 1;
1883                 } else {
1884                         set_buffer_uptodate(bh);
1885                         mark_buffer_dirty(bh);
1886                 }
1887         }
1888
1889         /*
1890          * If this is a partial write which happened to make all buffers
1891          * uptodate then we can optimize away a bogus readpage() for
1892          * the next read(). Here we 'discover' whether the page went
1893          * uptodate as a result of this (potentially partial) write.
1894          */
1895         if (!partial)
1896                 SetPageUptodate(page);
1897         return 0;
1898 }
1899
1900 /*
1901  * Generic "read page" function for block devices that have the normal
1902  * get_block functionality. That covers most of the block device filesystems.
1903  * Reads the page asynchronously --- the unlock_buffer() and
1904  * set/clear_buffer_uptodate() functions propagate buffer state into the
1905  * page struct once IO has completed.
1906  */
1907 int block_read_full_page(struct page *page, get_block_t *get_block)
1908 {
1909         struct inode *inode = page->mapping->host;
1910         sector_t iblock, lblock;
1911         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
1912         unsigned int blocksize;
1913         int nr, i;
1914         int fully_mapped = 1;
1915
1916         BUG_ON(!PageLocked(page));
1917         blocksize = 1 << inode->i_blkbits;
1918         if (!page_has_buffers(page))
1919                 create_empty_buffers(page, blocksize, 0);
1920         head = page_buffers(page);
1921
1922         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1923         lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
1924         bh = head;
1925         nr = 0;
1926         i = 0;
1927
1928         do {
1929                 if (buffer_uptodate(bh))
1930                         continue;
1931
1932                 if (!buffer_mapped(bh)) {
1933                         int err = 0;
1934
1935                         fully_mapped = 0;
1936                         if (iblock < lblock) {
1937                                 WARN_ON(bh->b_size != blocksize);
1938                                 err = get_block(inode, iblock, bh, 0);
1939                                 if (err)
1940                                         SetPageError(page);
1941                         }
1942                         if (!buffer_mapped(bh)) {
1943                                 void *kaddr = kmap_atomic(page, KM_USER0);
1944                                 memset(kaddr + i * blocksize, 0, blocksize);
1945                                 flush_dcache_page(page);
1946                                 kunmap_atomic(kaddr, KM_USER0);
1947                                 if (!err)
1948                                         set_buffer_uptodate(bh);
1949                                 continue;
1950                         }
1951                         /*
1952                          * get_block() might have updated the buffer
1953                          * synchronously
1954                          */
1955                         if (buffer_uptodate(bh))
1956                                 continue;
1957                 }
1958                 arr[nr++] = bh;
1959         } while (i++, iblock++, (bh = bh->b_this_page) != head);
1960
1961         if (fully_mapped)
1962                 SetPageMappedToDisk(page);
1963
1964         if (!nr) {
1965                 /*
1966                  * All buffers are uptodate - we can set the page uptodate
1967                  * as well. But not if get_block() returned an error.
1968                  */
1969                 if (!PageError(page))
1970                         SetPageUptodate(page);
1971                 unlock_page(page);
1972                 return 0;
1973         }
1974
1975         /* Stage two: lock the buffers */
1976         for (i = 0; i < nr; i++) {
1977                 bh = arr[i];
1978                 lock_buffer(bh);
1979                 mark_buffer_async_read(bh);
1980         }
1981
1982         /*
1983          * Stage 3: start the IO.  Check for uptodateness
1984          * inside the buffer lock in case another process reading
1985          * the underlying blockdev brought it uptodate (the sct fix).
1986          */
1987         for (i = 0; i < nr; i++) {
1988                 bh = arr[i];
1989                 if (buffer_uptodate(bh))
1990                         end_buffer_async_read(bh, 1);
1991                 else
1992                         submit_bh(READ, bh);
1993         }
1994         return 0;
1995 }
1996
1997 /* utility function for filesystems that need to do work on expanding
1998  * truncates.  Uses prepare/commit_write to allow the filesystem to
1999  * deal with the hole.  
2000  */
2001 static int __generic_cont_expand(struct inode *inode, loff_t size,
2002                                  pgoff_t index, unsigned int offset)
2003 {
2004         struct address_space *mapping = inode->i_mapping;
2005         struct page *page;
2006         unsigned long limit;
2007         int err;
2008
2009         err = -EFBIG;
2010         limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2011         if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2012                 send_sig(SIGXFSZ, current, 0);
2013                 goto out;
2014         }
2015         if (size > inode->i_sb->s_maxbytes)
2016                 goto out;
2017
2018         err = -ENOMEM;
2019         page = grab_cache_page(mapping, index);
2020         if (!page)
2021                 goto out;
2022         err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2023         if (err) {
2024                 /*
2025                  * ->prepare_write() may have instantiated a few blocks
2026                  * outside i_size.  Trim these off again.
2027                  */
2028                 unlock_page(page);
2029                 page_cache_release(page);
2030                 vmtruncate(inode, inode->i_size);
2031                 goto out;
2032         }
2033
2034         err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2035
2036         unlock_page(page);
2037         page_cache_release(page);
2038         if (err > 0)
2039                 err = 0;
2040 out:
2041         return err;
2042 }
2043
2044 int generic_cont_expand(struct inode *inode, loff_t size)
2045 {
2046         pgoff_t index;
2047         unsigned int offset;
2048
2049         offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2050
2051         /* ugh.  in prepare/commit_write, if from==to==start of block, we
2052          * skip the prepare.  make sure we never send an offset for the start
2053          * of a block.
2054          */
2055         if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2056                 /* caller must handle this extra byte. */
2057                 offset++;
2058         }
2059         index = size >> PAGE_CACHE_SHIFT;
2060
2061         return __generic_cont_expand(inode, size, index, offset);
2062 }
2063
2064 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2065 {
2066         loff_t pos = size - 1;
2067         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2068         unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2069
2070         /* prepare/commit_write can handle even if from==to==start of block. */
2071         return __generic_cont_expand(inode, size, index, offset);
2072 }
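
/*
 * Illustrative sketch, not part of this file: extending i_size from a
 * hypothetical foo_setattr(), the usual caller of
 * generic_cont_expand_simple().
 */
#if 0
static int foo_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;

        if ((attr->ia_valid & ATTR_SIZE) &&
            attr->ia_size > i_size_read(inode)) {
                int err = generic_cont_expand_simple(inode, attr->ia_size);
                if (err)
                        return err;
        }
        return inode_setattr(inode, attr);
}
#endif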
2073
2074 /*
2075  * For moronic filesystems that do not allow holes in files.
2076  * We may have to extend the file.
2077  */
2078
2079 int cont_prepare_write(struct page *page, unsigned offset,
2080                 unsigned to, get_block_t *get_block, loff_t *bytes)
2081 {
2082         struct address_space *mapping = page->mapping;
2083         struct inode *inode = mapping->host;
2084         struct page *new_page;
2085         pgoff_t pgpos;
2086         long status;
2087         unsigned zerofrom;
2088         unsigned blocksize = 1 << inode->i_blkbits;
2089         void *kaddr;
2090
2091         while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2092                 status = -ENOMEM;
2093                 new_page = grab_cache_page(mapping, pgpos);
2094                 if (!new_page)
2095                         goto out;
2096                 /* we might sleep */
2097                 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2098                         unlock_page(new_page);
2099                         page_cache_release(new_page);
2100                         continue;
2101                 }
2102                 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2103                 if (zerofrom & (blocksize-1)) {
2104                         *bytes |= (blocksize-1);
2105                         (*bytes)++;
2106                 }
2107                 status = __block_prepare_write(inode, new_page, zerofrom,
2108                                                 PAGE_CACHE_SIZE, get_block);
2109                 if (status)
2110                         goto out_unmap;
2111                 kaddr = kmap_atomic(new_page, KM_USER0);
2112                 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2113                 flush_dcache_page(new_page);
2114                 kunmap_atomic(kaddr, KM_USER0);
2115                 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2116                 unlock_page(new_page);
2117                 page_cache_release(new_page);
2118         }
2119
2120         if (page->index < pgpos) {
2121                 /* completely inside the area */
2122                 zerofrom = offset;
2123         } else {
2124                 /* page covers the boundary, find the boundary offset */
2125                 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2126
2127                 /* if we will expand the thing, the last block will be filled */
2128                 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2129                         *bytes |= (blocksize-1);
2130                         (*bytes)++;
2131                 }
2132
2133                 /* starting below the boundary? Nothing to zero out */
2134                 if (offset <= zerofrom)
2135                         zerofrom = offset;
2136         }
2137         status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2138         if (status)
2139                 goto out1;
2140         if (zerofrom < offset) {
2141                 kaddr = kmap_atomic(page, KM_USER0);
2142                 memset(kaddr+zerofrom, 0, offset-zerofrom);
2143                 flush_dcache_page(page);
2144                 kunmap_atomic(kaddr, KM_USER0);
2145                 __block_commit_write(inode, page, zerofrom, offset);
2146         }
2147         return 0;
2148 out1:
2149         ClearPageUptodate(page);
2150         return status;
2151
2152 out_unmap:
2153         ClearPageUptodate(new_page);
2154         unlock_page(new_page);
2155         page_cache_release(new_page);
2156 out:
2157         return status;
2158 }
2159
2160 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2161                         get_block_t *get_block)
2162 {
2163         struct inode *inode = page->mapping->host;
2164         int err = __block_prepare_write(inode, page, from, to, get_block);
2165         if (err)
2166                 ClearPageUptodate(page);
2167         return err;
2168 }
2169
2170 int block_commit_write(struct page *page, unsigned from, unsigned to)
2171 {
2172         struct inode *inode = page->mapping->host;
2173         __block_commit_write(inode,page,from,to);
2174         return 0;
2175 }
2176
2177 int generic_commit_write(struct file *file, struct page *page,
2178                 unsigned from, unsigned to)
2179 {
2180         struct inode *inode = page->mapping->host;
2181         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2182         __block_commit_write(inode,page,from,to);
2183         /*
2184          * No need to use i_size_read() here, the i_size
2185          * cannot change under us because we hold i_mutex.
2186          */
2187         if (pos > inode->i_size) {
2188                 i_size_write(inode, pos);
2189                 mark_inode_dirty(inode);
2190         }
2191         return 0;
2192 }
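
/*
 * Illustrative sketch, not part of this file: the usual
 * ->prepare_write/->commit_write wiring for a filesystem whose block
 * allocator is a hypothetical foo_get_block().
 */
#if 0
static int foo_prepare_write(struct file *file, struct page *page,
                unsigned from, unsigned to)
{
        return block_prepare_write(page, from, to, foo_get_block);
}

static const struct address_space_operations foo_aops = {
        .prepare_write  = foo_prepare_write,
        .commit_write   = generic_commit_write,
        /* ... */
};
#endif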
2193
2194
2195 /*
2196  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2197  * immediately, while under the page lock.  So it needs a special end_io
2198  * handler which does not touch the bh after unlocking it.
2199  *
2200  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2201  * a race there is benign: unlock_buffer() only uses the bh's address for
2202  * hashing after unlocking the buffer, so it doesn't actually touch the bh
2203  * itself.
2204  */
2205 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2206 {
2207         if (uptodate) {
2208                 set_buffer_uptodate(bh);
2209         } else {
2210                 /* This happens due to failed READA attempts. */
2211                 clear_buffer_uptodate(bh);
2212         }
2213         unlock_buffer(bh);
2214 }
2215
2216 /*
2217  * On entry, the page is fully not uptodate.
2218  * On exit the page is fully uptodate in the areas outside (from,to)
2219  */
2220 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2221                         get_block_t *get_block)
2222 {
2223         struct inode *inode = page->mapping->host;
2224         const unsigned blkbits = inode->i_blkbits;
2225         const unsigned blocksize = 1 << blkbits;
2226         struct buffer_head map_bh;
2227         struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2228         unsigned block_in_page;
2229         unsigned block_start;
2230         sector_t block_in_file;
2231         char *kaddr;
2232         int nr_reads = 0;
2233         int i;
2234         int ret = 0;
2235         int is_mapped_to_disk = 1;
2236
2237         if (PageMappedToDisk(page))
2238                 return 0;
2239
2240         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2241         map_bh.b_page = page;
2242
2243         /*
2244          * We loop across all blocks in the page, whether or not they are
2245          * part of the affected region.  This is so we can discover if the
2246          * page is fully mapped-to-disk.
2247          */
2248         for (block_start = 0, block_in_page = 0;
2249                   block_start < PAGE_CACHE_SIZE;
2250                   block_in_page++, block_start += blocksize) {
2251                 unsigned block_end = block_start + blocksize;
2252                 int create;
2253
2254                 map_bh.b_state = 0;
2255                 create = 1;
2256                 if (block_start >= to)
2257                         create = 0;
2258                 map_bh.b_size = blocksize;
2259                 ret = get_block(inode, block_in_file + block_in_page,
2260                                         &map_bh, create);
2261                 if (ret)
2262                         goto failed;
2263                 if (!buffer_mapped(&map_bh))
2264                         is_mapped_to_disk = 0;
2265                 if (buffer_new(&map_bh))
2266                         unmap_underlying_metadata(map_bh.b_bdev,
2267                                                         map_bh.b_blocknr);
2268                 if (PageUptodate(page))
2269                         continue;
2270                 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2271                         kaddr = kmap_atomic(page, KM_USER0);
2272                         if (block_start < from)
2273                                 memset(kaddr+block_start, 0, from-block_start);
2274                         if (block_end > to)
2275                                 memset(kaddr + to, 0, block_end - to);
2276                         flush_dcache_page(page);
2277                         kunmap_atomic(kaddr, KM_USER0);
2278                         continue;
2279                 }
2280                 if (buffer_uptodate(&map_bh))
2281                         continue;       /* reiserfs does this */
2282                 if (block_start < from || block_end > to) {
2283                         struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2284
2285                         if (!bh) {
2286                                 ret = -ENOMEM;
2287                                 goto failed;
2288                         }
2289                         bh->b_state = map_bh.b_state;
2290                         atomic_set(&bh->b_count, 0);
2291                         bh->b_this_page = NULL;
2292                         bh->b_page = page;
2293                         bh->b_blocknr = map_bh.b_blocknr;
2294                         bh->b_size = blocksize;
2295                         bh->b_data = (char *)(long)block_start;
2296                         bh->b_bdev = map_bh.b_bdev;
2297                         bh->b_private = NULL;
2298                         read_bh[nr_reads++] = bh;
2299                 }
2300         }
2301
2302         if (nr_reads) {
2303                 struct buffer_head *bh;
2304
2305                 /*
2306                  * The page is locked, so these buffers are protected from
2307                  * any VM or truncate activity.  Hence we don't need to care
2308                  * for the buffer_head refcounts.
2309                  */
2310                 for (i = 0; i < nr_reads; i++) {
2311                         bh = read_bh[i];
2312                         lock_buffer(bh);
2313                         bh->b_end_io = end_buffer_read_nobh;
2314                         submit_bh(READ, bh);
2315                 }
2316                 for (i = 0; i < nr_reads; i++) {
2317                         bh = read_bh[i];
2318                         wait_on_buffer(bh);
2319                         if (!buffer_uptodate(bh))
2320                                 ret = -EIO;
2321                         free_buffer_head(bh);
2322                         read_bh[i] = NULL;
2323                 }
2324                 if (ret)
2325                         goto failed;
2326         }
2327
2328         if (is_mapped_to_disk)
2329                 SetPageMappedToDisk(page);
2330
2331         return 0;
2332
2333 failed:
2334         for (i = 0; i < nr_reads; i++) {
2335                 if (read_bh[i])
2336                         free_buffer_head(read_bh[i]);
2337         }
2338
2339         /*
2340          * Error recovery is pretty slack.  Clear the page and mark it dirty
2341          * so we'll later zero out any blocks which _were_ allocated.
2342          */
2343         kaddr = kmap_atomic(page, KM_USER0);
2344         memset(kaddr, 0, PAGE_CACHE_SIZE);
2345         flush_dcache_page(page);
2346         kunmap_atomic(kaddr, KM_USER0);
2347         SetPageUptodate(page);
2348         set_page_dirty(page);
2349         return ret;
2350 }
2351 EXPORT_SYMBOL(nobh_prepare_write);
2352
2353 /*
2354  * Make sure any changes to nobh_commit_write() are reflected in
2355  * nobh_truncate_page(), since it doesn't call commit_write().
2356  */
2357 int nobh_commit_write(struct file *file, struct page *page,
2358                 unsigned from, unsigned to)
2359 {
2360         struct inode *inode = page->mapping->host;
2361         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2362
2363         SetPageUptodate(page);
2364         set_page_dirty(page);
2365         if (pos > inode->i_size) {
2366                 i_size_write(inode, pos);
2367                 mark_inode_dirty(inode);
2368         }
2369         return 0;
2370 }
2371 EXPORT_SYMBOL(nobh_commit_write);
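
/*
 * Illustrative sketch, not part of this file: the nobh flavour of the
 * wiring above, as a filesystem mounted with something like ext2's "nobh"
 * option would use.  foo_get_block() is hypothetical.
 */
#if 0
static int foo_nobh_prepare_write(struct file *file, struct page *page,
                unsigned from, unsigned to)
{
        return nobh_prepare_write(page, from, to, foo_get_block);
}

static const struct address_space_operations foo_nobh_aops = {
        .prepare_write  = foo_nobh_prepare_write,
        .commit_write   = nobh_commit_write,
        /* ... */
};
#endif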
2372
2373 /*
2374  * nobh_writepage() - based on block_write_full_page() except
2375  * that it tries to operate without attaching bufferheads to
2376  * the page.
2377  */
2378 int nobh_writepage(struct page *page, get_block_t *get_block,
2379                         struct writeback_control *wbc)
2380 {
2381         struct inode * const inode = page->mapping->host;
2382         loff_t i_size = i_size_read(inode);
2383         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2384         unsigned offset;
2385         void *kaddr;
2386         int ret;
2387
2388         /* Is the page fully inside i_size? */
2389         if (page->index < end_index)
2390                 goto out;
2391
2392         /* Is the page fully outside i_size? (truncate in progress) */
2393         offset = i_size & (PAGE_CACHE_SIZE-1);
2394         if (page->index >= end_index+1 || !offset) {
2395                 /*
2396                  * The page may have dirty, unmapped buffers.  For example,
2397                  * they may have been added in ext3_writepage().  Make them
2398                  * freeable here, so the page does not leak.
2399                  */
2400 #if 0
2401                 /* Not really sure about this  - do we need this ? */
2402                 if (page->mapping->a_ops->invalidatepage)
2403                         page->mapping->a_ops->invalidatepage(page, offset);
2404 #endif
2405                 unlock_page(page);
2406                 return 0; /* don't care */
2407         }
2408
2409         /*
2410          * The page straddles i_size.  It must be zeroed out on each and every
2411          * writepage invocation because it may be mmapped.  "A file is mapped
2412          * in multiples of the page size.  For a file that is not a multiple of
2413          * the  page size, the remaining memory is zeroed when mapped, and
2414          * writes to that region are not written out to the file."
2415          */
2416         kaddr = kmap_atomic(page, KM_USER0);
2417         memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2418         flush_dcache_page(page);
2419         kunmap_atomic(kaddr, KM_USER0);
2420 out:
2421         ret = mpage_writepage(page, get_block, wbc);
2422         if (ret == -EAGAIN)
2423                 ret = __block_write_full_page(inode, page, get_block, wbc);
2424         return ret;
2425 }
2426 EXPORT_SYMBOL(nobh_writepage);
2427
2428 /*
2429  * This function assumes that ->prepare_write() uses nobh_prepare_write().
2430  */
2431 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2432 {
2433         struct inode *inode = mapping->host;
2434         unsigned blocksize = 1 << inode->i_blkbits;
2435         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2436         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2437         unsigned to;
2438         struct page *page;
2439         const struct address_space_operations *a_ops = mapping->a_ops;
2440         char *kaddr;
2441         int ret = 0;
2442
2443         if ((offset & (blocksize - 1)) == 0)
2444                 goto out;
2445
2446         ret = -ENOMEM;
2447         page = grab_cache_page(mapping, index);
2448         if (!page)
2449                 goto out;
2450
2451         to = (offset + blocksize) & ~(blocksize - 1);
2452         ret = a_ops->prepare_write(NULL, page, offset, to);
2453         if (ret == 0) {
2454                 kaddr = kmap_atomic(page, KM_USER0);
2455                 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2456                 flush_dcache_page(page);
2457                 kunmap_atomic(kaddr, KM_USER0);
2458                 /*
2459                  * It would be more correct to call aops->commit_write()
2460                  * here, but this is more efficient.
2461                  */
2462                 SetPageUptodate(page);
2463                 set_page_dirty(page);
2464         }
2465         unlock_page(page);
2466         page_cache_release(page);
2467 out:
2468         return ret;
2469 }
2470 EXPORT_SYMBOL(nobh_truncate_page);
2471
2472 int block_truncate_page(struct address_space *mapping,
2473                         loff_t from, get_block_t *get_block)
2474 {
2475         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2476         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2477         unsigned blocksize;
2478         sector_t iblock;
2479         unsigned length, pos;
2480         struct inode *inode = mapping->host;
2481         struct page *page;
2482         struct buffer_head *bh;
2483         void *kaddr;
2484         int err;
2485
2486         blocksize = 1 << inode->i_blkbits;
2487         length = offset & (blocksize - 1);
2488
2489         /* Block boundary? Nothing to do */
2490         if (!length)
2491                 return 0;
2492
2493         length = blocksize - length;
2494         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2495         
2496         page = grab_cache_page(mapping, index);
2497         err = -ENOMEM;
2498         if (!page)
2499                 goto out;
2500
2501         if (!page_has_buffers(page))
2502                 create_empty_buffers(page, blocksize, 0);
2503
2504         /* Find the buffer that contains "offset" */
2505         bh = page_buffers(page);
2506         pos = blocksize;
2507         while (offset >= pos) {
2508                 bh = bh->b_this_page;
2509                 iblock++;
2510                 pos += blocksize;
2511         }
2512
2513         err = 0;
2514         if (!buffer_mapped(bh)) {
2515                 WARN_ON(bh->b_size != blocksize);
2516                 err = get_block(inode, iblock, bh, 0);
2517                 if (err)
2518                         goto unlock;
2519                 /* unmapped? It's a hole - nothing to do */
2520                 if (!buffer_mapped(bh))
2521                         goto unlock;
2522         }
2523
2524         /* Ok, it's mapped. Make sure it's up-to-date */
2525         if (PageUptodate(page))
2526                 set_buffer_uptodate(bh);
2527
2528         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2529                 err = -EIO;
2530                 ll_rw_block(READ, 1, &bh);
2531                 wait_on_buffer(bh);
2532                 /* Uhhuh. Read error. Complain and punt. */
2533                 if (!buffer_uptodate(bh))
2534                         goto unlock;
2535         }
2536
2537         kaddr = kmap_atomic(page, KM_USER0);
2538         memset(kaddr + offset, 0, length);
2539         flush_dcache_page(page);
2540         kunmap_atomic(kaddr, KM_USER0);
2541
2542         mark_buffer_dirty(bh);
2543         err = 0;
2544
2545 unlock:
2546         unlock_page(page);
2547         page_cache_release(page);
2548 out:
2549         return err;
2550 }
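
/*
 * Illustrative sketch, not part of this file: a truncate path zeroes the
 * partial tail block before freeing the rest.  foo_truncate() and
 * foo_free_blocks() are hypothetical.
 */
#if 0
static void foo_truncate(struct inode *inode)
{
        block_truncate_page(inode->i_mapping, inode->i_size, foo_get_block);
        foo_free_blocks(inode);         /* release blocks past i_size */
}
#endif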
2551
2552 /*
2553  * The generic ->writepage function for buffer-backed address_spaces
2554  */
2555 int block_write_full_page(struct page *page, get_block_t *get_block,
2556                         struct writeback_control *wbc)
2557 {
2558         struct inode * const inode = page->mapping->host;
2559         loff_t i_size = i_size_read(inode);
2560         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2561         unsigned offset;
2562         void *kaddr;
2563
2564         /* Is the page fully inside i_size? */
2565         if (page->index < end_index)
2566                 return __block_write_full_page(inode, page, get_block, wbc);
2567
2568         /* Is the page fully outside i_size? (truncate in progress) */
2569         offset = i_size & (PAGE_CACHE_SIZE-1);
2570         if (page->index >= end_index+1 || !offset) {
2571                 /*
2572                  * The page may have dirty, unmapped buffers.  For example,
2573                  * they may have been added in ext3_writepage().  Make them
2574                  * freeable here, so the page does not leak.
2575                  */
2576                 do_invalidatepage(page, 0);
2577                 unlock_page(page);
2578                 return 0; /* don't care */
2579         }
2580
2581         /*
2582          * The page straddles i_size.  It must be zeroed out on each and every
2583  * writepage invocation because it may be mmapped.  "A file is mapped
2584          * in multiples of the page size.  For a file that is not a multiple of
2585          * the  page size, the remaining memory is zeroed when mapped, and
2586          * writes to that region are not written out to the file."
2587          */
2588         kaddr = kmap_atomic(page, KM_USER0);
2589         memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2590         flush_dcache_page(page);
2591         kunmap_atomic(kaddr, KM_USER0);
2592         return __block_write_full_page(inode, page, get_block, wbc);
2593 }
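
/*
 * Illustrative sketch, not part of this file: the usual ->writepage
 * wrapper, with a hypothetical foo_get_block().
 */
#if 0
static int foo_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, foo_get_block, wbc);
}
#endif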
2594
2595 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2596                             get_block_t *get_block)
2597 {
2598         struct buffer_head tmp;
2599         struct inode *inode = mapping->host;
2600         tmp.b_state = 0;
2601         tmp.b_blocknr = 0;
2602         tmp.b_size = 1 << inode->i_blkbits;
2603         get_block(inode, block, &tmp, 0);
2604         return tmp.b_blocknr;
2605 }
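
/*
 * Illustrative sketch, not part of this file: wiring ->bmap through
 * generic_block_bmap() with a hypothetical foo_get_block().
 */
#if 0
static sector_t foo_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, foo_get_block);
}
#endif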
2606
2607 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2608 {
2609         struct buffer_head *bh = bio->bi_private;
2610
2611         if (bio->bi_size)
2612                 return 1;
2613
2614         if (err == -EOPNOTSUPP) {
2615                 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2616                 set_bit(BH_Eopnotsupp, &bh->b_state);
2617         }
2618
2619         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2620         bio_put(bio);
2621         return 0;
2622 }
2623
2624 int submit_bh(int rw, struct buffer_head * bh)
2625 {
2626         struct bio *bio;
2627         int ret = 0;
2628
2629         BUG_ON(!buffer_locked(bh));
2630         BUG_ON(!buffer_mapped(bh));
2631         BUG_ON(!bh->b_end_io);
2632
2633         if (buffer_ordered(bh) && (rw == WRITE))
2634                 rw = WRITE_BARRIER;
2635
2636         /*
2637          * Only clear out a write error when rewriting, should this
2638          * include WRITE_SYNC as well?
2639          */
2640         if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2641                 clear_buffer_write_io_error(bh);
2642
2643         /*
2644          * from here on down, it's all bio -- do the initial mapping,
2645          * submit_bio -> generic_make_request may further map this bio around
2646          */
2647         bio = bio_alloc(GFP_NOIO, 1);
2648
2649         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2650         bio->bi_bdev = bh->b_bdev;
2651         bio->bi_io_vec[0].bv_page = bh->b_page;
2652         bio->bi_io_vec[0].bv_len = bh->b_size;
2653         bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2654
2655         bio->bi_vcnt = 1;
2656         bio->bi_idx = 0;
2657         bio->bi_size = bh->b_size;
2658
2659         bio->bi_end_io = end_bio_bh_io_sync;
2660         bio->bi_private = bh;
2661
2662         bio_get(bio);
2663         submit_bio(rw, bio);
2664
2665         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2666                 ret = -EOPNOTSUPP;
2667
2668         bio_put(bio);
2669         return ret;
2670 }
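
/*
 * Illustrative sketch, not part of this file: synchronously reading one
 * mapped buffer with submit_bh(), the same pattern that ll_rw_block()
 * below automates.  foo_read_bh() is hypothetical.
 */
#if 0
static int foo_read_bh(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return 0;
        }
        get_bh(bh);                     /* end_buffer_read_sync drops it */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif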
2671
2672 /**
2673  * ll_rw_block: low-level access to block devices (DEPRECATED)
2674  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2675  * @nr: number of &struct buffer_heads in the array
2676  * @bhs: array of pointers to &struct buffer_head
2677  *
2678  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2679  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2680  * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2681  * are sent to disk. The fourth %READA option is described in the documentation
2682  * for generic_make_request() which ll_rw_block() calls.
2683  *
2684  * This function drops any buffer that it cannot get a lock on (with the
2685  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2686  * clean when doing a write request, and any buffer that appears to be
2687  * up-to-date when doing a read request.  Further it marks as clean buffers that
2688  * are processed for writing (the buffer cache won't assume that they are
2689  * actually clean until the buffer gets unlocked).
2690  *
2691  * ll_rw_block sets b_end_io to a simple completion handler that marks
2692  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2693  * any waiters.
2694  *
2695  * All of the buffers must be for the same device, and must also be a
2696  * multiple of the current approved size for the device.
2697  */
2698 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2699 {
2700         int i;
2701
2702         for (i = 0; i < nr; i++) {
2703                 struct buffer_head *bh = bhs[i];
2704
2705                 if (rw == SWRITE)
2706                         lock_buffer(bh);
2707                 else if (test_set_buffer_locked(bh))
2708                         continue;
2709
2710                 if (rw == WRITE || rw == SWRITE) {
2711                         if (test_clear_buffer_dirty(bh)) {
2712                                 bh->b_end_io = end_buffer_write_sync;
2713                                 get_bh(bh);
2714                                 submit_bh(WRITE, bh);
2715                                 continue;
2716                         }
2717                 } else {
2718                         if (!buffer_uptodate(bh)) {
2719                                 bh->b_end_io = end_buffer_read_sync;
2720                                 get_bh(bh);
2721                                 submit_bh(rw, bh);
2722                                 continue;
2723                         }
2724                 }
2725                 unlock_buffer(bh);
2726         }
2727 }
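
/*
 * Illustrative sketch, not part of this file: the classic read-then-wait
 * use of ll_rw_block(), as a filesystem's metadata reader might do.
 * foo_read_metadata() is hypothetical.
 */
#if 0
static int foo_read_metadata(struct buffer_head *bh)
{
        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        return -EIO;    /* read failed */
        }
        return 0;
}
#endif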
2728
2729 /*
2730  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2731  * and then start new I/O and then wait upon it.  The caller must have a ref on
2732  * the buffer_head.
2733  */
2734 int sync_dirty_buffer(struct buffer_head *bh)
2735 {
2736         int ret = 0;
2737
2738         WARN_ON(atomic_read(&bh->b_count) < 1);
2739         lock_buffer(bh);
2740         if (test_clear_buffer_dirty(bh)) {
2741                 get_bh(bh);
2742                 bh->b_end_io = end_buffer_write_sync;
2743                 ret = submit_bh(WRITE, bh);
2744                 wait_on_buffer(bh);
2745                 if (buffer_eopnotsupp(bh)) {
2746                         clear_buffer_eopnotsupp(bh);
2747                         ret = -EOPNOTSUPP;
2748                 }
2749                 if (!ret && !buffer_uptodate(bh))
2750                         ret = -EIO;
2751         } else {
2752                 unlock_buffer(bh);
2753         }
2754         return ret;
2755 }
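
/*
 * Illustrative sketch, not part of this file: forcing one dirty metadata
 * buffer to disk, e.g. when writing a superblock.  foo_write_super_bh()
 * is hypothetical.
 */
#if 0
static int foo_write_super_bh(struct buffer_head *bh)
{
        mark_buffer_dirty(bh);
        return sync_dirty_buffer(bh);   /* submits and waits for the write */
}
#endif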
2756
2757 /*
2758  * try_to_free_buffers() checks if all the buffers on this particular page
2759  * are unused, and releases them if so.
2760  *
2761  * Exclusion against try_to_free_buffers may be obtained by either
2762  * locking the page or by holding its mapping's private_lock.
2763  *
2764  * If the page is dirty but all the buffers are clean then we need to
2765  * be sure to mark the page clean as well.  This is because the page
2766  * may be against a block device, and a later reattachment of buffers
2767  * to a dirty page will set *all* buffers dirty.  Which would corrupt
2768  * filesystem data on the same device.
2769  *
2770  * The same applies to regular filesystem pages: if all the buffers are
2771  * clean then we set the page clean and proceed.  To do that, we require
2772  * total exclusion from __set_page_dirty_buffers().  That is obtained with
2773  * private_lock.
2774  *
2775  * try_to_free_buffers() is non-blocking.
2776  */
2777 static inline int buffer_busy(struct buffer_head *bh)
2778 {
2779         return atomic_read(&bh->b_count) |
2780                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2781 }
2782
2783 static int
2784 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2785 {
2786         struct buffer_head *head = page_buffers(page);
2787         struct buffer_head *bh;
2788
2789         bh = head;
2790         do {
2791                 if (buffer_write_io_error(bh) && page->mapping)
2792                         set_bit(AS_EIO, &page->mapping->flags);
2793                 if (buffer_busy(bh))
2794                         goto failed;
2795                 bh = bh->b_this_page;
2796         } while (bh != head);
2797
2798         do {
2799                 struct buffer_head *next = bh->b_this_page;
2800
2801                 if (!list_empty(&bh->b_assoc_buffers))
2802                         __remove_assoc_queue(bh);
2803                 bh = next;
2804         } while (bh != head);
2805         *buffers_to_free = head;
2806         __clear_page_buffers(page);
2807         return 1;
2808 failed:
2809         return 0;
2810 }
2811
2812 int try_to_free_buffers(struct page *page)
2813 {
2814         struct address_space * const mapping = page->mapping;
2815         struct buffer_head *buffers_to_free = NULL;
2816         int ret = 0;
2817
2818         BUG_ON(!PageLocked(page));
2819         if (PageWriteback(page))
2820                 return 0;
2821
2822         if (mapping == NULL) {          /* can this still happen? */
2823                 ret = drop_buffers(page, &buffers_to_free);
2824                 goto out;
2825         }
2826
2827         spin_lock(&mapping->private_lock);
2828         ret = drop_buffers(page, &buffers_to_free);
2829
2830         /*
2831          * If the filesystem writes its buffers by hand (eg ext3)
2832          * then we can have clean buffers against a dirty page.  We
2833          * clean the page here; otherwise the VM will never notice
2834          * that the filesystem did any IO at all.
2835          *
2836          * Also, during truncate, discard_buffer will have marked all
2837          * the page's buffers clean.  We discover that here and clean
2838          * the page also.
2839          *
2840          * private_lock must be held over this entire operation in order
2841          * to synchronise against __set_page_dirty_buffers and prevent the
2842          * dirty bit from being lost.
2843          */
2844         if (ret)
2845                 cancel_dirty_page(page, PAGE_CACHE_SIZE);
2846         spin_unlock(&mapping->private_lock);
2847 out:
2848         if (buffers_to_free) {
2849                 struct buffer_head *bh = buffers_to_free;
2850
2851                 do {
2852                         struct buffer_head *next = bh->b_this_page;
2853                         free_buffer_head(bh);
2854                         bh = next;
2855                 } while (bh != buffers_to_free);
2856         }
2857         return ret;
2858 }
2859 EXPORT_SYMBOL(try_to_free_buffers);
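
/*
 * Illustrative sketch, not part of this file: a filesystem with no special
 * buffer lifetime rules can point ->releasepage straight at
 * try_to_free_buffers().  "foo" is hypothetical.
 */
#if 0
static int foo_releasepage(struct page *page, gfp_t gfp_mask)
{
        return try_to_free_buffers(page);
}
#endif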
2860
2861 void block_sync_page(struct page *page)
2862 {
2863         struct address_space *mapping;
2864
2865         smp_mb();
2866         mapping = page_mapping(page);
2867         if (mapping)
2868                 blk_run_backing_dev(mapping->backing_dev_info, page);
2869 }
2870
2871 /*
2872  * There are no bdflush tunables left.  But distributions are
2873  * still running obsolete flush daemons, so we terminate them here.
2874  *
2875  * Use of bdflush() is deprecated and will be removed in a future kernel.
2876  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2877  */
2878 asmlinkage long sys_bdflush(int func, long data)
2879 {
2880         static int msg_count;
2881
2882         if (!capable(CAP_SYS_ADMIN))
2883                 return -EPERM;
2884
2885         if (msg_count < 5) {
2886                 msg_count++;
2887                 printk(KERN_INFO
2888                         "warning: process `%s' used the obsolete bdflush"
2889                         " system call\n", current->comm);
2890                 printk(KERN_INFO "Fix your initscripts?\n");
2891         }
2892
2893         if (func == 1)
2894                 do_exit(0);
2895         return 0;
2896 }
2897
2898 /*
2899  * Buffer-head allocation
2900  */
2901 static struct kmem_cache *bh_cachep;
2902
2903 /*
2904  * Once the number of bh's in the machine exceeds this level, we start
2905  * stripping them in writeback.
2906  */
2907 static int max_buffer_heads;
2908
2909 int buffer_heads_over_limit;
2910
2911 struct bh_accounting {
2912         int nr;                 /* Number of live bh's */
2913         int ratelimit;          /* Limit cacheline bouncing */
2914 };
2915
2916 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2917
2918 static void recalc_bh_state(void)
2919 {
2920         int i;
2921         int tot = 0;
2922
2923         if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2924                 return;
2925         __get_cpu_var(bh_accounting).ratelimit = 0;
2926         for_each_online_cpu(i)
2927                 tot += per_cpu(bh_accounting, i).nr;
2928         buffer_heads_over_limit = (tot > max_buffer_heads);
2929 }
2930         
2931 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2932 {
2933         struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
2934         if (ret) {
2935                 get_cpu_var(bh_accounting).nr++;
2936                 recalc_bh_state();
2937                 put_cpu_var(bh_accounting);
2938         }
2939         return ret;
2940 }
2941 EXPORT_SYMBOL(alloc_buffer_head);
2942
2943 void free_buffer_head(struct buffer_head *bh)
2944 {
2945         BUG_ON(!list_empty(&bh->b_assoc_buffers));
2946         kmem_cache_free(bh_cachep, bh);
2947         get_cpu_var(bh_accounting).nr--;
2948         recalc_bh_state();
2949         put_cpu_var(bh_accounting);
2950 }
2951 EXPORT_SYMBOL(free_buffer_head);
2952
2953 static void
2954 init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
2955 {
2956         if (flags & SLAB_CTOR_CONSTRUCTOR) {
2957                 struct buffer_head * bh = (struct buffer_head *)data;
2958
2959                 memset(bh, 0, sizeof(*bh));
2960                 INIT_LIST_HEAD(&bh->b_assoc_buffers);
2961         }
2962 }
2963
2964 static void buffer_exit_cpu(int cpu)
2965 {
2966         int i;
2967         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2968
2969         for (i = 0; i < BH_LRU_SIZE; i++) {
2970                 brelse(b->bhs[i]);
2971                 b->bhs[i] = NULL;
2972         }
2973         get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
2974         per_cpu(bh_accounting, cpu).nr = 0;
2975         put_cpu_var(bh_accounting);
2976 }
2977
2978 static int buffer_cpu_notify(struct notifier_block *self,
2979                               unsigned long action, void *hcpu)
2980 {
2981         if (action == CPU_DEAD)
2982                 buffer_exit_cpu((unsigned long)hcpu);
2983         return NOTIFY_OK;
2984 }
2985
2986 void __init buffer_init(void)
2987 {
2988         int nrpages;
2989
2990         bh_cachep = kmem_cache_create("buffer_head",
2991                                         sizeof(struct buffer_head), 0,
2992                                         (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2993                                         SLAB_MEM_SPREAD),
2994                                         init_buffer_head,
2995                                         NULL);
2996
2997         /*
2998          * Limit the bh occupancy to 10% of ZONE_NORMAL
2999          */
3000         nrpages = (nr_free_buffer_pages() * 10) / 100;
3001         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3002         hotcpu_notifier(buffer_cpu_notify, 0);
3003 }
3004
3005 EXPORT_SYMBOL(__bforget);
3006 EXPORT_SYMBOL(__brelse);
3007 EXPORT_SYMBOL(__wait_on_buffer);
3008 EXPORT_SYMBOL(block_commit_write);
3009 EXPORT_SYMBOL(block_prepare_write);
3010 EXPORT_SYMBOL(block_read_full_page);
3011 EXPORT_SYMBOL(block_sync_page);
3012 EXPORT_SYMBOL(block_truncate_page);
3013 EXPORT_SYMBOL(block_write_full_page);
3014 EXPORT_SYMBOL(cont_prepare_write);
3015 EXPORT_SYMBOL(end_buffer_read_sync);
3016 EXPORT_SYMBOL(end_buffer_write_sync);
3017 EXPORT_SYMBOL(file_fsync);
3018 EXPORT_SYMBOL(fsync_bdev);
3019 EXPORT_SYMBOL(generic_block_bmap);
3020 EXPORT_SYMBOL(generic_commit_write);
3021 EXPORT_SYMBOL(generic_cont_expand);
3022 EXPORT_SYMBOL(generic_cont_expand_simple);
3023 EXPORT_SYMBOL(init_buffer);
3024 EXPORT_SYMBOL(invalidate_bdev);
3025 EXPORT_SYMBOL(ll_rw_block);
3026 EXPORT_SYMBOL(mark_buffer_dirty);
3027 EXPORT_SYMBOL(submit_bh);
3028 EXPORT_SYMBOL(sync_dirty_buffer);
3029 EXPORT_SYMBOL(unlock_buffer);