fs/xfs/linux-2.6/xfs_buf.c
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include <linux/stddef.h>
19 #include <linux/errno.h>
20 #include <linux/slab.h>
21 #include <linux/pagemap.h>
22 #include <linux/init.h>
23 #include <linux/vmalloc.h>
24 #include <linux/bio.h>
25 #include <linux/sysctl.h>
26 #include <linux/proc_fs.h>
27 #include <linux/workqueue.h>
28 #include <linux/percpu.h>
29 #include <linux/blkdev.h>
30 #include <linux/hash.h>
31 #include <linux/kthread.h>
32 #include "xfs_linux.h"
33
34 STATIC kmem_cache_t *pagebuf_zone;
35 STATIC kmem_shaker_t pagebuf_shake;
36 STATIC int xfsbufd_wakeup(int, gfp_t);
37 STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
38
39 STATIC struct workqueue_struct *xfslogd_workqueue;
40 struct workqueue_struct *xfsdatad_workqueue;
41
42 #ifdef PAGEBUF_TRACE
43 void
44 pagebuf_trace(
45         xfs_buf_t       *pb,
46         char            *id,
47         void            *data,
48         void            *ra)
49 {
50         ktrace_enter(pagebuf_trace_buf,
51                 pb, id,
52                 (void *)(unsigned long)pb->pb_flags,
53                 (void *)(unsigned long)pb->pb_hold.counter,
54                 (void *)(unsigned long)pb->pb_sema.count.counter,
55                 (void *)current,
56                 data, ra,
57                 (void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
58                 (void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
59                 (void *)(unsigned long)pb->pb_buffer_length,
60                 NULL, NULL, NULL, NULL, NULL);
61 }
62 ktrace_t *pagebuf_trace_buf;
63 #define PAGEBUF_TRACE_SIZE      4096
64 #define PB_TRACE(pb, id, data)  \
65         pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
66 #else
67 #define PB_TRACE(pb, id, data)  do { } while (0)
68 #endif
69
70 #ifdef PAGEBUF_LOCK_TRACKING
71 # define PB_SET_OWNER(pb)       ((pb)->pb_last_holder = current->pid)
72 # define PB_CLEAR_OWNER(pb)     ((pb)->pb_last_holder = -1)
73 # define PB_GET_OWNER(pb)       ((pb)->pb_last_holder)
74 #else
75 # define PB_SET_OWNER(pb)       do { } while (0)
76 # define PB_CLEAR_OWNER(pb)     do { } while (0)
77 # define PB_GET_OWNER(pb)       do { } while (0)
78 #endif
79
80 #define pb_to_gfp(flags) \
81         ((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
82           ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
83
84 #define pb_to_km(flags) \
85          (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
86
87 #define pagebuf_allocate(flags) \
88         kmem_zone_alloc(pagebuf_zone, pb_to_km(flags))
89 #define pagebuf_deallocate(pb) \
90         kmem_zone_free(pagebuf_zone, (pb))
91
92 /*
93  * Page Region interfaces.
94  *
95  * For pages in filesystems where the blocksize is smaller than the
96  * pagesize, we use the page->private field (long) to hold a bitmap
97  * of uptodate regions within the page.
98  *
99  * Each such region is "bytes per page / bits per long" bytes long.
100  *
101  * NBPPR == number-of-bytes-per-page-region
102  * BTOPR == bytes-to-page-region (rounded up)
103  * BTOPRT == bytes-to-page-region-truncated (rounded down)
104  */
105 #if (BITS_PER_LONG == 32)
106 #define PRSHIFT         (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
107 #elif (BITS_PER_LONG == 64)
108 #define PRSHIFT         (PAGE_CACHE_SHIFT - 6)  /* (64 == 1<<6) */
109 #else
110 #error BITS_PER_LONG must be 32 or 64
111 #endif
112 #define NBPPR           (PAGE_CACHE_SIZE/BITS_PER_LONG)
113 #define BTOPR(b)        (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
114 #define BTOPRT(b)       (((unsigned int)(b) >> PRSHIFT))
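/*
 * A worked example (assuming 4096-byte pages and 32-bit longs, so
 * PRSHIFT == 7 and NBPPR == 128): a 512-byte block at page offset 1024
 * spans regions 8..11, since BTOPR(1024) == 8 and
 * BTOPRT(1024 + 512 - 1) == 11; page_region_mask() below builds a mask
 * with exactly those four bits set.
 */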
115
116 STATIC unsigned long
117 page_region_mask(
118         size_t          offset,
119         size_t          length)
120 {
121         unsigned long   mask;
122         int             first, final;
123
124         first = BTOPR(offset);
125         final = BTOPRT(offset + length - 1);
126         first = min(first, final);
127
128         mask = ~0UL;
129         mask <<= BITS_PER_LONG - (final - first + 1);
130         mask >>= BITS_PER_LONG - (final + 1);
131
132         ASSERT(offset + length <= PAGE_CACHE_SIZE);
133         ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
134
135         return mask;
136 }
137
138 STATIC inline void
139 set_page_region(
140         struct page     *page,
141         size_t          offset,
142         size_t          length)
143 {
144         set_page_private(page,
145                 page_private(page) | page_region_mask(offset, length));
146         if (page_private(page) == ~0UL)
147                 SetPageUptodate(page);
148 }
149
150 STATIC inline int
151 test_page_region(
152         struct page     *page,
153         size_t          offset,
154         size_t          length)
155 {
156         unsigned long   mask = page_region_mask(offset, length);
157
158         return (mask && (page_private(page) & mask) == mask);
159 }
160
161 /*
162  * Mapping of multi-page buffers into contiguous virtual space
163  */
164
165 typedef struct a_list {
166         void            *vm_addr;
167         struct a_list   *next;
168 } a_list_t;
169
170 STATIC a_list_t         *as_free_head;
171 STATIC int              as_list_len;
172 STATIC DEFINE_SPINLOCK(as_lock);
173
174 /*
175  * Try to batch vunmaps because they are costly.
176  */
177 STATIC void
178 free_address(
179         void            *addr)
180 {
181         a_list_t        *aentry;
182
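        /* Atomic allocation that stays out of the emergency reserves
         * (GFP_ATOMIC & ~__GFP_HIGH); failure is harmless, we simply
         * vunmap synchronously below. */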
183         aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC & ~__GFP_HIGH);
184         if (likely(aentry)) {
185                 spin_lock(&as_lock);
186                 aentry->next = as_free_head;
187                 aentry->vm_addr = addr;
188                 as_free_head = aentry;
189                 as_list_len++;
190                 spin_unlock(&as_lock);
191         } else {
192                 vunmap(addr);
193         }
194 }
195
196 STATIC void
197 purge_addresses(void)
198 {
199         a_list_t        *aentry, *old;
200
201         if (as_free_head == NULL)
202                 return;
203
204         spin_lock(&as_lock);
205         aentry = as_free_head;
206         as_free_head = NULL;
207         as_list_len = 0;
208         spin_unlock(&as_lock);
209
210         while ((old = aentry) != NULL) {
211                 vunmap(aentry->vm_addr);
212                 aentry = aentry->next;
213                 kfree(old);
214         }
215 }
216
217 /*
218  *      Internal pagebuf object manipulation
219  */
220
221 STATIC void
222 _pagebuf_initialize(
223         xfs_buf_t               *pb,
224         xfs_buftarg_t           *target,
225         loff_t                  range_base,
226         size_t                  range_length,
227         page_buf_flags_t        flags)
228 {
229         /*
230          * We don't want certain flags to appear in pb->pb_flags.
231          */
232         flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
233
234         memset(pb, 0, sizeof(xfs_buf_t));
235         atomic_set(&pb->pb_hold, 1);
236         init_MUTEX_LOCKED(&pb->pb_iodonesema);
237         INIT_LIST_HEAD(&pb->pb_list);
238         INIT_LIST_HEAD(&pb->pb_hash_list);
239         init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
240         PB_SET_OWNER(pb);
241         pb->pb_target = target;
242         pb->pb_file_offset = range_base;
243         /*
244          * Set buffer_length and count_desired to the same value initially.
245          * I/O routines should use count_desired, which will be the same in
246          * most cases but may be reset (e.g. XFS recovery).
247          */
248         pb->pb_buffer_length = pb->pb_count_desired = range_length;
249         pb->pb_flags = flags;
250         pb->pb_bn = XFS_BUF_DADDR_NULL;
251         atomic_set(&pb->pb_pin_count, 0);
252         init_waitqueue_head(&pb->pb_waiters);
253
254         XFS_STATS_INC(pb_create);
255         PB_TRACE(pb, "initialize", target);
256 }
257
258 /*
259  * Allocate a page array capable of holding a specified number
260  * of pages, and point the page buf at it.
261  */
262 STATIC int
263 _pagebuf_get_pages(
264         xfs_buf_t               *pb,
265         int                     page_count,
266         page_buf_flags_t        flags)
267 {
268         /* Make sure that we have a page list */
269         if (pb->pb_pages == NULL) {
270                 pb->pb_offset = page_buf_poff(pb->pb_file_offset);
271                 pb->pb_page_count = page_count;
272                 if (page_count <= PB_PAGES) {
273                         pb->pb_pages = pb->pb_page_array;
274                 } else {
275                         pb->pb_pages = kmem_alloc(sizeof(struct page *) *
276                                         page_count, pb_to_km(flags));
277                         if (pb->pb_pages == NULL)
278                                 return -ENOMEM;
279                 }
280                 memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
281         }
282         return 0;
283 }
284
285 /*
286  *      Frees pb_pages if it was malloced.
287  */
288 STATIC void
289 _pagebuf_free_pages(
290         xfs_buf_t       *bp)
291 {
292         if (bp->pb_pages != bp->pb_page_array) {
293                 kmem_free(bp->pb_pages,
294                           bp->pb_page_count * sizeof(struct page *));
295         }
296 }
297
298 /*
299  *      Releases the specified buffer.
300  *
301  *      The modification state of any associated pages is left unchanged.
302  *      The buffer must not be on any hash - use pagebuf_rele instead for
303  *      hashed and refcounted buffers.
304  */
305 void
306 pagebuf_free(
307         xfs_buf_t               *bp)
308 {
309         PB_TRACE(bp, "free", 0);
310
311         ASSERT(list_empty(&bp->pb_hash_list));
312
313         if (bp->pb_flags & _PBF_PAGE_CACHE) {
314                 uint            i;
315
316                 if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
317                         free_address(bp->pb_addr - bp->pb_offset);
318
319                 for (i = 0; i < bp->pb_page_count; i++)
320                         page_cache_release(bp->pb_pages[i]);
321                 _pagebuf_free_pages(bp);
322         } else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
323                  /*
324                   * XXX(hch): bp->pb_count_desired might be incorrect (see
325                   * pagebuf_associate_memory for details), but fortunately
326                   * the Linux version of kmem_free ignores the len argument.
327                   */
328                 kmem_free(bp->pb_addr, bp->pb_count_desired);
329                 _pagebuf_free_pages(bp);
330         }
331
332         pagebuf_deallocate(bp);
333 }
334
335 /*
336  *      Finds all pages for the buffer in question and builds its page list.
337  */
338 STATIC int
339 _pagebuf_lookup_pages(
340         xfs_buf_t               *bp,
341         uint                    flags)
342 {
343         struct address_space    *mapping = bp->pb_target->pbr_mapping;
344         size_t                  blocksize = bp->pb_target->pbr_bsize;
345         size_t                  size = bp->pb_count_desired;
346         size_t                  nbytes, offset;
347         gfp_t                   gfp_mask = pb_to_gfp(flags);
348         unsigned short          page_count, i;
349         pgoff_t                 first;
350         loff_t                  end;
351         int                     error;
352
353         end = bp->pb_file_offset + bp->pb_buffer_length;
354         page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
355
356         error = _pagebuf_get_pages(bp, page_count, flags);
357         if (unlikely(error))
358                 return error;
359         bp->pb_flags |= _PBF_PAGE_CACHE;
360
361         offset = bp->pb_offset;
362         first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;
363
364         for (i = 0; i < bp->pb_page_count; i++) {
365                 struct page     *page;
366                 uint            retries = 0;
367
368               retry:
369                 page = find_or_create_page(mapping, first + i, gfp_mask);
370                 if (unlikely(page == NULL)) {
371                         if (flags & PBF_READ_AHEAD) {
372                                 bp->pb_page_count = i;
373                                 for (i = 0; i < bp->pb_page_count; i++)
374                                         unlock_page(bp->pb_pages[i]);
375                                 return -ENOMEM;
376                         }
377
378                         /*
379                          * This could deadlock.
380                          *
381                          * But until all the XFS lowlevel code is revamped to
382                          * handle buffer allocation failures we can't do much.
383                          */
384                         if (!(++retries % 100))
385                                 printk(KERN_ERR
386                                         "XFS: possible memory allocation "
387                                         "deadlock in %s (mode:0x%x)\n",
388                                         __FUNCTION__, gfp_mask);
389
390                         XFS_STATS_INC(pb_page_retries);
391                         xfsbufd_wakeup(0, gfp_mask);
392                         blk_congestion_wait(WRITE, HZ/50);
393                         goto retry;
394                 }
395
396                 XFS_STATS_INC(pb_page_found);
397
398                 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
399                 size -= nbytes;
400
401                 if (!PageUptodate(page)) {
402                         page_count--;
403                         if (blocksize >= PAGE_CACHE_SIZE) {
404                                 if (flags & PBF_READ)
405                                         bp->pb_locked = 1;
406                         } else if (!PagePrivate(page)) {
407                                 if (test_page_region(page, offset, nbytes))
408                                         page_count++;
409                         }
410                 }
411
412                 bp->pb_pages[i] = page;
413                 offset = 0;
414         }
415
416         if (!bp->pb_locked) {
417                 for (i = 0; i < bp->pb_page_count; i++)
418                         unlock_page(bp->pb_pages[i]);
419         }
420
421         if (page_count == bp->pb_page_count)
422                 bp->pb_flags |= PBF_DONE;
423
424         PB_TRACE(bp, "lookup_pages", (long)page_count);
425         return error;
426 }
427
428 /*
429  *      Map buffer into kernel address-space if necessary.
430  */
431 STATIC int
432 _pagebuf_map_pages(
433         xfs_buf_t               *bp,
434         uint                    flags)
435 {
436         /* A single page buffer is always mappable */
437         if (bp->pb_page_count == 1) {
438                 bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
439                 bp->pb_flags |= PBF_MAPPED;
440         } else if (flags & PBF_MAPPED) {
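                /* Drain the batched vunmap list now and then so stale
                 * mappings do not pile up in the vmalloc area. */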
441                 if (as_list_len > 64)
442                         purge_addresses();
443                 bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
444                                 VM_MAP, PAGE_KERNEL);
445                 if (unlikely(bp->pb_addr == NULL))
446                         return -ENOMEM;
447                 bp->pb_addr += bp->pb_offset;
448                 bp->pb_flags |= PBF_MAPPED;
449         }
450
451         return 0;
452 }
453
454 /*
455  *      Finding and Reading Buffers
456  */
457
458 /*
459  *      _pagebuf_find
460  *
461  *      Looks up, and creates if absent, a lockable buffer for
462  *      a given range of an inode.  The buffer is returned
463  *      locked.  If other overlapping buffers exist, they are
464  *      released before the new buffer is created and locked,
465  *      which may imply that this call will block until those buffers
466  *      are unlocked.  No I/O is implied by this call.
467  */
468 xfs_buf_t *
469 _pagebuf_find(
470         xfs_buftarg_t           *btp,   /* block device target          */
471         loff_t                  ioff,   /* starting offset of range     */
472         size_t                  isize,  /* length of range              */
473         page_buf_flags_t        flags,  /* PBF_TRYLOCK                  */
474         xfs_buf_t               *new_pb)/* newly allocated buffer       */
475 {
476         loff_t                  range_base;
477         size_t                  range_length;
478         xfs_bufhash_t           *hash;
479         xfs_buf_t               *pb, *n;
480
481         range_base = (ioff << BBSHIFT);
482         range_length = (isize << BBSHIFT);
483
484         /* Check for IOs smaller than the sector size / not sector aligned */
485         ASSERT(!(range_length < (1 << btp->pbr_sshift)));
486         ASSERT(!(range_base & (loff_t)btp->pbr_smask));
487
488         hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
489
490         spin_lock(&hash->bh_lock);
491
492         list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) {
493                 ASSERT(btp == pb->pb_target);
494                 if (pb->pb_file_offset == range_base &&
495                     pb->pb_buffer_length == range_length) {
496                         /*
497                          * If we look at something, bring it to the
498                          * front of the list for next time.
499                          */
500                         atomic_inc(&pb->pb_hold);
501                         list_move(&pb->pb_hash_list, &hash->bh_list);
502                         goto found;
503                 }
504         }
505
506         /* No match found */
507         if (new_pb) {
508                 _pagebuf_initialize(new_pb, btp, range_base,
509                                 range_length, flags);
510                 new_pb->pb_hash = hash;
511                 list_add(&new_pb->pb_hash_list, &hash->bh_list);
512         } else {
513                 XFS_STATS_INC(pb_miss_locked);
514         }
515
516         spin_unlock(&hash->bh_lock);
517         return new_pb;
518
519 found:
520         spin_unlock(&hash->bh_lock);
521
522         /* Attempt to get the semaphore without sleeping first;
523          * if that fails, fall back to a blocking acquire.  Note
524          * that the hash spinlock has already been dropped above.
525          */
526         if (down_trylock(&pb->pb_sema)) {
527                 if (!(flags & PBF_TRYLOCK)) {
528                         /* wait for buffer ownership */
529                         PB_TRACE(pb, "get_lock", 0);
530                         pagebuf_lock(pb);
531                         XFS_STATS_INC(pb_get_locked_waited);
532                 } else {
533                         /* We asked for a trylock and failed; there is
534                          * no need to examine the file offset and length
535                          * here, since we know this pagebuf at least
536                          * overlaps ours and is locked, so the buffer we
537                          * want either does not exist or is this one.
538                          */
539
540                         pagebuf_rele(pb);
541                         XFS_STATS_INC(pb_busy_locked);
542                         return (NULL);
543                 }
544         } else {
545                 /* trylock worked */
546                 PB_SET_OWNER(pb);
547         }
548
549         if (pb->pb_flags & PBF_STALE) {
550                 ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
551                 pb->pb_flags &= PBF_MAPPED;
552         }
553         PB_TRACE(pb, "got_lock", 0);
554         XFS_STATS_INC(pb_get_locked);
555         return (pb);
556 }
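/*
 * A sketch of the intended calling pattern, as used by xfs_buf_get_flags()
 * below: allocate the new buffer up front, then let _pagebuf_find() either
 * insert it or return an existing match:
 *
 *	new_pb = pagebuf_allocate(flags);
 *	pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
 *	if (pb != new_pb)
 *		pagebuf_deallocate(new_pb);
 *
 * This keeps the bh_lock hold time short - no allocation is done while
 * the hash chain is locked.
 */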
557
558 /*
559  *      xfs_buf_get_flags assembles a buffer covering the specified range.
560  *
561  *      Storage in memory for all portions of the buffer will be allocated,
562  *      although backing storage may not be.
563  */
564 xfs_buf_t *
565 xfs_buf_get_flags(                      /* allocate a buffer            */
566         xfs_buftarg_t           *target,/* target for buffer            */
567         loff_t                  ioff,   /* starting offset of range     */
568         size_t                  isize,  /* length of range              */
569         page_buf_flags_t        flags)  /* PBF_TRYLOCK                  */
570 {
571         xfs_buf_t               *pb, *new_pb;
572         int                     error = 0, i;
573
574         new_pb = pagebuf_allocate(flags);
575         if (unlikely(!new_pb))
576                 return NULL;
577
578         pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
579         if (pb == new_pb) {
580                 error = _pagebuf_lookup_pages(pb, flags);
581                 if (error)
582                         goto no_buffer;
583         } else {
584                 pagebuf_deallocate(new_pb);
585                 if (unlikely(pb == NULL))
586                         return NULL;
587         }
588
589         for (i = 0; i < pb->pb_page_count; i++)
590                 mark_page_accessed(pb->pb_pages[i]);
591
592         if (!(pb->pb_flags & PBF_MAPPED)) {
593                 error = _pagebuf_map_pages(pb, flags);
594                 if (unlikely(error)) {
595                         printk(KERN_WARNING "%s: failed to map pages\n",
596                                         __FUNCTION__);
597                         goto no_buffer;
598                 }
599         }
600
601         XFS_STATS_INC(pb_get);
602
603         /*
604          * Always fill in the block number now; the mapped cases can do
605          * their own overlay of this later.
606          */
607         pb->pb_bn = ioff;
608         pb->pb_count_desired = pb->pb_buffer_length;
609
610         PB_TRACE(pb, "get", (unsigned long)flags);
611         return pb;
612
613  no_buffer:
614         if (flags & (PBF_LOCK | PBF_TRYLOCK))
615                 pagebuf_unlock(pb);
616         pagebuf_rele(pb);
617         return NULL;
618 }
619
620 xfs_buf_t *
621 xfs_buf_read_flags(
622         xfs_buftarg_t           *target,
623         loff_t                  ioff,
624         size_t                  isize,
625         page_buf_flags_t        flags)
626 {
627         xfs_buf_t               *pb;
628
629         flags |= PBF_READ;
630
631         pb = xfs_buf_get_flags(target, ioff, isize, flags);
632         if (pb) {
633                 if (!XFS_BUF_ISDONE(pb)) {
634                         PB_TRACE(pb, "read", (unsigned long)flags);
635                         XFS_STATS_INC(pb_get_read);
636                         pagebuf_iostart(pb, flags);
637                 } else if (flags & PBF_ASYNC) {
638                         PB_TRACE(pb, "read_async", (unsigned long)flags);
639                         /*
640                          * Read ahead call which is already satisfied,
641                          * drop the buffer
642                          */
643                         goto no_buffer;
644                 } else {
645                         PB_TRACE(pb, "read_done", (unsigned long)flags);
646                         /* We do not want read in the flags */
647                         pb->pb_flags &= ~PBF_READ;
648                 }
649         }
650
651         return pb;
652
653  no_buffer:
654         if (flags & (PBF_LOCK | PBF_TRYLOCK))
655                 pagebuf_unlock(pb);
656         pagebuf_rele(pb);
657         return NULL;
658 }
659
660 /*
661  * If we are not low on memory then do the readahead in a deadlock
662  * safe manner.
663  */
664 void
665 pagebuf_readahead(
666         xfs_buftarg_t           *target,
667         loff_t                  ioff,
668         size_t                  isize,
669         page_buf_flags_t        flags)
670 {
671         struct backing_dev_info *bdi;
672
673         bdi = target->pbr_mapping->backing_dev_info;
674         if (bdi_read_congested(bdi))
675                 return;
676
677         flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
678         xfs_buf_read_flags(target, ioff, isize, flags);
679 }
680
681 xfs_buf_t *
682 pagebuf_get_empty(
683         size_t                  len,
684         xfs_buftarg_t           *target)
685 {
686         xfs_buf_t               *pb;
687
688         pb = pagebuf_allocate(0);
689         if (pb)
690                 _pagebuf_initialize(pb, target, 0, len, 0);
691         return pb;
692 }
693
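/*
 * Translate a kernel virtual address to its struct page: addresses inside
 * the vmalloc window need vmalloc_to_page(), anything else lives in the
 * direct mapping and virt_to_page() suffices.
 */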
694 static inline struct page *
695 mem_to_page(
696         void                    *addr)
697 {
698         if (((unsigned long)addr < VMALLOC_START) ||
699             ((unsigned long)addr >= VMALLOC_END)) {
700                 return virt_to_page(addr);
701         } else {
702                 return vmalloc_to_page(addr);
703         }
704 }
705
706 int
707 pagebuf_associate_memory(
708         xfs_buf_t               *pb,
709         void                    *mem,
710         size_t                  len)
711 {
712         int                     rval;
713         int                     i = 0;
714         size_t                  ptr;
715         size_t                  end, end_cur;
716         off_t                   offset;
717         int                     page_count;
718
719         page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
720         offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
721         if (offset && (len > PAGE_CACHE_SIZE))
722                 page_count++;
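        /* Example with 4K pages (hypothetical numbers): mem at page
         * offset 3000 with len 6000 covers bytes 3000..8999, i.e. three
         * pages - one more than PAGE_CACHE_ALIGN(len) alone suggests. */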
723
724         /* Free any previous set of page pointers */
725         if (pb->pb_pages)
726                 _pagebuf_free_pages(pb);
727
728         pb->pb_pages = NULL;
729         pb->pb_addr = mem;
730
731         rval = _pagebuf_get_pages(pb, page_count, 0);
732         if (rval)
733                 return rval;
734
735         pb->pb_offset = offset;
736         ptr = (size_t) mem & PAGE_CACHE_MASK;
737         end = PAGE_CACHE_ALIGN((size_t) mem + len);
738         end_cur = end;
739         /* set up first page */
740         pb->pb_pages[0] = mem_to_page(mem);
741
742         ptr += PAGE_CACHE_SIZE;
743         pb->pb_page_count = ++i;
744         while (ptr < end) {
745                 pb->pb_pages[i] = mem_to_page((void *)ptr);
746                 pb->pb_page_count = ++i;
747                 ptr += PAGE_CACHE_SIZE;
748         }
749         pb->pb_locked = 0;
750
751         pb->pb_count_desired = pb->pb_buffer_length = len;
752         pb->pb_flags |= PBF_MAPPED;
753
754         return 0;
755 }
756
757 xfs_buf_t *
758 pagebuf_get_no_daddr(
759         size_t                  len,
760         xfs_buftarg_t           *target)
761 {
762         size_t                  malloc_len = len;
763         xfs_buf_t               *bp;
764         void                    *data;
765         int                     error;
766
767         bp = pagebuf_allocate(0);
768         if (unlikely(bp == NULL))
769                 goto fail;
770         _pagebuf_initialize(bp, target, 0, len, 0);
771
772  try_again:
773         data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
774         if (unlikely(data == NULL))
775                 goto fail_free_buf;
776
777         /* check whether alignment matches.. */
778         if ((__psunsigned_t)data !=
779             ((__psunsigned_t)data & ~target->pbr_smask)) {
780                 /* .. else double the size and try again */
781                 kmem_free(data, malloc_len);
782                 malloc_len <<= 1;
783                 goto try_again;
784         }
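        /* The doubling above relies on the slab allocator handing out
         * naturally aligned blocks for larger power-of-two sizes, so
         * the retried allocation should come back sector aligned. */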
785
786         error = pagebuf_associate_memory(bp, data, len);
787         if (error)
788                 goto fail_free_mem;
789         bp->pb_flags |= _PBF_KMEM_ALLOC;
790
791         pagebuf_unlock(bp);
792
793         PB_TRACE(bp, "no_daddr", data);
794         return bp;
795  fail_free_mem:
796         kmem_free(data, malloc_len);
797  fail_free_buf:
798         pagebuf_free(bp);
799  fail:
800         return NULL;
801 }
802
803 /*
804  *      pagebuf_hold
805  *
806  *      Increment reference count on buffer, to hold the buffer concurrently
807  *      with another thread which may release (free) the buffer asynchronously.
808  *
809  *      Must hold the buffer already to call this function.
810  */
811 void
812 pagebuf_hold(
813         xfs_buf_t               *pb)
814 {
815         atomic_inc(&pb->pb_hold);
816         PB_TRACE(pb, "hold", 0);
817 }
818
819 /*
820  *      pagebuf_rele
821  *
822  *      pagebuf_rele releases a hold on the specified buffer.  If the
823  *      hold count is 1, pagebuf_rele calls pagebuf_free.
824  */
825 void
826 pagebuf_rele(
827         xfs_buf_t               *pb)
828 {
829         xfs_bufhash_t           *hash = pb->pb_hash;
830
831         PB_TRACE(pb, "rele", pb->pb_relse);
832
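        /* atomic_dec_and_lock() only takes bh_lock when the hold count
         * is about to reach zero, so dropping the last reference is
         * serialized against lookups in _pagebuf_find(). */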
833         if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
834                 if (pb->pb_relse) {
835                         atomic_inc(&pb->pb_hold);
836                         spin_unlock(&hash->bh_lock);
837                         (*(pb->pb_relse)) (pb);
838                 } else if (pb->pb_flags & PBF_FS_MANAGED) {
839                         spin_unlock(&hash->bh_lock);
840                 } else {
841                         ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)));
842                         list_del_init(&pb->pb_hash_list);
843                         spin_unlock(&hash->bh_lock);
844                         pagebuf_free(pb);
845                 }
846         } else {
847                 /*
848                  * Catch reference count leaks
849                  */
850                 ASSERT(atomic_read(&pb->pb_hold) >= 0);
851         }
852 }
853
854
855 /*
856  *      Mutual exclusion on buffers.  Locking model:
857  *
858  *      Buffers associated with inodes for which buffer locking
859  *      is not enabled are not protected by semaphores, and are
860  *      assumed to be exclusively owned by the caller.  There is a
861  *      spinlock in the buffer, used by the caller when concurrent
862  *      access is possible.
863  */
864
865 /*
866  *      pagebuf_cond_lock
867  *
868  *      pagebuf_cond_lock locks a buffer object, if it is not already
869  *      locked.  Note that this in no way locks the underlying pages,
870  *      so it is only useful for synchronizing concurrent use of page
871  *      buffer objects, not for synchronizing independent access to
872  *      the underlying pages.
873  */
874 int
875 pagebuf_cond_lock(                      /* lock buffer, if not locked   */
876                                         /* returns -EBUSY if locked    */
877         xfs_buf_t               *pb)
878 {
879         int                     locked;
880
881         locked = down_trylock(&pb->pb_sema) == 0;
882         if (locked) {
883                 PB_SET_OWNER(pb);
884         }
885         PB_TRACE(pb, "cond_lock", (long)locked);
886         return(locked ? 0 : -EBUSY);
887 }
888
889 #if defined(DEBUG) || defined(XFS_BLI_TRACE)
890 /*
891  *      pagebuf_lock_value
892  *
893  *      Return lock value for a pagebuf
894  */
895 int
896 pagebuf_lock_value(
897         xfs_buf_t               *pb)
898 {
899         return(atomic_read(&pb->pb_sema.count));
900 }
901 #endif
902
903 /*
904  *      pagebuf_lock
905  *
906  *      pagebuf_lock locks a buffer object.  Note that this in no way
907  *      locks the underlying pages, so it is only useful for synchronizing
908  *      concurrent use of page buffer objects, not for synchronizing independent
909  *      access to the underlying pages.
910  */
911 int
912 pagebuf_lock(
913         xfs_buf_t               *pb)
914 {
915         PB_TRACE(pb, "lock", 0);
916         if (atomic_read(&pb->pb_io_remaining))
917                 blk_run_address_space(pb->pb_target->pbr_mapping);
918         down(&pb->pb_sema);
919         PB_SET_OWNER(pb);
920         PB_TRACE(pb, "locked", 0);
921         return 0;
922 }
923
924 /*
925  *      pagebuf_unlock
926  *
927  *      pagebuf_unlock releases the lock on the buffer object created by
928  *      pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
929  *      created by pagebuf_pin).
930  *
931  *      If the buffer is marked delwri but is not queued, do so before we
932  *      unlock the buffer as we need to set flags correctly. We also need to
933  *      take a reference for the delwri queue because the unlocker is going to
934  *      drop theirs and they don't know we just queued it.
935  */
936 void
937 pagebuf_unlock(                         /* unlock buffer                */
938         xfs_buf_t               *pb)    /* buffer to unlock             */
939 {
940         if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
941                 atomic_inc(&pb->pb_hold);
942                 pb->pb_flags |= PBF_ASYNC;
943                 pagebuf_delwri_queue(pb, 0);
944         }
945
946         PB_CLEAR_OWNER(pb);
947         up(&pb->pb_sema);
948         PB_TRACE(pb, "unlock", 0);
949 }
950
951
952 /*
953  *      Pinning Buffer Storage in Memory
954  */
955
956 /*
957  *      pagebuf_pin
958  *
959  *      pagebuf_pin locks all of the memory represented by a buffer in
960  *      memory.  Multiple calls to pagebuf_pin and pagebuf_unpin, for
961  *      the same or different buffers affecting a given page, will
962  *      properly count the number of outstanding "pin" requests.  The
963  *      buffer may be released after the pagebuf_pin and a different
964  *      buffer used when calling pagebuf_unpin, if desired.
965  *      pagebuf_pin should be used by the file system when it wants to be
966  *      assured that no attempt will be made to force the affected
967  *      memory to disk.  It does not assure that a given logical page
968  *      will not be moved to a different physical page.
969  */
970 void
971 pagebuf_pin(
972         xfs_buf_t               *pb)
973 {
974         atomic_inc(&pb->pb_pin_count);
975         PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
976 }
977
978 /*
979  *      pagebuf_unpin
980  *
981  *      pagebuf_unpin reverses the locking of memory performed by
982  *      pagebuf_pin.  Note that both functions affect the logical
983  *      pages associated with the buffer, not the buffer itself.
984  */
985 void
986 pagebuf_unpin(
987         xfs_buf_t               *pb)
988 {
989         if (atomic_dec_and_test(&pb->pb_pin_count)) {
990                 wake_up_all(&pb->pb_waiters);
991         }
992         PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
993 }
994
995 int
996 pagebuf_ispin(
997         xfs_buf_t               *pb)
998 {
999         return atomic_read(&pb->pb_pin_count);
1000 }
1001
1002 /*
1003  *      pagebuf_wait_unpin
1004  *
1005  *      pagebuf_wait_unpin waits until all of the memory associated
1006  *      with the buffer is no longer locked in memory.  It returns
1007  *      immediately if none of the affected pages are locked.
1008  */
1009 static inline void
1010 _pagebuf_wait_unpin(
1011         xfs_buf_t               *pb)
1012 {
1013         DECLARE_WAITQUEUE       (wait, current);
1014
1015         if (atomic_read(&pb->pb_pin_count) == 0)
1016                 return;
1017
1018         add_wait_queue(&pb->pb_waiters, &wait);
1019         for (;;) {
1020                 set_current_state(TASK_UNINTERRUPTIBLE);
1021                 if (atomic_read(&pb->pb_pin_count) == 0)
1022                         break;
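                /* Unplug the device queue so any I/O still holding
                 * pins can actually complete. */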
1023                 if (atomic_read(&pb->pb_io_remaining))
1024                         blk_run_address_space(pb->pb_target->pbr_mapping);
1025                 schedule();
1026         }
1027         remove_wait_queue(&pb->pb_waiters, &wait);
1028         set_current_state(TASK_RUNNING);
1029 }
1030
1031 /*
1032  *      Buffer Utility Routines
1033  */
1034
1035 /*
1036  *      pagebuf_iodone
1037  *
1038  *      pagebuf_iodone marks a buffer for which I/O is in progress
1039  *      done with respect to that I/O.  The pb_iodone routine, if
1040  *      present, will be called as a side-effect.
1041  */
1042 STATIC void
1043 pagebuf_iodone_work(
1044         void                    *v)
1045 {
1046         xfs_buf_t               *bp = (xfs_buf_t *)v;
1047
1048         if (bp->pb_iodone)
1049                 (*(bp->pb_iodone))(bp);
1050         else if (bp->pb_flags & PBF_ASYNC)
1051                 xfs_buf_relse(bp);
1052 }
1053
1054 void
1055 pagebuf_iodone(
1056         xfs_buf_t               *pb,
1057         int                     schedule)
1058 {
1059         pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
1060         if (pb->pb_error == 0)
1061                 pb->pb_flags |= PBF_DONE;
1062
1063         PB_TRACE(pb, "iodone", pb->pb_iodone);
1064
1065         if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
1066                 if (schedule) {
1067                         INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
1068                         queue_work(xfslogd_workqueue, &pb->pb_iodone_work);
1069                 } else {
1070                         pagebuf_iodone_work(pb);
1071                 }
1072         } else {
1073                 up(&pb->pb_iodonesema);
1074         }
1075 }
1076
1077 /*
1078  *      pagebuf_ioerror
1079  *
1080  *      pagebuf_ioerror sets the error code for a buffer.
1081  */
1082 void
1083 pagebuf_ioerror(                        /* mark/clear buffer error flag */
1084         xfs_buf_t               *pb,    /* buffer to mark               */
1085         int                     error)  /* error to store (0 if none)   */
1086 {
1087         ASSERT(error >= 0 && error <= 0xffff);
1088         pb->pb_error = (unsigned short)error;
1089         PB_TRACE(pb, "ioerror", (unsigned long)error);
1090 }
1091
1092 /*
1093  *      pagebuf_iostart
1094  *
1095  *      pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
1096  *      If necessary, it will arrange for any disk space allocation required,
1097  *      and it will break up the request if the block mappings require it.
1098  *      The pb_iodone routine in the buffer supplied will only be called
1099  *      when all of the subsidiary I/O requests, if any, have been completed.
1100  *      pagebuf_iostart calls the pagebuf_ioinitiate routine or
1101  *      pagebuf_iorequest, if the former routine is not defined, to start
1102  *      the I/O on a given low-level request.
1103  */
1104 int
1105 pagebuf_iostart(                        /* start I/O on a buffer          */
1106         xfs_buf_t               *pb,    /* buffer to start                */
1107         page_buf_flags_t        flags)  /* PBF_LOCK, PBF_ASYNC, PBF_READ, */
1108                                         /* PBF_WRITE, PBF_DELWRI,         */
1109                                         /* PBF_DONT_BLOCK                 */
1110 {
1111         int                     status = 0;
1112
1113         PB_TRACE(pb, "iostart", (unsigned long)flags);
1114
1115         if (flags & PBF_DELWRI) {
1116                 pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
1117                 pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
1118                 pagebuf_delwri_queue(pb, 1);
1119                 return status;
1120         }
1121
1122         pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
1123                         PBF_READ_AHEAD | _PBF_RUN_QUEUES);
1124         pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
1125                         PBF_READ_AHEAD | _PBF_RUN_QUEUES);
1126
1127         BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);
1128
1129         /* For writes allow an alternate strategy routine to precede
1130          * the actual I/O request (which may not be issued at all in
1131          * a shutdown situation, for example).
1132          */
1133         status = (flags & PBF_WRITE) ?
1134                 pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);
1135
1136         /* Wait for I/O if we are not an async request.
1137          * Note: async I/O request completion will release the buffer,
1138          * and that can already be done by this point.  So using the
1139          * buffer pointer from here on, after async I/O, is invalid.
1140          */
1141         if (!status && !(flags & PBF_ASYNC))
1142                 status = pagebuf_iowait(pb);
1143
1144         return status;
1145 }
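/*
 * A sketch of typical usage: a synchronous read,
 *	status = pagebuf_iostart(pb, PBF_READ);
 * submits the I/O and then sleeps in pagebuf_iowait(), while
 *	pagebuf_iostart(pb, PBF_WRITE | PBF_ASYNC);
 * returns once the request is queued and completion releases the buffer,
 * so the caller must not touch pb afterwards.
 */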
1146
1147 /*
1148  * Helper routine for pagebuf_iorequest
1149  */
1150
1151 STATIC __inline__ int
1152 _pagebuf_iolocked(
1153         xfs_buf_t               *pb)
1154 {
1155         ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
1156         if (pb->pb_flags & PBF_READ)
1157                 return pb->pb_locked;
1158         return 0;
1159 }
1160
1161 STATIC __inline__ void
1162 _pagebuf_iodone(
1163         xfs_buf_t               *pb,
1164         int                     schedule)
1165 {
1166         if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
1167                 pb->pb_locked = 0;
1168                 pagebuf_iodone(pb, schedule);
1169         }
1170 }
1171
1172 STATIC int
1173 bio_end_io_pagebuf(
1174         struct bio              *bio,
1175         unsigned int            bytes_done,
1176         int                     error)
1177 {
1178         xfs_buf_t               *pb = (xfs_buf_t *)bio->bi_private;
1179         unsigned int            blocksize = pb->pb_target->pbr_bsize;
1180         struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1181
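        /* The 2.6 bio completion callback can be invoked for partial
         * completions; a nonzero bi_size means more segments are still
         * outstanding, so do nothing until the final call. */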
1182         if (bio->bi_size)
1183                 return 1;
1184
1185         if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1186                 pb->pb_error = EIO;
1187
1188         do {
1189                 struct page     *page = bvec->bv_page;
1190
1191                 if (unlikely(pb->pb_error)) {
1192                         if (pb->pb_flags & PBF_READ)
1193                                 ClearPageUptodate(page);
1194                         SetPageError(page);
1195                 } else if (blocksize == PAGE_CACHE_SIZE) {
1196                         SetPageUptodate(page);
1197                 } else if (!PagePrivate(page) &&
1198                                 (pb->pb_flags & _PBF_PAGE_CACHE)) {
1199                         set_page_region(page, bvec->bv_offset, bvec->bv_len);
1200                 }
1201
1202                 if (--bvec >= bio->bi_io_vec)
1203                         prefetchw(&bvec->bv_page->flags);
1204
1205                 if (_pagebuf_iolocked(pb)) {
1206                         unlock_page(page);
1207                 }
1208         } while (bvec >= bio->bi_io_vec);
1209
1210         _pagebuf_iodone(pb, 1);
1211         bio_put(bio);
1212         return 0;
1213 }
1214
1215 STATIC void
1216 _pagebuf_ioapply(
1217         xfs_buf_t               *pb)
1218 {
1219         int                     i, rw, map_i, total_nr_pages, nr_pages;
1220         struct bio              *bio;
1221         int                     offset = pb->pb_offset;
1222         int                     size = pb->pb_count_desired;
1223         sector_t                sector = pb->pb_bn;
1224         unsigned int            blocksize = pb->pb_target->pbr_bsize;
1225         int                     locking = _pagebuf_iolocked(pb);
1226
1227         total_nr_pages = pb->pb_page_count;
1228         map_i = 0;
1229
1230         if (pb->pb_flags & _PBF_RUN_QUEUES) {
1231                 pb->pb_flags &= ~_PBF_RUN_QUEUES;
1232                 rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC;
1233         } else {
1234                 rw = (pb->pb_flags & PBF_READ) ? READ : WRITE;
1235         }
1236
1237         if (pb->pb_flags & PBF_ORDERED) {
1238                 ASSERT(!(pb->pb_flags & PBF_READ));
1239                 rw = WRITE_BARRIER;
1240         }
1241
1242         /* Special code path for reading a sub-page-size pagebuf --
1243          * we populate the whole page, and hence the other metadata
1244          * in the same page.  This optimization is only valid when the
1245          * filesystem block size and the page size are equal.
1246          */
1247         if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
1248             (pb->pb_flags & PBF_READ) && locking &&
1249             (blocksize == PAGE_CACHE_SIZE)) {
1250                 bio = bio_alloc(GFP_NOIO, 1);
1251
1252                 bio->bi_bdev = pb->pb_target->pbr_bdev;
1253                 bio->bi_sector = sector - (offset >> BBSHIFT);
1254                 bio->bi_end_io = bio_end_io_pagebuf;
1255                 bio->bi_private = pb;
1256
1257                 bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
1258                 size = 0;
1259
1260                 atomic_inc(&pb->pb_io_remaining);
1261
1262                 goto submit_io;
1263         }
1264
1265         /* Lock down the pages which we need to for the request */
1266         if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
1267                 for (i = 0; size; i++) {
1268                         int             nbytes = PAGE_CACHE_SIZE - offset;
1269                         struct page     *page = pb->pb_pages[i];
1270
1271                         if (nbytes > size)
1272                                 nbytes = size;
1273
1274                         lock_page(page);
1275
1276                         size -= nbytes;
1277                         offset = 0;
1278                 }
1279                 offset = pb->pb_offset;
1280                 size = pb->pb_count_desired;
1281         }
1282
1283 next_chunk:
1284         atomic_inc(&pb->pb_io_remaining);
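        /* BIO_MAX_SECTORS is a budget in 512-byte basic blocks
         * (BBSHIFT == 9); convert it into whole pages for this bio. */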
1285         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1286         if (nr_pages > total_nr_pages)
1287                 nr_pages = total_nr_pages;
1288
1289         bio = bio_alloc(GFP_NOIO, nr_pages);
1290         bio->bi_bdev = pb->pb_target->pbr_bdev;
1291         bio->bi_sector = sector;
1292         bio->bi_end_io = bio_end_io_pagebuf;
1293         bio->bi_private = pb;
1294
1295         for (; size && nr_pages; nr_pages--, map_i++) {
1296                 int     nbytes = PAGE_CACHE_SIZE - offset;
1297
1298                 if (nbytes > size)
1299                         nbytes = size;
1300
1301                 if (bio_add_page(bio, pb->pb_pages[map_i],
1302                                         nbytes, offset) < nbytes)
1303                         break;
1304
1305                 offset = 0;
1306                 sector += nbytes >> BBSHIFT;
1307                 size -= nbytes;
1308                 total_nr_pages--;
1309         }
1310
1311 submit_io:
1312         if (likely(bio->bi_size)) {
1313                 submit_bio(rw, bio);
1314                 if (size)
1315                         goto next_chunk;
1316         } else {
1317                 bio_put(bio);
1318                 pagebuf_ioerror(pb, EIO);
1319         }
1320 }
1321
1322 /*
1323  *      pagebuf_iorequest -- the core I/O request routine.
1324  */
1325 int
1326 pagebuf_iorequest(                      /* start real I/O               */
1327         xfs_buf_t               *pb)    /* buffer to convey to device   */
1328 {
1329         PB_TRACE(pb, "iorequest", 0);
1330
1331         if (pb->pb_flags & PBF_DELWRI) {
1332                 pagebuf_delwri_queue(pb, 1);
1333                 return 0;
1334         }
1335
1336         if (pb->pb_flags & PBF_WRITE) {
1337                 _pagebuf_wait_unpin(pb);
1338         }
1339
1340         pagebuf_hold(pb);
1341
1342         /* Set the count to 1 initially; this stops an I/O
1343          * completion callout that arrives before we have submitted
1344          * all the I/O from calling pagebuf_iodone too early.
1345          */
1346         atomic_set(&pb->pb_io_remaining, 1);
1347         _pagebuf_ioapply(pb);
1348         _pagebuf_iodone(pb, 0);
1349
1350         pagebuf_rele(pb);
1351         return 0;
1352 }
1353
1354 /*
1355  *      pagebuf_iowait
1356  *
1357  *      pagebuf_iowait waits for I/O to complete on the buffer supplied.
1358  *      It returns immediately if no I/O is pending.  In any case, it returns
1359  *      the error code, if any, or 0 if there is no error.
1360  */
1361 int
1362 pagebuf_iowait(
1363         xfs_buf_t               *pb)
1364 {
1365         PB_TRACE(pb, "iowait", 0);
1366         if (atomic_read(&pb->pb_io_remaining))
1367                 blk_run_address_space(pb->pb_target->pbr_mapping);
1368         down(&pb->pb_iodonesema);
1369         PB_TRACE(pb, "iowaited", (long)pb->pb_error);
1370         return pb->pb_error;
1371 }
1372
1373 caddr_t
1374 pagebuf_offset(
1375         xfs_buf_t               *pb,
1376         size_t                  offset)
1377 {
1378         struct page             *page;
1379
1380         offset += pb->pb_offset;
1381
1382         page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT];
1383         return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));
1384 }
1385
1386 /*
1387  *      pagebuf_iomove
1388  *
1389  *      Move data into or out of a buffer.
1390  */
1391 void
1392 pagebuf_iomove(
1393         xfs_buf_t               *pb,    /* buffer to process            */
1394         size_t                  boff,   /* starting buffer offset       */
1395         size_t                  bsize,  /* length to copy               */
1396         caddr_t                 data,   /* data address                 */
1397         page_buf_rw_t           mode)   /* read/write flag              */
1398 {
1399         size_t                  bend, cpoff, csize;
1400         struct page             *page;
1401
1402         bend = boff + bsize;
1403         while (boff < bend) {
1404                 page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
1405                 cpoff = page_buf_poff(boff + pb->pb_offset);
1406                 csize = min_t(size_t,
1407                               PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff);
1408
1409                 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1410
1411                 switch (mode) {
1412                 case PBRW_ZERO:
1413                         memset(page_address(page) + cpoff, 0, csize);
1414                         break;
1415                 case PBRW_READ:
1416                         memcpy(data, page_address(page) + cpoff, csize);
1417                         break;
1418                 case PBRW_WRITE:
1419                         memcpy(page_address(page) + cpoff, data, csize);
1420                 }
1421
1422                 boff += csize;
1423                 data += csize;
1424         }
1425 }
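/*
 * Sketch of typical use (hypothetical values): copying a header out of a
 * buffer,
 *	pagebuf_iomove(pb, 0, sizeof(hdr), (caddr_t)&hdr, PBRW_READ);
 * walks the underlying pages directly, so it also works on buffers that
 * were never mapped with vmap().
 */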
1426
1427 /*
1428  *      Handling of buftargs.
1429  */
1430
1431 /*
1432  * Wait for any bufs with callbacks that have been submitted but
1433  * have not yet returned... walk the hash list for the target.
1434  */
1435 void
1436 xfs_wait_buftarg(
1437         xfs_buftarg_t   *btp)
1438 {
1439         xfs_buf_t       *bp, *n;
1440         xfs_bufhash_t   *hash;
1441         uint            i;
1442
1443         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1444                 hash = &btp->bt_hash[i];
1445 again:
1446                 spin_lock(&hash->bh_lock);
1447                 list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) {
1448                         ASSERT(btp == bp->pb_target);
1449                         if (!(bp->pb_flags & PBF_FS_MANAGED)) {
1450                                 spin_unlock(&hash->bh_lock);
1451                                 /*
1452                                  * Catch superblock reference count leaks
1453                                  * immediately
1454                                  */
1455                                 BUG_ON(bp->pb_bn == 0);
1456                                 delay(100);
1457                                 goto again;
1458                         }
1459                 }
1460                 spin_unlock(&hash->bh_lock);
1461         }
1462 }
1463
1464 /*
1465  * Allocate buffer hash table for a given target.
1466  * For devices containing metadata (i.e. not the log/realtime devices)
1467  * we need to allocate a much larger hash table.
1468  */
1469 STATIC void
1470 xfs_alloc_bufhash(
1471         xfs_buftarg_t           *btp,
1472         int                     external)
1473 {
1474         unsigned int            i;
1475
1476         btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
1477         btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1478         btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
1479                                         sizeof(xfs_bufhash_t), KM_SLEEP);
1480         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1481                 spin_lock_init(&btp->bt_hash[i].bh_lock);
1482                 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1483         }
1484 }
1485
1486 STATIC void
1487 xfs_free_bufhash(
1488         xfs_buftarg_t           *btp)
1489 {
1490         kmem_free(btp->bt_hash,
1491                         (1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1492         btp->bt_hash = NULL;
1493 }
1494
1495 void
1496 xfs_free_buftarg(
1497         xfs_buftarg_t           *btp,
1498         int                     external)
1499 {
1500         xfs_flush_buftarg(btp, 1);
1501         if (external)
1502                 xfs_blkdev_put(btp->pbr_bdev);
1503         xfs_free_bufhash(btp);
1504         iput(btp->pbr_mapping->host);
1505         kmem_free(btp, sizeof(*btp));
1506 }
1507
1508 STATIC int
1509 xfs_setsize_buftarg_flags(
1510         xfs_buftarg_t           *btp,
1511         unsigned int            blocksize,
1512         unsigned int            sectorsize,
1513         int                     verbose)
1514 {
1515         btp->pbr_bsize = blocksize;
1516         btp->pbr_sshift = ffs(sectorsize) - 1;
1517         btp->pbr_smask = sectorsize - 1;
1518
1519         if (set_blocksize(btp->pbr_bdev, sectorsize)) {
1520                 printk(KERN_WARNING
1521                         "XFS: Cannot set_blocksize to %u on device %s\n",
1522                         sectorsize, XFS_BUFTARG_NAME(btp));
1523                 return EINVAL;
1524         }
1525
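        /* One bit in the page-region bitmap covers PAGE_CACHE_SIZE /
         * BITS_PER_LONG bytes; sectors smaller than that make the
         * uptodate tracking coarser than the sector size, hence the
         * warning below. */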
1526         if (verbose &&
1527             (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1528                 printk(KERN_WARNING
1529                         "XFS: %u byte sectors in use on device %s.  "
1530                         "This is suboptimal; %u or greater is ideal.\n",
1531                         sectorsize, XFS_BUFTARG_NAME(btp),
1532                         (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1533         }
1534
1535         return 0;
1536 }
1537
1538 /*
1539  * When allocating the initial buffer target we have not yet
1540  * read in the superblock, so we do not know what size sectors
1541  * are being used at this early stage.  Play safe.
1542 */
1543 STATIC int
1544 xfs_setsize_buftarg_early(
1545         xfs_buftarg_t           *btp,
1546         struct block_device     *bdev)
1547 {
1548         return xfs_setsize_buftarg_flags(btp,
1549                         PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1550 }
1551
1552 int
1553 xfs_setsize_buftarg(
1554         xfs_buftarg_t           *btp,
1555         unsigned int            blocksize,
1556         unsigned int            sectorsize)
1557 {
1558         return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1559 }
1560
1561 STATIC int
1562 xfs_mapping_buftarg(
1563         xfs_buftarg_t           *btp,
1564         struct block_device     *bdev)
1565 {
1566         struct backing_dev_info *bdi;
1567         struct inode            *inode;
1568         struct address_space    *mapping;
1569         static struct address_space_operations mapping_aops = {
1570                 .sync_page = block_sync_page,
1571         };
1572
1573         inode = new_inode(bdev->bd_inode->i_sb);
1574         if (!inode) {
1575                 printk(KERN_WARNING
1576                         "XFS: Cannot allocate mapping inode for device %s\n",
1577                         XFS_BUFTARG_NAME(btp));
1578                 return ENOMEM;
1579         }
1580         inode->i_mode = S_IFBLK;
1581         inode->i_bdev = bdev;
1582         inode->i_rdev = bdev->bd_dev;
1583         bdi = blk_get_backing_dev_info(bdev);
1584         if (!bdi)
1585                 bdi = &default_backing_dev_info;
1586         mapping = &inode->i_data;
1587         mapping->a_ops = &mapping_aops;
1588         mapping->backing_dev_info = bdi;
1589         mapping_set_gfp_mask(mapping, GFP_NOFS);
1590         btp->pbr_mapping = mapping;
1591         return 0;
1592 }
1593
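/*
 * Allocate and set up a buffer target for the given block device.
 * The sector size is conservatively initialised via
 * xfs_setsize_buftarg_early(); the caller is expected to call
 * xfs_setsize_buftarg() once the superblock has been read.
 */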
1594 xfs_buftarg_t *
1595 xfs_alloc_buftarg(
1596         struct block_device     *bdev,
1597         int                     external)
1598 {
1599         xfs_buftarg_t           *btp;
1600
1601         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1602
1603         btp->pbr_dev = bdev->bd_dev;
1604         btp->pbr_bdev = bdev;
1605         if (xfs_setsize_buftarg_early(btp, bdev))
1606                 goto error;
1607         if (xfs_mapping_buftarg(btp, bdev))
1608                 goto error;
1609         xfs_alloc_bufhash(btp, external);
1610         return btp;
1611
1612 error:
1613         kmem_free(btp, sizeof(*btp));
1614         return NULL;
1615 }
1616
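/*
 * Typical pairing (an illustrative sketch, not code from this file;
 * the mount path and error handling are simplified):
 *
 *	btp = xfs_alloc_buftarg(bdev, 1);
 *	if (!btp)
 *		return ENOMEM;
 *	...
 *	error = xfs_setsize_buftarg(btp, mp->m_sb.sb_blocksize,
 *				    mp->m_sb.sb_sectsize);
 *	...
 *	xfs_free_buftarg(btp, 1);
 */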
1617
1618 /*
1619  * Pagebuf delayed write buffer handling
1620  */
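/*
 * Buffers marked PBF_DELWRI are held on the global pbd_delwrite_queue,
 * protected by pbd_delwrite_lock, until the xfsbufd daemon (below)
 * writes them out, either once they have aged sufficiently or when a
 * flush is forced.
 */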
1621
1622 STATIC LIST_HEAD(pbd_delwrite_queue);
1623 STATIC DEFINE_SPINLOCK(pbd_delwrite_lock);
1624
1625 STATIC void
1626 pagebuf_delwri_queue(
1627         xfs_buf_t               *pb,
1628         int                     unlock)
1629 {
1630         PB_TRACE(pb, "delwri_q", (long)unlock);
1631         ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
1632                                         (PBF_DELWRI|PBF_ASYNC));
1633
1634         spin_lock(&pbd_delwrite_lock);
1635         /* If already in the queue, dequeue and place at tail */
1636         if (!list_empty(&pb->pb_list)) {
1637                 ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
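                /*
                 * The queue already holds a reference to this buffer;
                 * if the caller passed in an extra hold along with the
                 * queue transfer (unlock), drop it before requeueing.
                 */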
1638                 if (unlock) {
1639                         atomic_dec(&pb->pb_hold);
1640                 }
1641                 list_del(&pb->pb_list);
1642         }
1643
1644         pb->pb_flags |= _PBF_DELWRI_Q;
1645         list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
1646         pb->pb_queuetime = jiffies;
1647         spin_unlock(&pbd_delwrite_lock);
1648
1649         if (unlock)
1650                 pagebuf_unlock(pb);
1651 }
1652
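/*
 * Remove a buffer from the delayed write queue and clear its delwri
 * state.  Drops the queue's buffer reference if the buffer was
 * actually queued.
 */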
1653 void
1654 pagebuf_delwri_dequeue(
1655         xfs_buf_t               *pb)
1656 {
1657         int                     dequeued = 0;
1658
1659         spin_lock(&pbd_delwrite_lock);
1660         if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
1661                 ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
1662                 list_del_init(&pb->pb_list);
1663                 dequeued = 1;
1664         }
1665         pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1666         spin_unlock(&pbd_delwrite_lock);
1667
1668         if (dequeued)
1669                 pagebuf_rele(pb);
1670
1671         PB_TRACE(pb, "delwri_dq", (long)dequeued);
1672 }
1673
1674 STATIC void
1675 pagebuf_runall_queues(
1676         struct workqueue_struct *queue)
1677 {
1678         flush_workqueue(queue);
1679 }
1680
1681 /* State for the delayed write buffer daemon (xfsbufd) */
1682 STATIC struct task_struct *xfsbufd_task;
1683 STATIC int xfsbufd_force_flush;
1684 STATIC int xfsbufd_force_sleep;
1685
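/*
 * Memory shaker callback: under memory pressure, ask xfsbufd to flush
 * everything on the delayed write queue.  Returns 0, as nothing is
 * freed directly here.
 */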
1686 STATIC int
1687 xfsbufd_wakeup(
1688         int                     priority,
1689         gfp_t                   mask)
1690 {
1691         if (xfsbufd_force_sleep)
1692                 return 0;
1693         xfsbufd_force_flush = 1;
1694         barrier();
1695         wake_up_process(xfsbufd_task);
1696         return 0;
1697 }
1698
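/*
 * The delayed write buffer daemon.  Wakes every xfs_buf_timer_centisecs
 * and writes out any unpinned, unlocked buffer that has sat on the
 * delayed write queue for longer than xfs_buf_age_centisecs, or all of
 * them if a flush has been forced by xfsbufd_wakeup().
 */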
1699 STATIC int
1700 xfsbufd(
1701         void                    *data)
1702 {
1703         struct list_head        tmp;
1704         unsigned long           age;
1705         xfs_buftarg_t           *target;
1706         xfs_buf_t               *pb, *n;
1707
1708         current->flags |= PF_MEMALLOC;
1709
1710         INIT_LIST_HEAD(&tmp);
1711         do {
1712                 if (unlikely(freezing(current))) {
1713                         xfsbufd_force_sleep = 1;
1714                         refrigerator();
1715                 } else {
1716                         xfsbufd_force_sleep = 0;
1717                 }
1718
1719                 schedule_timeout_interruptible(
1720                         xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1721
1722                 age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1723                 spin_lock(&pbd_delwrite_lock);
1724                 list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
1725                         PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
1726                         ASSERT(pb->pb_flags & PBF_DELWRI);
1727
1728                         if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
1729                                 if (!xfsbufd_force_flush &&
1730                                     time_before(jiffies,
1731                                                 pb->pb_queuetime + age)) {
1732                                         pagebuf_unlock(pb);
1733                                         break;
1734                                 }
1735
1736                                 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1737                                 pb->pb_flags |= PBF_WRITE;
1738                                 list_move(&pb->pb_list, &tmp);
1739                         }
1740                 }
1741                 spin_unlock(&pbd_delwrite_lock);
1742
1743                 while (!list_empty(&tmp)) {
1744                         pb = list_entry(tmp.next, xfs_buf_t, pb_list);
1745                         target = pb->pb_target;
1746
1747                         list_del_init(&pb->pb_list);
1748                         pagebuf_iostrategy(pb);
1749
1750                         blk_run_address_space(target->pbr_mapping);
1751                 }
1752
1753                 if (as_list_len > 0)
1754                         purge_addresses();
1755
1756                 xfsbufd_force_flush = 0;
1757         } while (!kthread_should_stop());
1758
1759         return 0;
1760 }
1761
1762 /*
1763  * Write out all queued delayed write buffers belonging to the given
1764  * target, optionally waiting for the I/O to complete; pinned buffers
1765  * are skipped and counted.  Used at unmount and in error handling.
1766  */
1767 int
1768 xfs_flush_buftarg(
1769         xfs_buftarg_t           *target,
1770         int                     wait)
1771 {
1772         struct list_head        tmp;
1773         xfs_buf_t               *pb, *n;
1774         int                     pincount = 0;
1775
1776         pagebuf_runall_queues(xfsdatad_workqueue);
1777         pagebuf_runall_queues(xfslogd_workqueue);
1778
1779         INIT_LIST_HEAD(&tmp);
1780         spin_lock(&pbd_delwrite_lock);
1781         list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
1782
1783                 if (pb->pb_target != target)
1784                         continue;
1785
1786                 ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
1787                 PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
1788                 if (pagebuf_ispin(pb)) {
1789                         pincount++;
1790                         continue;
1791                 }
1792
1793                 list_move(&pb->pb_list, &tmp);
1794         }
1795         spin_unlock(&pbd_delwrite_lock);
1796
1797         /*
1798          * Dropped the delayed write list lock, now walk the temporary list
1799          */
1800         list_for_each_entry_safe(pb, n, &tmp, pb_list) {
1801                 pagebuf_lock(pb);
1802                 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1803                 pb->pb_flags |= PBF_WRITE;
1804                 if (wait)
1805                         pb->pb_flags &= ~PBF_ASYNC;
1806                 else
1807                         list_del_init(&pb->pb_list);
1808
1809                 pagebuf_iostrategy(pb);
1810         }
1811
1812         /*
1813          * Remaining list items must be flushed before returning
1814          */
1815         while (!list_empty(&tmp)) {
1816                 pb = list_entry(tmp.next, xfs_buf_t, pb_list);
1817
1818                 list_del_init(&pb->pb_list);
1819                 xfs_iowait(pb);
1820                 xfs_buf_relse(pb);
1821         }
1822
1823         if (wait)
1824                 blk_run_address_space(target->pbr_mapping);
1825
1826         return pincount;
1827 }
1828
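/*
 * Set up the buffer cache infrastructure: trace buffer, buffer zone,
 * I/O completion workqueues, the xfsbufd daemon and the memory shaker
 * hook.  Unwinds in reverse order on failure.
 */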
1829 int __init
1830 pagebuf_init(void)
1831 {
1832         int             error = -ENOMEM;
1833
1834 #ifdef PAGEBUF_TRACE
1835         pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
1836 #endif
1837
1838         pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
1839         if (!pagebuf_zone)
1840                 goto out_free_trace_buf;
1841
1842         xfslogd_workqueue = create_workqueue("xfslogd");
1843         if (!xfslogd_workqueue)
1844                 goto out_free_buf_zone;
1845
1846         xfsdatad_workqueue = create_workqueue("xfsdatad");
1847         if (!xfsdatad_workqueue)
1848                 goto out_destroy_xfslogd_workqueue;
1849
1850         xfsbufd_task = kthread_run(xfsbufd, NULL, "xfsbufd");
1851         if (IS_ERR(xfsbufd_task)) {
1852                 error = PTR_ERR(xfsbufd_task);
1853                 goto out_destroy_xfsdatad_workqueue;
1854         }
1855
1856         pagebuf_shake = kmem_shake_register(xfsbufd_wakeup);
1857         if (!pagebuf_shake)
1858                 goto out_stop_xfsbufd;
1859
1860         return 0;
1861
1862  out_stop_xfsbufd:
1863         kthread_stop(xfsbufd_task);
1864  out_destroy_xfsdatad_workqueue:
1865         destroy_workqueue(xfsdatad_workqueue);
1866  out_destroy_xfslogd_workqueue:
1867         destroy_workqueue(xfslogd_workqueue);
1868  out_free_buf_zone:
1869         kmem_zone_destroy(pagebuf_zone);
1870  out_free_trace_buf:
1871 #ifdef PAGEBUF_TRACE
1872         ktrace_free(pagebuf_trace_buf);
1873 #endif
1874         return error;
1875 }
1876
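/*
 * Tear everything down again, in the reverse order of pagebuf_init().
 */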
1877 void
1878 pagebuf_terminate(void)
1879 {
1880         kmem_shake_deregister(pagebuf_shake);
1881         kthread_stop(xfsbufd_task);
1882         destroy_workqueue(xfsdatad_workqueue);
1883         destroy_workqueue(xfslogd_workqueue);
1884         kmem_zone_destroy(pagebuf_zone);
1885 #ifdef PAGEBUF_TRACE
1886         ktrace_free(pagebuf_trace_buf);
1887 #endif
1888 }