[sfrench/cifs-2.6.git] fs/xfs/linux-2.6/xfs_buf.c
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36 #include <linux/list_sort.h>
37
38 #include "xfs_sb.h"
39 #include "xfs_inum.h"
40 #include "xfs_log.h"
41 #include "xfs_ag.h"
42 #include "xfs_mount.h"
43 #include "xfs_trace.h"
44
45 static kmem_zone_t *xfs_buf_zone;
46 STATIC int xfsbufd(void *);
47 STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
48 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
49 static struct shrinker xfs_buf_shake = {
50         .shrink = xfsbufd_wakeup,
51         .seeks = DEFAULT_SEEKS,
52 };
53
54 static struct workqueue_struct *xfslogd_workqueue;
55 struct workqueue_struct *xfsdatad_workqueue;
56 struct workqueue_struct *xfsconvertd_workqueue;
57
58 #ifdef XFS_BUF_LOCK_TRACKING
59 # define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
60 # define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
61 # define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
62 #else
63 # define XB_SET_OWNER(bp)       do { } while (0)
64 # define XB_CLEAR_OWNER(bp)     do { } while (0)
65 # define XB_GET_OWNER(bp)       do { } while (0)
66 #endif
67
68 #define xb_to_gfp(flags) \
69         ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
70           ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
71
72 #define xb_to_km(flags) \
73          (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
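/*
 * Note on xb_to_gfp()/xb_to_km() above: XBF_READ_AHEAD allocations use
 * __GFP_NORETRY so that speculative read-ahead fails quickly under memory
 * pressure rather than retrying, XBF_DONT_BLOCK maps to GFP_NOFS/KM_NOFS
 * so we do not recurse back into the filesystem from memory reclaim, and
 * __GFP_NOWARN suppresses allocation failure warnings for these
 * best-effort allocations.
 */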
74
75 #define xfs_buf_allocate(flags) \
76         kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
77 #define xfs_buf_deallocate(bp) \
78         kmem_zone_free(xfs_buf_zone, (bp));
79
80 static inline int
81 xfs_buf_is_vmapped(
82         struct xfs_buf  *bp)
83 {
84         /*
85          * Return true if the buffer is vmapped.
86          *
87          * The XBF_MAPPED flag is set if the buffer should be mapped, but the
88          * code is clever enough to know it doesn't have to map a single page,
89          * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
90          */
91         return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
92 }
93
94 static inline int
95 xfs_buf_vmap_len(
96         struct xfs_buf  *bp)
97 {
98         return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
99 }
100
101 /*
102  *      Page Region interfaces.
103  *
104  *      For pages in filesystems where the blocksize is smaller than the
105  *      pagesize, we use the page->private field (long) to hold a bitmap
106  *      of uptodate regions within the page.
107  *
108  *      Each such region is "bytes per page / bits per long" bytes long.
109  *
110  *      NBPPR == number-of-bytes-per-page-region
111  *      BTOPR == bytes-to-page-region (rounded up)
112  *      BTOPRT == bytes-to-page-region-truncated (rounded down)
113  */
114 #if (BITS_PER_LONG == 32)
115 #define PRSHIFT         (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
116 #elif (BITS_PER_LONG == 64)
117 #define PRSHIFT         (PAGE_CACHE_SHIFT - 6)  /* (64 == 1<<6) */
118 #else
119 #error BITS_PER_LONG must be 32 or 64
120 #endif
121 #define NBPPR           (PAGE_CACHE_SIZE/BITS_PER_LONG)
122 #define BTOPR(b)        (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
123 #define BTOPRT(b)       (((unsigned int)(b) >> PRSHIFT))
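/*
 * Example: with 4 KiB pages and 64-bit longs, NBPPR is 64 bytes, so each
 * page is divided into 64 regions of 64 bytes, tracked by one bit per
 * region in page->private.
 */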
124
125 STATIC unsigned long
126 page_region_mask(
127         size_t          offset,
128         size_t          length)
129 {
130         unsigned long   mask;
131         int             first, final;
132
133         first = BTOPR(offset);
134         final = BTOPRT(offset + length - 1);
135         first = min(first, final);
136
137         mask = ~0UL;
138         mask <<= BITS_PER_LONG - (final - first);
139         mask >>= BITS_PER_LONG - (final);
140
141         ASSERT(offset + length <= PAGE_CACHE_SIZE);
142         ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
143
144         return mask;
145 }
146
147 STATIC void
148 set_page_region(
149         struct page     *page,
150         size_t          offset,
151         size_t          length)
152 {
153         set_page_private(page,
154                 page_private(page) | page_region_mask(offset, length));
155         if (page_private(page) == ~0UL)
156                 SetPageUptodate(page);
157 }
158
159 STATIC int
160 test_page_region(
161         struct page     *page,
162         size_t          offset,
163         size_t          length)
164 {
165         unsigned long   mask = page_region_mask(offset, length);
166
167         return (mask && (page_private(page) & mask) == mask);
168 }
169
170 /*
171  *      Internal xfs_buf_t object manipulation
172  */
173
174 STATIC void
175 _xfs_buf_initialize(
176         xfs_buf_t               *bp,
177         xfs_buftarg_t           *target,
178         xfs_off_t               range_base,
179         size_t                  range_length,
180         xfs_buf_flags_t         flags)
181 {
182         /*
183          * We don't want certain flags to appear in b_flags.
184          */
185         flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
186
187         memset(bp, 0, sizeof(xfs_buf_t));
188         atomic_set(&bp->b_hold, 1);
189         init_completion(&bp->b_iowait);
190         INIT_LIST_HEAD(&bp->b_list);
191         INIT_LIST_HEAD(&bp->b_hash_list);
192         init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
193         XB_SET_OWNER(bp);
194         bp->b_target = target;
195         bp->b_file_offset = range_base;
196         /*
197          * Set buffer_length and count_desired to the same value initially.
198          * I/O routines should use count_desired, which will be the same in
199          * most cases but may be reset (e.g. XFS recovery).
200          */
201         bp->b_buffer_length = bp->b_count_desired = range_length;
202         bp->b_flags = flags;
203         bp->b_bn = XFS_BUF_DADDR_NULL;
204         atomic_set(&bp->b_pin_count, 0);
205         init_waitqueue_head(&bp->b_waiters);
206
207         XFS_STATS_INC(xb_create);
208
209         trace_xfs_buf_init(bp, _RET_IP_);
210 }
211
212 /*
213  *      Allocate a page array capable of holding a specified number
214  *      of pages, and point the page buf at it.
215  */
216 STATIC int
217 _xfs_buf_get_pages(
218         xfs_buf_t               *bp,
219         int                     page_count,
220         xfs_buf_flags_t         flags)
221 {
222         /* Make sure that we have a page list */
223         if (bp->b_pages == NULL) {
224                 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
225                 bp->b_page_count = page_count;
226                 if (page_count <= XB_PAGES) {
227                         bp->b_pages = bp->b_page_array;
228                 } else {
229                         bp->b_pages = kmem_alloc(sizeof(struct page *) *
230                                         page_count, xb_to_km(flags));
231                         if (bp->b_pages == NULL)
232                                 return -ENOMEM;
233                 }
234                 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
235         }
236         return 0;
237 }
238
239 /*
240  *      Frees b_pages if it was allocated.
241  */
242 STATIC void
243 _xfs_buf_free_pages(
244         xfs_buf_t       *bp)
245 {
246         if (bp->b_pages != bp->b_page_array) {
247                 kmem_free(bp->b_pages);
248                 bp->b_pages = NULL;
249         }
250 }
251
252 /*
253  *      Releases the specified buffer.
254  *
255  *      The modification state of any associated pages is left unchanged.
256  *      The buffer most not be on any hash - use xfs_buf_rele instead for
257  *      The buffer must not be on any hash - use xfs_buf_rele instead for
258  *      hashed and refcounted buffers.
259 void
260 xfs_buf_free(
261         xfs_buf_t               *bp)
262 {
263         trace_xfs_buf_free(bp, _RET_IP_);
264
265         ASSERT(list_empty(&bp->b_hash_list));
266
267         if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
268                 uint            i;
269
270                 if (xfs_buf_is_vmapped(bp))
271                         vm_unmap_ram(bp->b_addr - bp->b_offset,
272                                         bp->b_page_count);
273
274                 for (i = 0; i < bp->b_page_count; i++) {
275                         struct page     *page = bp->b_pages[i];
276
277                         if (bp->b_flags & _XBF_PAGE_CACHE)
278                                 ASSERT(!PagePrivate(page));
279                         page_cache_release(page);
280                 }
281         }
282         _xfs_buf_free_pages(bp);
283         xfs_buf_deallocate(bp);
284 }
285
286 /*
287  *      Finds all pages for the buffer in question and builds its page list.
288  */
289 STATIC int
290 _xfs_buf_lookup_pages(
291         xfs_buf_t               *bp,
292         uint                    flags)
293 {
294         struct address_space    *mapping = bp->b_target->bt_mapping;
295         size_t                  blocksize = bp->b_target->bt_bsize;
296         size_t                  size = bp->b_count_desired;
297         size_t                  nbytes, offset;
298         gfp_t                   gfp_mask = xb_to_gfp(flags);
299         unsigned short          page_count, i;
300         pgoff_t                 first;
301         xfs_off_t               end;
302         int                     error;
303
304         end = bp->b_file_offset + bp->b_buffer_length;
305         page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
306
307         error = _xfs_buf_get_pages(bp, page_count, flags);
308         if (unlikely(error))
309                 return error;
310         bp->b_flags |= _XBF_PAGE_CACHE;
311
312         offset = bp->b_offset;
313         first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
314
315         for (i = 0; i < bp->b_page_count; i++) {
316                 struct page     *page;
317                 uint            retries = 0;
318
319               retry:
320                 page = find_or_create_page(mapping, first + i, gfp_mask);
321                 if (unlikely(page == NULL)) {
322                         if (flags & XBF_READ_AHEAD) {
323                                 bp->b_page_count = i;
324                                 for (i = 0; i < bp->b_page_count; i++)
325                                         unlock_page(bp->b_pages[i]);
326                                 return -ENOMEM;
327                         }
328
329                         /*
330                          * This could deadlock.
331                          *
332                          * But until all the XFS lowlevel code is revamped to
333                          * handle buffer allocation failures we can't do much.
334                          */
335                         if (!(++retries % 100))
336                                 printk(KERN_ERR
337                                         "XFS: possible memory allocation "
338                                         "deadlock in %s (mode:0x%x)\n",
339                                         __func__, gfp_mask);
340
341                         XFS_STATS_INC(xb_page_retries);
342                         xfsbufd_wakeup(NULL, 0, gfp_mask);
343                         congestion_wait(BLK_RW_ASYNC, HZ/50);
344                         goto retry;
345                 }
346
347                 XFS_STATS_INC(xb_page_found);
348
349                 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
350                 size -= nbytes;
351
352                 ASSERT(!PagePrivate(page));
353                 if (!PageUptodate(page)) {
354                         page_count--;
355                         if (blocksize >= PAGE_CACHE_SIZE) {
356                                 if (flags & XBF_READ)
357                                         bp->b_flags |= _XBF_PAGE_LOCKED;
358                         } else if (!PagePrivate(page)) {
359                                 if (test_page_region(page, offset, nbytes))
360                                         page_count++;
361                         }
362                 }
363
364                 bp->b_pages[i] = page;
365                 offset = 0;
366         }
367
368         if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
369                 for (i = 0; i < bp->b_page_count; i++)
370                         unlock_page(bp->b_pages[i]);
371         }
372
373         if (page_count == bp->b_page_count)
374                 bp->b_flags |= XBF_DONE;
375
376         return error;
377 }
378
379 /*
380  *      Map buffer into kernel address-space if necessary.
381  */
382 STATIC int
383 _xfs_buf_map_pages(
384         xfs_buf_t               *bp,
385         uint                    flags)
386 {
387         /* A single page buffer is always mappable */
388         if (bp->b_page_count == 1) {
389                 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
390                 bp->b_flags |= XBF_MAPPED;
391         } else if (flags & XBF_MAPPED) {
392                 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
393                                         -1, PAGE_KERNEL);
394                 if (unlikely(bp->b_addr == NULL))
395                         return -ENOMEM;
396                 bp->b_addr += bp->b_offset;
397                 bp->b_flags |= XBF_MAPPED;
398         }
399
400         return 0;
401 }
402
403 /*
404  *      Finding and Reading Buffers
405  */
406
407 /*
408  *      Looks up, and creates if absent, a lockable buffer for
409  *      a given range of an inode.  The buffer is returned
410  *      locked.  If other overlapping buffers exist, they are
411  *      released before the new buffer is created and locked,
412  *      which may imply that this call will block until those buffers
413  *      are unlocked.  No I/O is implied by this call.
414  */
415 xfs_buf_t *
416 _xfs_buf_find(
417         xfs_buftarg_t           *btp,   /* block device target          */
418         xfs_off_t               ioff,   /* starting offset of range     */
419         size_t                  isize,  /* length of range              */
420         xfs_buf_flags_t         flags,
421         xfs_buf_t               *new_bp)
422 {
423         xfs_off_t               range_base;
424         size_t                  range_length;
425         xfs_bufhash_t           *hash;
426         xfs_buf_t               *bp, *n;
427
428         range_base = (ioff << BBSHIFT);
429         range_length = (isize << BBSHIFT);
430
431         /* Check for IOs smaller than the sector size / not sector aligned */
432         ASSERT(!(range_length < (1 << btp->bt_sshift)));
433         ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
434
435         hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
436
437         spin_lock(&hash->bh_lock);
438
439         list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
440                 ASSERT(btp == bp->b_target);
441                 if (bp->b_file_offset == range_base &&
442                     bp->b_buffer_length == range_length) {
443                         /*
444                          * If we look at something, bring it to the
445                          * front of the list for next time.
446                          */
447                         atomic_inc(&bp->b_hold);
448                         list_move(&bp->b_hash_list, &hash->bh_list);
449                         goto found;
450                 }
451         }
452
453         /* No match found */
454         if (new_bp) {
455                 _xfs_buf_initialize(new_bp, btp, range_base,
456                                 range_length, flags);
457                 new_bp->b_hash = hash;
458                 list_add(&new_bp->b_hash_list, &hash->bh_list);
459         } else {
460                 XFS_STATS_INC(xb_miss_locked);
461         }
462
463         spin_unlock(&hash->bh_lock);
464         return new_bp;
465
466 found:
467         spin_unlock(&hash->bh_lock);
468
469         /* Attempt to get the semaphore without sleeping.  If that
470          * fails and the caller allows blocking, fall back to a hard
471          * (sleeping) attempt on the semaphore below.
472          */
473         if (down_trylock(&bp->b_sema)) {
474                 if (!(flags & XBF_TRYLOCK)) {
475                         /* wait for buffer ownership */
476                         xfs_buf_lock(bp);
477                         XFS_STATS_INC(xb_get_locked_waited);
478                 } else {
479                         /* We asked for a trylock and failed, no need
480                         /* We asked for a trylock and failed; no need
481                          * to look at file offset and length here, since
482                          * this buffer at least overlaps our buffer and
483                          * is locked.  Therefore our buffer either does
484                          * not exist, or is this buffer.
485                          */
486                         XFS_STATS_INC(xb_busy_locked);
487                         return NULL;
488                 }
489         } else {
490                 /* trylock worked */
491                 XB_SET_OWNER(bp);
492         }
493
494         if (bp->b_flags & XBF_STALE) {
495                 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
496                 bp->b_flags &= XBF_MAPPED;
497         }
498
499         trace_xfs_buf_find(bp, flags, _RET_IP_);
500         XFS_STATS_INC(xb_get_locked);
501         return bp;
502 }
503
504 /*
505  *      Assembles a buffer covering the specified range.
506  *      Storage in memory for all portions of the buffer will be allocated,
507  *      although backing storage may not be.
508  */
509 xfs_buf_t *
510 xfs_buf_get(
511         xfs_buftarg_t           *target,/* target for buffer            */
512         xfs_off_t               ioff,   /* starting offset of range     */
513         size_t                  isize,  /* length of range              */
514         xfs_buf_flags_t         flags)
515 {
516         xfs_buf_t               *bp, *new_bp;
517         int                     error = 0, i;
518
519         new_bp = xfs_buf_allocate(flags);
520         if (unlikely(!new_bp))
521                 return NULL;
522
523         bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
524         if (bp == new_bp) {
525                 error = _xfs_buf_lookup_pages(bp, flags);
526                 if (error)
527                         goto no_buffer;
528         } else {
529                 xfs_buf_deallocate(new_bp);
530                 if (unlikely(bp == NULL))
531                         return NULL;
532         }
533
534         for (i = 0; i < bp->b_page_count; i++)
535                 mark_page_accessed(bp->b_pages[i]);
536
537         if (!(bp->b_flags & XBF_MAPPED)) {
538                 error = _xfs_buf_map_pages(bp, flags);
539                 if (unlikely(error)) {
540                         printk(KERN_WARNING "%s: failed to map pages\n",
541                                         __func__);
542                         goto no_buffer;
543                 }
544         }
545
546         XFS_STATS_INC(xb_get);
547
548         /*
549          * Always fill in the block number now; the mapped cases can do
550          * their own overlay of this later.
551          */
552         bp->b_bn = ioff;
553         bp->b_count_desired = bp->b_buffer_length;
554
555         trace_xfs_buf_get(bp, flags, _RET_IP_);
556         return bp;
557
558  no_buffer:
559         if (flags & (XBF_LOCK | XBF_TRYLOCK))
560                 xfs_buf_unlock(bp);
561         xfs_buf_rele(bp);
562         return NULL;
563 }
564
565 STATIC int
566 _xfs_buf_read(
567         xfs_buf_t               *bp,
568         xfs_buf_flags_t         flags)
569 {
570         int                     status;
571
572         ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
573         ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
574
575         bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
576                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
577         bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
578                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
579
580         status = xfs_buf_iorequest(bp);
581         if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
582                 return status;
583         return xfs_buf_iowait(bp);
584 }
585
586 xfs_buf_t *
587 xfs_buf_read(
588         xfs_buftarg_t           *target,
589         xfs_off_t               ioff,
590         size_t                  isize,
591         xfs_buf_flags_t         flags)
592 {
593         xfs_buf_t               *bp;
594
595         flags |= XBF_READ;
596
597         bp = xfs_buf_get(target, ioff, isize, flags);
598         if (bp) {
599                 trace_xfs_buf_read(bp, flags, _RET_IP_);
600
601                 if (!XFS_BUF_ISDONE(bp)) {
602                         XFS_STATS_INC(xb_get_read);
603                         _xfs_buf_read(bp, flags);
604                 } else if (flags & XBF_ASYNC) {
605                         /*
606                          * Read ahead call which is already satisfied,
607                          * drop the buffer
608                          */
609                         goto no_buffer;
610                 } else {
611                         /* We do not want read in the flags */
612                         bp->b_flags &= ~XBF_READ;
613                 }
614         }
615
616         return bp;
617
618  no_buffer:
619         if (flags & (XBF_LOCK | XBF_TRYLOCK))
620                 xfs_buf_unlock(bp);
621         xfs_buf_rele(bp);
622         return NULL;
623 }
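/*
 * Illustrative use of the read interface (hypothetical caller; "blkno" and
 * "numblks" are placeholders, both expressed in 512-byte basic blocks):
 *
 *	bp = xfs_buf_read(mp->m_ddev_targp, blkno, numblks,
 *			  XBF_LOCK | XBF_MAPPED);
 *	if (bp && !XFS_BUF_ISERROR(bp)) {
 *		... access the contents via xfs_buf_offset(bp, 0) ...
 *		xfs_buf_relse(bp);	unlocks and drops the hold
 *	}
 */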
624
625 /*
626  *      If we are not low on memory then do the readahead in a deadlock
627  *      safe manner.
628  */
629 void
630 xfs_buf_readahead(
631         xfs_buftarg_t           *target,
632         xfs_off_t               ioff,
633         size_t                  isize,
634         xfs_buf_flags_t         flags)
635 {
636         struct backing_dev_info *bdi;
637
638         bdi = target->bt_mapping->backing_dev_info;
639         if (bdi_read_congested(bdi))
640                 return;
641
642         flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
643         xfs_buf_read(target, ioff, isize, flags);
644 }
645
646 xfs_buf_t *
647 xfs_buf_get_empty(
648         size_t                  len,
649         xfs_buftarg_t           *target)
650 {
651         xfs_buf_t               *bp;
652
653         bp = xfs_buf_allocate(0);
654         if (bp)
655                 _xfs_buf_initialize(bp, target, 0, len, 0);
656         return bp;
657 }
658
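/*
 * Return the page backing a kernel virtual address, handling both
 * directly mapped (e.g. kmalloc'd) and vmalloc'd addresses.
 */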
659 static inline struct page *
660 mem_to_page(
661         void                    *addr)
662 {
663         if ((!is_vmalloc_addr(addr))) {
664                 return virt_to_page(addr);
665         } else {
666                 return vmalloc_to_page(addr);
667         }
668 }
669
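/*
 * Associate externally supplied memory with an existing buffer: any
 * previous page list is freed and b_pages is rebuilt from the pages
 * backing the supplied region, which is used instead of page cache pages.
 */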
670 int
671 xfs_buf_associate_memory(
672         xfs_buf_t               *bp,
673         void                    *mem,
674         size_t                  len)
675 {
676         int                     rval;
677         int                     i = 0;
678         unsigned long           pageaddr;
679         unsigned long           offset;
680         size_t                  buflen;
681         int                     page_count;
682
683         pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
684         offset = (unsigned long)mem - pageaddr;
685         buflen = PAGE_CACHE_ALIGN(len + offset);
686         page_count = buflen >> PAGE_CACHE_SHIFT;
687
688         /* Free any previous set of page pointers */
689         if (bp->b_pages)
690                 _xfs_buf_free_pages(bp);
691
692         bp->b_pages = NULL;
693         bp->b_addr = mem;
694
695         rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
696         if (rval)
697                 return rval;
698
699         bp->b_offset = offset;
700
701         for (i = 0; i < bp->b_page_count; i++) {
702                 bp->b_pages[i] = mem_to_page((void *)pageaddr);
703                 pageaddr += PAGE_CACHE_SIZE;
704         }
705
706         bp->b_count_desired = len;
707         bp->b_buffer_length = buflen;
708         bp->b_flags |= XBF_MAPPED;
709         bp->b_flags &= ~_XBF_PAGE_LOCKED;
710
711         return 0;
712 }
713
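/*
 * Allocate a buffer of the given length backed by freshly allocated
 * pages rather than the block device's page cache.  The buffer is
 * returned mapped and unlocked.
 */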
714 xfs_buf_t *
715 xfs_buf_get_noaddr(
716         size_t                  len,
717         xfs_buftarg_t           *target)
718 {
719         unsigned long           page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
720         int                     error, i;
721         xfs_buf_t               *bp;
722
723         bp = xfs_buf_allocate(0);
724         if (unlikely(bp == NULL))
725                 goto fail;
726         _xfs_buf_initialize(bp, target, 0, len, 0);
727
728         error = _xfs_buf_get_pages(bp, page_count, 0);
729         if (error)
730                 goto fail_free_buf;
731
732         for (i = 0; i < page_count; i++) {
733                 bp->b_pages[i] = alloc_page(GFP_KERNEL);
734                 if (!bp->b_pages[i])
735                         goto fail_free_mem;
736         }
737         bp->b_flags |= _XBF_PAGES;
738
739         error = _xfs_buf_map_pages(bp, XBF_MAPPED);
740         if (unlikely(error)) {
741                 printk(KERN_WARNING "%s: failed to map pages\n",
742                                 __func__);
743                 goto fail_free_mem;
744         }
745
746         xfs_buf_unlock(bp);
747
748         trace_xfs_buf_get_noaddr(bp, _RET_IP_);
749         return bp;
750
751  fail_free_mem:
752         while (--i >= 0)
753                 __free_page(bp->b_pages[i]);
754         _xfs_buf_free_pages(bp);
755  fail_free_buf:
756         xfs_buf_deallocate(bp);
757  fail:
758         return NULL;
759 }
760
761 /*
762  *      Increment reference count on buffer, to hold the buffer concurrently
763  *      with another thread which may release (free) the buffer asynchronously.
764  *      Must hold the buffer already to call this function.
765  */
766 void
767 xfs_buf_hold(
768         xfs_buf_t               *bp)
769 {
770         trace_xfs_buf_hold(bp, _RET_IP_);
771         atomic_inc(&bp->b_hold);
772 }
773
774 /*
775  *      Releases a hold on the specified buffer.  If the
776  *      hold count is 1, calls xfs_buf_free.
777  */
778 void
779 xfs_buf_rele(
780         xfs_buf_t               *bp)
781 {
782         xfs_bufhash_t           *hash = bp->b_hash;
783
784         trace_xfs_buf_rele(bp, _RET_IP_);
785
786         if (unlikely(!hash)) {
787                 ASSERT(!bp->b_relse);
788                 if (atomic_dec_and_test(&bp->b_hold))
789                         xfs_buf_free(bp);
790                 return;
791         }
792
793         ASSERT(atomic_read(&bp->b_hold) > 0);
794         if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
795                 if (bp->b_relse) {
796                         atomic_inc(&bp->b_hold);
797                         spin_unlock(&hash->bh_lock);
798                         (*(bp->b_relse)) (bp);
799                 } else if (bp->b_flags & XBF_FS_MANAGED) {
800                         spin_unlock(&hash->bh_lock);
801                 } else {
802                         ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
803                         list_del_init(&bp->b_hash_list);
804                         spin_unlock(&hash->bh_lock);
805                         xfs_buf_free(bp);
806                 }
807         }
808 }
809
810
811 /*
812  *      Mutual exclusion on buffers.  Locking model:
813  *
814  *      Buffers associated with inodes for which buffer locking
815  *      is not enabled are not protected by semaphores, and are
816  *      assumed to be exclusively owned by the caller.  There is a
817  *      spinlock in the buffer, used by the caller when concurrent
818  *      access is possible.
819  */
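/*
 * Illustrative lock/unlock pairing (hypothetical caller):
 *
 *	if (xfs_buf_cond_lock(bp) == 0) {
 *		... buffer is exclusively owned by this thread ...
 *		xfs_buf_unlock(bp);
 *	} else {
 *		xfs_buf_lock(bp);	blocking variant
 *		...
 *		xfs_buf_unlock(bp);
 *	}
 */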
820
821 /*
822  *      Locks a buffer object, if it is not already locked.
823  *      Note that this in no way locks the underlying pages, so it is only
824  *      useful for synchronizing concurrent use of buffer objects, not for
825  *      synchronizing independent access to the underlying pages.
826  */
827 int
828 xfs_buf_cond_lock(
829         xfs_buf_t               *bp)
830 {
831         int                     locked;
832
833         locked = down_trylock(&bp->b_sema) == 0;
834         if (locked)
835                 XB_SET_OWNER(bp);
836
837         trace_xfs_buf_cond_lock(bp, _RET_IP_);
838         return locked ? 0 : -EBUSY;
839 }
840
841 int
842 xfs_buf_lock_value(
843         xfs_buf_t               *bp)
844 {
845         return bp->b_sema.count;
846 }
847
848 /*
849  *      Locks a buffer object.
850  *      Note that this in no way locks the underlying pages, so it is only
851  *      useful for synchronizing concurrent use of buffer objects, not for
852  *      synchronizing independent access to the underlying pages.
853  *
854  *      If we come across a stale, pinned, locked buffer, we know that we
855  *      are being asked to lock a buffer that has been reallocated. Because
856  *      it is pinned, we know that the log has not been pushed to disk and
857  *      hence it will still be locked. Rather than sleeping until someone
858  *      else pushes the log, push it ourselves before trying to get the lock.
859  */
860 void
861 xfs_buf_lock(
862         xfs_buf_t               *bp)
863 {
864         trace_xfs_buf_lock(bp, _RET_IP_);
865
866         if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
867                 xfs_log_force(bp->b_mount, 0);
868         if (atomic_read(&bp->b_io_remaining))
869                 blk_run_address_space(bp->b_target->bt_mapping);
870         down(&bp->b_sema);
871         XB_SET_OWNER(bp);
872
873         trace_xfs_buf_lock_done(bp, _RET_IP_);
874 }
875
876 /*
877  *      Releases the lock on the buffer object.
878  *      If the buffer is marked delwri but is not queued, do so before we
879  *      unlock the buffer as we need to set flags correctly.  We also need to
880  *      take a reference for the delwri queue because the unlocker is going to
881  *      drop theirs and they don't know we just queued it.
882  */
883 void
884 xfs_buf_unlock(
885         xfs_buf_t               *bp)
886 {
887         if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
888                 atomic_inc(&bp->b_hold);
889                 bp->b_flags |= XBF_ASYNC;
890                 xfs_buf_delwri_queue(bp, 0);
891         }
892
893         XB_CLEAR_OWNER(bp);
894         up(&bp->b_sema);
895
896         trace_xfs_buf_unlock(bp, _RET_IP_);
897 }
898
899 STATIC void
900 xfs_buf_wait_unpin(
901         xfs_buf_t               *bp)
902 {
903         DECLARE_WAITQUEUE       (wait, current);
904
905         if (atomic_read(&bp->b_pin_count) == 0)
906                 return;
907
908         add_wait_queue(&bp->b_waiters, &wait);
909         for (;;) {
910                 set_current_state(TASK_UNINTERRUPTIBLE);
911                 if (atomic_read(&bp->b_pin_count) == 0)
912                         break;
913                 if (atomic_read(&bp->b_io_remaining))
914                         blk_run_address_space(bp->b_target->bt_mapping);
915                 schedule();
916         }
917         remove_wait_queue(&bp->b_waiters, &wait);
918         set_current_state(TASK_RUNNING);
919 }
920
921 /*
922  *      Buffer Utility Routines
923  */
924
925 STATIC void
926 xfs_buf_iodone_work(
927         struct work_struct      *work)
928 {
929         xfs_buf_t               *bp =
930                 container_of(work, xfs_buf_t, b_iodone_work);
931
932         /*
933          * We can get an EOPNOTSUPP error for ordered writes.  Here we clear the
934          * ordered flag and reissue them.  Because we can't tell the higher
935          * layers directly that they should not issue ordered I/O anymore, they
936          * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
937          */
938         if ((bp->b_error == EOPNOTSUPP) &&
939             (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
940                 trace_xfs_buf_ordered_retry(bp, _RET_IP_);
941                 bp->b_flags &= ~XBF_ORDERED;
942                 bp->b_flags |= _XFS_BARRIER_FAILED;
943                 xfs_buf_iorequest(bp);
944         } else if (bp->b_iodone)
945                 (*(bp->b_iodone))(bp);
946         else if (bp->b_flags & XBF_ASYNC)
947                 xfs_buf_relse(bp);
948 }
949
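/*
 * I/O completion processing.  Clears the in-flight state flags, marks the
 * buffer done if no error was recorded, and then either runs the iodone
 * callback (inline or via the xfslogd workqueue) or wakes a synchronous
 * waiter.
 */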
950 void
951 xfs_buf_ioend(
952         xfs_buf_t               *bp,
953         int                     schedule)
954 {
955         trace_xfs_buf_iodone(bp, _RET_IP_);
956
957         bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
958         if (bp->b_error == 0)
959                 bp->b_flags |= XBF_DONE;
960
961         if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
962                 if (schedule) {
963                         INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
964                         queue_work(xfslogd_workqueue, &bp->b_iodone_work);
965                 } else {
966                         xfs_buf_iodone_work(&bp->b_iodone_work);
967                 }
968         } else {
969                 complete(&bp->b_iowait);
970         }
971 }
972
973 void
974 xfs_buf_ioerror(
975         xfs_buf_t               *bp,
976         int                     error)
977 {
978         ASSERT(error >= 0 && error <= 0xffff);
979         bp->b_error = (unsigned short)error;
980         trace_xfs_buf_ioerror(bp, error, _RET_IP_);
981 }
982
983 int
984 xfs_bwrite(
985         struct xfs_mount        *mp,
986         struct xfs_buf          *bp)
987 {
988         int                     error;
989
990         bp->b_mount = mp;
991         bp->b_flags |= XBF_WRITE;
992         bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
993
994         xfs_buf_delwri_dequeue(bp);
995         xfs_bdstrat_cb(bp);
996
997         error = xfs_buf_iowait(bp);
998         if (error)
999                 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1000         xfs_buf_relse(bp);
1001         return error;
1002 }
1003
1004 void
1005 xfs_bdwrite(
1006         void                    *mp,
1007         struct xfs_buf          *bp)
1008 {
1009         trace_xfs_buf_bdwrite(bp, _RET_IP_);
1010
1011         bp->b_mount = mp;
1012
1013         bp->b_flags &= ~XBF_READ;
1014         bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
1015
1016         xfs_buf_delwri_queue(bp, 1);
1017 }
1018
1019 /*
1020  * Called when we want to stop a buffer from getting written or read.
1021  * We attach the EIO error, muck with its flags, and call biodone
1022  * so that the proper iodone callbacks get called.
1023  */
1024 STATIC int
1025 xfs_bioerror(
1026         xfs_buf_t *bp)
1027 {
1028 #ifdef XFSERRORDEBUG
1029         ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
1030 #endif
1031
1032         /*
1033          * No need to wait until the buffer is unpinned, we aren't flushing it.
1034          */
1035         XFS_BUF_ERROR(bp, EIO);
1036
1037         /*
1038          * We're calling biodone, so delete XBF_DONE flag.
1039          */
1040         XFS_BUF_UNREAD(bp);
1041         XFS_BUF_UNDELAYWRITE(bp);
1042         XFS_BUF_UNDONE(bp);
1043         XFS_BUF_STALE(bp);
1044
1045         xfs_biodone(bp);
1046
1047         return EIO;
1048 }
1049
1050 /*
1051  * Same as xfs_bioerror, except that we are releasing the buffer
1052  * here ourselves, and avoiding the biodone call.
1053  * This is meant for userdata errors; metadata bufs come with
1054  * iodone functions attached, so that we can track down errors.
1055  */
1056 STATIC int
1057 xfs_bioerror_relse(
1058         struct xfs_buf  *bp)
1059 {
1060         int64_t         fl = XFS_BUF_BFLAGS(bp);
1061         /*
1062          * No need to wait until the buffer is unpinned.
1063          * We aren't flushing it.
1064          *
1065          * chunkhold expects B_DONE to be set, whether
1066          * we actually finish the I/O or not. We don't want to
1067          * change that interface.
1068          */
1069         XFS_BUF_UNREAD(bp);
1070         XFS_BUF_UNDELAYWRITE(bp);
1071         XFS_BUF_DONE(bp);
1072         XFS_BUF_STALE(bp);
1073         XFS_BUF_CLR_IODONE_FUNC(bp);
1074         if (!(fl & XBF_ASYNC)) {
1075                 /*
1076                  * Mark b_error and B_ERROR _both_.
1077                  * Lots of chunkcache code assumes that.
1078                  * There's no reason to mark error for
1079                  * ASYNC buffers.
1080                  */
1081                 XFS_BUF_ERROR(bp, EIO);
1082                 XFS_BUF_FINISH_IOWAIT(bp);
1083         } else {
1084                 xfs_buf_relse(bp);
1085         }
1086
1087         return EIO;
1088 }
1089
1090
1091 /*
1092  * All xfs metadata buffers except log state machine buffers
1093  * get this attached as their b_bdstrat callback function.
1094  * This is so that we can catch a buffer
1095  * after prematurely unpinning it to forcibly shut down the filesystem.
1096  */
1097 int
1098 xfs_bdstrat_cb(
1099         struct xfs_buf  *bp)
1100 {
1101         if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
1102                 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1103                 /*
1104                  * Metadata write that didn't get logged but
1105                  * written delayed anyway. These aren't associated
1106                  * with a transaction, and can be ignored.
1107                  */
1108                 if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
1109                         return xfs_bioerror_relse(bp);
1110                 else
1111                         return xfs_bioerror(bp);
1112         }
1113
1114         xfs_buf_iorequest(bp);
1115         return 0;
1116 }
1117
1118 /*
1119  * Wrapper around bdstrat so that we can stop data from going to disk in case
1120  * we are shutting down the filesystem.  Typically user data goes through this
1121  * path; one of the exceptions is the superblock.
1122  */
1123 void
1124 xfsbdstrat(
1125         struct xfs_mount        *mp,
1126         struct xfs_buf          *bp)
1127 {
1128         if (XFS_FORCED_SHUTDOWN(mp)) {
1129                 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1130                 xfs_bioerror_relse(bp);
1131                 return;
1132         }
1133
1134         xfs_buf_iorequest(bp);
1135 }
1136
1137 STATIC void
1138 _xfs_buf_ioend(
1139         xfs_buf_t               *bp,
1140         int                     schedule)
1141 {
1142         if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1143                 bp->b_flags &= ~_XBF_PAGE_LOCKED;
1144                 xfs_buf_ioend(bp, schedule);
1145         }
1146 }
1147
1148 STATIC void
1149 xfs_buf_bio_end_io(
1150         struct bio              *bio,
1151         int                     error)
1152 {
1153         xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
1154         unsigned int            blocksize = bp->b_target->bt_bsize;
1155         struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1156
1157         xfs_buf_ioerror(bp, -error);
1158
1159         if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1160                 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1161
1162         do {
1163                 struct page     *page = bvec->bv_page;
1164
1165                 ASSERT(!PagePrivate(page));
1166                 if (unlikely(bp->b_error)) {
1167                         if (bp->b_flags & XBF_READ)
1168                                 ClearPageUptodate(page);
1169                 } else if (blocksize >= PAGE_CACHE_SIZE) {
1170                         SetPageUptodate(page);
1171                 } else if (!PagePrivate(page) &&
1172                                 (bp->b_flags & _XBF_PAGE_CACHE)) {
1173                         set_page_region(page, bvec->bv_offset, bvec->bv_len);
1174                 }
1175
1176                 if (--bvec >= bio->bi_io_vec)
1177                         prefetchw(&bvec->bv_page->flags);
1178
1179                 if (bp->b_flags & _XBF_PAGE_LOCKED)
1180                         unlock_page(page);
1181         } while (bvec >= bio->bi_io_vec);
1182
1183         _xfs_buf_ioend(bp, 1);
1184         bio_put(bio);
1185 }
1186
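/*
 * Map the buffer's pages into one or more bios and submit them.  Each bio
 * takes a reference on b_io_remaining so that completion is only signalled
 * once all outstanding bios, plus the submitter's initial reference, have
 * been dropped.
 */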
1187 STATIC void
1188 _xfs_buf_ioapply(
1189         xfs_buf_t               *bp)
1190 {
1191         int                     rw, map_i, total_nr_pages, nr_pages;
1192         struct bio              *bio;
1193         int                     offset = bp->b_offset;
1194         int                     size = bp->b_count_desired;
1195         sector_t                sector = bp->b_bn;
1196         unsigned int            blocksize = bp->b_target->bt_bsize;
1197
1198         total_nr_pages = bp->b_page_count;
1199         map_i = 0;
1200
1201         if (bp->b_flags & XBF_ORDERED) {
1202                 ASSERT(!(bp->b_flags & XBF_READ));
1203                 rw = WRITE_BARRIER;
1204         } else if (bp->b_flags & XBF_LOG_BUFFER) {
1205                 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1206                 bp->b_flags &= ~_XBF_RUN_QUEUES;
1207                 rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1208         } else if (bp->b_flags & _XBF_RUN_QUEUES) {
1209                 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1210                 bp->b_flags &= ~_XBF_RUN_QUEUES;
1211                 rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
1212         } else {
1213                 rw = (bp->b_flags & XBF_WRITE) ? WRITE :
1214                      (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1215         }
1216
1217         /* Special code path for reading a sub-page-size buffer: we
1218          * populate the whole page, and hence the other metadata in the
1219          * same page, in a single I/O.  This optimization is only valid
1220          * when the filesystem block size is not smaller than the page size.
1221          */
1222         if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1223             ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
1224               (XBF_READ|_XBF_PAGE_LOCKED)) &&
1225             (blocksize >= PAGE_CACHE_SIZE)) {
1226                 bio = bio_alloc(GFP_NOIO, 1);
1227
1228                 bio->bi_bdev = bp->b_target->bt_bdev;
1229                 bio->bi_sector = sector - (offset >> BBSHIFT);
1230                 bio->bi_end_io = xfs_buf_bio_end_io;
1231                 bio->bi_private = bp;
1232
1233                 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1234                 size = 0;
1235
1236                 atomic_inc(&bp->b_io_remaining);
1237
1238                 goto submit_io;
1239         }
1240
1241 next_chunk:
1242         atomic_inc(&bp->b_io_remaining);
1243         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1244         if (nr_pages > total_nr_pages)
1245                 nr_pages = total_nr_pages;
1246
1247         bio = bio_alloc(GFP_NOIO, nr_pages);
1248         bio->bi_bdev = bp->b_target->bt_bdev;
1249         bio->bi_sector = sector;
1250         bio->bi_end_io = xfs_buf_bio_end_io;
1251         bio->bi_private = bp;
1252
1253         for (; size && nr_pages; nr_pages--, map_i++) {
1254                 int     rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1255
1256                 if (nbytes > size)
1257                         nbytes = size;
1258
1259                 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1260                 if (rbytes < nbytes)
1261                         break;
1262
1263                 offset = 0;
1264                 sector += nbytes >> BBSHIFT;
1265                 size -= nbytes;
1266                 total_nr_pages--;
1267         }
1268
1269 submit_io:
1270         if (likely(bio->bi_size)) {
1271                 if (xfs_buf_is_vmapped(bp)) {
1272                         flush_kernel_vmap_range(bp->b_addr,
1273                                                 xfs_buf_vmap_len(bp));
1274                 }
1275                 submit_bio(rw, bio);
1276                 if (size)
1277                         goto next_chunk;
1278         } else {
1279                 /*
1280                  * if we get here, no pages were added to the bio. However,
1281                  * we can't just error out here - if the pages are locked then
1282                  * we have to unlock them otherwise we can hang on a later
1283                  * access to the page.
1284                  */
1285                 xfs_buf_ioerror(bp, EIO);
1286                 if (bp->b_flags & _XBF_PAGE_LOCKED) {
1287                         int i;
1288                         for (i = 0; i < bp->b_page_count; i++)
1289                                 unlock_page(bp->b_pages[i]);
1290                 }
1291                 bio_put(bio);
1292         }
1293 }
1294
1295 int
1296 xfs_buf_iorequest(
1297         xfs_buf_t               *bp)
1298 {
1299         trace_xfs_buf_iorequest(bp, _RET_IP_);
1300
1301         if (bp->b_flags & XBF_DELWRI) {
1302                 xfs_buf_delwri_queue(bp, 1);
1303                 return 0;
1304         }
1305
1306         if (bp->b_flags & XBF_WRITE) {
1307                 xfs_buf_wait_unpin(bp);
1308         }
1309
1310         xfs_buf_hold(bp);
1311
1312         /* Set the count to 1 initially; this will stop an I/O
1313          * completion callout which happens before we have started
1314          * all the I/O from calling xfs_buf_ioend too early.
1315          */
1316         atomic_set(&bp->b_io_remaining, 1);
1317         _xfs_buf_ioapply(bp);
1318         _xfs_buf_ioend(bp, 0);
1319
1320         xfs_buf_rele(bp);
1321         return 0;
1322 }
1323
1324 /*
1325  *      Waits for I/O to complete on the buffer supplied.
1326  *      It returns immediately if no I/O is pending.
1327  *      It returns the I/O error code, if any, or 0 if there was no error.
1328  */
1329 int
1330 xfs_buf_iowait(
1331         xfs_buf_t               *bp)
1332 {
1333         trace_xfs_buf_iowait(bp, _RET_IP_);
1334
1335         if (atomic_read(&bp->b_io_remaining))
1336                 blk_run_address_space(bp->b_target->bt_mapping);
1337         wait_for_completion(&bp->b_iowait);
1338
1339         trace_xfs_buf_iowait_done(bp, _RET_IP_);
1340         return bp->b_error;
1341 }
1342
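/*
 * Return a kernel address for the given byte offset within the buffer,
 * using the vmapped address when the buffer is mapped and the address of
 * the backing page otherwise.
 */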
1343 xfs_caddr_t
1344 xfs_buf_offset(
1345         xfs_buf_t               *bp,
1346         size_t                  offset)
1347 {
1348         struct page             *page;
1349
1350         if (bp->b_flags & XBF_MAPPED)
1351                 return XFS_BUF_PTR(bp) + offset;
1352
1353         offset += bp->b_offset;
1354         page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1355         return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1356 }
1357
1358 /*
1359  *      Move data into or out of a buffer.
1360  */
1361 void
1362 xfs_buf_iomove(
1363         xfs_buf_t               *bp,    /* buffer to process            */
1364         size_t                  boff,   /* starting buffer offset       */
1365         size_t                  bsize,  /* length to copy               */
1366         void                    *data,  /* data address                 */
1367         xfs_buf_rw_t            mode)   /* read/write/zero flag         */
1368 {
1369         size_t                  bend, cpoff, csize;
1370         struct page             *page;
1371
1372         bend = boff + bsize;
1373         while (boff < bend) {
1374                 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1375                 cpoff = xfs_buf_poff(boff + bp->b_offset);
1376                 csize = min_t(size_t,
1377                               PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1378
1379                 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1380
1381                 switch (mode) {
1382                 case XBRW_ZERO:
1383                         memset(page_address(page) + cpoff, 0, csize);
1384                         break;
1385                 case XBRW_READ:
1386                         memcpy(data, page_address(page) + cpoff, csize);
1387                         break;
1388                 case XBRW_WRITE:
1389                         memcpy(page_address(page) + cpoff, data, csize);
1390                 }
1391
1392                 boff += csize;
1393                 data += csize;
1394         }
1395 }
1396
1397 /*
1398  *      Handling of buffer targets (buftargs).
1399  */
1400
1401 /*
1402  *      Wait for any bufs with callbacks that have been submitted but
1403  *      have not yet returned... walk the hash list for the target.
1404  */
1405 void
1406 xfs_wait_buftarg(
1407         xfs_buftarg_t   *btp)
1408 {
1409         xfs_buf_t       *bp, *n;
1410         xfs_bufhash_t   *hash;
1411         uint            i;
1412
1413         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1414                 hash = &btp->bt_hash[i];
1415 again:
1416                 spin_lock(&hash->bh_lock);
1417                 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1418                         ASSERT(btp == bp->b_target);
1419                         if (!(bp->b_flags & XBF_FS_MANAGED)) {
1420                                 spin_unlock(&hash->bh_lock);
1421                                 /*
1422                                  * Catch superblock reference count leaks
1423                                  * immediately
1424                                  */
1425                                 BUG_ON(bp->b_bn == 0);
1426                                 delay(100);
1427                                 goto again;
1428                         }
1429                 }
1430                 spin_unlock(&hash->bh_lock);
1431         }
1432 }
1433
1434 /*
1435  *      Allocate buffer hash table for a given target.
1436  *      For devices containing metadata (i.e. not the log/realtime devices)
1437  *      we need to allocate a much larger hash table.
1438  */
1439 STATIC void
1440 xfs_alloc_bufhash(
1441         xfs_buftarg_t           *btp,
1442         int                     external)
1443 {
1444         unsigned int            i;
1445
1446         btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
1447         btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1448         btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
1449                                          sizeof(xfs_bufhash_t));
1450         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1451                 spin_lock_init(&btp->bt_hash[i].bh_lock);
1452                 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1453         }
1454 }
1455
1456 STATIC void
1457 xfs_free_bufhash(
1458         xfs_buftarg_t           *btp)
1459 {
1460         kmem_free_large(btp->bt_hash);
1461         btp->bt_hash = NULL;
1462 }
1463
1464 /*
1465  *      buftarg list for delwrite queue processing
1466  */
1467 static LIST_HEAD(xfs_buftarg_list);
1468 static DEFINE_SPINLOCK(xfs_buftarg_lock);
1469
1470 STATIC void
1471 xfs_register_buftarg(
1472         xfs_buftarg_t           *btp)
1473 {
1474         spin_lock(&xfs_buftarg_lock);
1475         list_add(&btp->bt_list, &xfs_buftarg_list);
1476         spin_unlock(&xfs_buftarg_lock);
1477 }
1478
1479 STATIC void
1480 xfs_unregister_buftarg(
1481         xfs_buftarg_t           *btp)
1482 {
1483         spin_lock(&xfs_buftarg_lock);
1484         list_del(&btp->bt_list);
1485         spin_unlock(&xfs_buftarg_lock);
1486 }
1487
1488 void
1489 xfs_free_buftarg(
1490         struct xfs_mount        *mp,
1491         struct xfs_buftarg      *btp)
1492 {
1493         xfs_flush_buftarg(btp, 1);
1494         if (mp->m_flags & XFS_MOUNT_BARRIER)
1495                 xfs_blkdev_issue_flush(btp);
1496         xfs_free_bufhash(btp);
1497         iput(btp->bt_mapping->host);
1498
1499         /* Unregister the buftarg first so that we don't get a
1500          * wakeup finding a non-existent task
1501          */
1502         xfs_unregister_buftarg(btp);
1503         kthread_stop(btp->bt_task);
1504
1505         kmem_free(btp);
1506 }
1507
1508 STATIC int
1509 xfs_setsize_buftarg_flags(
1510         xfs_buftarg_t           *btp,
1511         unsigned int            blocksize,
1512         unsigned int            sectorsize,
1513         int                     verbose)
1514 {
1515         btp->bt_bsize = blocksize;
1516         btp->bt_sshift = ffs(sectorsize) - 1;
1517         btp->bt_smask = sectorsize - 1;
1518
1519         if (set_blocksize(btp->bt_bdev, sectorsize)) {
1520                 printk(KERN_WARNING
1521                         "XFS: Cannot set_blocksize to %u on device %s\n",
1522                         sectorsize, XFS_BUFTARG_NAME(btp));
1523                 return EINVAL;
1524         }
1525
1526         if (verbose &&
1527             (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1528                 printk(KERN_WARNING
1529                         "XFS: %u byte sectors in use on device %s.  "
1530                         "This is suboptimal; %u or greater is ideal.\n",
1531                         sectorsize, XFS_BUFTARG_NAME(btp),
1532                         (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1533         }
1534
1535         return 0;
1536 }
1537
1538 /*
1539  *      When allocating the initial buffer target we have not yet
1540  *      read in the superblock, so we don't know what size sectors
1541  *      are being used at this early stage.  Play safe.
1542  */
1543 STATIC int
1544 xfs_setsize_buftarg_early(
1545         xfs_buftarg_t           *btp,
1546         struct block_device     *bdev)
1547 {
1548         return xfs_setsize_buftarg_flags(btp,
1549                         PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
1550 }
1551
1552 int
1553 xfs_setsize_buftarg(
1554         xfs_buftarg_t           *btp,
1555         unsigned int            blocksize,
1556         unsigned int            sectorsize)
1557 {
1558         return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1559 }
1560
1561 STATIC int
1562 xfs_mapping_buftarg(
1563         xfs_buftarg_t           *btp,
1564         struct block_device     *bdev)
1565 {
1566         struct backing_dev_info *bdi;
1567         struct inode            *inode;
1568         struct address_space    *mapping;
1569         static const struct address_space_operations mapping_aops = {
1570                 .sync_page = block_sync_page,
1571                 .migratepage = fail_migrate_page,
1572         };
1573
1574         inode = new_inode(bdev->bd_inode->i_sb);
1575         if (!inode) {
1576                 printk(KERN_WARNING
1577                         "XFS: Cannot allocate mapping inode for device %s\n",
1578                         XFS_BUFTARG_NAME(btp));
1579                 return ENOMEM;
1580         }
1581         inode->i_mode = S_IFBLK;
1582         inode->i_bdev = bdev;
1583         inode->i_rdev = bdev->bd_dev;
1584         bdi = blk_get_backing_dev_info(bdev);
1585         if (!bdi)
1586                 bdi = &default_backing_dev_info;
1587         mapping = &inode->i_data;
1588         mapping->a_ops = &mapping_aops;
1589         mapping->backing_dev_info = bdi;
1590         mapping_set_gfp_mask(mapping, GFP_NOFS);
1591         btp->bt_mapping = mapping;
1592         return 0;
1593 }
1594
1595 STATIC int
1596 xfs_alloc_delwrite_queue(
1597         xfs_buftarg_t           *btp,
1598         const char              *fsname)
1599 {
1600         int     error = 0;
1601
1602         INIT_LIST_HEAD(&btp->bt_list);
1603         INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1604         spin_lock_init(&btp->bt_delwrite_lock);
1605         btp->bt_flags = 0;
1606         btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
1607         if (IS_ERR(btp->bt_task)) {
1608                 error = PTR_ERR(btp->bt_task);
1609                 goto out_error;
1610         }
1611         xfs_register_buftarg(btp);
1612 out_error:
1613         return error;
1614 }
1615
1616 xfs_buftarg_t *
1617 xfs_alloc_buftarg(
1618         struct block_device     *bdev,
1619         int                     external,
1620         const char              *fsname)
1621 {
1622         xfs_buftarg_t           *btp;
1623
1624         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1625
1626         btp->bt_dev =  bdev->bd_dev;
1627         btp->bt_bdev = bdev;
1628         if (xfs_setsize_buftarg_early(btp, bdev))
1629                 goto error;
1630         if (xfs_mapping_buftarg(btp, bdev))
1631                 goto error;
1632         if (xfs_alloc_delwrite_queue(btp, fsname))
1633                 goto error;
1634         xfs_alloc_bufhash(btp, external);
1635         return btp;
1636
1637 error:
1638         kmem_free(btp);
1639         return NULL;
1640 }
1641
1642
1643 /*
1644  *      Delayed write buffer handling
1645  */
1646 STATIC void
1647 xfs_buf_delwri_queue(
1648         xfs_buf_t               *bp,
1649         int                     unlock)
1650 {
1651         struct list_head        *dwq = &bp->b_target->bt_delwrite_queue;
1652         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1653
1654         trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1655
1656         ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1657
1658         spin_lock(dwlk);
1659         /* If already in the queue, dequeue and place at tail */
1660         if (!list_empty(&bp->b_list)) {
1661                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1662                 if (unlock)
1663                         atomic_dec(&bp->b_hold);
1664                 list_del(&bp->b_list);
1665         }
1666
1667         if (list_empty(dwq)) {
1668                 /* start xfsbufd as it is about to have something to do */
1669                 wake_up_process(bp->b_target->bt_task);
1670         }
1671
1672         bp->b_flags |= _XBF_DELWRI_Q;
1673         list_add_tail(&bp->b_list, dwq);
1674         bp->b_queuetime = jiffies;
1675         spin_unlock(dwlk);
1676
1677         if (unlock)
1678                 xfs_buf_unlock(bp);
1679 }
1680
1681 void
1682 xfs_buf_delwri_dequeue(
1683         xfs_buf_t               *bp)
1684 {
1685         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1686         int                     dequeued = 0;
1687
1688         spin_lock(dwlk);
1689         if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1690                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1691                 list_del_init(&bp->b_list);
1692                 dequeued = 1;
1693         }
1694         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1695         spin_unlock(dwlk);
1696
1697         if (dequeued)
1698                 xfs_buf_rele(bp);
1699
1700         trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
1701 }
1702
1703 /*
1704  * If a delwri buffer needs to be pushed before it has aged out, then promote
1705  * it to the head of the delwri queue so that it will be flushed on the next
1706  * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
1707  * than the age currently needed to flush the buffer. Hence the next time the
1708  * xfsbufd sees it is guaranteed to be considered old enough to flush.
1709  */
1710 void
1711 xfs_buf_delwri_promote(
1712         struct xfs_buf  *bp)
1713 {
1714         struct xfs_buftarg *btp = bp->b_target;
1715         long            age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
1716
1717         ASSERT(bp->b_flags & XBF_DELWRI);
1718         ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1719
1720         /*
1721          * Check the buffer age before locking the delayed write queue as we
1722          * don't need to promote buffers that are already past the flush age.
1723          */
1724         if (bp->b_queuetime < jiffies - age)
1725                 return;
1726         bp->b_queuetime = jiffies - age;
1727         spin_lock(&btp->bt_delwrite_lock);
1728         list_move(&bp->b_list, &btp->bt_delwrite_queue);
1729         spin_unlock(&btp->bt_delwrite_lock);
1730 }
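
/*
 * Worked example of the promotion arithmetic (illustrative, assuming the
 * default xfs_buf_age_centisecs of 1500 and HZ = 1000):
 *
 *        age             = 1500 * msecs_to_jiffies(10) + 1 = 15001 jiffies
 *        bp->b_queuetime = jiffies - 15001
 *
 * On the next xfsbufd pass, xfs_buf_delwri_split() evaluates
 *
 *        time_before(jiffies, bp->b_queuetime + 15000)
 *                == time_before(jiffies, jiffies - 1) == false
 *
 * so the buffer is considered old enough to flush even if jiffies has not
 * advanced since the promotion.
 */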
1731
1732 STATIC void
1733 xfs_buf_runall_queues(
1734         struct workqueue_struct *queue)
1735 {
1736         flush_workqueue(queue);
1737 }
1738
1739 STATIC int
1740 xfsbufd_wakeup(
1741         struct shrinker         *shrink,
1742         int                     priority,
1743         gfp_t                   mask)
1744 {
1745         xfs_buftarg_t           *btp;
1746
1747         spin_lock(&xfs_buftarg_lock);
1748         list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1749                 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1750                         continue;
1751                 if (list_empty(&btp->bt_delwrite_queue))
1752                         continue;
1753                 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1754                 wake_up_process(btp->bt_task);
1755         }
1756         spin_unlock(&xfs_buftarg_lock);
1757         return 0;
1758 }
1759
1760 /*
1761  * Move delwri buffers older than the given age to the supplied list,
1762  * returning how many pinned or locked buffers we skipped to prevent deadlocks.
1763  */
1764 STATIC int
1765 xfs_buf_delwri_split(
1766         xfs_buftarg_t   *target,
1767         struct list_head *list,
1768         unsigned long   age)
1769 {
1770         xfs_buf_t       *bp, *n;
1771         struct list_head *dwq = &target->bt_delwrite_queue;
1772         spinlock_t      *dwlk = &target->bt_delwrite_lock;
1773         int             skipped = 0;
1774         int             force;
1775
1776         force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1777         INIT_LIST_HEAD(list);
1778         spin_lock(dwlk);
1779         list_for_each_entry_safe(bp, n, dwq, b_list) {
1780                 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1781                 ASSERT(bp->b_flags & XBF_DELWRI);
1782
1783                 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
1784                         if (!force &&
1785                             time_before(jiffies, bp->b_queuetime + age)) {
1786                                 xfs_buf_unlock(bp);
1787                                 break;
1788                         }
1789
1790                         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1791                                          _XBF_RUN_QUEUES);
1792                         bp->b_flags |= XBF_WRITE;
1793                         list_move_tail(&bp->b_list, list);
1794                 } else
1795                         skipped++;
1796         }
1797         spin_unlock(dwlk);
1798
1799         return skipped;
1800
1801 }
1802
1803 /*
1804  * Compare function is more complex than it needs to be because
1805  * the return value is only 32 bits and we are doing comparisons
1806  * on 64 bit values
1807  */
1808 static int
1809 xfs_buf_cmp(
1810         void            *priv,
1811         struct list_head *a,
1812         struct list_head *b)
1813 {
1814         struct xfs_buf  *ap = container_of(a, struct xfs_buf, b_list);
1815         struct xfs_buf  *bp = container_of(b, struct xfs_buf, b_list);
1816         xfs_daddr_t             diff;
1817
1818         diff = ap->b_bn - bp->b_bn;
1819         if (diff < 0)
1820                 return -1;
1821         if (diff > 0)
1822                 return 1;
1823         return 0;
1824 }
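
/*
 * For illustration: simply returning "ap->b_bn - bp->b_bn" would truncate
 * the 64-bit difference to the 32-bit int return value and could lose or
 * flip the sign.  With hypothetical block numbers:
 *
 *        xfs_daddr_t a = 0x100000000LL;        64-bit difference = 2^32
 *        xfs_daddr_t b = 0;
 *        (int)(a - b) == 0                     wrongly compares as "equal"
 *
 * hence the explicit -1/0/1 result above.
 */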
1825
1826 void
1827 xfs_buf_delwri_sort(
1828         xfs_buftarg_t   *target,
1829         struct list_head *list)
1830 {
1831         list_sort(NULL, list, xfs_buf_cmp);
1832 }
1833
1834 STATIC int
1835 xfsbufd(
1836         void            *data)
1837 {
1838         xfs_buftarg_t   *target = (xfs_buftarg_t *)data;
1839
1840         current->flags |= PF_MEMALLOC;
1841
1842         set_freezable();
1843
1844         do {
1845                 long    age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1846                 long    tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
1847                 int     count = 0;
1848                 struct list_head tmp;
1849
1850                 if (unlikely(freezing(current))) {
1851                         set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1852                         refrigerator();
1853                 } else {
1854                         clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1855                 }
1856
1857                 /* sleep for a long time if there is nothing to do. */
1858                 if (list_empty(&target->bt_delwrite_queue))
1859                         tout = MAX_SCHEDULE_TIMEOUT;
1860                 schedule_timeout_interruptible(tout);
1861
1862                 xfs_buf_delwri_split(target, &tmp, age);
1863                 list_sort(NULL, &tmp, xfs_buf_cmp);
1864                 while (!list_empty(&tmp)) {
1865                         struct xfs_buf *bp;
1866                         bp = list_first_entry(&tmp, struct xfs_buf, b_list);
1867                         list_del_init(&bp->b_list);
1868                         xfs_bdstrat_cb(bp);
1869                         count++;
1870                 }
1871                 if (count)
1872                         blk_run_address_space(target->bt_mapping);
1873
1874         } while (!kthread_should_stop());
1875
1876         return 0;
1877 }
1878
1879 /*
1880  *      Write out all incore delayed-write buffers belonging to the given
1881  *      device, optionally waiting for the I/O to complete.  This is used in
1882  *      filesystem error handling to preserve the consistency of its metadata.
1883  */
1884 int
1885 xfs_flush_buftarg(
1886         xfs_buftarg_t   *target,
1887         int             wait)
1888 {
1889         xfs_buf_t       *bp;
1890         int             pincount = 0;
1891         LIST_HEAD(tmp_list);
1892         LIST_HEAD(wait_list);
1893
1894         xfs_buf_runall_queues(xfsconvertd_workqueue);
1895         xfs_buf_runall_queues(xfsdatad_workqueue);
1896         xfs_buf_runall_queues(xfslogd_workqueue);
1897
1898         set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1899         pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
1900
1901         /*
1902          * The delayed write list lock has been dropped; now walk the
1903          * temporary list.  All I/O is issued asynchronously, and if we need
1904          * to wait for completion we do so after issuing all of it.
1905          */
1906         list_sort(NULL, &tmp_list, xfs_buf_cmp);
1907         while (!list_empty(&tmp_list)) {
1908                 bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
1909                 ASSERT(target == bp->b_target);
1910                 list_del_init(&bp->b_list);
1911                 if (wait) {
1912                         bp->b_flags &= ~XBF_ASYNC;
1913                         list_add(&bp->b_list, &wait_list);
1914                 }
1915                 xfs_bdstrat_cb(bp);
1916         }
1917
1918         if (wait) {
1919                 /* Expedite and wait for IO to complete. */
1920                 blk_run_address_space(target->bt_mapping);
1921                 while (!list_empty(&wait_list)) {
1922                         bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
1923
1924                         list_del_init(&bp->b_list);
1925                         xfs_iowait(bp);
1926                         xfs_buf_relse(bp);
1927                 }
1928         }
1929
1930         return pincount;
1931 }
1932
1933 int __init
1934 xfs_buf_init(void)
1935 {
1936         xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1937                                                 KM_ZONE_HWALIGN, NULL);
1938         if (!xfs_buf_zone)
1939                 goto out;
1940
1941         xfslogd_workqueue = create_workqueue("xfslogd");
1942         if (!xfslogd_workqueue)
1943                 goto out_free_buf_zone;
1944
1945         xfsdatad_workqueue = create_workqueue("xfsdatad");
1946         if (!xfsdatad_workqueue)
1947                 goto out_destroy_xfslogd_workqueue;
1948
1949         xfsconvertd_workqueue = create_workqueue("xfsconvertd");
1950         if (!xfsconvertd_workqueue)
1951                 goto out_destroy_xfsdatad_workqueue;
1952
1953         register_shrinker(&xfs_buf_shake);
1954         return 0;
1955
1956  out_destroy_xfsdatad_workqueue:
1957         destroy_workqueue(xfsdatad_workqueue);
1958  out_destroy_xfslogd_workqueue:
1959         destroy_workqueue(xfslogd_workqueue);
1960  out_free_buf_zone:
1961         kmem_zone_destroy(xfs_buf_zone);
1962  out:
1963         return -ENOMEM;
1964 }
1965
1966 void
1967 xfs_buf_terminate(void)
1968 {
1969         unregister_shrinker(&xfs_buf_shake);
1970         destroy_workqueue(xfsconvertd_workqueue);
1971         destroy_workqueue(xfsdatad_workqueue);
1972         destroy_workqueue(xfslogd_workqueue);
1973         kmem_zone_destroy(xfs_buf_zone);
1974 }
1975
1976 #ifdef CONFIG_KDB_MODULES
1977 struct list_head *
1978 xfs_get_buftarg_list(void)
1979 {
1980         return &xfs_buftarg_list;
1981 }
1982 #endif