1 /*
2  * fs/dax.c - Direct Access filesystem code
3  * Copyright (c) 2013-2014 Intel Corporation
4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  */
16
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/pmem.h>
29 #include <linux/sched.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34
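/*
 * dax_map_atomic() translates dax->sector / dax->size into a kernel virtual
 * address (dax->addr) and a pfn (dax->pfn) via bdev_direct_access(), taking
 * a reference on the request queue so the device cannot be torn down while
 * the mapping is in use.  On success the caller must pair it with
 * dax_unmap_atomic() to drop that reference; on failure dax->addr holds an
 * ERR_PTR and dax_unmap_atomic() becomes a no-op.
 */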
35 static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
36 {
37         struct request_queue *q = bdev->bd_queue;
38         long rc = -EIO;
39
40         dax->addr = (void __pmem *) ERR_PTR(-EIO);
41         if (blk_queue_enter(q, true) != 0)
42                 return rc;
43
44         rc = bdev_direct_access(bdev, dax);
45         if (rc < 0) {
46                 dax->addr = (void __pmem *) ERR_PTR(rc);
47                 blk_queue_exit(q);
48                 return rc;
49         }
50         return rc;
51 }
52
53 static void dax_unmap_atomic(struct block_device *bdev,
54                 const struct blk_dax_ctl *dax)
55 {
56         if (IS_ERR(dax->addr))
57                 return;
58         blk_queue_exit(bdev->bd_queue);
59 }
60
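/*
 * Read the PAGE_SIZE-aligned group of sectors containing sector @n from the
 * DAX device into a freshly allocated page.  Returns an ERR_PTR if the
 * allocation or the DAX mapping fails; otherwise the caller owns (and must
 * eventually free) the returned page.
 */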
61 struct page *read_dax_sector(struct block_device *bdev, sector_t n)
62 {
63         struct page *page = alloc_pages(GFP_KERNEL, 0);
64         struct blk_dax_ctl dax = {
65                 .size = PAGE_SIZE,
66                 .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
67         };
68         long rc;
69
70         if (!page)
71                 return ERR_PTR(-ENOMEM);
72
73         rc = dax_map_atomic(bdev, &dax);
74         if (rc < 0)
75                 return ERR_PTR(rc);
76         memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
77         dax_unmap_atomic(bdev, &dax);
78         return page;
79 }
80
81 /*
82  * dax_clear_blocks() is called from within transaction context from XFS,
83  * so everything called below this point must follow GFP_NOFS semantics
84  * for all operations.
85  */
86 int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
87 {
88         struct block_device *bdev = inode->i_sb->s_bdev;
89         struct blk_dax_ctl dax = {
90                 .sector = block << (inode->i_blkbits - 9),
91                 .size = _size,
92         };
93
94         might_sleep();
95         do {
96                 long count, sz;
97
98                 count = dax_map_atomic(bdev, &dax);
99                 if (count < 0)
100                         return count;
101                 sz = min_t(long, count, SZ_128K);
102                 clear_pmem(dax.addr, sz);
103                 dax.size -= sz;
104                 dax.sector += sz / 512;
105                 dax_unmap_atomic(bdev, &dax);
106                 cond_resched();
107         } while (dax.size);
108
109         wmb_pmem();
110         return 0;
111 }
112 EXPORT_SYMBOL_GPL(dax_clear_blocks);
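/*
 * Illustrative sketch (not part of this file): a filesystem that has just
 * allocated previously unused blocks for a DAX file would zero them before
 * exposing them.  With hypothetical filesystem-side variables new_block and
 * nr_blocks, that looks roughly like:
 *
 *	err = dax_clear_blocks(inode, new_block,
 *			       nr_blocks << inode->i_blkbits);
 */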
113
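/*
 * Zero the parts of a freshly allocated or unwritten block that the current
 * write will not overwrite: the first @first bytes and anything after the
 * last byte written, so that stale media contents are never exposed.
 */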
114 /* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
115 static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
116                 loff_t pos, loff_t end)
117 {
118         loff_t final = end - pos + first; /* one past the last byte to be written */
119
120         if (first > 0)
121                 clear_pmem(addr, first);
122         if (final < size)
123                 clear_pmem(addr + final, size - final);
124 }
125
126 static bool buffer_written(struct buffer_head *bh)
127 {
128         return buffer_mapped(bh) && !buffer_unwritten(bh);
129 }
130
131 /*
132  * When ext4 encounters a hole, it returns without modifying the buffer_head
133  * which means that we can't trust b_size.  To cope with this, we set b_state
134  * to 0 before calling get_block and, if any bit is set, we know we can trust
135  * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
136  * and would save us time calling get_block repeatedly.
137  */
138 static bool buffer_size_valid(struct buffer_head *bh)
139 {
140         return bh->b_state != 0;
141 }
142
143
144 static sector_t to_sector(const struct buffer_head *bh,
145                 const struct inode *inode)
146 {
147         sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
148
149         return sector;
150 }
151
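/*
 * The core read/write loop.  Walk the byte range [start, end), asking
 * get_block() for the extent backing each position, mapping that extent with
 * dax_map_atomic() and copying directly between the iterator and persistent
 * memory.  Reads from holes are zero-filled, newly allocated or unwritten
 * extents are zeroed around the bytes actually written, and the return value
 * is the number of bytes transferred, or the error encountered if nothing
 * was transferred.
 */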
152 static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
153                       loff_t start, loff_t end, get_block_t get_block,
154                       struct buffer_head *bh)
155 {
156         loff_t pos = start, max = start, bh_max = start;
157         bool hole = false, need_wmb = false;
158         struct block_device *bdev = NULL;
159         int rw = iov_iter_rw(iter), rc = 0;
160         long map_len = 0;
161         struct blk_dax_ctl dax = {
162                 .addr = (void __pmem *) ERR_PTR(-EIO),
163         };
164
165         if (rw == READ)
166                 end = min(end, i_size_read(inode));
167
168         while (pos < end) {
169                 size_t len;
170                 if (pos == max) {
171                         unsigned blkbits = inode->i_blkbits;
172                         long page = pos >> PAGE_SHIFT;
173                         sector_t block = page << (PAGE_SHIFT - blkbits);
174                         unsigned first = pos - (block << blkbits);
175                         long size;
176
177                         if (pos == bh_max) {
178                                 bh->b_size = PAGE_ALIGN(end - pos);
179                                 bh->b_state = 0;
180                                 rc = get_block(inode, block, bh, rw == WRITE);
181                                 if (rc)
182                                         break;
183                                 if (!buffer_size_valid(bh))
184                                         bh->b_size = 1 << blkbits;
185                                 bh_max = pos - first + bh->b_size;
186                                 bdev = bh->b_bdev;
187                         } else {
188                                 unsigned done = bh->b_size -
189                                                 (bh_max - (pos - first));
190                                 bh->b_blocknr += done >> blkbits;
191                                 bh->b_size -= done;
192                         }
193
194                         hole = rw == READ && !buffer_written(bh);
195                         if (hole) {
196                                 size = bh->b_size - first;
197                         } else {
198                                 dax_unmap_atomic(bdev, &dax);
199                                 dax.sector = to_sector(bh, inode);
200                                 dax.size = bh->b_size;
201                                 map_len = dax_map_atomic(bdev, &dax);
202                                 if (map_len < 0) {
203                                         rc = map_len;
204                                         break;
205                                 }
206                                 if (buffer_unwritten(bh) || buffer_new(bh)) {
207                                         dax_new_buf(dax.addr, map_len, first,
208                                                         pos, end);
209                                         need_wmb = true;
210                                 }
211                                 dax.addr += first;
212                                 size = map_len - first;
213                         }
214                         max = min(pos + size, end);
215                 }
216
217                 if (iov_iter_rw(iter) == WRITE) {
218                         len = copy_from_iter_pmem(dax.addr, max - pos, iter);
219                         need_wmb = true;
220                 } else if (!hole)
221                         len = copy_to_iter((void __force *) dax.addr, max - pos,
222                                         iter);
223                 else
224                         len = iov_iter_zero(max - pos, iter);
225
226                 if (!len) {
227                         rc = -EFAULT;
228                         break;
229                 }
230
231                 pos += len;
232                 if (!IS_ERR(dax.addr))
233                         dax.addr += len;
234         }
235
236         if (need_wmb)
237                 wmb_pmem();
238         dax_unmap_atomic(bdev, &dax);
239
240         return (pos == start) ? rc : pos - start;
241 }
242
243 /**
244  * dax_do_io - Perform I/O to a DAX file
245  * @iocb: The control block for this I/O
246  * @inode: The file which the I/O is directed at
247  * @iter: The addresses to do I/O from or to
248  * @pos: The file offset where the I/O starts
249  * @get_block: The filesystem method used to translate file offsets to blocks
250  * @end_io: A filesystem callback for I/O completion
251  * @flags: See below
252  *
253  * This function uses the same locking scheme as do_blockdev_direct_IO:
254  * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
255  * caller for writes.  For reads, we take and release the i_mutex ourselves.
256  * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
257  * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
258  * is in progress.
259  */
260 ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
261                   struct iov_iter *iter, loff_t pos, get_block_t get_block,
262                   dio_iodone_t end_io, int flags)
263 {
264         struct buffer_head bh;
265         ssize_t retval = -EINVAL;
266         loff_t end = pos + iov_iter_count(iter);
267
268         memset(&bh, 0, sizeof(bh));
269         bh.b_bdev = inode->i_sb->s_bdev;
270
271         if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
272                 struct address_space *mapping = inode->i_mapping;
273                 inode_lock(inode);
274                 retval = filemap_write_and_wait_range(mapping, pos, end - 1);
275                 if (retval) {
276                         inode_unlock(inode);
277                         goto out;
278                 }
279         }
280
281         /* Protects against truncate */
282         if (!(flags & DIO_SKIP_DIO_COUNT))
283                 inode_dio_begin(inode);
284
285         retval = dax_io(inode, iter, pos, end, get_block, &bh);
286
287         if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
288                 inode_unlock(inode);
289
290         if ((retval > 0) && end_io)
291                 end_io(iocb, pos, retval, bh.b_private);
292
293         if (!(flags & DIO_SKIP_DIO_COUNT))
294                 inode_dio_end(inode);
295  out:
296         return retval;
297 }
298 EXPORT_SYMBOL_GPL(dax_do_io);
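/*
 * Illustrative sketch (not part of this file): a filesystem would typically
 * call dax_do_io() from its ->direct_IO method when the inode is DAX, with
 * "fs_get_block" standing in for its own get_block_t:
 *
 *	static ssize_t fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 *				    loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return dax_do_io(iocb, inode, iter, offset, fs_get_block,
 *				 NULL, DIO_LOCKING);
 *	}
 */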
299
300 /*
301  * The user has performed a load from a hole in the file.  Allocating
302  * a new page in the file would cause excessive storage usage for
303  * workloads with sparse files.  We allocate a page cache page instead.
304  * We'll kick it out of the page cache if it's ever written to,
305  * otherwise it will simply fall out of the page cache under memory
306  * pressure without ever having been dirtied.
307  */
308 static int dax_load_hole(struct address_space *mapping, struct page *page,
309                                                         struct vm_fault *vmf)
310 {
311         unsigned long size;
312         struct inode *inode = mapping->host;
313         if (!page)
314                 page = find_or_create_page(mapping, vmf->pgoff,
315                                                 GFP_KERNEL | __GFP_ZERO);
316         if (!page)
317                 return VM_FAULT_OOM;
318         /* Recheck i_size under page lock to avoid truncate race */
319         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
320         if (vmf->pgoff >= size) {
321                 unlock_page(page);
322                 page_cache_release(page);
323                 return VM_FAULT_SIGBUS;
324         }
325
326         vmf->page = page;
327         return VM_FAULT_LOCKED;
328 }
329
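/*
 * Copy one page's worth of data from the DAX block described by @bh into the
 * page @to.  Used for copy-on-write faults, where the new page must start
 * out with the current file contents.
 */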
330 static int copy_user_bh(struct page *to, struct inode *inode,
331                 struct buffer_head *bh, unsigned long vaddr)
332 {
333         struct blk_dax_ctl dax = {
334                 .sector = to_sector(bh, inode),
335                 .size = bh->b_size,
336         };
337         struct block_device *bdev = bh->b_bdev;
338         void *vto;
339
340         if (dax_map_atomic(bdev, &dax) < 0)
341                 return PTR_ERR(dax.addr);
342         vto = kmap_atomic(to);
343         copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
344         kunmap_atomic(vto);
345         dax_unmap_atomic(bdev, &dax);
346         return 0;
347 }
348
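/*
 * NO_SECTOR tells dax_radix_entry() not to insert anything, only to dirty an
 * already-present entry; DAX_PMD_INDEX() is the first page index of the
 * PMD-aligned region containing @page_index.
 */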
349 #define NO_SECTOR -1
350 #define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT))
351
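/*
 * Record in the radix tree that the file page at @index is backed by DAX
 * storage at @sector, so that a later fsync/msync can find it and flush it
 * to the persistent domain.  An existing PTE entry may be replaced by a PMD
 * entry, but never the other way around, and clean PMD entries are never
 * inserted (see the comment in the body).  When @dirty is set, the entry is
 * tagged PAGECACHE_TAG_DIRTY and the inode is marked dirty so writeback
 * knows there is work to do.
 */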
352 static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
353                 sector_t sector, bool pmd_entry, bool dirty)
354 {
355         struct radix_tree_root *page_tree = &mapping->page_tree;
356         pgoff_t pmd_index = DAX_PMD_INDEX(index);
357         int type, error = 0;
358         void *entry;
359
360         WARN_ON_ONCE(pmd_entry && !dirty);
361         if (dirty)
362                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
363
364         spin_lock_irq(&mapping->tree_lock);
365
366         entry = radix_tree_lookup(page_tree, pmd_index);
367         if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
368                 index = pmd_index;
369                 goto dirty;
370         }
371
372         entry = radix_tree_lookup(page_tree, index);
373         if (entry) {
374                 type = RADIX_DAX_TYPE(entry);
375                 if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
376                                         type != RADIX_DAX_PMD)) {
377                         error = -EIO;
378                         goto unlock;
379                 }
380
381                 if (!pmd_entry || type == RADIX_DAX_PMD)
382                         goto dirty;
383
384                 /*
385                  * We only insert dirty PMD entries into the radix tree.  This
386                  * means we don't need to worry about removing a dirty PTE
387                  * entry and inserting a clean PMD entry, thus reducing the
388                  * range we would flush with a follow-up fsync/msync call.
389                  */
390                 radix_tree_delete(&mapping->page_tree, index);
391                 mapping->nrexceptional--;
392         }
393
394         if (sector == NO_SECTOR) {
395                 /*
396                  * This can happen during correct operation if our pfn_mkwrite
397                  * fault raced against a hole punch operation.  If this
398                  * happens the pte that was hole punched will have been
399                  * unmapped and the radix tree entry will have been removed by
400                  * the time we are called, but the call will still happen.  We
401                  * will return all the way up to wp_pfn_shared(), where the
402                  * pte_same() check will fail, eventually causing the page
403                  * fault to be retried by the CPU.
404                  */
405                 goto unlock;
406         }
407
408         error = radix_tree_insert(page_tree, index,
409                         RADIX_DAX_ENTRY(sector, pmd_entry));
410         if (error)
411                 goto unlock;
412
413         mapping->nrexceptional++;
414  dirty:
415         if (dirty)
416                 radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
417  unlock:
418         spin_unlock_irq(&mapping->tree_lock);
419         return error;
420 }
421
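/*
 * Flush a single radix tree entry to the persistent domain: revalidate the
 * entry and its TOWRITE tag under tree_lock, map the sector range it
 * describes, write the cachelines back with wb_cache_pmem(), and finally
 * clear the TOWRITE tag.
 */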
422 static int dax_writeback_one(struct block_device *bdev,
423                 struct address_space *mapping, pgoff_t index, void *entry)
424 {
425         struct radix_tree_root *page_tree = &mapping->page_tree;
426         int type = RADIX_DAX_TYPE(entry);
427         struct radix_tree_node *node;
428         struct blk_dax_ctl dax;
429         void **slot;
430         int ret = 0;
431
432         spin_lock_irq(&mapping->tree_lock);
433         /*
434          * Regular page slots are stabilized by the page lock even
435          * without the tree itself locked.  These unlocked entries
436          * need verification under the tree lock.
437          */
438         if (!__radix_tree_lookup(page_tree, index, &node, &slot))
439                 goto unlock;
440         if (*slot != entry)
441                 goto unlock;
442
443         /* another fsync thread may have already written back this entry */
444         if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
445                 goto unlock;
446
447         if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
448                 ret = -EIO;
449                 goto unlock;
450         }
451
452         dax.sector = RADIX_DAX_SECTOR(entry);
453         dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
454         spin_unlock_irq(&mapping->tree_lock);
455
456         /*
457          * We cannot hold tree_lock while calling dax_map_atomic() because it
458          * eventually calls cond_resched().
459          */
460         ret = dax_map_atomic(bdev, &dax);
461         if (ret < 0)
462                 return ret;
463
464         if (WARN_ON_ONCE(ret < dax.size)) {
465                 ret = -EIO;
466                 goto unmap;
467         }
468
469         wb_cache_pmem(dax.addr, dax.size);
470
471         spin_lock_irq(&mapping->tree_lock);
472         radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
473         spin_unlock_irq(&mapping->tree_lock);
474  unmap:
475         dax_unmap_atomic(bdev, &dax);
476         return ret;
477
478  unlock:
479         spin_unlock_irq(&mapping->tree_lock);
480         return ret;
481 }
482
483 /*
484  * Flush the mapping to the persistent domain within the byte range of [start,
485  * end]. This is required by data integrity operations to ensure file data is
486  * on persistent storage prior to completion of the operation.
487  */
488 int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
489                 loff_t end)
490 {
491         struct inode *inode = mapping->host;
492         struct block_device *bdev = inode->i_sb->s_bdev;
493         pgoff_t start_index, end_index, pmd_index;
494         pgoff_t indices[PAGEVEC_SIZE];
495         struct pagevec pvec;
496         bool done = false;
497         int i, ret = 0;
498         void *entry;
499
500         if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
501                 return -EIO;
502
503         start_index = start >> PAGE_CACHE_SHIFT;
504         end_index = end >> PAGE_CACHE_SHIFT;
505         pmd_index = DAX_PMD_INDEX(start_index);
506
507         rcu_read_lock();
508         entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
509         rcu_read_unlock();
510
511         /* see if the start of our range is covered by a PMD entry */
512         if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
513                 start_index = pmd_index;
514
515         tag_pages_for_writeback(mapping, start_index, end_index);
516
517         pagevec_init(&pvec, 0);
518         while (!done) {
519                 pvec.nr = find_get_entries_tag(mapping, start_index,
520                                 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
521                                 pvec.pages, indices);
522
523                 if (pvec.nr == 0)
524                         break;
525
526                 for (i = 0; i < pvec.nr; i++) {
527                         if (indices[i] > end_index) {
528                                 done = true;
529                                 break;
530                         }
531
532                         ret = dax_writeback_one(bdev, mapping, indices[i],
533                                         pvec.pages[i]);
534                         if (ret < 0)
535                                 return ret;
536                 }
537         }
538         wmb_pmem();
539         return 0;
540 }
541 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
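/*
 * Illustrative sketch (not part of this file): filesystems typically invoke
 * this from their ->writepages method when the mapping is DAX, roughly:
 *
 *	static int fs_writepages(struct address_space *mapping,
 *				 struct writeback_control *wbc)
 *	{
 *		if (dax_mapping(mapping))
 *			return dax_writeback_mapping_range(mapping,
 *					wbc->range_start, wbc->range_end);
 *		return generic_writepages(mapping, wbc);
 *	}
 */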
542
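/*
 * The tail end of the PTE fault path: map the block that get_block() returned
 * for this fault, zero it if it was freshly allocated or unwritten, record it
 * in the radix tree for fsync/msync, and finally install the pfn at the
 * faulting address with vm_insert_mixed().
 */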
543 static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
544                         struct vm_area_struct *vma, struct vm_fault *vmf)
545 {
546         unsigned long vaddr = (unsigned long)vmf->virtual_address;
547         struct address_space *mapping = inode->i_mapping;
548         struct block_device *bdev = bh->b_bdev;
549         struct blk_dax_ctl dax = {
550                 .sector = to_sector(bh, inode),
551                 .size = bh->b_size,
552         };
553         pgoff_t size;
554         int error;
555
556         i_mmap_lock_read(mapping);
557
558         /*
559          * Check truncate didn't happen while we were allocating a block.
560          * If it did, this block may or may not be still allocated to the
561          * file.  We can't tell the filesystem to free it because we can't
562          * take i_mutex here.  In the worst case, the file still has blocks
563          * allocated past the end of the file.
564          */
565         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
566         if (unlikely(vmf->pgoff >= size)) {
567                 error = -EIO;
568                 goto out;
569         }
570
571         if (dax_map_atomic(bdev, &dax) < 0) {
572                 error = PTR_ERR(dax.addr);
573                 goto out;
574         }
575
576         if (buffer_unwritten(bh) || buffer_new(bh)) {
577                 clear_pmem(dax.addr, PAGE_SIZE);
578                 wmb_pmem();
579         }
580         dax_unmap_atomic(bdev, &dax);
581
582         error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
583                         vmf->flags & FAULT_FLAG_WRITE);
584         if (error)
585                 goto out;
586
587         error = vm_insert_mixed(vma, vaddr, dax.pfn);
588
589  out:
590         i_mmap_unlock_read(mapping);
591
592         return error;
593 }
594
595 /**
596  * __dax_fault - handle a page fault on a DAX file
597  * @vma: The virtual memory area where the fault occurred
598  * @vmf: The description of the fault
599  * @get_block: The filesystem method used to translate file offsets to blocks
600  * @complete_unwritten: The filesystem method used to convert unwritten blocks
601  *      to written so the data written to them is exposed. This is required
602  *      by write faults for filesystems that will return unwritten extent
603  *      mappings from @get_block, but it is optional for reads as
604  *      dax_insert_mapping() will always zero unwritten blocks. If the fs does
605  *      not support unwritten extents, then it should pass NULL.
606  *
607  * When a page fault occurs, filesystems may call this helper in their
608  * fault handler for DAX files. __dax_fault() assumes the caller has done all
609  * the necessary locking for the page fault to proceed successfully.
610  */
611 int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
612                         get_block_t get_block, dax_iodone_t complete_unwritten)
613 {
614         struct file *file = vma->vm_file;
615         struct address_space *mapping = file->f_mapping;
616         struct inode *inode = mapping->host;
617         struct page *page;
618         struct buffer_head bh;
619         unsigned long vaddr = (unsigned long)vmf->virtual_address;
620         unsigned blkbits = inode->i_blkbits;
621         sector_t block;
622         pgoff_t size;
623         int error;
624         int major = 0;
625
626         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
627         if (vmf->pgoff >= size)
628                 return VM_FAULT_SIGBUS;
629
630         memset(&bh, 0, sizeof(bh));
631         block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
632         bh.b_bdev = inode->i_sb->s_bdev;
633         bh.b_size = PAGE_SIZE;
634
635  repeat:
636         page = find_get_page(mapping, vmf->pgoff);
637         if (page) {
638                 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
639                         page_cache_release(page);
640                         return VM_FAULT_RETRY;
641                 }
642                 if (unlikely(page->mapping != mapping)) {
643                         unlock_page(page);
644                         page_cache_release(page);
645                         goto repeat;
646                 }
647                 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
648                 if (unlikely(vmf->pgoff >= size)) {
649                         /*
650                          * We have a struct page covering a hole in the file
651                          * from a read fault and we've raced with a truncate
652                          */
653                         error = -EIO;
654                         goto unlock_page;
655                 }
656         }
657
658         error = get_block(inode, block, &bh, 0);
659         if (!error && (bh.b_size < PAGE_SIZE))
660                 error = -EIO;           /* fs corruption? */
661         if (error)
662                 goto unlock_page;
663
664         if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
665                 if (vmf->flags & FAULT_FLAG_WRITE) {
666                         error = get_block(inode, block, &bh, 1);
667                         count_vm_event(PGMAJFAULT);
668                         mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
669                         major = VM_FAULT_MAJOR;
670                         if (!error && (bh.b_size < PAGE_SIZE))
671                                 error = -EIO;
672                         if (error)
673                                 goto unlock_page;
674                 } else {
675                         return dax_load_hole(mapping, page, vmf);
676                 }
677         }
678
679         if (vmf->cow_page) {
680                 struct page *new_page = vmf->cow_page;
681                 if (buffer_written(&bh))
682                         error = copy_user_bh(new_page, inode, &bh, vaddr);
683                 else
684                         clear_user_highpage(new_page, vaddr);
685                 if (error)
686                         goto unlock_page;
687                 vmf->page = page;
688                 if (!page) {
689                         i_mmap_lock_read(mapping);
690                         /* Check we didn't race with truncate */
691                         size = (i_size_read(inode) + PAGE_SIZE - 1) >>
692                                                                 PAGE_SHIFT;
693                         if (vmf->pgoff >= size) {
694                                 i_mmap_unlock_read(mapping);
695                                 error = -EIO;
696                                 goto out;
697                         }
698                 }
699                 return VM_FAULT_LOCKED;
700         }
701
702         /* Check we didn't race with a read fault installing a new page */
703         if (!page && major)
704                 page = find_lock_page(mapping, vmf->pgoff);
705
706         if (page) {
707                 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
708                                                         PAGE_CACHE_SIZE, 0);
709                 delete_from_page_cache(page);
710                 unlock_page(page);
711                 page_cache_release(page);
712                 page = NULL;
713         }
714
715         /*
716          * If we successfully insert the new mapping over an unwritten extent,
717          * we need to ensure we convert the unwritten extent. If there is an
718          * error inserting the mapping, the filesystem needs to leave it as
719          * unwritten to prevent exposure of the stale underlying data to
720          * userspace, but we still need to call the completion function so
721          * the private resources on the mapping buffer can be released. We
722          * indicate what the callback should do via the uptodate variable, same
723          * as for normal BH based IO completions.
724          */
725         error = dax_insert_mapping(inode, &bh, vma, vmf);
726         if (buffer_unwritten(&bh)) {
727                 if (complete_unwritten)
728                         complete_unwritten(&bh, !error);
729                 else
730                         WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
731         }
732
733  out:
734         if (error == -ENOMEM)
735                 return VM_FAULT_OOM | major;
736         /* -EBUSY is fine, somebody else faulted on the same PTE */
737         if ((error < 0) && (error != -EBUSY))
738                 return VM_FAULT_SIGBUS | major;
739         return VM_FAULT_NOPAGE | major;
740
741  unlock_page:
742         if (page) {
743                 unlock_page(page);
744                 page_cache_release(page);
745         }
746         goto out;
747 }
748 EXPORT_SYMBOL(__dax_fault);
749
750 /**
751  * dax_fault - handle a page fault on a DAX file
752  * @vma: The virtual memory area where the fault occurred
753  * @vmf: The description of the fault
754  * @get_block: The filesystem method used to translate file offsets to blocks
755  *
756  * When a page fault occurs, filesystems may call this helper in their
757  * fault handler for DAX files.
758  */
759 int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
760               get_block_t get_block, dax_iodone_t complete_unwritten)
761 {
762         int result;
763         struct super_block *sb = file_inode(vma->vm_file)->i_sb;
764
765         if (vmf->flags & FAULT_FLAG_WRITE) {
766                 sb_start_pagefault(sb);
767                 file_update_time(vma->vm_file);
768         }
769         result = __dax_fault(vma, vmf, get_block, complete_unwritten);
770         if (vmf->flags & FAULT_FLAG_WRITE)
771                 sb_end_pagefault(sb);
772
773         return result;
774 }
775 EXPORT_SYMBOL_GPL(dax_fault);
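/*
 * Illustrative sketch (not part of this file): the fault helpers above are
 * meant to be wired into the vm_operations_struct a filesystem installs from
 * its ->mmap method.  With a hypothetical get_block_t "fs_get_block", that
 * could look roughly like:
 *
 *	static int fs_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, fs_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct fs_dax_vm_ops = {
 *		.fault		= fs_dax_fault,
 *		.page_mkwrite	= fs_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 *
 * A ->pmd_fault method wrapping dax_pmd_fault() can be added the same way.
 */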
776
777 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
778 /*
779  * The 'colour' (i.e. the low bits) of a page offset within a PMD.  This
780  * comes up more often than one might expect in the function below.
781  */
782 #define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)
783
784 static void __dax_dbg(struct buffer_head *bh, unsigned long address,
785                 const char *reason, const char *fn)
786 {
787         if (bh) {
788                 char bname[BDEVNAME_SIZE];
789                 bdevname(bh->b_bdev, bname);
790                 pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
791                         "length %zd fallback: %s\n", fn, current->comm,
792                         address, bname, bh->b_state, (u64)bh->b_blocknr,
793                         bh->b_size, reason);
794         } else {
795                 pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
796                         current->comm, address, reason);
797         }
798 }
799
800 #define dax_pmd_dbg(bh, address, reason)        __dax_dbg(bh, address, reason, "dax_pmd")
801
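/*
 * Handle a fault with a PMD-sized (huge) DAX mapping.  Whenever a huge
 * mapping cannot be used (a COW mapping, a VMA or file range that does not
 * cover a whole PMD, a block mapping that is too small or misaligned, or
 * storage without pfn_t_devmap() backing) we return VM_FAULT_FALLBACK so the
 * core MM retries the fault with PTEs.  Read faults over holes are served
 * with the huge zero page.
 */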
802 int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
803                 pmd_t *pmd, unsigned int flags, get_block_t get_block,
804                 dax_iodone_t complete_unwritten)
805 {
806         struct file *file = vma->vm_file;
807         struct address_space *mapping = file->f_mapping;
808         struct inode *inode = mapping->host;
809         struct buffer_head bh;
810         unsigned blkbits = inode->i_blkbits;
811         unsigned long pmd_addr = address & PMD_MASK;
812         bool write = flags & FAULT_FLAG_WRITE;
813         struct block_device *bdev;
814         pgoff_t size, pgoff;
815         sector_t block;
816         int error, result = 0;
817         bool alloc = false;
818
819         /* dax pmd mappings require pfn_t_devmap() */
820         if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
821                 return VM_FAULT_FALLBACK;
822
823         /* Fall back to PTEs if we're going to COW */
824         if (write && !(vma->vm_flags & VM_SHARED)) {
825                 split_huge_pmd(vma, pmd, address);
826                 dax_pmd_dbg(NULL, address, "cow write");
827                 return VM_FAULT_FALLBACK;
828         }
829         /* If the PMD would extend outside the VMA */
830         if (pmd_addr < vma->vm_start) {
831                 dax_pmd_dbg(NULL, address, "vma start unaligned");
832                 return VM_FAULT_FALLBACK;
833         }
834         if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
835                 dax_pmd_dbg(NULL, address, "vma end unaligned");
836                 return VM_FAULT_FALLBACK;
837         }
838
839         pgoff = linear_page_index(vma, pmd_addr);
840         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
841         if (pgoff >= size)
842                 return VM_FAULT_SIGBUS;
843         /* If the PMD would cover blocks out of the file */
844         if ((pgoff | PG_PMD_COLOUR) >= size) {
845                 dax_pmd_dbg(NULL, address,
846                                 "offset + huge page size > file size");
847                 return VM_FAULT_FALLBACK;
848         }
849
850         memset(&bh, 0, sizeof(bh));
851         bh.b_bdev = inode->i_sb->s_bdev;
852         block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
853
854         bh.b_size = PMD_SIZE;
855
856         if (get_block(inode, block, &bh, 0) != 0)
857                 return VM_FAULT_SIGBUS;
858
859         if (!buffer_mapped(&bh) && write) {
860                 if (get_block(inode, block, &bh, 1) != 0)
861                         return VM_FAULT_SIGBUS;
862                 alloc = true;
863         }
864
865         bdev = bh.b_bdev;
866
867         /*
868          * If the filesystem isn't willing to tell us the length of a hole,
869          * just fall back to PTEs.  Calling get_block 512 times in a loop
870          * would be silly.
871          */
872         if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
873                 dax_pmd_dbg(&bh, address, "allocated block too small");
874                 return VM_FAULT_FALLBACK;
875         }
876
877         /*
878          * If we allocated new storage, make sure no process has any
879          * zero pages covering this hole
880          */
881         if (alloc) {
882                 loff_t lstart = pgoff << PAGE_SHIFT;
883                 loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */
884
885                 truncate_pagecache_range(inode, lstart, lend);
886         }
887
888         i_mmap_lock_read(mapping);
889
890         /*
891          * If a truncate happened while we were allocating blocks, we may
892          * leave blocks allocated to the file that are beyond EOF.  We can't
893          * take i_mutex here, so just leave them hanging; they'll be freed
894          * when the file is deleted.
895          */
896         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
897         if (pgoff >= size) {
898                 result = VM_FAULT_SIGBUS;
899                 goto out;
900         }
901         if ((pgoff | PG_PMD_COLOUR) >= size) {
902                 dax_pmd_dbg(&bh, address,
903                                 "offset + huge page size > file size");
904                 goto fallback;
905         }
906
907         if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
908                 spinlock_t *ptl;
909                 pmd_t entry;
910                 struct page *zero_page = get_huge_zero_page();
911
912                 if (unlikely(!zero_page)) {
913                         dax_pmd_dbg(&bh, address, "no zero page");
914                         goto fallback;
915                 }
916
917                 ptl = pmd_lock(vma->vm_mm, pmd);
918                 if (!pmd_none(*pmd)) {
919                         spin_unlock(ptl);
920                         dax_pmd_dbg(&bh, address, "pmd already present");
921                         goto fallback;
922                 }
923
924                 dev_dbg(part_to_dev(bdev->bd_part),
925                                 "%s: %s addr: %lx pfn: <zero> sect: %llx\n",
926                                 __func__, current->comm, address,
927                                 (unsigned long long) to_sector(&bh, inode));
928
929                 entry = mk_pmd(zero_page, vma->vm_page_prot);
930                 entry = pmd_mkhuge(entry);
931                 set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
932                 result = VM_FAULT_NOPAGE;
933                 spin_unlock(ptl);
934         } else {
935                 struct blk_dax_ctl dax = {
936                         .sector = to_sector(&bh, inode),
937                         .size = PMD_SIZE,
938                 };
939                 long length = dax_map_atomic(bdev, &dax);
940
941                 if (length < 0) {
942                         result = VM_FAULT_SIGBUS;
943                         goto out;
944                 }
945                 if (length < PMD_SIZE) {
946                         dax_pmd_dbg(&bh, address, "dax-length too small");
947                         dax_unmap_atomic(bdev, &dax);
948                         goto fallback;
949                 }
950                 if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
951                         dax_pmd_dbg(&bh, address, "pfn unaligned");
952                         dax_unmap_atomic(bdev, &dax);
953                         goto fallback;
954                 }
955
956                 if (!pfn_t_devmap(dax.pfn)) {
957                         dax_unmap_atomic(bdev, &dax);
958                         dax_pmd_dbg(&bh, address, "pfn not in memmap");
959                         goto fallback;
960                 }
961
962                 if (buffer_unwritten(&bh) || buffer_new(&bh)) {
963                         clear_pmem(dax.addr, PMD_SIZE);
964                         wmb_pmem();
965                         count_vm_event(PGMAJFAULT);
966                         mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
967                         result |= VM_FAULT_MAJOR;
968                 }
969                 dax_unmap_atomic(bdev, &dax);
970
971                 /*
972                  * For PTE faults we insert a radix tree entry for reads, and
973                  * leave it clean.  Then on the first write we dirty the radix
974                  * tree entry via the dax_pfn_mkwrite() path.  This sequence
975                  * allows the dax_pfn_mkwrite() call to be simpler and avoid a
976                  * call into get_block() to translate the pgoff to a sector in
977                  * order to be able to create a new radix tree entry.
978                  *
979                  * The PMD path doesn't have an equivalent to
980                  * dax_pfn_mkwrite(), though, so for a read followed by a
981                  * write we traverse all the way through __dax_pmd_fault()
982                  * twice.  This means we can just skip inserting a radix tree
983                  * entry completely on the initial read and just wait until
984                  * the write to insert a dirty entry.
985                  */
986                 if (write) {
987                         error = dax_radix_entry(mapping, pgoff, dax.sector,
988                                         true, true);
989                         if (error) {
990                                 dax_pmd_dbg(&bh, address,
991                                                 "PMD radix insertion failed");
992                                 goto fallback;
993                         }
994                 }
995
996                 dev_dbg(part_to_dev(bdev->bd_part),
997                                 "%s: %s addr: %lx pfn: %lx sect: %llx\n",
998                                 __func__, current->comm, address,
999                                 pfn_t_to_pfn(dax.pfn),
1000                                 (unsigned long long) dax.sector);
1001                 result |= vmf_insert_pfn_pmd(vma, address, pmd,
1002                                 dax.pfn, write);
1003         }
1004
1005  out:
1006         i_mmap_unlock_read(mapping);
1007
1008         if (buffer_unwritten(&bh))
1009                 complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
1010
1011         return result;
1012
1013  fallback:
1014         count_vm_event(THP_FAULT_FALLBACK);
1015         result = VM_FAULT_FALLBACK;
1016         goto out;
1017 }
1018 EXPORT_SYMBOL_GPL(__dax_pmd_fault);
1019
1020 /**
1021  * dax_pmd_fault - handle a PMD fault on a DAX file
1022  * @vma: The virtual memory area where the fault occurred
1023  * @address: The address at which the fault occurred
1024  * @get_block: The filesystem method used to translate file offsets to blocks
1025  *
1026  * When a page fault occurs, filesystems may call this helper in their
1027  * pmd_fault handler for DAX files.
1028  */
1029 int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1030                         pmd_t *pmd, unsigned int flags, get_block_t get_block,
1031                         dax_iodone_t complete_unwritten)
1032 {
1033         int result;
1034         struct super_block *sb = file_inode(vma->vm_file)->i_sb;
1035
1036         if (flags & FAULT_FLAG_WRITE) {
1037                 sb_start_pagefault(sb);
1038                 file_update_time(vma->vm_file);
1039         }
1040         result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
1041                                 complete_unwritten);
1042         if (flags & FAULT_FLAG_WRITE)
1043                 sb_end_pagefault(sb);
1044
1045         return result;
1046 }
1047 EXPORT_SYMBOL_GPL(dax_pmd_fault);
1048 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1049
1050 /**
1051  * dax_pfn_mkwrite - handle first write to DAX page
1052  * @vma: The virtual memory area where the fault occurred
1053  * @vmf: The description of the fault
1054  */
1055 int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1056 {
1057         struct file *file = vma->vm_file;
1058
1059         /*
1060          * We pass NO_SECTOR to dax_radix_entry() because we expect that a
1061          * RADIX_DAX_PTE entry already exists in the radix tree from a
1062          * previous call to __dax_fault().  We just want to look up that PTE
1063          * entry using vmf->pgoff and make sure the dirty tag is set.  This
1064          * saves us from having to make a call to get_block() here to look
1065          * up the sector.
1066          */
1067         dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false, true);
1068         return VM_FAULT_NOPAGE;
1069 }
1070 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
1071
1072 /**
1073  * dax_zero_page_range - zero a range within a page of a DAX file
1074  * @inode: The file being truncated
1075  * @from: The file offset that is being truncated to
1076  * @length: The number of bytes to zero
1077  * @get_block: The filesystem method used to translate file offsets to blocks
1078  *
1079  * This function can be called by a filesystem when it is zeroing part of a
1080  * page in a DAX file.  This is intended for hole-punch operations.  If
1081  * you are truncating a file, the helper function dax_truncate_page() may be
1082  * more convenient.
1083  *
1084  * We work in terms of PAGE_CACHE_SIZE here for commonality with
1085  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
1086  * took care of disposing of the unnecessary blocks.  Even if the filesystem
1087  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
1088  * since the file might be mmapped.
1089  */
1090 int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
1091                                                         get_block_t get_block)
1092 {
1093         struct buffer_head bh;
1094         pgoff_t index = from >> PAGE_CACHE_SHIFT;
1095         unsigned offset = from & (PAGE_CACHE_SIZE-1);
1096         int err;
1097
1098         /* Block boundary? Nothing to do */
1099         if (!length)
1100                 return 0;
1101         BUG_ON((offset + length) > PAGE_CACHE_SIZE);
1102
1103         memset(&bh, 0, sizeof(bh));
1104         bh.b_bdev = inode->i_sb->s_bdev;
1105         bh.b_size = PAGE_CACHE_SIZE;
1106         err = get_block(inode, index, &bh, 0);
1107         if (err < 0)
1108                 return err;
1109         if (buffer_written(&bh)) {
1110                 struct block_device *bdev = bh.b_bdev;
1111                 struct blk_dax_ctl dax = {
1112                         .sector = to_sector(&bh, inode),
1113                         .size = PAGE_CACHE_SIZE,
1114                 };
1115
1116                 if (dax_map_atomic(bdev, &dax) < 0)
1117                         return PTR_ERR(dax.addr);
1118                 clear_pmem(dax.addr + offset, length);
1119                 wmb_pmem();
1120                 dax_unmap_atomic(bdev, &dax);
1121         }
1122
1123         return 0;
1124 }
1125 EXPORT_SYMBOL_GPL(dax_zero_page_range);
1126
1127 /**
1128  * dax_truncate_page - handle a partial page being truncated in a DAX file
1129  * @inode: The file being truncated
1130  * @from: The file offset that is being truncated to
1131  * @get_block: The filesystem method used to translate file offsets to blocks
1132  *
1133  * Similar to block_truncate_page(), this function can be called by a
1134  * filesystem when it is truncating a DAX file to handle the partial page.
1135  *
1136  * We work in terms of PAGE_CACHE_SIZE here for commonality with
1137  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
1138  * took care of disposing of the unnecessary blocks.  Even if the filesystem
1139  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
1140  * since the file might be mmapped.
1141  */
1142 int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
1143 {
1144         unsigned length = PAGE_CACHE_ALIGN(from) - from;
1145         return dax_zero_page_range(inode, from, length, get_block);
1146 }
1147 EXPORT_SYMBOL_GPL(dax_truncate_page);
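/*
 * Illustrative sketch (not part of this file): a filesystem's truncate path
 * would typically zero the partial tail page before updating i_size, e.g.
 * (with "fs_get_block" again a hypothetical get_block_t):
 *
 *	error = dax_truncate_page(inode, newsize, fs_get_block);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, newsize);
 */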