dax: update to new mmu_notifier semantic
1 /*
2  * fs/dax.c - Direct Access filesystem code
3  * Copyright (c) 2013-2014 Intel Corporation
4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  */
16
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/sched.h>
29 #include <linux/sched/signal.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34 #include <linux/mmu_notifier.h>
35 #include <linux/iomap.h>
36 #include "internal.h"
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/fs_dax.h>
40
41 /* We choose 4096 entries - same as per-zone page wait tables */
42 #define DAX_WAIT_TABLE_BITS 12
43 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
44
45 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
46
47 static int __init init_dax_wait_table(void)
48 {
49         int i;
50
51         for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
52                 init_waitqueue_head(wait_table + i);
53         return 0;
54 }
55 fs_initcall(init_dax_wait_table);
56
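/*
 * Type tests for the flag bits (RADIX_DAX_PMD, RADIX_DAX_HZP, RADIX_DAX_EMPTY)
 * encoded in a DAX radix tree entry.
 */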
57 static int dax_is_pmd_entry(void *entry)
58 {
59         return (unsigned long)entry & RADIX_DAX_PMD;
60 }
61
62 static int dax_is_pte_entry(void *entry)
63 {
64         return !((unsigned long)entry & RADIX_DAX_PMD);
65 }
66
67 static int dax_is_zero_entry(void *entry)
68 {
69         return (unsigned long)entry & RADIX_DAX_HZP;
70 }
71
72 static int dax_is_empty_entry(void *entry)
73 {
74         return (unsigned long)entry & RADIX_DAX_EMPTY;
75 }
76
77 /*
78  * DAX radix tree locking
79  */
80 struct exceptional_entry_key {
81         struct address_space *mapping;
82         pgoff_t entry_start;
83 };
84
85 struct wait_exceptional_entry_queue {
86         wait_queue_entry_t wait;
87         struct exceptional_entry_key key;
88 };
89
90 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
91                 pgoff_t index, void *entry, struct exceptional_entry_key *key)
92 {
93         unsigned long hash;
94
95         /*
96          * If 'entry' is a PMD, align the 'index' that we use for the wait
97          * queue to the start of that PMD.  This ensures that all offsets in
98          * the range covered by the PMD map to the same bit lock.
99          */
100         if (dax_is_pmd_entry(entry))
101                 index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
102
103         key->mapping = mapping;
104         key->entry_start = index;
105
106         hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
107         return wait_table + hash;
108 }
109
110 static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
111                                        int sync, void *keyp)
112 {
113         struct exceptional_entry_key *key = keyp;
114         struct wait_exceptional_entry_queue *ewait =
115                 container_of(wait, struct wait_exceptional_entry_queue, wait);
116
117         if (key->mapping != ewait->key.mapping ||
118             key->entry_start != ewait->key.entry_start)
119                 return 0;
120         return autoremove_wake_function(wait, mode, sync, NULL);
121 }
122
123 /*
124  * Check whether the given slot is locked. The function must be called with
125  * mapping->tree_lock held
126  */
127 static inline int slot_locked(struct address_space *mapping, void **slot)
128 {
129         unsigned long entry = (unsigned long)
130                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
131         return entry & RADIX_DAX_ENTRY_LOCK;
132 }
133
134 /*
135  * Mark the given slot as locked. The function must be called with
136  * mapping->tree_lock held
137  */
138 static inline void *lock_slot(struct address_space *mapping, void **slot)
139 {
140         unsigned long entry = (unsigned long)
141                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
142
143         entry |= RADIX_DAX_ENTRY_LOCK;
144         radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
145         return (void *)entry;
146 }
147
148 /*
149  * Mark the given slot as unlocked. The function must be called with
150  * mapping->tree_lock held
151  */
152 static inline void *unlock_slot(struct address_space *mapping, void **slot)
153 {
154         unsigned long entry = (unsigned long)
155                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
156
157         entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
158         radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
159         return (void *)entry;
160 }
161
162 /*
163  * Look up an entry in the radix tree, wait for it to become unlocked if it
164  * is an exceptional entry, and return it. The caller must call
165  * put_unlocked_mapping_entry() if it decided not to lock the entry, or
166  * put_locked_mapping_entry() if it locked the entry and now wants to
167  * unlock it.
168  *
169  * The function must be called with mapping->tree_lock held.
170  */
171 static void *get_unlocked_mapping_entry(struct address_space *mapping,
172                                         pgoff_t index, void ***slotp)
173 {
174         void *entry, **slot;
175         struct wait_exceptional_entry_queue ewait;
176         wait_queue_head_t *wq;
177
178         init_wait(&ewait.wait);
179         ewait.wait.func = wake_exceptional_entry_func;
180
181         for (;;) {
182                 entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
183                                           &slot);
184                 if (!entry || !radix_tree_exceptional_entry(entry) ||
185                     !slot_locked(mapping, slot)) {
186                         if (slotp)
187                                 *slotp = slot;
188                         return entry;
189                 }
190
191                 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
192                 prepare_to_wait_exclusive(wq, &ewait.wait,
193                                           TASK_UNINTERRUPTIBLE);
194                 spin_unlock_irq(&mapping->tree_lock);
195                 schedule();
196                 finish_wait(wq, &ewait.wait);
197                 spin_lock_irq(&mapping->tree_lock);
198         }
199 }
200
201 static void dax_unlock_mapping_entry(struct address_space *mapping,
202                                      pgoff_t index)
203 {
204         void *entry, **slot;
205
206         spin_lock_irq(&mapping->tree_lock);
207         entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
208         if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
209                          !slot_locked(mapping, slot))) {
210                 spin_unlock_irq(&mapping->tree_lock);
211                 return;
212         }
213         unlock_slot(mapping, slot);
214         spin_unlock_irq(&mapping->tree_lock);
215         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
216 }
217
218 static void put_locked_mapping_entry(struct address_space *mapping,
219                                      pgoff_t index, void *entry)
220 {
221         if (!radix_tree_exceptional_entry(entry)) {
222                 unlock_page(entry);
223                 put_page(entry);
224         } else {
225                 dax_unlock_mapping_entry(mapping, index);
226         }
227 }
228
229 /*
230  * Called when we are done with a radix tree entry we looked up via
231  * get_unlocked_mapping_entry() and which we didn't lock in the end.
232  */
233 static void put_unlocked_mapping_entry(struct address_space *mapping,
234                                        pgoff_t index, void *entry)
235 {
236         if (!radix_tree_exceptional_entry(entry))
237                 return;
238
239         /* We have to wake up next waiter for the radix tree entry lock */
240         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
241 }
242
243 /*
244  * Find the radix tree entry at the given index. If it points to a page,
245  * return with the page locked. If it points to an exceptional entry, return
246  * with the radix tree entry locked. If the radix tree doesn't contain the
247  * given index, create an empty exceptional entry for it and return with it locked.
248  *
249  * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
250  * either return that locked entry or will return an error.  This error will
251  * happen if there are any 4k entries (either zero pages or DAX entries)
252  * within the 2MiB range that we are requesting.
253  *
254  * We always favor 4k entries over 2MiB entries. There isn't a flow where we
255  * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
256  * insertion will fail if it finds any 4k entries already in the tree, and a
257  * 4k insertion will cause an existing 2MiB entry to be unmapped and
258  * downgraded to 4k entries.  This happens for both 2MiB huge zero pages
259  * and 2MiB empty entries.
260  *
261  * The exception to this downgrade path is for 2MiB DAX PMD entries that have
262  * real storage backing them.  We will leave these real 2MiB DAX entries in
263  * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
264  *
265  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
266  * persistent memory the benefit is doubtful. We can add that later if we can
267  * show it helps.
268  */
269 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
270                 unsigned long size_flag)
271 {
272         bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
273         void *entry, **slot;
274
275 restart:
276         spin_lock_irq(&mapping->tree_lock);
277         entry = get_unlocked_mapping_entry(mapping, index, &slot);
278
279         if (entry) {
280                 if (size_flag & RADIX_DAX_PMD) {
281                         if (!radix_tree_exceptional_entry(entry) ||
282                             dax_is_pte_entry(entry)) {
283                                 put_unlocked_mapping_entry(mapping, index,
284                                                 entry);
285                                 entry = ERR_PTR(-EEXIST);
286                                 goto out_unlock;
287                         }
288                 } else { /* trying to grab a PTE entry */
289                         if (radix_tree_exceptional_entry(entry) &&
290                             dax_is_pmd_entry(entry) &&
291                             (dax_is_zero_entry(entry) ||
292                              dax_is_empty_entry(entry))) {
293                                 pmd_downgrade = true;
294                         }
295                 }
296         }
297
298         /* No entry for given index? Make sure radix tree is big enough. */
299         if (!entry || pmd_downgrade) {
300                 int err;
301
302                 if (pmd_downgrade) {
303                         /*
304                          * Make sure 'entry' remains valid while we drop
305                          * mapping->tree_lock.
306                          */
307                         entry = lock_slot(mapping, slot);
308                 }
309
310                 spin_unlock_irq(&mapping->tree_lock);
311                 /*
312                  * Besides huge zero pages, the only other things that get
313                  * downgraded are empty entries, which don't need to be
314                  * unmapped.
315                  */
316                 if (pmd_downgrade && dax_is_zero_entry(entry))
317                         unmap_mapping_range(mapping,
318                                 (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
319
320                 err = radix_tree_preload(
321                                 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
322                 if (err) {
323                         if (pmd_downgrade)
324                                 put_locked_mapping_entry(mapping, index, entry);
325                         return ERR_PTR(err);
326                 }
327                 spin_lock_irq(&mapping->tree_lock);
328
329                 if (!entry) {
330                         /*
331                          * We needed to drop the page_tree lock while calling
332                          * radix_tree_preload() and we didn't have an entry to
333                          * lock.  See if another thread inserted an entry at
334                          * our index during this time.
335                          */
336                         entry = __radix_tree_lookup(&mapping->page_tree, index,
337                                         NULL, &slot);
338                         if (entry) {
339                                 radix_tree_preload_end();
340                                 spin_unlock_irq(&mapping->tree_lock);
341                                 goto restart;
342                         }
343                 }
344
345                 if (pmd_downgrade) {
346                         radix_tree_delete(&mapping->page_tree, index);
347                         mapping->nrexceptional--;
348                         dax_wake_mapping_entry_waiter(mapping, index, entry,
349                                         true);
350                 }
351
352                 entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
353
354                 err = __radix_tree_insert(&mapping->page_tree, index,
355                                 dax_radix_order(entry), entry);
356                 radix_tree_preload_end();
357                 if (err) {
358                         spin_unlock_irq(&mapping->tree_lock);
359                         /*
360                          * Our insertion of a DAX entry failed, most likely
361                          * because we were inserting a PMD entry and it
362                          * collided with a PTE sized entry at a different
363                          * index in the PMD range.  We haven't inserted
364                          * anything into the radix tree and have no waiters to
365                          * wake.
366                          */
367                         return ERR_PTR(err);
368                 }
369                 /* Good, we have inserted empty locked entry into the tree. */
370                 mapping->nrexceptional++;
371                 spin_unlock_irq(&mapping->tree_lock);
372                 return entry;
373         }
374         /* Normal page in radix tree? */
375         if (!radix_tree_exceptional_entry(entry)) {
376                 struct page *page = entry;
377
378                 get_page(page);
379                 spin_unlock_irq(&mapping->tree_lock);
380                 lock_page(page);
381                 /* Page got truncated? Retry... */
382                 if (unlikely(page->mapping != mapping)) {
383                         unlock_page(page);
384                         put_page(page);
385                         goto restart;
386                 }
387                 return page;
388         }
389         entry = lock_slot(mapping, slot);
390  out_unlock:
391         spin_unlock_irq(&mapping->tree_lock);
392         return entry;
393 }
394
395 /*
396  * We do not necessarily hold the mapping->tree_lock when we call this
397  * function so it is possible that 'entry' is no longer a valid item in the
398  * radix tree.  This is okay because all we really need to do is to find the
399  * correct waitqueue where tasks might be waiting for that old 'entry' and
400  * wake them.
401  */
402 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
403                 pgoff_t index, void *entry, bool wake_all)
404 {
405         struct exceptional_entry_key key;
406         wait_queue_head_t *wq;
407
408         wq = dax_entry_waitqueue(mapping, index, entry, &key);
409
410         /*
411          * Checking for a locked entry and prepare_to_wait_exclusive() happen
412          * under mapping->tree_lock; ditto for entry handling in our callers.
413          * So at this point all tasks that could have seen our entry locked
414          * must be in the waitqueue and the following check will see them.
415          */
416         if (waitqueue_active(wq))
417                 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
418 }
419
420 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
421                                           pgoff_t index, bool trunc)
422 {
423         int ret = 0;
424         void *entry;
425         struct radix_tree_root *page_tree = &mapping->page_tree;
426
427         spin_lock_irq(&mapping->tree_lock);
428         entry = get_unlocked_mapping_entry(mapping, index, NULL);
429         if (!entry || !radix_tree_exceptional_entry(entry))
430                 goto out;
431         if (!trunc &&
432             (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
433              radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
434                 goto out;
435         radix_tree_delete(page_tree, index);
436         mapping->nrexceptional--;
437         ret = 1;
438 out:
439         put_unlocked_mapping_entry(mapping, index, entry);
440         spin_unlock_irq(&mapping->tree_lock);
441         return ret;
442 }
443 /*
444  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
445  * entry to get unlocked before deleting it.
446  */
447 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
448 {
449         int ret = __dax_invalidate_mapping_entry(mapping, index, true);
450
451         /*
452          * This gets called from the truncate / punch_hole path. As such, the
453          * caller must hold locks protecting against concurrent modifications
454          * of the radix tree (usually fs-private i_mmap_sem for writing). Since
455          * the caller has seen an exceptional entry for this index, we had
456          * better find it at that index as well...
457          */
458         WARN_ON_ONCE(!ret);
459         return ret;
460 }
461
462 /*
463  * Invalidate exceptional DAX entry if it is clean.
464  */
465 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
466                                       pgoff_t index)
467 {
468         return __dax_invalidate_mapping_entry(mapping, index, false);
469 }
470
471 /*
472  * The user has performed a load from a hole in the file.  Allocating
473  * a new page in the file would cause excessive storage usage for
474  * workloads with sparse files.  We allocate a page cache page instead.
475  * We'll kick it out of the page cache if it's ever written to,
476  * otherwise it will simply fall out of the page cache under memory
477  * pressure without ever having been dirtied.
478  */
479 static int dax_load_hole(struct address_space *mapping, void **entry,
480                          struct vm_fault *vmf)
481 {
482         struct inode *inode = mapping->host;
483         struct page *page;
484         int ret;
485
486         /* Hole page already exists? Return it...  */
487         if (!radix_tree_exceptional_entry(*entry)) {
488                 page = *entry;
489                 goto finish_fault;
490         }
491
492         /* This will replace locked radix tree entry with a hole page */
493         page = find_or_create_page(mapping, vmf->pgoff,
494                                    vmf->gfp_mask | __GFP_ZERO);
495         if (!page) {
496                 ret = VM_FAULT_OOM;
497                 goto out;
498         }
499
500 finish_fault:
501         vmf->page = page;
502         ret = finish_fault(vmf);
503         vmf->page = NULL;
504         *entry = page;
505         if (!ret) {
506                 /* Grab reference for PTE that is now referencing the page */
507                 get_page(page);
508                 ret = VM_FAULT_NOPAGE;
509         }
510 out:
511         trace_dax_load_hole(inode, vmf, ret);
512         return ret;
513 }
514
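/*
 * Copy one page of data from the DAX device at @sector into the page @to,
 * e.g. to populate a copy-on-write page during a fault.
 */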
515 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
516                 sector_t sector, size_t size, struct page *to,
517                 unsigned long vaddr)
518 {
519         void *vto, *kaddr;
520         pgoff_t pgoff;
521         pfn_t pfn;
522         long rc;
523         int id;
524
525         rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
526         if (rc)
527                 return rc;
528
529         id = dax_read_lock();
530         rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
531         if (rc < 0) {
532                 dax_read_unlock(id);
533                 return rc;
534         }
535         vto = kmap_atomic(to);
536         copy_user_page(vto, (void __force *)kaddr, vaddr, to);
537         kunmap_atomic(vto);
538         dax_read_unlock(id);
539         return 0;
540 }
541
542 /*
543  * By this point grab_mapping_entry() has ensured that we have a locked entry
544  * of the appropriate size so we don't have to worry about downgrading PMDs to
545  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
546  * already in the tree, we will skip the insertion and just dirty the PMD as
547  * appropriate.
548  */
549 static void *dax_insert_mapping_entry(struct address_space *mapping,
550                                       struct vm_fault *vmf,
551                                       void *entry, sector_t sector,
552                                       unsigned long flags)
553 {
554         struct radix_tree_root *page_tree = &mapping->page_tree;
555         int error = 0;
556         bool hole_fill = false;
557         void *new_entry;
558         pgoff_t index = vmf->pgoff;
559
560         if (vmf->flags & FAULT_FLAG_WRITE)
561                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
562
563         /* Replacing hole page with block mapping? */
564         if (!radix_tree_exceptional_entry(entry)) {
565                 hole_fill = true;
566                 /*
567                  * Unmap the page now before we remove it from page cache below.
568                  * The page is locked so it cannot be faulted in again.
569                  */
570                 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
571                                     PAGE_SIZE, 0);
572                 error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
573                 if (error)
574                         return ERR_PTR(error);
575         } else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
576                 /* replacing huge zero page with PMD block mapping */
577                 unmap_mapping_range(mapping,
578                         (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
579         }
580
581         spin_lock_irq(&mapping->tree_lock);
582         new_entry = dax_radix_locked_entry(sector, flags);
583
584         if (hole_fill) {
585                 __delete_from_page_cache(entry, NULL);
586                 /* Drop pagecache reference */
587                 put_page(entry);
588                 error = __radix_tree_insert(page_tree, index,
589                                 dax_radix_order(new_entry), new_entry);
590                 if (error) {
591                         new_entry = ERR_PTR(error);
592                         goto unlock;
593                 }
594                 mapping->nrexceptional++;
595         } else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
596                 /*
597                  * Only swap our new entry into the radix tree if the current
598                  * entry is a zero page or an empty entry.  If a normal PTE or
599                  * PMD entry is already in the tree, we leave it alone.  This
600                  * means that if we are trying to insert a PTE and the
601                  * existing entry is a PMD, we will just leave the PMD in the
602                  * tree and dirty it if necessary.
603                  */
604                 struct radix_tree_node *node;
605                 void **slot;
606                 void *ret;
607
608                 ret = __radix_tree_lookup(page_tree, index, &node, &slot);
609                 WARN_ON_ONCE(ret != entry);
610                 __radix_tree_replace(page_tree, node, slot,
611                                      new_entry, NULL, NULL);
612         }
613         if (vmf->flags & FAULT_FLAG_WRITE)
614                 radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
615  unlock:
616         spin_unlock_irq(&mapping->tree_lock);
617         if (hole_fill) {
618                 radix_tree_preload_end();
619                 /*
620                  * We don't need the hole page anymore; it has been replaced
621                  * with a locked radix tree entry now.
622                  */
623                 if (mapping->a_ops->freepage)
624                         mapping->a_ops->freepage(entry);
625                 unlock_page(entry);
626                 put_page(entry);
627         }
628         return new_entry;
629 }
630
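/* Return the user virtual address at which @vma maps file offset @pgoff. */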
631 static inline unsigned long
632 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
633 {
634         unsigned long address;
635
636         address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
637         VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
638         return address;
639 }
640
641 /* Walk all mappings of a given index of a file and writeprotect them */
642 static void dax_mapping_entry_mkclean(struct address_space *mapping,
643                                       pgoff_t index, unsigned long pfn)
644 {
645         struct vm_area_struct *vma;
646         pte_t pte, *ptep = NULL;
647         pmd_t *pmdp = NULL;
648         spinlock_t *ptl;
649
650         i_mmap_lock_read(mapping);
651         vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
652                 unsigned long address, start, end;
653
654                 cond_resched();
655
656                 if (!(vma->vm_flags & VM_SHARED))
657                         continue;
658
659                 address = pgoff_address(index, vma);
660
661                 /*
662                  * Note that because we provide start/end to follow_pte_pmd()
663                  * it will call mmu_notifier_invalidate_range_start() on our
664                  * behalf before taking any lock.
665                  */
666                 if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
667                         continue;
668
669                 if (pmdp) {
670 #ifdef CONFIG_FS_DAX_PMD
671                         pmd_t pmd;
672
673                         if (pfn != pmd_pfn(*pmdp))
674                                 goto unlock_pmd;
675                         if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
676                                 goto unlock_pmd;
677
678                         flush_cache_page(vma, address, pfn);
679                         pmd = pmdp_huge_clear_flush(vma, address, pmdp);
680                         pmd = pmd_wrprotect(pmd);
681                         pmd = pmd_mkclean(pmd);
682                         set_pmd_at(vma->vm_mm, address, pmdp, pmd);
683                         mmu_notifier_invalidate_range(vma->vm_mm, start, end);
684 unlock_pmd:
685                         spin_unlock(ptl);
686 #endif
687                 } else {
688                         if (pfn != pte_pfn(*ptep))
689                                 goto unlock_pte;
690                         if (!pte_dirty(*ptep) && !pte_write(*ptep))
691                                 goto unlock_pte;
692
693                         flush_cache_page(vma, address, pfn);
694                         pte = ptep_clear_flush(vma, address, ptep);
695                         pte = pte_wrprotect(pte);
696                         pte = pte_mkclean(pte);
697                         set_pte_at(vma->vm_mm, address, ptep, pte);
698                         mmu_notifier_invalidate_range(vma->vm_mm, start, end);
699 unlock_pte:
700                         pte_unmap_unlock(ptep, ptl);
701                 }
702
703                 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
704         }
705         i_mmap_unlock_read(mapping);
706 }
707
708 static int dax_writeback_one(struct block_device *bdev,
709                 struct dax_device *dax_dev, struct address_space *mapping,
710                 pgoff_t index, void *entry)
711 {
712         struct radix_tree_root *page_tree = &mapping->page_tree;
713         void *entry2, **slot, *kaddr;
714         long ret = 0, id;
715         sector_t sector;
716         pgoff_t pgoff;
717         size_t size;
718         pfn_t pfn;
719
720         /*
721          * A page got tagged dirty in DAX mapping? Something is seriously
722          * wrong.
723          */
724         if (WARN_ON(!radix_tree_exceptional_entry(entry)))
725                 return -EIO;
726
727         spin_lock_irq(&mapping->tree_lock);
728         entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
729         /* Entry got punched out / reallocated? */
730         if (!entry2 || !radix_tree_exceptional_entry(entry2))
731                 goto put_unlocked;
732         /*
733          * Entry got reallocated elsewhere? No need to write it back. We have
734          * to compare sectors, as we must not bail out due to a difference in
735          * the lock bit or entry type.
736          */
737         if (dax_radix_sector(entry2) != dax_radix_sector(entry))
738                 goto put_unlocked;
739         if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
740                                 dax_is_zero_entry(entry))) {
741                 ret = -EIO;
742                 goto put_unlocked;
743         }
744
745         /* Another fsync thread may have already written back this entry */
746         if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
747                 goto put_unlocked;
748         /* Lock the entry to serialize with page faults */
749         entry = lock_slot(mapping, slot);
750         /*
751          * We can clear the tag now but we have to be careful so that concurrent
752          * dax_writeback_one() calls for the same index cannot finish before we
753          * actually flush the caches. This is achieved because the calls look
754          * at the entry only under tree_lock, and once they do that they will
755          * see the entry locked and wait for it to unlock.
756          */
757         radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
758         spin_unlock_irq(&mapping->tree_lock);
759
760         /*
761          * Even if dax_writeback_mapping_range() was given a wbc->range_start
762          * in the middle of a PMD, the 'index' we are given will be aligned to
763          * the start index of the PMD, as will the sector we pull from
764          * 'entry'.  This allows us to flush for PMD_SIZE and not have to
765          * worry about partial PMD writebacks.
766          */
767         sector = dax_radix_sector(entry);
768         size = PAGE_SIZE << dax_radix_order(entry);
769
770         id = dax_read_lock();
771         ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
772         if (ret)
773                 goto dax_unlock;
774
775         /*
776          * dax_direct_access() may sleep, so we cannot hold tree_lock over
777          * its invocation.
778          */
779         ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
780         if (ret < 0)
781                 goto dax_unlock;
782
783         if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
784                 ret = -EIO;
785                 goto dax_unlock;
786         }
787
788         dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
789         dax_flush(dax_dev, pgoff, kaddr, size);
790         /*
791          * After we have flushed the cache, we can clear the dirty tag. There
792          * cannot be new dirty data in the pfn after the flush has completed, as
793          * the pfn mappings are write-protected and the fault path waits for the
794          * mapping entry lock.
795          */
796         spin_lock_irq(&mapping->tree_lock);
797         radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
798         spin_unlock_irq(&mapping->tree_lock);
799         trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
800  dax_unlock:
801         dax_read_unlock(id);
802         put_locked_mapping_entry(mapping, index, entry);
803         return ret;
804
805  put_unlocked:
806         put_unlocked_mapping_entry(mapping, index, entry2);
807         spin_unlock_irq(&mapping->tree_lock);
808         return ret;
809 }
810
811 /*
812  * Flush the mapping to the persistent domain within the byte range of [start,
813  * end]. This is required by data integrity operations to ensure file data is
814  * on persistent storage prior to completion of the operation.
815  */
816 int dax_writeback_mapping_range(struct address_space *mapping,
817                 struct block_device *bdev, struct writeback_control *wbc)
818 {
819         struct inode *inode = mapping->host;
820         pgoff_t start_index, end_index;
821         pgoff_t indices[PAGEVEC_SIZE];
822         struct dax_device *dax_dev;
823         struct pagevec pvec;
824         bool done = false;
825         int i, ret = 0;
826
827         if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
828                 return -EIO;
829
830         if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
831                 return 0;
832
833         dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
834         if (!dax_dev)
835                 return -EIO;
836
837         start_index = wbc->range_start >> PAGE_SHIFT;
838         end_index = wbc->range_end >> PAGE_SHIFT;
839
840         trace_dax_writeback_range(inode, start_index, end_index);
841
842         tag_pages_for_writeback(mapping, start_index, end_index);
843
844         pagevec_init(&pvec, 0);
845         while (!done) {
846                 pvec.nr = find_get_entries_tag(mapping, start_index,
847                                 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
848                                 pvec.pages, indices);
849
850                 if (pvec.nr == 0)
851                         break;
852
853                 for (i = 0; i < pvec.nr; i++) {
854                         if (indices[i] > end_index) {
855                                 done = true;
856                                 break;
857                         }
858
859                         ret = dax_writeback_one(bdev, dax_dev, mapping,
860                                         indices[i], pvec.pages[i]);
861                         if (ret < 0) {
862                                 mapping_set_error(mapping, ret);
863                                 goto out;
864                         }
865                 }
866                 start_index = indices[pvec.nr - 1] + 1;
867         }
868 out:
869         put_dax(dax_dev);
870         trace_dax_writeback_range_done(inode, start_index, end_index);
871         return (ret < 0 ? ret : 0);
872 }
873 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
874
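/*
 * Insert a DAX radix tree entry for @sector and map the corresponding pfn
 * into the faulting task's page tables.
 */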
875 static int dax_insert_mapping(struct address_space *mapping,
876                 struct block_device *bdev, struct dax_device *dax_dev,
877                 sector_t sector, size_t size, void **entryp,
878                 struct vm_area_struct *vma, struct vm_fault *vmf)
879 {
880         unsigned long vaddr = vmf->address;
881         void *entry = *entryp;
882         void *ret, *kaddr;
883         pgoff_t pgoff;
884         int id, rc;
885         pfn_t pfn;
886
887         rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
888         if (rc)
889                 return rc;
890
891         id = dax_read_lock();
892         rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
893         if (rc < 0) {
894                 dax_read_unlock(id);
895                 return rc;
896         }
897         dax_read_unlock(id);
898
899         ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
900         if (IS_ERR(ret))
901                 return PTR_ERR(ret);
902         *entryp = ret;
903
904         trace_dax_insert_mapping(mapping->host, vmf, ret);
905         return vm_insert_mixed(vma, vaddr, pfn);
906 }
907
908 /**
909  * dax_pfn_mkwrite - handle first write to DAX page
910  * @vmf: The description of the fault
911  */
912 int dax_pfn_mkwrite(struct vm_fault *vmf)
913 {
914         struct file *file = vmf->vma->vm_file;
915         struct address_space *mapping = file->f_mapping;
916         struct inode *inode = mapping->host;
917         void *entry, **slot;
918         pgoff_t index = vmf->pgoff;
919
920         spin_lock_irq(&mapping->tree_lock);
921         entry = get_unlocked_mapping_entry(mapping, index, &slot);
922         if (!entry || !radix_tree_exceptional_entry(entry)) {
923                 if (entry)
924                         put_unlocked_mapping_entry(mapping, index, entry);
925                 spin_unlock_irq(&mapping->tree_lock);
926                 trace_dax_pfn_mkwrite_no_entry(inode, vmf, VM_FAULT_NOPAGE);
927                 return VM_FAULT_NOPAGE;
928         }
929         radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
930         entry = lock_slot(mapping, slot);
931         spin_unlock_irq(&mapping->tree_lock);
932         /*
933          * If we race with somebody updating the PTE and finish_mkwrite_fault()
934          * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
935          * the fault in either case.
936          */
937         finish_mkwrite_fault(vmf);
938         put_locked_mapping_entry(mapping, index, entry);
939         trace_dax_pfn_mkwrite(inode, vmf, VM_FAULT_NOPAGE);
940         return VM_FAULT_NOPAGE;
941 }
942 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
943
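/*
 * Check whether @offset and @length are both aligned to the device's logical
 * block size, so the range can be zeroed with blkdev_issue_zeroout().
 */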
944 static bool dax_range_is_aligned(struct block_device *bdev,
945                                  unsigned int offset, unsigned int length)
946 {
947         unsigned short sector_size = bdev_logical_block_size(bdev);
948
949         if (!IS_ALIGNED(offset, sector_size))
950                 return false;
951         if (!IS_ALIGNED(length, sector_size))
952                 return false;
953
954         return true;
955 }
956
957 int __dax_zero_page_range(struct block_device *bdev,
958                 struct dax_device *dax_dev, sector_t sector,
959                 unsigned int offset, unsigned int size)
960 {
961         if (dax_range_is_aligned(bdev, offset, size)) {
962                 sector_t start_sector = sector + (offset >> 9);
963
964                 return blkdev_issue_zeroout(bdev, start_sector,
965                                 size >> 9, GFP_NOFS, 0);
966         } else {
967                 pgoff_t pgoff;
968                 long rc, id;
969                 void *kaddr;
970                 pfn_t pfn;
971
972                 rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
973                 if (rc)
974                         return rc;
975
976                 id = dax_read_lock();
977                 rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
978                                 &pfn);
979                 if (rc < 0) {
980                         dax_read_unlock(id);
981                         return rc;
982                 }
983                 memset(kaddr + offset, 0, size);
984                 dax_flush(dax_dev, pgoff, kaddr + offset, size);
985                 dax_read_unlock(id);
986         }
987         return 0;
988 }
989 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
990
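/* Convert a file position within @iomap to a 512-byte sector on the device. */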
991 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
992 {
993         return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
994 }
995
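/*
 * The iomap actor used by dax_iomap_rw(): copy data between @iter and
 * persistent memory for a single extent.
 */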
996 static loff_t
997 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
998                 struct iomap *iomap)
999 {
1000         struct block_device *bdev = iomap->bdev;
1001         struct dax_device *dax_dev = iomap->dax_dev;
1002         struct iov_iter *iter = data;
1003         loff_t end = pos + length, done = 0;
1004         ssize_t ret = 0;
1005         int id;
1006
1007         if (iov_iter_rw(iter) == READ) {
1008                 end = min(end, i_size_read(inode));
1009                 if (pos >= end)
1010                         return 0;
1011
1012                 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1013                         return iov_iter_zero(min(length, end - pos), iter);
1014         }
1015
1016         if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1017                 return -EIO;
1018
1019         /*
1020          * A write can allocate a block for an area which has a hole page
1021          * mapped into the page tables. We have to tear down these mappings so
1022          * that data written by write(2) is visible via mmap.
1023          */
1024         if (iomap->flags & IOMAP_F_NEW) {
1025                 invalidate_inode_pages2_range(inode->i_mapping,
1026                                               pos >> PAGE_SHIFT,
1027                                               (end - 1) >> PAGE_SHIFT);
1028         }
1029
1030         id = dax_read_lock();
1031         while (pos < end) {
1032                 unsigned offset = pos & (PAGE_SIZE - 1);
1033                 const size_t size = ALIGN(length + offset, PAGE_SIZE);
1034                 const sector_t sector = dax_iomap_sector(iomap, pos);
1035                 ssize_t map_len;
1036                 pgoff_t pgoff;
1037                 void *kaddr;
1038                 pfn_t pfn;
1039
1040                 if (fatal_signal_pending(current)) {
1041                         ret = -EINTR;
1042                         break;
1043                 }
1044
1045                 ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1046                 if (ret)
1047                         break;
1048
1049                 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1050                                 &kaddr, &pfn);
1051                 if (map_len < 0) {
1052                         ret = map_len;
1053                         break;
1054                 }
1055
1056                 map_len = PFN_PHYS(map_len);
1057                 kaddr += offset;
1058                 map_len -= offset;
1059                 if (map_len > end - pos)
1060                         map_len = end - pos;
1061
1062                 if (iov_iter_rw(iter) == WRITE)
1063                         map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1064                                         map_len, iter);
1065                 else
1066                         map_len = copy_to_iter(kaddr, map_len, iter);
1067                 if (map_len <= 0) {
1068                         ret = map_len ? map_len : -EFAULT;
1069                         break;
1070                 }
1071
1072                 pos += map_len;
1073                 length -= map_len;
1074                 done += map_len;
1075         }
1076         dax_read_unlock(id);
1077
1078         return done ? done : ret;
1079 }
1080
1081 /**
1082  * dax_iomap_rw - Perform I/O to a DAX file
1083  * @iocb:       The control block for this I/O
1084  * @iter:       The addresses to do I/O from or to
1085  * @ops:        iomap ops passed from the file system
1086  *
1087  * This function performs read and write operations to directly mapped
1088  * persistent memory.  The caller needs to take care of read/write exclusion
1089  * and evicting any page cache pages in the region under I/O.
1090  */
1091 ssize_t
1092 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1093                 const struct iomap_ops *ops)
1094 {
1095         struct address_space *mapping = iocb->ki_filp->f_mapping;
1096         struct inode *inode = mapping->host;
1097         loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1098         unsigned flags = 0;
1099
1100         if (iov_iter_rw(iter) == WRITE) {
1101                 lockdep_assert_held_exclusive(&inode->i_rwsem);
1102                 flags |= IOMAP_WRITE;
1103         } else {
1104                 lockdep_assert_held(&inode->i_rwsem);
1105         }
1106
1107         while (iov_iter_count(iter)) {
1108                 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1109                                 iter, dax_iomap_actor);
1110                 if (ret <= 0)
1111                         break;
1112                 pos += ret;
1113                 done += ret;
1114         }
1115
1116         iocb->ki_pos += done;
1117         return done ? done : ret;
1118 }
1119 EXPORT_SYMBOL_GPL(dax_iomap_rw);
1120
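/* Translate a negative errno (or 0) from the fault path into a VM_FAULT_* code. */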
1121 static int dax_fault_return(int error)
1122 {
1123         if (error == 0)
1124                 return VM_FAULT_NOPAGE;
1125         if (error == -ENOMEM)
1126                 return VM_FAULT_OOM;
1127         return VM_FAULT_SIGBUS;
1128 }
1129
1130 static int dax_iomap_pte_fault(struct vm_fault *vmf,
1131                                const struct iomap_ops *ops)
1132 {
1133         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1134         struct inode *inode = mapping->host;
1135         unsigned long vaddr = vmf->address;
1136         loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1137         sector_t sector;
1138         struct iomap iomap = { 0 };
1139         unsigned flags = IOMAP_FAULT;
1140         int error, major = 0;
1141         int vmf_ret = 0;
1142         void *entry;
1143
1144         trace_dax_pte_fault(inode, vmf, vmf_ret);
1145         /*
1146          * Check whether the offset isn't beyond the end of the file now. The
1147          * caller is supposed to hold locks serializing us with truncate /
1148          * punch hole, so this is a reliable test.
1149          */
1150         if (pos >= i_size_read(inode)) {
1151                 vmf_ret = VM_FAULT_SIGBUS;
1152                 goto out;
1153         }
1154
1155         if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1156                 flags |= IOMAP_WRITE;
1157
1158         entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1159         if (IS_ERR(entry)) {
1160                 vmf_ret = dax_fault_return(PTR_ERR(entry));
1161                 goto out;
1162         }
1163
1164         /*
1165          * It is possible, particularly with mixed reads & writes to private
1166          * mappings, that we have raced with a PMD fault that overlaps with
1167          * the PTE we need to set up.  If so just return and the fault will be
1168          * retried.
1169          */
1170         if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1171                 vmf_ret = VM_FAULT_NOPAGE;
1172                 goto unlock_entry;
1173         }
1174
1175         /*
1176          * Note that we don't bother to use iomap_apply here: DAX requires
1177          * the file system block size to be equal to the page size, which means
1178          * that we never have to deal with more than a single extent here.
1179          */
1180         error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1181         if (error) {
1182                 vmf_ret = dax_fault_return(error);
1183                 goto unlock_entry;
1184         }
1185         if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1186                 error = -EIO;   /* fs corruption? */
1187                 goto error_finish_iomap;
1188         }
1189
1190         sector = dax_iomap_sector(&iomap, pos);
1191
1192         if (vmf->cow_page) {
1193                 switch (iomap.type) {
1194                 case IOMAP_HOLE:
1195                 case IOMAP_UNWRITTEN:
1196                         clear_user_highpage(vmf->cow_page, vaddr);
1197                         break;
1198                 case IOMAP_MAPPED:
1199                         error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1200                                         sector, PAGE_SIZE, vmf->cow_page, vaddr);
1201                         break;
1202                 default:
1203                         WARN_ON_ONCE(1);
1204                         error = -EIO;
1205                         break;
1206                 }
1207
1208                 if (error)
1209                         goto error_finish_iomap;
1210
1211                 __SetPageUptodate(vmf->cow_page);
1212                 vmf_ret = finish_fault(vmf);
1213                 if (!vmf_ret)
1214                         vmf_ret = VM_FAULT_DONE_COW;
1215                 goto finish_iomap;
1216         }
1217
1218         switch (iomap.type) {
1219         case IOMAP_MAPPED:
1220                 if (iomap.flags & IOMAP_F_NEW) {
1221                         count_vm_event(PGMAJFAULT);
1222                         count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1223                         major = VM_FAULT_MAJOR;
1224                 }
1225                 error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
1226                                 sector, PAGE_SIZE, &entry, vmf->vma, vmf);
1227                 /* -EBUSY is fine, somebody else faulted on the same PTE */
1228                 if (error == -EBUSY)
1229                         error = 0;
1230                 break;
1231         case IOMAP_UNWRITTEN:
1232         case IOMAP_HOLE:
1233                 if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1234                         vmf_ret = dax_load_hole(mapping, &entry, vmf);
1235                         goto finish_iomap;
1236                 }
1237                 /*FALLTHRU*/
1238         default:
1239                 WARN_ON_ONCE(1);
1240                 error = -EIO;
1241                 break;
1242         }
1243
1244  error_finish_iomap:
1245         vmf_ret = dax_fault_return(error) | major;
1246  finish_iomap:
1247         if (ops->iomap_end) {
1248                 int copied = PAGE_SIZE;
1249
1250                 if (vmf_ret & VM_FAULT_ERROR)
1251                         copied = 0;
1252                 /*
1253                  * The fault is done by now and there's no way back (another
1254                  * thread may already be happily using the PTE we have installed).
1255                  * Just ignore error from ->iomap_end since we cannot do much
1256                  * with it.
1257                  */
1258                 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1259         }
1260  unlock_entry:
1261         put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1262  out:
1263         trace_dax_pte_fault_done(inode, vmf, vmf_ret);
1264         return vmf_ret;
1265 }
1266
1267 #ifdef CONFIG_FS_DAX_PMD
1268 /*
1269  * The 'colour' (i.e. the low bits) of a page offset within a PMD.  This comes up
1270  * more often than one might expect in the below functions.
1271  */
1272 #define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)
1273
1274 static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
1275                 loff_t pos, void **entryp)
1276 {
1277         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1278         const sector_t sector = dax_iomap_sector(iomap, pos);
1279         struct dax_device *dax_dev = iomap->dax_dev;
1280         struct block_device *bdev = iomap->bdev;
1281         struct inode *inode = mapping->host;
1282         const size_t size = PMD_SIZE;
1283         void *ret = NULL, *kaddr;
1284         long length = 0;
1285         pgoff_t pgoff;
1286         pfn_t pfn;
1287         int id;
1288
1289         if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
1290                 goto fallback;
1291
1292         id = dax_read_lock();
1293         length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
1294         if (length < 0)
1295                 goto unlock_fallback;
1296         length = PFN_PHYS(length);
1297
1298         if (length < size)
1299                 goto unlock_fallback;
1300         if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
1301                 goto unlock_fallback;
1302         if (!pfn_t_devmap(pfn))
1303                 goto unlock_fallback;
1304         dax_read_unlock(id);
1305
1306         ret = dax_insert_mapping_entry(mapping, vmf, *entryp, sector,
1307                         RADIX_DAX_PMD);
1308         if (IS_ERR(ret))
1309                 goto fallback;
1310         *entryp = ret;
1311
1312         trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
1313         return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1314                         pfn, vmf->flags & FAULT_FLAG_WRITE);
1315
1316 unlock_fallback:
1317         dax_read_unlock(id);
1318 fallback:
1319         trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
1320         return VM_FAULT_FALLBACK;
1321 }
1322
1323 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1324                 void **entryp)
1325 {
1326         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1327         unsigned long pmd_addr = vmf->address & PMD_MASK;
1328         struct inode *inode = mapping->host;
1329         struct page *zero_page;
1330         void *ret = NULL;
1331         spinlock_t *ptl;
1332         pmd_t pmd_entry;
1333
1334         zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1335
1336         if (unlikely(!zero_page))
1337                 goto fallback;
1338
1339         ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
1340                         RADIX_DAX_PMD | RADIX_DAX_HZP);
1341         if (IS_ERR(ret))
1342                 goto fallback;
1343         *entryp = ret;
1344
1345         ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1346         if (!pmd_none(*(vmf->pmd))) {
1347                 spin_unlock(ptl);
1348                 goto fallback;
1349         }
1350
1351         pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1352         pmd_entry = pmd_mkhuge(pmd_entry);
1353         set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1354         spin_unlock(ptl);
1355         trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1356         return VM_FAULT_NOPAGE;
1357
1358 fallback:
1359         trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1360         return VM_FAULT_FALLBACK;
1361 }
1362
1363 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1364                                const struct iomap_ops *ops)
1365 {
1366         struct vm_area_struct *vma = vmf->vma;
1367         struct address_space *mapping = vma->vm_file->f_mapping;
1368         unsigned long pmd_addr = vmf->address & PMD_MASK;
1369         bool write = vmf->flags & FAULT_FLAG_WRITE;
1370         unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1371         struct inode *inode = mapping->host;
1372         int result = VM_FAULT_FALLBACK;
1373         struct iomap iomap = { 0 };
1374         pgoff_t max_pgoff, pgoff;
1375         void *entry;
1376         loff_t pos;
1377         int error;
1378
1379         /*
1380          * Check whether the offset isn't beyond the end of the file now. The
1381          * caller is supposed to hold locks serializing us with truncate /
1382          * punch hole, so this is a reliable test.
1383          */
1384         pgoff = linear_page_index(vma, pmd_addr);
1385         max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
1386
1387         trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1388
1389         /*
1390          * Make sure that the faulting address's PMD offset (color) matches
1391          * the PMD offset from the start of the file.  This is necessary so
1392          * that a PMD range in the page table overlaps exactly with a PMD
1393          * range in the radix tree.
1394          */
1395         if ((vmf->pgoff & PG_PMD_COLOUR) !=
1396             ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1397                 goto fallback;
1398
1399         /* Fall back to PTEs if we're going to COW */
1400         if (write && !(vma->vm_flags & VM_SHARED))
1401                 goto fallback;
1402
1403         /* If the PMD would extend outside the VMA */
1404         if (pmd_addr < vma->vm_start)
1405                 goto fallback;
1406         if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1407                 goto fallback;
1408
1409         if (pgoff > max_pgoff) {
1410                 result = VM_FAULT_SIGBUS;
1411                 goto out;
1412         }
1413
1414         /* If the PMD would extend beyond the file size */
1415         if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
1416                 goto fallback;
1417
1418         /*
1419          * grab_mapping_entry() will make sure we get a 2MiB empty entry, a DAX
1420          * PMD or a HZP entry.  If it can't (because a 4k page is already in
1421          * the tree, for instance), it will return -EEXIST and we just fall
1422          * back to 4k entries.
1423          */
1424         entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1425         if (IS_ERR(entry))
1426                 goto fallback;
1427
1428         /*
1429          * It is possible, particularly with mixed reads & writes to private
1430          * mappings, that we have raced with a PTE fault that overlaps with
1431          * the PMD we need to set up.  If so just return and the fault will be
1432          * retried.
1433          */
1434         if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1435                         !pmd_devmap(*vmf->pmd)) {
1436                 result = 0;
1437                 goto unlock_entry;
1438         }
1439
1440         /*
1441          * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1442          * setting up a mapping, so really we're using iomap_begin() as a way
1443          * to look up our filesystem block.
1444          */
1445         pos = (loff_t)pgoff << PAGE_SHIFT;
1446         error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1447         if (error)
1448                 goto unlock_entry;
1449
1450         if (iomap.offset + iomap.length < pos + PMD_SIZE)
1451                 goto finish_iomap;
1452
1453         switch (iomap.type) {
1454         case IOMAP_MAPPED:
1455                 result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
1456                 break;
1457         case IOMAP_UNWRITTEN:
1458         case IOMAP_HOLE:
1459                 if (WARN_ON_ONCE(write))
1460                         break;
1461                 result = dax_pmd_load_hole(vmf, &iomap, &entry);
1462                 break;
1463         default:
1464                 WARN_ON_ONCE(1);
1465                 break;
1466         }
1467
1468  finish_iomap:
1469         if (ops->iomap_end) {
1470                 int copied = PMD_SIZE;
1471
1472                 if (result == VM_FAULT_FALLBACK)
1473                         copied = 0;
1474                 /*
1475                  * The fault is done by now and there's no way back (another
1476                  * thread may already be happily using the PMD we have installed).
1477                  * Just ignore error from ->iomap_end since we cannot do much
1478                  * with it.
1479                  */
1480                 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1481                                 &iomap);
1482         }
1483  unlock_entry:
1484         put_locked_mapping_entry(mapping, pgoff, entry);
1485  fallback:
1486         if (result == VM_FAULT_FALLBACK) {
1487                 split_huge_pmd(vma, vmf->pmd, vmf->address);
1488                 count_vm_event(THP_FAULT_FALLBACK);
1489         }
1490 out:
1491         trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1492         return result;
1493 }
1494 #else
1495 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1496                                const struct iomap_ops *ops)
1497 {
1498         return VM_FAULT_FALLBACK;
1499 }
1500 #endif /* CONFIG_FS_DAX_PMD */
1501
1502 /**
1503  * dax_iomap_fault - handle a page fault on a DAX file
1504  * @vmf: The description of the fault
1505  * @ops: iomap ops passed from the file system
1506  *
1507  * When a page fault occurs, filesystems may call this helper in
1508  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1509  * has done all the necessary locking for the page fault to proceed
1510  * successfully.
1511  */
1512 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1513                     const struct iomap_ops *ops)
1514 {
1515         switch (pe_size) {
1516         case PE_SIZE_PTE:
1517                 return dax_iomap_pte_fault(vmf, ops);
1518         case PE_SIZE_PMD:
1519                 return dax_iomap_pmd_fault(vmf, ops);
1520         default:
1521                 return VM_FAULT_FALLBACK;
1522         }
1523 }
1524 EXPORT_SYMBOL_GPL(dax_iomap_fault);