1 /*
2  * fs/dax.c - Direct Access filesystem code
3  * Copyright (c) 2013-2014 Intel Corporation
4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  */
16
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/pmem.h>
29 #include <linux/sched.h>
30 #include <linux/sched/signal.h>
31 #include <linux/uio.h>
32 #include <linux/vmstat.h>
33 #include <linux/pfn_t.h>
34 #include <linux/sizes.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/iomap.h>
37 #include "internal.h"
38
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/fs_dax.h>
41
42 /* We choose 4096 entries - same as per-zone page wait tables */
43 #define DAX_WAIT_TABLE_BITS 12
44 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
45
46 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
47
48 static int __init init_dax_wait_table(void)
49 {
50         int i;
51
52         for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
53                 init_waitqueue_head(wait_table + i);
54         return 0;
55 }
56 fs_initcall(init_dax_wait_table);
57
58 static int dax_is_pmd_entry(void *entry)
59 {
60         return (unsigned long)entry & RADIX_DAX_PMD;
61 }
62
63 static int dax_is_pte_entry(void *entry)
64 {
65         return !((unsigned long)entry & RADIX_DAX_PMD);
66 }
67
68 static int dax_is_zero_entry(void *entry)
69 {
70         return (unsigned long)entry & RADIX_DAX_HZP;
71 }
72
73 static int dax_is_empty_entry(void *entry)
74 {
75         return (unsigned long)entry & RADIX_DAX_EMPTY;
76 }
77
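/*
 * A short note on the entries tested above (a sketch; the authoritative
 * bit layout lives in include/linux/dax.h): DAX stores "exceptional"
 * radix tree entries that pack a sector number together with the
 * RADIX_DAX_PMD/HZP/EMPTY flag bits plus RADIX_DAX_ENTRY_LOCK, which this
 * file uses as a per-entry bit lock.  The existing helpers round-trip an
 * entry roughly like this:
 *
 *	entry  = dax_radix_locked_entry(sector, RADIX_DAX_PMD);
 *	...
 *	sector = dax_radix_sector(entry);	- the sector packed above
 *	order  = dax_radix_order(entry);	- PMD_SHIFT - PAGE_SHIFT here
 */
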
78 /*
79  * DAX radix tree locking
80  */
81 struct exceptional_entry_key {
82         struct address_space *mapping;
83         pgoff_t entry_start;
84 };
85
86 struct wait_exceptional_entry_queue {
87         wait_queue_t wait;
88         struct exceptional_entry_key key;
89 };
90
91 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
92                 pgoff_t index, void *entry, struct exceptional_entry_key *key)
93 {
94         unsigned long hash;
95
96         /*
97          * If 'entry' is a PMD, align the 'index' that we use for the wait
98          * queue to the start of that PMD.  This ensures that all offsets in
99          * the range covered by the PMD map to the same bit lock.
100          */
101         if (dax_is_pmd_entry(entry))
102                 index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
103
104         key->mapping = mapping;
105         key->entry_start = index;
106
107         hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
108         return wait_table + hash;
109 }
110
111 static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
112                                        int sync, void *keyp)
113 {
114         struct exceptional_entry_key *key = keyp;
115         struct wait_exceptional_entry_queue *ewait =
116                 container_of(wait, struct wait_exceptional_entry_queue, wait);
117
118         if (key->mapping != ewait->key.mapping ||
119             key->entry_start != ewait->key.entry_start)
120                 return 0;
121         return autoremove_wake_function(wait, mode, sync, NULL);
122 }
123
124 /*
125  * Check whether the given slot is locked. The function must be called with
126  * mapping->tree_lock held
127  */
128 static inline int slot_locked(struct address_space *mapping, void **slot)
129 {
130         unsigned long entry = (unsigned long)
131                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
132         return entry & RADIX_DAX_ENTRY_LOCK;
133 }
134
135 /*
136  * Mark the given slot as locked. The function must be called with
137  * mapping->tree_lock held
138  */
139 static inline void *lock_slot(struct address_space *mapping, void **slot)
140 {
141         unsigned long entry = (unsigned long)
142                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
143
144         entry |= RADIX_DAX_ENTRY_LOCK;
145         radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
146         return (void *)entry;
147 }
148
149 /*
150  * Mark the given slot as unlocked. The function must be called with
151  * mapping->tree_lock held
152  */
153 static inline void *unlock_slot(struct address_space *mapping, void **slot)
154 {
155         unsigned long entry = (unsigned long)
156                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
157
158         entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
159         radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
160         return (void *)entry;
161 }
162
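/*
 * The locking pattern the helpers above support, as used by the fault and
 * writeback paths later in this file (a summary sketch, not a new API):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	...inspect 'entry' and decide whether to take it...
 *	entry = lock_slot(mapping, slot);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	...sleepable work against the locked entry...
 *	put_locked_mapping_entry(mapping, index, entry);
 */
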
163 /*
164  * Look up an entry in the radix tree and, if it is a locked exceptional
165  * entry, wait for it to become unlocked, then return it. The caller must
166  * call put_unlocked_mapping_entry() if it decides not to lock the entry,
167  * or put_locked_mapping_entry() once it has locked the entry and wants
168  * to unlock it again.
169  *
170  * The function must be called with mapping->tree_lock held.
171  */
172 static void *get_unlocked_mapping_entry(struct address_space *mapping,
173                                         pgoff_t index, void ***slotp)
174 {
175         void *entry, **slot;
176         struct wait_exceptional_entry_queue ewait;
177         wait_queue_head_t *wq;
178
179         init_wait(&ewait.wait);
180         ewait.wait.func = wake_exceptional_entry_func;
181
182         for (;;) {
183                 entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
184                                           &slot);
185                 if (!entry || !radix_tree_exceptional_entry(entry) ||
186                     !slot_locked(mapping, slot)) {
187                         if (slotp)
188                                 *slotp = slot;
189                         return entry;
190                 }
191
192                 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
193                 prepare_to_wait_exclusive(wq, &ewait.wait,
194                                           TASK_UNINTERRUPTIBLE);
195                 spin_unlock_irq(&mapping->tree_lock);
196                 schedule();
197                 finish_wait(wq, &ewait.wait);
198                 spin_lock_irq(&mapping->tree_lock);
199         }
200 }
201
202 static void dax_unlock_mapping_entry(struct address_space *mapping,
203                                      pgoff_t index)
204 {
205         void *entry, **slot;
206
207         spin_lock_irq(&mapping->tree_lock);
208         entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
209         if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
210                          !slot_locked(mapping, slot))) {
211                 spin_unlock_irq(&mapping->tree_lock);
212                 return;
213         }
214         unlock_slot(mapping, slot);
215         spin_unlock_irq(&mapping->tree_lock);
216         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
217 }
218
219 static void put_locked_mapping_entry(struct address_space *mapping,
220                                      pgoff_t index, void *entry)
221 {
222         if (!radix_tree_exceptional_entry(entry)) {
223                 unlock_page(entry);
224                 put_page(entry);
225         } else {
226                 dax_unlock_mapping_entry(mapping, index);
227         }
228 }
229
230 /*
231  * Called when we are done with a radix tree entry we looked up via
232  * get_unlocked_mapping_entry() and which we didn't lock in the end.
233  */
234 static void put_unlocked_mapping_entry(struct address_space *mapping,
235                                        pgoff_t index, void *entry)
236 {
237         if (!radix_tree_exceptional_entry(entry))
238                 return;
239
240         /* We have to wake up next waiter for the radix tree entry lock */
241         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
242 }
243
244 /*
245  * Find the radix tree entry at the given index. If it points to a page,
246  * return with the page locked. If it points to an exceptional entry, return
247  * with the radix tree entry locked. If the radix tree doesn't contain the
248  * given index, create an empty exceptional entry and return with it locked.
249  *
250  * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
251  * either return that locked entry or will return an error.  This error will
252  * happen if there are any 4k entries (either zero pages or DAX entries)
253  * within the 2MiB range that we are requesting.
254  *
255  * We always favor 4k entries over 2MiB entries. There isn't a flow where we
256  * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
257  * insertion will fail if it finds any 4k entries already in the tree, and a
258  * 4k insertion will cause an existing 2MiB entry to be unmapped and
259  * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
260  * well as 2MiB empty entries.
261  *
262  * The exception to this downgrade path is for 2MiB DAX PMD entries that have
263  * real storage backing them.  We will leave these real 2MiB DAX entries in
264  * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
265  *
266  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
267  * persistent memory the benefit is doubtful. We can add that later if we can
268  * show it helps.
269  */
270 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
271                 unsigned long size_flag)
272 {
273         bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
274         void *entry, **slot;
275
276 restart:
277         spin_lock_irq(&mapping->tree_lock);
278         entry = get_unlocked_mapping_entry(mapping, index, &slot);
279
280         if (entry) {
281                 if (size_flag & RADIX_DAX_PMD) {
282                         if (!radix_tree_exceptional_entry(entry) ||
283                             dax_is_pte_entry(entry)) {
284                                 put_unlocked_mapping_entry(mapping, index,
285                                                 entry);
286                                 entry = ERR_PTR(-EEXIST);
287                                 goto out_unlock;
288                         }
289                 } else { /* trying to grab a PTE entry */
290                         if (radix_tree_exceptional_entry(entry) &&
291                             dax_is_pmd_entry(entry) &&
292                             (dax_is_zero_entry(entry) ||
293                              dax_is_empty_entry(entry))) {
294                                 pmd_downgrade = true;
295                         }
296                 }
297         }
298
299         /* No entry for given index? Make sure radix tree is big enough. */
300         if (!entry || pmd_downgrade) {
301                 int err;
302
303                 if (pmd_downgrade) {
304                         /*
305                          * Make sure 'entry' remains valid while we drop
306                          * mapping->tree_lock.
307                          */
308                         entry = lock_slot(mapping, slot);
309                 }
310
311                 spin_unlock_irq(&mapping->tree_lock);
312                 /*
313                  * Besides huge zero pages the only other thing that gets
314                  * downgraded are empty entries which don't need to be
315                  * unmapped.
316                  */
317                 if (pmd_downgrade && dax_is_zero_entry(entry))
318                         unmap_mapping_range(mapping,
319                                 (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
320
321                 err = radix_tree_preload(
322                                 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
323                 if (err) {
324                         if (pmd_downgrade)
325                                 put_locked_mapping_entry(mapping, index, entry);
326                         return ERR_PTR(err);
327                 }
328                 spin_lock_irq(&mapping->tree_lock);
329
330                 if (pmd_downgrade) {
331                         radix_tree_delete(&mapping->page_tree, index);
332                         mapping->nrexceptional--;
333                         dax_wake_mapping_entry_waiter(mapping, index, entry,
334                                         true);
335                 }
336
337                 entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
338
339                 err = __radix_tree_insert(&mapping->page_tree, index,
340                                 dax_radix_order(entry), entry);
341                 radix_tree_preload_end();
342                 if (err) {
343                         spin_unlock_irq(&mapping->tree_lock);
344                         /*
345                          * Someone already created the entry?  This is a
346                          * normal failure when inserting PMDs in a range
347                          * that already contains PTEs.  In that case we want
348                          * to return -EEXIST immediately.
349                          */
350                         if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
351                                 goto restart;
352                         /*
353                          * Our insertion of a DAX PMD entry failed, most
354                          * likely because it collided with a PTE sized entry
355                          * at a different index in the PMD range.  We haven't
356                          * inserted anything into the radix tree and have no
357                          * waiters to wake.
358                          */
359                         return ERR_PTR(err);
360                 }
361                 /* Good, we have inserted empty locked entry into the tree. */
362                 mapping->nrexceptional++;
363                 spin_unlock_irq(&mapping->tree_lock);
364                 return entry;
365         }
366         /* Normal page in radix tree? */
367         if (!radix_tree_exceptional_entry(entry)) {
368                 struct page *page = entry;
369
370                 get_page(page);
371                 spin_unlock_irq(&mapping->tree_lock);
372                 lock_page(page);
373                 /* Page got truncated? Retry... */
374                 if (unlikely(page->mapping != mapping)) {
375                         unlock_page(page);
376                         put_page(page);
377                         goto restart;
378                 }
379                 return page;
380         }
381         entry = lock_slot(mapping, slot);
382  out_unlock:
383         spin_unlock_irq(&mapping->tree_lock);
384         return entry;
385 }
386
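/*
 * How the fault handlers below consume grab_mapping_entry() (a sketch of
 * the existing flow, shown for orientation): a PMD fault asks for a 2MiB
 * entry and simply falls back to PTEs when one cannot be provided.
 *
 *	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
 *	if (IS_ERR(entry))
 *		return VM_FAULT_FALLBACK;	- e.g. -EEXIST: PTEs in range
 *	...install the block or zero page mapping...
 *	put_locked_mapping_entry(mapping, pgoff, entry);
 */
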
387 /*
388  * We do not necessarily hold the mapping->tree_lock when we call this
389  * function so it is possible that 'entry' is no longer a valid item in the
390  * radix tree.  This is okay because all we really need to do is to find the
391  * correct waitqueue where tasks might be waiting for that old 'entry' and
392  * wake them.
393  */
394 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
395                 pgoff_t index, void *entry, bool wake_all)
396 {
397         struct exceptional_entry_key key;
398         wait_queue_head_t *wq;
399
400         wq = dax_entry_waitqueue(mapping, index, entry, &key);
401
402         /*
403          * Checking for locked entry and prepare_to_wait_exclusive() happens
404          * under mapping->tree_lock, ditto for entry handling in our callers.
405          * So at this point all tasks that could have seen our entry locked
406          * must be in the waitqueue and the following check will see them.
407          */
408         if (waitqueue_active(wq))
409                 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
410 }
411
412 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
413                                           pgoff_t index, bool trunc)
414 {
415         int ret = 0;
416         void *entry;
417         struct radix_tree_root *page_tree = &mapping->page_tree;
418
419         spin_lock_irq(&mapping->tree_lock);
420         entry = get_unlocked_mapping_entry(mapping, index, NULL);
421         if (!entry || !radix_tree_exceptional_entry(entry))
422                 goto out;
423         if (!trunc &&
424             (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
425              radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
426                 goto out;
427         radix_tree_delete(page_tree, index);
428         mapping->nrexceptional--;
429         ret = 1;
430 out:
431         put_unlocked_mapping_entry(mapping, index, entry);
432         spin_unlock_irq(&mapping->tree_lock);
433         return ret;
434 }
435 /*
436  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
437  * entry to get unlocked before deleting it.
438  */
439 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
440 {
441         int ret = __dax_invalidate_mapping_entry(mapping, index, true);
442
443         /*
444          * This gets called from truncate / punch_hole path. As such, the caller
445          * must hold locks protecting against concurrent modifications of the
446          * radix tree (usually fs-private i_mmap_sem for writing). Since the
447  * caller has seen an exceptional entry for this index, we had better
448  * find it at that index as well...
449          */
450         WARN_ON_ONCE(!ret);
451         return ret;
452 }
453
454 /*
455  * Invalidate exceptional DAX entry if easily possible. This handles DAX
456  * entries for invalidate_inode_pages() so we evict the entry only if we can
457  * do so without blocking.
458  */
459 int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
460 {
461         int ret = 0;
462         void *entry, **slot;
463         struct radix_tree_root *page_tree = &mapping->page_tree;
464
465         spin_lock_irq(&mapping->tree_lock);
466         entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
467         if (!entry || !radix_tree_exceptional_entry(entry) ||
468             slot_locked(mapping, slot))
469                 goto out;
470         if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
471             radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
472                 goto out;
473         radix_tree_delete(page_tree, index);
474         mapping->nrexceptional--;
475         ret = 1;
476 out:
477         spin_unlock_irq(&mapping->tree_lock);
478         if (ret)
479                 dax_wake_mapping_entry_waiter(mapping, index, entry, true);
480         return ret;
481 }
482
483 /*
484  * Invalidate exceptional DAX entry if it is clean.
485  */
486 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
487                                       pgoff_t index)
488 {
489         return __dax_invalidate_mapping_entry(mapping, index, false);
490 }
491
492 /*
493  * The user has performed a load from a hole in the file.  Allocating
494  * a new page in the file would cause excessive storage usage for
495  * workloads with sparse files.  We allocate a page cache page instead.
496  * We'll kick it out of the page cache if it's ever written to,
497  * otherwise it will simply fall out of the page cache under memory
498  * pressure without ever having been dirtied.
499  */
500 static int dax_load_hole(struct address_space *mapping, void **entry,
501                          struct vm_fault *vmf)
502 {
503         struct page *page;
504         int ret;
505
506         /* Hole page already exists? Return it...  */
507         if (!radix_tree_exceptional_entry(*entry)) {
508                 page = *entry;
509                 goto out;
510         }
511
512         /* This will replace locked radix tree entry with a hole page */
513         page = find_or_create_page(mapping, vmf->pgoff,
514                                    vmf->gfp_mask | __GFP_ZERO);
515         if (!page)
516                 return VM_FAULT_OOM;
517  out:
518         vmf->page = page;
519         ret = finish_fault(vmf);
520         vmf->page = NULL;
521         *entry = page;
522         if (!ret) {
523                 /* Grab reference for PTE that is now referencing the page */
524                 get_page(page);
525                 return VM_FAULT_NOPAGE;
526         }
527         return ret;
528 }
529
530 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
531                 sector_t sector, size_t size, struct page *to,
532                 unsigned long vaddr)
533 {
534         void *vto, *kaddr;
535         pgoff_t pgoff;
536         pfn_t pfn;
537         long rc;
538         int id;
539
540         rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
541         if (rc)
542                 return rc;
543
544         id = dax_read_lock();
545         rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
546         if (rc < 0) {
547                 dax_read_unlock(id);
548                 return rc;
549         }
550         vto = kmap_atomic(to);
551         copy_user_page(vto, (void __force *)kaddr, vaddr, to);
552         kunmap_atomic(vto);
553         dax_read_unlock(id);
554         return 0;
555 }
556
557 /*
558  * By this point grab_mapping_entry() has ensured that we have a locked entry
559  * of the appropriate size so we don't have to worry about downgrading PMDs to
560  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
561  * already in the tree, we will skip the insertion and just dirty the PMD as
562  * appropriate.
563  */
564 static void *dax_insert_mapping_entry(struct address_space *mapping,
565                                       struct vm_fault *vmf,
566                                       void *entry, sector_t sector,
567                                       unsigned long flags)
568 {
569         struct radix_tree_root *page_tree = &mapping->page_tree;
570         int error = 0;
571         bool hole_fill = false;
572         void *new_entry;
573         pgoff_t index = vmf->pgoff;
574
575         if (vmf->flags & FAULT_FLAG_WRITE)
576                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
577
578         /* Replacing hole page with block mapping? */
579         if (!radix_tree_exceptional_entry(entry)) {
580                 hole_fill = true;
581                 /*
582                  * Unmap the page now before we remove it from page cache below.
583                  * The page is locked so it cannot be faulted in again.
584                  */
585                 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
586                                     PAGE_SIZE, 0);
587                 error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
588                 if (error)
589                         return ERR_PTR(error);
590         } else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
591                 /* replacing huge zero page with PMD block mapping */
592                 unmap_mapping_range(mapping,
593                         (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
594         }
595
596         spin_lock_irq(&mapping->tree_lock);
597         new_entry = dax_radix_locked_entry(sector, flags);
598
599         if (hole_fill) {
600                 __delete_from_page_cache(entry, NULL);
601                 /* Drop pagecache reference */
602                 put_page(entry);
603                 error = __radix_tree_insert(page_tree, index,
604                                 dax_radix_order(new_entry), new_entry);
605                 if (error) {
606                         new_entry = ERR_PTR(error);
607                         goto unlock;
608                 }
609                 mapping->nrexceptional++;
610         } else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
611                 /*
612                  * Only swap our new entry into the radix tree if the current
613                  * entry is a zero page or an empty entry.  If a normal PTE or
614                  * PMD entry is already in the tree, we leave it alone.  This
615                  * means that if we are trying to insert a PTE and the
616                  * existing entry is a PMD, we will just leave the PMD in the
617                  * tree and dirty it if necessary.
618                  */
619                 struct radix_tree_node *node;
620                 void **slot;
621                 void *ret;
622
623                 ret = __radix_tree_lookup(page_tree, index, &node, &slot);
624                 WARN_ON_ONCE(ret != entry);
625                 __radix_tree_replace(page_tree, node, slot,
626                                      new_entry, NULL, NULL);
627         }
628         if (vmf->flags & FAULT_FLAG_WRITE)
629                 radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
630  unlock:
631         spin_unlock_irq(&mapping->tree_lock);
632         if (hole_fill) {
633                 radix_tree_preload_end();
634                 /*
635                  * We don't need hole page anymore, it has been replaced with
636                  * locked radix tree entry now.
637                  */
638                 if (mapping->a_ops->freepage)
639                         mapping->a_ops->freepage(entry);
640                 unlock_page(entry);
641                 put_page(entry);
642         }
643         return new_entry;
644 }
645
646 static inline unsigned long
647 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
648 {
649         unsigned long address;
650
651         address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
652         VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
653         return address;
654 }
655
656 /* Walk all mappings of a given index of a file and writeprotect them */
657 static void dax_mapping_entry_mkclean(struct address_space *mapping,
658                                       pgoff_t index, unsigned long pfn)
659 {
660         struct vm_area_struct *vma;
661         pte_t pte, *ptep = NULL;
662         pmd_t *pmdp = NULL;
663         spinlock_t *ptl;
664         bool changed;
665
666         i_mmap_lock_read(mapping);
667         vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
668                 unsigned long address;
669
670                 cond_resched();
671
672                 if (!(vma->vm_flags & VM_SHARED))
673                         continue;
674
675                 address = pgoff_address(index, vma);
676                 changed = false;
677                 if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
678                         continue;
679
680                 if (pmdp) {
681 #ifdef CONFIG_FS_DAX_PMD
682                         pmd_t pmd;
683
684                         if (pfn != pmd_pfn(*pmdp))
685                                 goto unlock_pmd;
686                         if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
687                                 goto unlock_pmd;
688
689                         flush_cache_page(vma, address, pfn);
690                         pmd = pmdp_huge_clear_flush(vma, address, pmdp);
691                         pmd = pmd_wrprotect(pmd);
692                         pmd = pmd_mkclean(pmd);
693                         set_pmd_at(vma->vm_mm, address, pmdp, pmd);
694                         changed = true;
695 unlock_pmd:
696                         spin_unlock(ptl);
697 #endif
698                 } else {
699                         if (pfn != pte_pfn(*ptep))
700                                 goto unlock_pte;
701                         if (!pte_dirty(*ptep) && !pte_write(*ptep))
702                                 goto unlock_pte;
703
704                         flush_cache_page(vma, address, pfn);
705                         pte = ptep_clear_flush(vma, address, ptep);
706                         pte = pte_wrprotect(pte);
707                         pte = pte_mkclean(pte);
708                         set_pte_at(vma->vm_mm, address, ptep, pte);
709                         changed = true;
710 unlock_pte:
711                         pte_unmap_unlock(ptep, ptl);
712                 }
713
714                 if (changed)
715                         mmu_notifier_invalidate_page(vma->vm_mm, address);
716         }
717         i_mmap_unlock_read(mapping);
718 }
719
720 static int dax_writeback_one(struct block_device *bdev,
721                 struct dax_device *dax_dev, struct address_space *mapping,
722                 pgoff_t index, void *entry)
723 {
724         struct radix_tree_root *page_tree = &mapping->page_tree;
725         void *entry2, **slot, *kaddr;
726         long ret = 0, id;
727         sector_t sector;
728         pgoff_t pgoff;
729         size_t size;
730         pfn_t pfn;
731
732         /*
733          * A page got tagged dirty in DAX mapping? Something is seriously
734          * wrong.
735          */
736         if (WARN_ON(!radix_tree_exceptional_entry(entry)))
737                 return -EIO;
738
739         spin_lock_irq(&mapping->tree_lock);
740         entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
741         /* Entry got punched out / reallocated? */
742         if (!entry2 || !radix_tree_exceptional_entry(entry2))
743                 goto put_unlocked;
744         /*
745          * Entry got reallocated elsewhere? No need to writeback. We have to
746          * compare sectors as we must not bail out due to difference in lockbit
747          * or entry type.
748          */
749         if (dax_radix_sector(entry2) != dax_radix_sector(entry))
750                 goto put_unlocked;
751         if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
752                                 dax_is_zero_entry(entry))) {
753                 ret = -EIO;
754                 goto put_unlocked;
755         }
756
757         /* Another fsync thread may have already written back this entry */
758         if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
759                 goto put_unlocked;
760         /* Lock the entry to serialize with page faults */
761         entry = lock_slot(mapping, slot);
762         /*
763          * We can clear the tag now but we have to be careful so that concurrent
764          * dax_writeback_one() calls for the same index cannot finish before we
765          * actually flush the caches. This is achieved as the calls will look
766          * at the entry only under tree_lock and once they do that they will
767          * see the entry locked and wait for it to unlock.
768          */
769         radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
770         spin_unlock_irq(&mapping->tree_lock);
771
772         /*
773          * Even if dax_writeback_mapping_range() was given a wbc->range_start
774          * in the middle of a PMD, the 'index' we are given will be aligned to
775          * the start index of the PMD, as will the sector we pull from
776          * 'entry'.  This allows us to flush for PMD_SIZE and not have to
777          * worry about partial PMD writebacks.
778          */
779         sector = dax_radix_sector(entry);
780         size = PAGE_SIZE << dax_radix_order(entry);
781
782         id = dax_read_lock();
783         ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
784         if (ret)
785                 goto dax_unlock;
786
787         /*
788          * dax_direct_access() may sleep, so cannot hold tree_lock over
789          * its invocation.
790          */
791         ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
792         if (ret < 0)
793                 goto dax_unlock;
794
795         if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
796                 ret = -EIO;
797                 goto dax_unlock;
798         }
799
800         dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
801         wb_cache_pmem(kaddr, size);
802         /*
803          * After we have flushed the cache, we can clear the dirty tag. There
804          * cannot be new dirty data in the pfn after the flush has completed as
805          * the pfn mappings are writeprotected and fault waits for mapping
806          * entry lock.
807          */
808         spin_lock_irq(&mapping->tree_lock);
809         radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
810         spin_unlock_irq(&mapping->tree_lock);
811  dax_unlock:
812         dax_read_unlock(id);
813         put_locked_mapping_entry(mapping, index, entry);
814         return ret;
815
816  put_unlocked:
817         put_unlocked_mapping_entry(mapping, index, entry2);
818         spin_unlock_irq(&mapping->tree_lock);
819         return ret;
820 }
821
822 /*
823  * Flush the mapping to the persistent domain within the byte range of [start,
824  * end]. This is required by data integrity operations to ensure file data is
825  * on persistent storage prior to completion of the operation.
826  */
827 int dax_writeback_mapping_range(struct address_space *mapping,
828                 struct block_device *bdev, struct writeback_control *wbc)
829 {
830         struct inode *inode = mapping->host;
831         pgoff_t start_index, end_index;
832         pgoff_t indices[PAGEVEC_SIZE];
833         struct dax_device *dax_dev;
834         struct pagevec pvec;
835         bool done = false;
836         int i, ret = 0;
837
838         if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
839                 return -EIO;
840
841         if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
842                 return 0;
843
844         dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
845         if (!dax_dev)
846                 return -EIO;
847
848         start_index = wbc->range_start >> PAGE_SHIFT;
849         end_index = wbc->range_end >> PAGE_SHIFT;
850
851         tag_pages_for_writeback(mapping, start_index, end_index);
852
853         pagevec_init(&pvec, 0);
854         while (!done) {
855                 pvec.nr = find_get_entries_tag(mapping, start_index,
856                                 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
857                                 pvec.pages, indices);
858
859                 if (pvec.nr == 0)
860                         break;
861
862                 for (i = 0; i < pvec.nr; i++) {
863                         if (indices[i] > end_index) {
864                                 done = true;
865                                 break;
866                         }
867
868                         ret = dax_writeback_one(bdev, dax_dev, mapping,
869                                         indices[i], pvec.pages[i]);
870                         if (ret < 0) {
871                                 put_dax(dax_dev);
872                                 return ret;
873                         }
874                 }
875         }
876         put_dax(dax_dev);
877         return 0;
878 }
879 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
880
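/*
 * Sketch of how a filesystem's ->writepages typically hands DAX mappings
 * to dax_writeback_mapping_range() (illustrative only; the choice of
 * block device argument is up to the filesystem):
 *
 *	if (dax_mapping(mapping))
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 */
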
881 static int dax_insert_mapping(struct address_space *mapping,
882                 struct block_device *bdev, struct dax_device *dax_dev,
883                 sector_t sector, size_t size, void **entryp,
884                 struct vm_area_struct *vma, struct vm_fault *vmf)
885 {
886         unsigned long vaddr = vmf->address;
887         void *entry = *entryp;
888         void *ret, *kaddr;
889         pgoff_t pgoff;
890         int id, rc;
891         pfn_t pfn;
892
893         rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
894         if (rc)
895                 return rc;
896
897         id = dax_read_lock();
898         rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
899         if (rc < 0) {
900                 dax_read_unlock(id);
901                 return rc;
902         }
903         dax_read_unlock(id);
904
905         ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
906         if (IS_ERR(ret))
907                 return PTR_ERR(ret);
908         *entryp = ret;
909
910         return vm_insert_mixed(vma, vaddr, pfn);
911 }
912
913 /**
914  * dax_pfn_mkwrite - handle first write to DAX page
915  * @vmf: The description of the fault
916  */
917 int dax_pfn_mkwrite(struct vm_fault *vmf)
918 {
919         struct file *file = vmf->vma->vm_file;
920         struct address_space *mapping = file->f_mapping;
921         void *entry, **slot;
922         pgoff_t index = vmf->pgoff;
923
924         spin_lock_irq(&mapping->tree_lock);
925         entry = get_unlocked_mapping_entry(mapping, index, &slot);
926         if (!entry || !radix_tree_exceptional_entry(entry)) {
927                 if (entry)
928                         put_unlocked_mapping_entry(mapping, index, entry);
929                 spin_unlock_irq(&mapping->tree_lock);
930                 return VM_FAULT_NOPAGE;
931         }
932         radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
933         entry = lock_slot(mapping, slot);
934         spin_unlock_irq(&mapping->tree_lock);
935         /*
936          * If we race with somebody updating the PTE and finish_mkwrite_fault()
937          * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
938          * the fault in either case.
939          */
940         finish_mkwrite_fault(vmf);
941         put_locked_mapping_entry(mapping, index, entry);
942         return VM_FAULT_NOPAGE;
943 }
944 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
945
946 static bool dax_range_is_aligned(struct block_device *bdev,
947                                  unsigned int offset, unsigned int length)
948 {
949         unsigned short sector_size = bdev_logical_block_size(bdev);
950
951         if (!IS_ALIGNED(offset, sector_size))
952                 return false;
953         if (!IS_ALIGNED(length, sector_size))
954                 return false;
955
956         return true;
957 }
958
959 int __dax_zero_page_range(struct block_device *bdev,
960                 struct dax_device *dax_dev, sector_t sector,
961                 unsigned int offset, unsigned int size)
962 {
963         if (dax_range_is_aligned(bdev, offset, size)) {
964                 sector_t start_sector = sector + (offset >> 9);
965
966                 return blkdev_issue_zeroout(bdev, start_sector,
967                                 size >> 9, GFP_NOFS, true);
968         } else {
969                 pgoff_t pgoff;
970                 long rc, id;
971                 void *kaddr;
972                 pfn_t pfn;
973
974                 rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
975                 if (rc)
976                         return rc;
977
978                 id = dax_read_lock();
979                 rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr,
980                                 &pfn);
981                 if (rc < 0) {
982                         dax_read_unlock(id);
983                         return rc;
984                 }
985                 clear_pmem(kaddr + offset, size);
986                 dax_read_unlock(id);
987         }
988         return 0;
989 }
990 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
991
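/*
 * Example of the aligned/unaligned split in __dax_zero_page_range() above
 * (illustrative, assuming a 4096-byte logical block size): zeroing offset
 * 0, size 4096 is block aligned and is pushed down to
 * blkdev_issue_zeroout(), while zeroing offset 100, size 200 is not and
 * falls back to clear_pmem() on the directly mapped page.
 */
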
992 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
993 {
994         return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
995 }
996
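/*
 * dax_iomap_sector() above works in 512-byte units: iomap->blkno is the
 * sector at which the extent starts and iomap->offset is its file offset.
 * For example, with 4K pages and an extent starting at file offset 0 on
 * sector B, a fault at pos == 3 * PAGE_SIZE resolves to sector B + 24
 * (three pages of eight sectors each).
 */
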
997 static loff_t
998 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
999                 struct iomap *iomap)
1000 {
1001         struct block_device *bdev = iomap->bdev;
1002         struct dax_device *dax_dev = iomap->dax_dev;
1003         struct iov_iter *iter = data;
1004         loff_t end = pos + length, done = 0;
1005         ssize_t ret = 0;
1006         int id;
1007
1008         if (iov_iter_rw(iter) == READ) {
1009                 end = min(end, i_size_read(inode));
1010                 if (pos >= end)
1011                         return 0;
1012
1013                 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1014                         return iov_iter_zero(min(length, end - pos), iter);
1015         }
1016
1017         if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1018                 return -EIO;
1019
1020         /*
1021          * Write can allocate block for an area which has a hole page mapped
1022          * into page tables. We have to tear down these mappings so that data
1023          * written by write(2) is visible in mmap.
1024          */
1025         if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
1026                 invalidate_inode_pages2_range(inode->i_mapping,
1027                                               pos >> PAGE_SHIFT,
1028                                               (end - 1) >> PAGE_SHIFT);
1029         }
1030
1031         id = dax_read_lock();
1032         while (pos < end) {
1033                 unsigned offset = pos & (PAGE_SIZE - 1);
1034                 const size_t size = ALIGN(length + offset, PAGE_SIZE);
1035                 const sector_t sector = dax_iomap_sector(iomap, pos);
1036                 ssize_t map_len;
1037                 pgoff_t pgoff;
1038                 void *kaddr;
1039                 pfn_t pfn;
1040
1041                 if (fatal_signal_pending(current)) {
1042                         ret = -EINTR;
1043                         break;
1044                 }
1045
1046                 ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1047                 if (ret)
1048                         break;
1049
1050                 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1051                                 &kaddr, &pfn);
1052                 if (map_len < 0) {
1053                         ret = map_len;
1054                         break;
1055                 }
1056
1057                 map_len = PFN_PHYS(map_len);
1058                 kaddr += offset;
1059                 map_len -= offset;
1060                 if (map_len > end - pos)
1061                         map_len = end - pos;
1062
1063                 if (iov_iter_rw(iter) == WRITE)
1064                         map_len = copy_from_iter_pmem(kaddr, map_len, iter);
1065                 else
1066                         map_len = copy_to_iter(kaddr, map_len, iter);
1067                 if (map_len <= 0) {
1068                         ret = map_len ? map_len : -EFAULT;
1069                         break;
1070                 }
1071
1072                 pos += map_len;
1073                 length -= map_len;
1074                 done += map_len;
1075         }
1076         dax_read_unlock(id);
1077
1078         return done ? done : ret;
1079 }
1080
1081 /**
1082  * dax_iomap_rw - Perform I/O to a DAX file
1083  * @iocb:       The control block for this I/O
1084  * @iter:       The addresses to do I/O from or to
1085  * @ops:        iomap ops passed from the file system
1086  *
1087  * This function performs read and write operations to directly mapped
1088  * persistent memory.  The caller needs to take care of read/write exclusion
1089  * and evicting any page cache pages in the region under I/O.
1090  */
1091 ssize_t
1092 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1093                 const struct iomap_ops *ops)
1094 {
1095         struct address_space *mapping = iocb->ki_filp->f_mapping;
1096         struct inode *inode = mapping->host;
1097         loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1098         unsigned flags = 0;
1099
1100         if (iov_iter_rw(iter) == WRITE) {
1101                 lockdep_assert_held_exclusive(&inode->i_rwsem);
1102                 flags |= IOMAP_WRITE;
1103         } else {
1104                 lockdep_assert_held(&inode->i_rwsem);
1105         }
1106
1107         while (iov_iter_count(iter)) {
1108                 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1109                                 iter, dax_iomap_actor);
1110                 if (ret <= 0)
1111                         break;
1112                 pos += ret;
1113                 done += ret;
1114         }
1115
1116         iocb->ki_pos += done;
1117         return done ? done : ret;
1118 }
1119 EXPORT_SYMBOL_GPL(dax_iomap_rw);
1120
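/*
 * Sketch of a read path built on dax_iomap_rw() (illustrative only;
 * 'my_fs_iomap_ops' is a placeholder for the filesystem's iomap_ops, and
 * the shared inode lock is the minimum the lockdep assertion above
 * expects for reads):
 *
 *	inode_lock_shared(inode);
 *	ret = dax_iomap_rw(iocb, to, &my_fs_iomap_ops);
 *	inode_unlock_shared(inode);
 *	file_accessed(iocb->ki_filp);
 */
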
1121 static int dax_fault_return(int error)
1122 {
1123         if (error == 0)
1124                 return VM_FAULT_NOPAGE;
1125         if (error == -ENOMEM)
1126                 return VM_FAULT_OOM;
1127         return VM_FAULT_SIGBUS;
1128 }
1129
1130 static int dax_iomap_pte_fault(struct vm_fault *vmf,
1131                                const struct iomap_ops *ops)
1132 {
1133         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1134         struct inode *inode = mapping->host;
1135         unsigned long vaddr = vmf->address;
1136         loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1137         sector_t sector;
1138         struct iomap iomap = { 0 };
1139         unsigned flags = IOMAP_FAULT;
1140         int error, major = 0;
1141         int vmf_ret = 0;
1142         void *entry;
1143
1144         /*
1145          * Check whether offset isn't beyond end of file now. Caller is supposed
1146          * to hold locks serializing us with truncate / punch hole so this is
1147          * a reliable test.
1148          */
1149         if (pos >= i_size_read(inode))
1150                 return VM_FAULT_SIGBUS;
1151
1152         if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1153                 flags |= IOMAP_WRITE;
1154
1155         /*
1156          * Note that we don't bother to use iomap_apply here: DAX requires
1157          * the file system block size to be equal to the page size, which means
1158          * that we never have to deal with more than a single extent here.
1159          */
1160         error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1161         if (error)
1162                 return dax_fault_return(error);
1163         if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1164                 vmf_ret = dax_fault_return(-EIO);       /* fs corruption? */
1165                 goto finish_iomap;
1166         }
1167
1168         entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1169         if (IS_ERR(entry)) {
1170                 vmf_ret = dax_fault_return(PTR_ERR(entry));
1171                 goto finish_iomap;
1172         }
1173
1174         sector = dax_iomap_sector(&iomap, pos);
1175
1176         if (vmf->cow_page) {
1177                 switch (iomap.type) {
1178                 case IOMAP_HOLE:
1179                 case IOMAP_UNWRITTEN:
1180                         clear_user_highpage(vmf->cow_page, vaddr);
1181                         break;
1182                 case IOMAP_MAPPED:
1183                         error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1184                                         sector, PAGE_SIZE, vmf->cow_page, vaddr);
1185                         break;
1186                 default:
1187                         WARN_ON_ONCE(1);
1188                         error = -EIO;
1189                         break;
1190                 }
1191
1192                 if (error)
1193                         goto error_unlock_entry;
1194
1195                 __SetPageUptodate(vmf->cow_page);
1196                 vmf_ret = finish_fault(vmf);
1197                 if (!vmf_ret)
1198                         vmf_ret = VM_FAULT_DONE_COW;
1199                 goto unlock_entry;
1200         }
1201
1202         switch (iomap.type) {
1203         case IOMAP_MAPPED:
1204                 if (iomap.flags & IOMAP_F_NEW) {
1205                         count_vm_event(PGMAJFAULT);
1206                         mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
1207                         major = VM_FAULT_MAJOR;
1208                 }
1209                 error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
1210                                 sector, PAGE_SIZE, &entry, vmf->vma, vmf);
1211                 /* -EBUSY is fine, somebody else faulted on the same PTE */
1212                 if (error == -EBUSY)
1213                         error = 0;
1214                 break;
1215         case IOMAP_UNWRITTEN:
1216         case IOMAP_HOLE:
1217                 if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1218                         vmf_ret = dax_load_hole(mapping, &entry, vmf);
1219                         goto unlock_entry;
1220                 }
1221                 /*FALLTHRU*/
1222         default:
1223                 WARN_ON_ONCE(1);
1224                 error = -EIO;
1225                 break;
1226         }
1227
1228  error_unlock_entry:
1229         vmf_ret = dax_fault_return(error) | major;
1230  unlock_entry:
1231         put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1232  finish_iomap:
1233         if (ops->iomap_end) {
1234                 int copied = PAGE_SIZE;
1235
1236                 if (vmf_ret & VM_FAULT_ERROR)
1237                         copied = 0;
1238                 /*
1239                  * The fault is done by now and there's no way back (other
1240                  * thread may be already happily using PTE we have installed).
1241                  * Just ignore error from ->iomap_end since we cannot do much
1242                  * with it.
1243                  */
1244                 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1245         }
1246         return vmf_ret;
1247 }
1248
1249 #ifdef CONFIG_FS_DAX_PMD
1250 /*
1251  * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
1252  * more often than one might expect in the below functions.
1253  */
1254 #define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)
1255
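/*
 * With 4K pages and 2MiB PMDs, PG_PMD_COLOUR is 511.  Two checks below
 * rely on it: a pfn can back a PMD mapping only if
 * (pfn & PG_PMD_COLOUR) == 0 (i.e. it is 2MiB aligned), and
 * (pgoff | PG_PMD_COLOUR) is the last page offset the PMD would cover,
 * which must not extend past max_pgoff.
 */
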
1256 static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
1257                 loff_t pos, void **entryp)
1258 {
1259         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1260         const sector_t sector = dax_iomap_sector(iomap, pos);
1261         struct dax_device *dax_dev = iomap->dax_dev;
1262         struct block_device *bdev = iomap->bdev;
1263         struct inode *inode = mapping->host;
1264         const size_t size = PMD_SIZE;
1265         void *ret = NULL, *kaddr;
1266         long length = 0;
1267         pgoff_t pgoff;
1268         pfn_t pfn;
1269         int id;
1270
1271         if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
1272                 goto fallback;
1273
1274         id = dax_read_lock();
1275         length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
1276         if (length < 0)
1277                 goto unlock_fallback;
1278         length = PFN_PHYS(length);
1279
1280         if (length < size)
1281                 goto unlock_fallback;
1282         if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
1283                 goto unlock_fallback;
1284         if (!pfn_t_devmap(pfn))
1285                 goto unlock_fallback;
1286         dax_read_unlock(id);
1287
1288         ret = dax_insert_mapping_entry(mapping, vmf, *entryp, sector,
1289                         RADIX_DAX_PMD);
1290         if (IS_ERR(ret))
1291                 goto fallback;
1292         *entryp = ret;
1293
1294         trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
1295         return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1296                         pfn, vmf->flags & FAULT_FLAG_WRITE);
1297
1298 unlock_fallback:
1299         dax_read_unlock(id);
1300 fallback:
1301         trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
1302         return VM_FAULT_FALLBACK;
1303 }
1304
1305 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1306                 void **entryp)
1307 {
1308         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1309         unsigned long pmd_addr = vmf->address & PMD_MASK;
1310         struct inode *inode = mapping->host;
1311         struct page *zero_page;
1312         void *ret = NULL;
1313         spinlock_t *ptl;
1314         pmd_t pmd_entry;
1315
1316         zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1317
1318         if (unlikely(!zero_page))
1319                 goto fallback;
1320
1321         ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
1322                         RADIX_DAX_PMD | RADIX_DAX_HZP);
1323         if (IS_ERR(ret))
1324                 goto fallback;
1325         *entryp = ret;
1326
1327         ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1328         if (!pmd_none(*(vmf->pmd))) {
1329                 spin_unlock(ptl);
1330                 goto fallback;
1331         }
1332
1333         pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1334         pmd_entry = pmd_mkhuge(pmd_entry);
1335         set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1336         spin_unlock(ptl);
1337         trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1338         return VM_FAULT_NOPAGE;
1339
1340 fallback:
1341         trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1342         return VM_FAULT_FALLBACK;
1343 }
1344
1345 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1346                                const struct iomap_ops *ops)
1347 {
1348         struct vm_area_struct *vma = vmf->vma;
1349         struct address_space *mapping = vma->vm_file->f_mapping;
1350         unsigned long pmd_addr = vmf->address & PMD_MASK;
1351         bool write = vmf->flags & FAULT_FLAG_WRITE;
1352         unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1353         struct inode *inode = mapping->host;
1354         int result = VM_FAULT_FALLBACK;
1355         struct iomap iomap = { 0 };
1356         pgoff_t max_pgoff, pgoff;
1357         void *entry;
1358         loff_t pos;
1359         int error;
1360
1361         /*
1362          * Check whether offset isn't beyond end of file now. Caller is
1363          * supposed to hold locks serializing us with truncate / punch hole so
1364          * this is a reliable test.
1365          */
1366         pgoff = linear_page_index(vma, pmd_addr);
1367         max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
1368
1369         trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1370
1371         /* Fall back to PTEs if we're going to COW */
1372         if (write && !(vma->vm_flags & VM_SHARED))
1373                 goto fallback;
1374
1375         /* If the PMD would extend outside the VMA */
1376         if (pmd_addr < vma->vm_start)
1377                 goto fallback;
1378         if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1379                 goto fallback;
1380
1381         if (pgoff > max_pgoff) {
1382                 result = VM_FAULT_SIGBUS;
1383                 goto out;
1384         }
1385
1386         /* If the PMD would extend beyond the file size */
1387         if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
1388                 goto fallback;
1389
1390         /*
1391          * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1392          * setting up a mapping, so really we're using iomap_begin() as a way
1393          * to look up our filesystem block.
1394          */
1395         pos = (loff_t)pgoff << PAGE_SHIFT;
1396         error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1397         if (error)
1398                 goto fallback;
1399
1400         if (iomap.offset + iomap.length < pos + PMD_SIZE)
1401                 goto finish_iomap;
1402
1403         /*
1404          * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
1405          * PMD or a HZP entry.  If it can't (because a 4k page is already in
1406          * the tree, for instance), it will return -EEXIST and we just fall
1407          * back to 4k entries.
1408          */
1409         entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1410         if (IS_ERR(entry))
1411                 goto finish_iomap;
1412
1413         switch (iomap.type) {
1414         case IOMAP_MAPPED:
1415                 result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
1416                 break;
1417         case IOMAP_UNWRITTEN:
1418         case IOMAP_HOLE:
1419                 if (WARN_ON_ONCE(write))
1420                         goto unlock_entry;
1421                 result = dax_pmd_load_hole(vmf, &iomap, &entry);
1422                 break;
1423         default:
1424                 WARN_ON_ONCE(1);
1425                 break;
1426         }
1427
1428  unlock_entry:
1429         put_locked_mapping_entry(mapping, pgoff, entry);
1430  finish_iomap:
1431         if (ops->iomap_end) {
1432                 int copied = PMD_SIZE;
1433
1434                 if (result == VM_FAULT_FALLBACK)
1435                         copied = 0;
1436                 /*
1437                  * The fault is done by now and there's no way back (other
1438                  * thread may be already happily using PMD we have installed).
1439                  * Just ignore error from ->iomap_end since we cannot do much
1440                  * with it.
1441                  */
1442                 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1443                                 &iomap);
1444         }
1445  fallback:
1446         if (result == VM_FAULT_FALLBACK) {
1447                 split_huge_pmd(vma, vmf->pmd, vmf->address);
1448                 count_vm_event(THP_FAULT_FALLBACK);
1449         }
1450 out:
1451         trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1452         return result;
1453 }
1454 #else
1455 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1456                                const struct iomap_ops *ops)
1457 {
1458         return VM_FAULT_FALLBACK;
1459 }
1460 #endif /* CONFIG_FS_DAX_PMD */
1461
1462 /**
1463  * dax_iomap_fault - handle a page fault on a DAX file
1464  * @vmf: The description of the fault
1465  * @ops: iomap ops passed from the file system
1466  *
1467  * When a page fault occurs, filesystems may call this helper in
1468  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1469  * has done all the necessary locking for page fault to proceed
1470  * successfully.
1471  */
1472 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1473                     const struct iomap_ops *ops)
1474 {
1475         switch (pe_size) {
1476         case PE_SIZE_PTE:
1477                 return dax_iomap_pte_fault(vmf, ops);
1478         case PE_SIZE_PMD:
1479                 return dax_iomap_pmd_fault(vmf, ops);
1480         default:
1481                 return VM_FAULT_FALLBACK;
1482         }
1483 }
1484 EXPORT_SYMBOL_GPL(dax_iomap_fault);
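/*
 * Sketch of how a filesystem wires these helpers into its ->mmap() path
 * (illustrative only; the 'my_fs_*' names and the choice of lock that
 * serializes against truncate / hole punch are placeholders, not an
 * existing API):
 *
 *	static int my_fs_dax_huge_fault(struct vm_fault *vmf,
 *					enum page_entry_size pe_size)
 *	{
 *		int ret;
 *
 *		...take the filesystem lock serializing against truncate...
 *		ret = dax_iomap_fault(vmf, pe_size, &my_fs_iomap_ops);
 *		...release it...
 *		return ret;
 *	}
 *
 *	static const struct vm_operations_struct my_fs_dax_vm_ops = {
 *		.fault		= my_fs_dax_fault,
 *		.huge_fault	= my_fs_dax_huge_fault,
 *		.page_mkwrite	= my_fs_dax_fault,
 *		.pfn_mkwrite	= my_fs_dax_pfn_mkwrite,
 *	};
 */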