1 /*
2  * fs/dax.c - Direct Access filesystem code
3  * Copyright (c) 2013-2014 Intel Corporation
4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  */
16
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/sched.h>
29 #include <linux/sched/signal.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34 #include <linux/mmu_notifier.h>
35 #include <linux/iomap.h>
36 #include "internal.h"
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/fs_dax.h>
40
41 /* We choose 4096 entries - same as per-zone page wait tables */
42 #define DAX_WAIT_TABLE_BITS 12
43 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
44
45 /* The 'colour' (i.e. the low bits) of a page offset within a PMD. */
46 #define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)
47 #define PG_PMD_NR       (PMD_SIZE >> PAGE_SHIFT)
48
49 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
50
51 static int __init init_dax_wait_table(void)
52 {
53         int i;
54
55         for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
56                 init_waitqueue_head(wait_table + i);
57         return 0;
58 }
59 fs_initcall(init_dax_wait_table);
60
61 /*
62  * We use the lowest available bit in the exceptional entry for locking, one
63  * bit for the entry size (PMD) and two more to tell us if the entry is a zero
64  * page or an empty entry that is just used for locking.  In total four special bits.
65  *
66  * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
67  * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
68  * block allocation.
69  */
70 #define RADIX_DAX_SHIFT         (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
71 #define RADIX_DAX_ENTRY_LOCK    (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
72 #define RADIX_DAX_PMD           (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
73 #define RADIX_DAX_ZERO_PAGE     (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
74 #define RADIX_DAX_EMPTY         (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
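/*
 * An illustrative sketch of the encoding (the macros and helpers below are
 * the authority): a locked, block-backed PMD entry for sector 0x2000 would be
 * built by dax_radix_locked_entry(0x2000, RADIX_DAX_PMD) as
 *
 *	RADIX_TREE_EXCEPTIONAL_ENTRY | RADIX_DAX_PMD |
 *	(0x2000UL << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK
 *
 * i.e. the sector number lives above RADIX_DAX_SHIFT and the lock, size and
 * type flags live in the low bits next to the exceptional-entry marker.
 */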
75
76 static unsigned long dax_radix_sector(void *entry)
77 {
78         return (unsigned long)entry >> RADIX_DAX_SHIFT;
79 }
80
81 static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
82 {
83         return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
84                         ((unsigned long)sector << RADIX_DAX_SHIFT) |
85                         RADIX_DAX_ENTRY_LOCK);
86 }
87
88 static unsigned int dax_radix_order(void *entry)
89 {
90         if ((unsigned long)entry & RADIX_DAX_PMD)
91                 return PMD_SHIFT - PAGE_SHIFT;
92         return 0;
93 }
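/*
 * For example (a sketch assuming the common x86-64 configuration of 4k
 * PAGE_SIZE and 2MiB PMD_SIZE): dax_radix_order() returns
 * PMD_SHIFT - PAGE_SHIFT == 21 - 12 == 9 for a PMD entry, i.e. the entry
 * spans 1 << 9 == 512 pages in the radix tree, and 0 for a PTE entry.
 */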
94
95 static int dax_is_pmd_entry(void *entry)
96 {
97         return (unsigned long)entry & RADIX_DAX_PMD;
98 }
99
100 static int dax_is_pte_entry(void *entry)
101 {
102         return !((unsigned long)entry & RADIX_DAX_PMD);
103 }
104
105 static int dax_is_zero_entry(void *entry)
106 {
107         return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
108 }
109
110 static int dax_is_empty_entry(void *entry)
111 {
112         return (unsigned long)entry & RADIX_DAX_EMPTY;
113 }
114
115 /*
116  * DAX radix tree locking
117  */
118 struct exceptional_entry_key {
119         struct address_space *mapping;
120         pgoff_t entry_start;
121 };
122
123 struct wait_exceptional_entry_queue {
124         wait_queue_entry_t wait;
125         struct exceptional_entry_key key;
126 };
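/*
 * A descriptive sketch of the protocol implemented below: a task that finds
 * an entry locked under mapping->tree_lock hashes (mapping, index) to one of
 * the wait_table queues via dax_entry_waitqueue() (for PMD entries the index
 * is first aligned down, so both sides hash to the same queue), does
 * prepare_to_wait_exclusive(), drops tree_lock and schedules.  The task that
 * clears RADIX_DAX_ENTRY_LOCK then calls dax_wake_mapping_entry_waiter(),
 * which wakes waiters on that same hashed queue.
 */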
127
128 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
129                 pgoff_t index, void *entry, struct exceptional_entry_key *key)
130 {
131         unsigned long hash;
132
133         /*
134          * If 'entry' is a PMD, align the 'index' that we use for the wait
135          * queue to the start of that PMD.  This ensures that all offsets in
136          * the range covered by the PMD map to the same bit lock.
137          */
138         if (dax_is_pmd_entry(entry))
139                 index &= ~PG_PMD_COLOUR;
140
141         key->mapping = mapping;
142         key->entry_start = index;
143
144         hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
145         return wait_table + hash;
146 }
147
148 static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
149                                        int sync, void *keyp)
150 {
151         struct exceptional_entry_key *key = keyp;
152         struct wait_exceptional_entry_queue *ewait =
153                 container_of(wait, struct wait_exceptional_entry_queue, wait);
154
155         if (key->mapping != ewait->key.mapping ||
156             key->entry_start != ewait->key.entry_start)
157                 return 0;
158         return autoremove_wake_function(wait, mode, sync, NULL);
159 }
160
161 /*
162  * We do not necessarily hold the mapping->tree_lock when we call this
163  * function so it is possible that 'entry' is no longer a valid item in the
164  * radix tree.  This is okay because all we really need to do is to find the
165  * correct waitqueue where tasks might be waiting for that old 'entry' and
166  * wake them.
167  */
168 static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
169                 pgoff_t index, void *entry, bool wake_all)
170 {
171         struct exceptional_entry_key key;
172         wait_queue_head_t *wq;
173
174         wq = dax_entry_waitqueue(mapping, index, entry, &key);
175
176         /*
177          * Checking for locked entry and prepare_to_wait_exclusive() happens
178          * under mapping->tree_lock, ditto for entry handling in our callers.
179          * So at this point all tasks that could have seen our entry locked
180          * must be in the waitqueue and the following check will see them.
181          */
182         if (waitqueue_active(wq))
183                 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
184 }
185
186 /*
187  * Check whether the given slot is locked. The function must be called with
188  * mapping->tree_lock held
189  */
190 static inline int slot_locked(struct address_space *mapping, void **slot)
191 {
192         unsigned long entry = (unsigned long)
193                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
194         return entry & RADIX_DAX_ENTRY_LOCK;
195 }
196
197 /*
198  * Mark the given slot as locked. The function must be called with
199  * mapping->tree_lock held
200  */
201 static inline void *lock_slot(struct address_space *mapping, void **slot)
202 {
203         unsigned long entry = (unsigned long)
204                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
205
206         entry |= RADIX_DAX_ENTRY_LOCK;
207         radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
208         return (void *)entry;
209 }
210
211 /*
212  * Mark the given slot as unlocked. The function must be called with
213  * mapping->tree_lock held
214  */
215 static inline void *unlock_slot(struct address_space *mapping, void **slot)
216 {
217         unsigned long entry = (unsigned long)
218                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
219
220         entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
221         radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
222         return (void *)entry;
223 }
224
225 /*
226  * Look up the entry in the radix tree, wait for it to become unlocked if
227  * it is an exceptional entry, and return it. The caller must call
228  * put_unlocked_mapping_entry() if it decides not to lock the entry, or
229  * put_locked_mapping_entry() once it has locked the entry and wants to
230  * unlock it.
231  *
232  * The function must be called with mapping->tree_lock held.
233  */
234 static void *get_unlocked_mapping_entry(struct address_space *mapping,
235                                         pgoff_t index, void ***slotp)
236 {
237         void *entry, **slot;
238         struct wait_exceptional_entry_queue ewait;
239         wait_queue_head_t *wq;
240
241         init_wait(&ewait.wait);
242         ewait.wait.func = wake_exceptional_entry_func;
243
244         for (;;) {
245                 entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
246                                           &slot);
247                 if (!entry ||
248                     WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
249                     !slot_locked(mapping, slot)) {
250                         if (slotp)
251                                 *slotp = slot;
252                         return entry;
253                 }
254
255                 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
256                 prepare_to_wait_exclusive(wq, &ewait.wait,
257                                           TASK_UNINTERRUPTIBLE);
258                 spin_unlock_irq(&mapping->tree_lock);
259                 schedule();
260                 finish_wait(wq, &ewait.wait);
261                 spin_lock_irq(&mapping->tree_lock);
262         }
263 }
264
265 static void dax_unlock_mapping_entry(struct address_space *mapping,
266                                      pgoff_t index)
267 {
268         void *entry, **slot;
269
270         spin_lock_irq(&mapping->tree_lock);
271         entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
272         if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
273                          !slot_locked(mapping, slot))) {
274                 spin_unlock_irq(&mapping->tree_lock);
275                 return;
276         }
277         unlock_slot(mapping, slot);
278         spin_unlock_irq(&mapping->tree_lock);
279         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
280 }
281
282 static void put_locked_mapping_entry(struct address_space *mapping,
283                 pgoff_t index)
284 {
285         dax_unlock_mapping_entry(mapping, index);
286 }
287
288 /*
289  * Called when we are done with the radix tree entry we looked up via
290  * get_unlocked_mapping_entry() and which we didn't lock in the end.
291  */
292 static void put_unlocked_mapping_entry(struct address_space *mapping,
293                                        pgoff_t index, void *entry)
294 {
295         if (!entry)
296                 return;
297
298         /* We have to wake up next waiter for the radix tree entry lock */
299         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
300 }
301
302 /*
303  * Find radix tree entry at given index. If it points to an exceptional entry,
304  * return it with the radix tree entry locked. If the radix tree doesn't
305  * contain given index, create an empty exceptional entry for the index and
306  * return with it locked.
307  *
308  * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
309  * either return that locked entry or will return an error.  This error will
310  * happen if there are any 4k entries within the 2MiB range that we are
311  * requesting.
312  *
313  * We always favor 4k entries over 2MiB entries. There isn't a flow where we
314  * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
315  * insertion will fail if it finds any 4k entries already in the tree, and a
316  * 4k insertion will cause an existing 2MiB entry to be unmapped and
317  * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
318  * well as 2MiB empty entries.
319  *
320  * The exception to this downgrade path is for 2MiB DAX PMD entries that have
321  * real storage backing them.  We will leave these real 2MiB DAX entries in
322  * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
323  *
324  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
325  * persistent memory the benefit is doubtful. We can add that later if we can
326  * show it helps.
327  */
328 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
329                 unsigned long size_flag)
330 {
331         bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
332         void *entry, **slot;
333
334 restart:
335         spin_lock_irq(&mapping->tree_lock);
336         entry = get_unlocked_mapping_entry(mapping, index, &slot);
337
338         if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
339                 entry = ERR_PTR(-EIO);
340                 goto out_unlock;
341         }
342
343         if (entry) {
344                 if (size_flag & RADIX_DAX_PMD) {
345                         if (dax_is_pte_entry(entry)) {
346                                 put_unlocked_mapping_entry(mapping, index,
347                                                 entry);
348                                 entry = ERR_PTR(-EEXIST);
349                                 goto out_unlock;
350                         }
351                 } else { /* trying to grab a PTE entry */
352                         if (dax_is_pmd_entry(entry) &&
353                             (dax_is_zero_entry(entry) ||
354                              dax_is_empty_entry(entry))) {
355                                 pmd_downgrade = true;
356                         }
357                 }
358         }
359
360         /* No entry for given index? Make sure radix tree is big enough. */
361         if (!entry || pmd_downgrade) {
362                 int err;
363
364                 if (pmd_downgrade) {
365                         /*
366                          * Make sure 'entry' remains valid while we drop
367                          * mapping->tree_lock.
368                          */
369                         entry = lock_slot(mapping, slot);
370                 }
371
372                 spin_unlock_irq(&mapping->tree_lock);
373                 /*
374                  * Besides huge zero pages the only other thing that gets
375                  * downgraded are empty entries which don't need to be
376                  * unmapped.
377                  */
378                 if (pmd_downgrade && dax_is_zero_entry(entry))
379                         unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
380                                                         PG_PMD_NR, false);
381
382                 err = radix_tree_preload(
383                                 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
384                 if (err) {
385                         if (pmd_downgrade)
386                                 put_locked_mapping_entry(mapping, index);
387                         return ERR_PTR(err);
388                 }
389                 spin_lock_irq(&mapping->tree_lock);
390
391                 if (!entry) {
392                         /*
393                          * We needed to drop the page_tree lock while calling
394                          * radix_tree_preload() and we didn't have an entry to
395                          * lock.  See if another thread inserted an entry at
396                          * our index during this time.
397                          */
398                         entry = __radix_tree_lookup(&mapping->page_tree, index,
399                                         NULL, &slot);
400                         if (entry) {
401                                 radix_tree_preload_end();
402                                 spin_unlock_irq(&mapping->tree_lock);
403                                 goto restart;
404                         }
405                 }
406
407                 if (pmd_downgrade) {
408                         radix_tree_delete(&mapping->page_tree, index);
409                         mapping->nrexceptional--;
410                         dax_wake_mapping_entry_waiter(mapping, index, entry,
411                                         true);
412                 }
413
414                 entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
415
416                 err = __radix_tree_insert(&mapping->page_tree, index,
417                                 dax_radix_order(entry), entry);
418                 radix_tree_preload_end();
419                 if (err) {
420                         spin_unlock_irq(&mapping->tree_lock);
421                         /*
422                          * Our insertion of a DAX entry failed, most likely
423                          * because we were inserting a PMD entry and it
424                          * collided with a PTE sized entry at a different
425                          * index in the PMD range.  We haven't inserted
426                          * anything into the radix tree and have no waiters to
427                          * wake.
428                          */
429                         return ERR_PTR(err);
430                 }
431                 /* Good, we have inserted empty locked entry into the tree. */
432                 mapping->nrexceptional++;
433                 spin_unlock_irq(&mapping->tree_lock);
434                 return entry;
435         }
436         entry = lock_slot(mapping, slot);
437  out_unlock:
438         spin_unlock_irq(&mapping->tree_lock);
439         return entry;
440 }
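/*
 * A minimal usage sketch (error handling trimmed; this mirrors what the
 * fault handlers later in this file do):
 *
 *	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
 *	if (IS_ERR(entry))
 *		return dax_fault_return(PTR_ERR(entry));
 *	... set up the mapping, possibly via dax_insert_mapping_entry() ...
 *	put_locked_mapping_entry(mapping, vmf->pgoff);
 */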
441
442 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
443                                           pgoff_t index, bool trunc)
444 {
445         int ret = 0;
446         void *entry;
447         struct radix_tree_root *page_tree = &mapping->page_tree;
448
449         spin_lock_irq(&mapping->tree_lock);
450         entry = get_unlocked_mapping_entry(mapping, index, NULL);
451         if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
452                 goto out;
453         if (!trunc &&
454             (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
455              radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
456                 goto out;
457         radix_tree_delete(page_tree, index);
458         mapping->nrexceptional--;
459         ret = 1;
460 out:
461         put_unlocked_mapping_entry(mapping, index, entry);
462         spin_unlock_irq(&mapping->tree_lock);
463         return ret;
464 }
465 /*
466  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
467  * entry to get unlocked before deleting it.
468  */
469 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
470 {
471         int ret = __dax_invalidate_mapping_entry(mapping, index, true);
472
473         /*
474          * This gets called from the truncate / punch_hole path. As such, the
475          * caller must hold locks protecting against concurrent modifications
476          * of the radix tree (usually fs-private i_mmap_sem for writing). Since
477          * the caller has seen an exceptional entry for this index, we had
478          * better find it at that index as well...
479          */
480         WARN_ON_ONCE(!ret);
481         return ret;
482 }
483
484 /*
485  * Invalidate exceptional DAX entry if it is clean.
486  */
487 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
488                                       pgoff_t index)
489 {
490         return __dax_invalidate_mapping_entry(mapping, index, false);
491 }
492
493 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
494                 sector_t sector, size_t size, struct page *to,
495                 unsigned long vaddr)
496 {
497         void *vto, *kaddr;
498         pgoff_t pgoff;
499         pfn_t pfn;
500         long rc;
501         int id;
502
503         rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
504         if (rc)
505                 return rc;
506
507         id = dax_read_lock();
508         rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
509         if (rc < 0) {
510                 dax_read_unlock(id);
511                 return rc;
512         }
513         vto = kmap_atomic(to);
514         copy_user_page(vto, (void __force *)kaddr, vaddr, to);
515         kunmap_atomic(vto);
516         dax_read_unlock(id);
517         return 0;
518 }
519
520 /*
521  * By this point grab_mapping_entry() has ensured that we have a locked entry
522  * of the appropriate size so we don't have to worry about downgrading PMDs to
523  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
524  * already in the tree, we will skip the insertion and just dirty the PMD as
525  * appropriate.
526  */
527 static void *dax_insert_mapping_entry(struct address_space *mapping,
528                                       struct vm_fault *vmf,
529                                       void *entry, sector_t sector,
530                                       unsigned long flags, bool dirty)
531 {
532         struct radix_tree_root *page_tree = &mapping->page_tree;
533         void *new_entry;
534         pgoff_t index = vmf->pgoff;
535
536         if (dirty)
537                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
538
539         if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
540                 /* we are replacing a zero page with block mapping */
541                 if (dax_is_pmd_entry(entry))
542                         unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
543                                                         PG_PMD_NR, false);
544                 else /* pte entry */
545                         unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
546         }
547
548         spin_lock_irq(&mapping->tree_lock);
549         new_entry = dax_radix_locked_entry(sector, flags);
550
551         if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
552                 /*
553                  * Only swap our new entry into the radix tree if the current
554                  * entry is a zero page or an empty entry.  If a normal PTE or
555                  * PMD entry is already in the tree, we leave it alone.  This
556                  * means that if we are trying to insert a PTE and the
557                  * existing entry is a PMD, we will just leave the PMD in the
558                  * tree and dirty it if necessary.
559                  */
560                 struct radix_tree_node *node;
561                 void **slot;
562                 void *ret;
563
564                 ret = __radix_tree_lookup(page_tree, index, &node, &slot);
565                 WARN_ON_ONCE(ret != entry);
566                 __radix_tree_replace(page_tree, node, slot,
567                                      new_entry, NULL);
568                 entry = new_entry;
569         }
570
571         if (dirty)
572                 radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
573
574         spin_unlock_irq(&mapping->tree_lock);
575         return entry;
576 }
577
578 static inline unsigned long
579 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
580 {
581         unsigned long address;
582
583         address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
584         VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
585         return address;
586 }
587
588 /* Walk all mappings of a given index of a file and writeprotect them */
589 static void dax_mapping_entry_mkclean(struct address_space *mapping,
590                                       pgoff_t index, unsigned long pfn)
591 {
592         struct vm_area_struct *vma;
593         pte_t pte, *ptep = NULL;
594         pmd_t *pmdp = NULL;
595         spinlock_t *ptl;
596
597         i_mmap_lock_read(mapping);
598         vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
599                 unsigned long address, start, end;
600
601                 cond_resched();
602
603                 if (!(vma->vm_flags & VM_SHARED))
604                         continue;
605
606                 address = pgoff_address(index, vma);
607
608                 /*
609                  * Note because we provide start/end to follow_pte_pmd it will
610                  * call mmu_notifier_invalidate_range_start() on our behalf
611                  * before taking any lock.
612                  */
613                 if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
614                         continue;
615
616                 /*
617                  * No need to call mmu_notifier_invalidate_range() as we are
618                  * downgrading page table protection not changing it to point
619                  * to a new page.
620                  *
621                  * See Documentation/vm/mmu_notifier.txt
622                  */
623                 if (pmdp) {
624 #ifdef CONFIG_FS_DAX_PMD
625                         pmd_t pmd;
626
627                         if (pfn != pmd_pfn(*pmdp))
628                                 goto unlock_pmd;
629                         if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
630                                 goto unlock_pmd;
631
632                         flush_cache_page(vma, address, pfn);
633                         pmd = pmdp_huge_clear_flush(vma, address, pmdp);
634                         pmd = pmd_wrprotect(pmd);
635                         pmd = pmd_mkclean(pmd);
636                         set_pmd_at(vma->vm_mm, address, pmdp, pmd);
637 unlock_pmd:
638 #endif
639                         spin_unlock(ptl);
640                 } else {
641                         if (pfn != pte_pfn(*ptep))
642                                 goto unlock_pte;
643                         if (!pte_dirty(*ptep) && !pte_write(*ptep))
644                                 goto unlock_pte;
645
646                         flush_cache_page(vma, address, pfn);
647                         pte = ptep_clear_flush(vma, address, ptep);
648                         pte = pte_wrprotect(pte);
649                         pte = pte_mkclean(pte);
650                         set_pte_at(vma->vm_mm, address, ptep, pte);
651 unlock_pte:
652                         pte_unmap_unlock(ptep, ptl);
653                 }
654
655                 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
656         }
657         i_mmap_unlock_read(mapping);
658 }
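/*
 * Write-protecting the mappings above is what makes the cache flush in
 * dax_writeback_one() below sufficient: as its comment notes, any later store
 * through a user mapping must first fault, and the fault path waits on the
 * mapping entry lock and re-dirties the radix tree entry, so no dirty data
 * can hide behind a clean tag.
 */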
659
660 static int dax_writeback_one(struct block_device *bdev,
661                 struct dax_device *dax_dev, struct address_space *mapping,
662                 pgoff_t index, void *entry)
663 {
664         struct radix_tree_root *page_tree = &mapping->page_tree;
665         void *entry2, **slot, *kaddr;
666         long ret = 0, id;
667         sector_t sector;
668         pgoff_t pgoff;
669         size_t size;
670         pfn_t pfn;
671
672         /*
673          * A page got tagged dirty in DAX mapping? Something is seriously
674          * wrong.
675          */
676         if (WARN_ON(!radix_tree_exceptional_entry(entry)))
677                 return -EIO;
678
679         spin_lock_irq(&mapping->tree_lock);
680         entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
681         /* Entry got punched out / reallocated? */
682         if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
683                 goto put_unlocked;
684         /*
685          * Entry got reallocated elsewhere? No need to write it back. We have
686          * to compare sectors, as we must not bail out due to a difference in
687          * the lock bit or entry type.
688          */
689         if (dax_radix_sector(entry2) != dax_radix_sector(entry))
690                 goto put_unlocked;
691         if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
692                                 dax_is_zero_entry(entry))) {
693                 ret = -EIO;
694                 goto put_unlocked;
695         }
696
697         /* Another fsync thread may have already written back this entry */
698         if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
699                 goto put_unlocked;
700         /* Lock the entry to serialize with page faults */
701         entry = lock_slot(mapping, slot);
702         /*
703          * We can clear the tag now but we have to be careful so that concurrent
704          * dax_writeback_one() calls for the same index cannot finish before we
705          * actually flush the caches. This is achieved because those calls look
706          * at the entry only under tree_lock, and once they do so they will see
707          * the entry locked and wait for it to be unlocked.
708          */
709         radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
710         spin_unlock_irq(&mapping->tree_lock);
711
712         /*
713          * Even if dax_writeback_mapping_range() was given a wbc->range_start
714          * in the middle of a PMD, the 'index' we are given will be aligned to
715          * the start index of the PMD, as will the sector we pull from
716          * 'entry'.  This allows us to flush for PMD_SIZE and not have to
717          * worry about partial PMD writebacks.
718          */
719         sector = dax_radix_sector(entry);
720         size = PAGE_SIZE << dax_radix_order(entry);
721
722         id = dax_read_lock();
723         ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
724         if (ret)
725                 goto dax_unlock;
726
727         /*
728          * dax_direct_access() may sleep, so we cannot hold tree_lock over
729          * its invocation.
730          */
731         ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
732         if (ret < 0)
733                 goto dax_unlock;
734
735         if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
736                 ret = -EIO;
737                 goto dax_unlock;
738         }
739
740         dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
741         dax_flush(dax_dev, kaddr, size);
742         /*
743          * After we have flushed the cache, we can clear the dirty tag. There
744          * cannot be new dirty data in the pfn after the flush has completed as
745          * the pfn mappings are writeprotected and fault waits for mapping
746          * entry lock.
747          */
748         spin_lock_irq(&mapping->tree_lock);
749         radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
750         spin_unlock_irq(&mapping->tree_lock);
751         trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
752  dax_unlock:
753         dax_read_unlock(id);
754         put_locked_mapping_entry(mapping, index);
755         return ret;
756
757  put_unlocked:
758         put_unlocked_mapping_entry(mapping, index, entry2);
759         spin_unlock_irq(&mapping->tree_lock);
760         return ret;
761 }
762
763 /*
764  * Flush the mapping to the persistent domain within the byte range of [start,
765  * end]. This is required by data integrity operations to ensure file data is
766  * on persistent storage prior to completion of the operation.
767  */
768 int dax_writeback_mapping_range(struct address_space *mapping,
769                 struct block_device *bdev, struct writeback_control *wbc)
770 {
771         struct inode *inode = mapping->host;
772         pgoff_t start_index, end_index;
773         pgoff_t indices[PAGEVEC_SIZE];
774         struct dax_device *dax_dev;
775         struct pagevec pvec;
776         bool done = false;
777         int i, ret = 0;
778
779         if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
780                 return -EIO;
781
782         if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
783                 return 0;
784
785         dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
786         if (!dax_dev)
787                 return -EIO;
788
789         start_index = wbc->range_start >> PAGE_SHIFT;
790         end_index = wbc->range_end >> PAGE_SHIFT;
791
792         trace_dax_writeback_range(inode, start_index, end_index);
793
794         tag_pages_for_writeback(mapping, start_index, end_index);
795
796         pagevec_init(&pvec);
797         while (!done) {
798                 pvec.nr = find_get_entries_tag(mapping, start_index,
799                                 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
800                                 pvec.pages, indices);
801
802                 if (pvec.nr == 0)
803                         break;
804
805                 for (i = 0; i < pvec.nr; i++) {
806                         if (indices[i] > end_index) {
807                                 done = true;
808                                 break;
809                         }
810
811                         ret = dax_writeback_one(bdev, dax_dev, mapping,
812                                         indices[i], pvec.pages[i]);
813                         if (ret < 0) {
814                                 mapping_set_error(mapping, ret);
815                                 goto out;
816                         }
817                 }
818                 start_index = indices[pvec.nr - 1] + 1;
819         }
820 out:
821         put_dax(dax_dev);
822         trace_dax_writeback_range_done(inode, start_index, end_index);
823         return (ret < 0 ? ret : 0);
824 }
825 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
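/*
 * A rough caller-side sketch (modelled on how DAX-capable filesystems wire
 * this up from their ->writepages; the helper name here is illustrative):
 *
 *	static int example_dax_writepages(struct address_space *mapping,
 *					  struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */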
826
827 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
828 {
829         return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
830 }
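/*
 * Worked example (a sketch, numbers chosen for illustration): with
 * iomap->offset == 0, iomap->addr == 0x100000 and pos == 0x3010, the byte
 * address is 0x100000 + (0x3010 & PAGE_MASK) == 0x103000, so the function
 * returns 0x103000 >> 9 == 0x818 (512-byte sectors).
 */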
831
832 static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
833                          pfn_t *pfnp)
834 {
835         const sector_t sector = dax_iomap_sector(iomap, pos);
836         pgoff_t pgoff;
837         void *kaddr;
838         int id, rc;
839         long length;
840
841         rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
842         if (rc)
843                 return rc;
844         id = dax_read_lock();
845         length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
846                                    &kaddr, pfnp);
847         if (length < 0) {
848                 rc = length;
849                 goto out;
850         }
851         rc = -EINVAL;
852         if (PFN_PHYS(length) < size)
853                 goto out;
854         if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
855                 goto out;
856         /* For larger pages we need devmap */
857         if (length > 1 && !pfn_t_devmap(*pfnp))
858                 goto out;
859         rc = 0;
860 out:
861         dax_read_unlock(id);
862         return rc;
863 }
864
865 /*
866  * The user has performed a load from a hole in the file.  Allocating a new
867  * page in the file would cause excessive storage usage for workloads with
868  * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
869  * If this page is ever written to we will re-fault and change the mapping to
870  * point to real DAX storage instead.
871  */
872 static int dax_load_hole(struct address_space *mapping, void *entry,
873                          struct vm_fault *vmf)
874 {
875         struct inode *inode = mapping->host;
876         unsigned long vaddr = vmf->address;
877         int ret = VM_FAULT_NOPAGE;
878         struct page *zero_page;
879         void *entry2;
880
881         zero_page = ZERO_PAGE(0);
882         if (unlikely(!zero_page)) {
883                 ret = VM_FAULT_OOM;
884                 goto out;
885         }
886
887         entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
888                         RADIX_DAX_ZERO_PAGE, false);
889         if (IS_ERR(entry2)) {
890                 ret = VM_FAULT_SIGBUS;
891                 goto out;
892         }
893
894         vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
895 out:
896         trace_dax_load_hole(inode, vmf, ret);
897         return ret;
898 }
899
900 static bool dax_range_is_aligned(struct block_device *bdev,
901                                  unsigned int offset, unsigned int length)
902 {
903         unsigned short sector_size = bdev_logical_block_size(bdev);
904
905         if (!IS_ALIGNED(offset, sector_size))
906                 return false;
907         if (!IS_ALIGNED(length, sector_size))
908                 return false;
909
910         return true;
911 }
912
913 int __dax_zero_page_range(struct block_device *bdev,
914                 struct dax_device *dax_dev, sector_t sector,
915                 unsigned int offset, unsigned int size)
916 {
917         if (dax_range_is_aligned(bdev, offset, size)) {
918                 sector_t start_sector = sector + (offset >> 9);
919
920                 return blkdev_issue_zeroout(bdev, start_sector,
921                                 size >> 9, GFP_NOFS, 0);
922         } else {
923                 pgoff_t pgoff;
924                 long rc, id;
925                 void *kaddr;
926                 pfn_t pfn;
927
928                 rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
929                 if (rc)
930                         return rc;
931
932                 id = dax_read_lock();
933                 rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
934                                 &pfn);
935                 if (rc < 0) {
936                         dax_read_unlock(id);
937                         return rc;
938                 }
939                 memset(kaddr + offset, 0, size);
940                 dax_flush(dax_dev, kaddr + offset, size);
941                 dax_read_unlock(id);
942         }
943         return 0;
944 }
945 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
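/*
 * For example (a sketch assuming a 512-byte logical block size): zeroing
 * offset 0, size 4096 is block-aligned and goes through
 * blkdev_issue_zeroout(), while zeroing offset 100, size 200 is not, so it
 * falls back to the dax_direct_access() + memset() + dax_flush() path above.
 */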
946
947 static loff_t
948 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
949                 struct iomap *iomap)
950 {
951         struct block_device *bdev = iomap->bdev;
952         struct dax_device *dax_dev = iomap->dax_dev;
953         struct iov_iter *iter = data;
954         loff_t end = pos + length, done = 0;
955         ssize_t ret = 0;
956         int id;
957
958         if (iov_iter_rw(iter) == READ) {
959                 end = min(end, i_size_read(inode));
960                 if (pos >= end)
961                         return 0;
962
963                 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
964                         return iov_iter_zero(min(length, end - pos), iter);
965         }
966
967         if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
968                 return -EIO;
969
970         /*
971          * A write can allocate a block for an area which has a hole page mapped
972          * into page tables. We have to tear down these mappings so that data
973          * written by write(2) is visible in mmap.
974          */
975         if (iomap->flags & IOMAP_F_NEW) {
976                 invalidate_inode_pages2_range(inode->i_mapping,
977                                               pos >> PAGE_SHIFT,
978                                               (end - 1) >> PAGE_SHIFT);
979         }
980
981         id = dax_read_lock();
982         while (pos < end) {
983                 unsigned offset = pos & (PAGE_SIZE - 1);
984                 const size_t size = ALIGN(length + offset, PAGE_SIZE);
985                 const sector_t sector = dax_iomap_sector(iomap, pos);
986                 ssize_t map_len;
987                 pgoff_t pgoff;
988                 void *kaddr;
989                 pfn_t pfn;
990
991                 if (fatal_signal_pending(current)) {
992                         ret = -EINTR;
993                         break;
994                 }
995
996                 ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
997                 if (ret)
998                         break;
999
1000                 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1001                                 &kaddr, &pfn);
1002                 if (map_len < 0) {
1003                         ret = map_len;
1004                         break;
1005                 }
1006
1007                 map_len = PFN_PHYS(map_len);
1008                 kaddr += offset;
1009                 map_len -= offset;
1010                 if (map_len > end - pos)
1011                         map_len = end - pos;
1012
1013                 /*
1014                  * The userspace address for the memory copy has already been
1015                  * validated via access_ok() in either vfs_read() or
1016                  * vfs_write(), depending on which operation we are doing.
1017                  */
1018                 if (iov_iter_rw(iter) == WRITE)
1019                         map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1020                                         map_len, iter);
1021                 else
1022                         map_len = copy_to_iter(kaddr, map_len, iter);
1023                 if (map_len <= 0) {
1024                         ret = map_len ? map_len : -EFAULT;
1025                         break;
1026                 }
1027
1028                 pos += map_len;
1029                 length -= map_len;
1030                 done += map_len;
1031         }
1032         dax_read_unlock(id);
1033
1034         return done ? done : ret;
1035 }
1036
1037 /**
1038  * dax_iomap_rw - Perform I/O to a DAX file
1039  * @iocb:       The control block for this I/O
1040  * @iter:       The addresses to do I/O from or to
1041  * @ops:        iomap ops passed from the file system
1042  *
1043  * This function performs read and write operations to directly mapped
1044  * persistent memory.  The caller needs to take care of read/write exclusion
1045  * and evicting any page cache pages in the region under I/O.
1046  */
1047 ssize_t
1048 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1049                 const struct iomap_ops *ops)
1050 {
1051         struct address_space *mapping = iocb->ki_filp->f_mapping;
1052         struct inode *inode = mapping->host;
1053         loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1054         unsigned flags = 0;
1055
1056         if (iov_iter_rw(iter) == WRITE) {
1057                 lockdep_assert_held_exclusive(&inode->i_rwsem);
1058                 flags |= IOMAP_WRITE;
1059         } else {
1060                 lockdep_assert_held(&inode->i_rwsem);
1061         }
1062
1063         while (iov_iter_count(iter)) {
1064                 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1065                                 iter, dax_iomap_actor);
1066                 if (ret <= 0)
1067                         break;
1068                 pos += ret;
1069                 done += ret;
1070         }
1071
1072         iocb->ki_pos += done;
1073         return done ? done : ret;
1074 }
1075 EXPORT_SYMBOL_GPL(dax_iomap_rw);
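/*
 * A rough caller-side sketch (modelled on the DAX read paths of filesystems
 * using this helper; the helper name and iomap ops are illustrative):
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *					     struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 */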
1076
1077 static int dax_fault_return(int error)
1078 {
1079         if (error == 0)
1080                 return VM_FAULT_NOPAGE;
1081         if (error == -ENOMEM)
1082                 return VM_FAULT_OOM;
1083         return VM_FAULT_SIGBUS;
1084 }
1085
1086 /*
1087  * MAP_SYNC on a dax mapping guarantees dirty metadata is
1088  * flushed on write-faults (non-cow), but not read-faults.
1089  */
1090 static bool dax_fault_is_synchronous(unsigned long flags,
1091                 struct vm_area_struct *vma, struct iomap *iomap)
1092 {
1093         return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1094                 && (iomap->flags & IOMAP_F_DIRTY);
1095 }
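/*
 * Userspace side, for context (a sketch): a mapping only has VM_SYNC set if
 * it was created with something like
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * on a DAX file, so only such mappings take the synchronous-fault path below.
 */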
1096
1097 static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1098                                int *iomap_errp, const struct iomap_ops *ops)
1099 {
1100         struct vm_area_struct *vma = vmf->vma;
1101         struct address_space *mapping = vma->vm_file->f_mapping;
1102         struct inode *inode = mapping->host;
1103         unsigned long vaddr = vmf->address;
1104         loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1105         struct iomap iomap = { 0 };
1106         unsigned flags = IOMAP_FAULT;
1107         int error, major = 0;
1108         bool write = vmf->flags & FAULT_FLAG_WRITE;
1109         bool sync;
1110         int vmf_ret = 0;
1111         void *entry;
1112         pfn_t pfn;
1113
1114         trace_dax_pte_fault(inode, vmf, vmf_ret);
1115         /*
1116          * Check whether the offset isn't beyond the end of the file now. The
1117          * caller is supposed to hold locks serializing us with truncate /
1118          * punch hole, so this is a reliable test.
1119          */
1120         if (pos >= i_size_read(inode)) {
1121                 vmf_ret = VM_FAULT_SIGBUS;
1122                 goto out;
1123         }
1124
1125         if (write && !vmf->cow_page)
1126                 flags |= IOMAP_WRITE;
1127
1128         entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1129         if (IS_ERR(entry)) {
1130                 vmf_ret = dax_fault_return(PTR_ERR(entry));
1131                 goto out;
1132         }
1133
1134         /*
1135          * It is possible, particularly with mixed reads & writes to private
1136          * mappings, that we have raced with a PMD fault that overlaps with
1137          * the PTE we need to set up.  If so just return and the fault will be
1138          * retried.
1139          */
1140         if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1141                 vmf_ret = VM_FAULT_NOPAGE;
1142                 goto unlock_entry;
1143         }
1144
1145         /*
1146          * Note that we don't bother to use iomap_apply here: DAX requires
1147          * the filesystem block size to be equal to the page size, which means
1148          * that we never have to deal with more than a single extent here.
1149          */
1150         error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1151         if (iomap_errp)
1152                 *iomap_errp = error;
1153         if (error) {
1154                 vmf_ret = dax_fault_return(error);
1155                 goto unlock_entry;
1156         }
1157         if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1158                 error = -EIO;   /* fs corruption? */
1159                 goto error_finish_iomap;
1160         }
1161
1162         if (vmf->cow_page) {
1163                 sector_t sector = dax_iomap_sector(&iomap, pos);
1164
1165                 switch (iomap.type) {
1166                 case IOMAP_HOLE:
1167                 case IOMAP_UNWRITTEN:
1168                         clear_user_highpage(vmf->cow_page, vaddr);
1169                         break;
1170                 case IOMAP_MAPPED:
1171                         error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1172                                         sector, PAGE_SIZE, vmf->cow_page, vaddr);
1173                         break;
1174                 default:
1175                         WARN_ON_ONCE(1);
1176                         error = -EIO;
1177                         break;
1178                 }
1179
1180                 if (error)
1181                         goto error_finish_iomap;
1182
1183                 __SetPageUptodate(vmf->cow_page);
1184                 vmf_ret = finish_fault(vmf);
1185                 if (!vmf_ret)
1186                         vmf_ret = VM_FAULT_DONE_COW;
1187                 goto finish_iomap;
1188         }
1189
1190         sync = dax_fault_is_synchronous(flags, vma, &iomap);
1191
1192         switch (iomap.type) {
1193         case IOMAP_MAPPED:
1194                 if (iomap.flags & IOMAP_F_NEW) {
1195                         count_vm_event(PGMAJFAULT);
1196                         count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1197                         major = VM_FAULT_MAJOR;
1198                 }
1199                 error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1200                 if (error < 0)
1201                         goto error_finish_iomap;
1202
1203                 entry = dax_insert_mapping_entry(mapping, vmf, entry,
1204                                                  dax_iomap_sector(&iomap, pos),
1205                                                  0, write && !sync);
1206                 if (IS_ERR(entry)) {
1207                         error = PTR_ERR(entry);
1208                         goto error_finish_iomap;
1209                 }
1210
1211                 /*
1212          * If we are doing a synchronous page fault and the inode needs fsync,
1213          * we can insert the PTE into the page tables only after that fsync has
1214          * happened. Skip insertion for now and return the pfn so that the
1215          * caller can insert it after fsync is done.
1216                  */
1217                 if (sync) {
1218                         if (WARN_ON_ONCE(!pfnp)) {
1219                                 error = -EIO;
1220                                 goto error_finish_iomap;
1221                         }
1222                         *pfnp = pfn;
1223                         vmf_ret = VM_FAULT_NEEDDSYNC | major;
1224                         goto finish_iomap;
1225                 }
1226                 trace_dax_insert_mapping(inode, vmf, entry);
1227                 if (write)
1228                         error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
1229                 else
1230                         error = vm_insert_mixed(vma, vaddr, pfn);
1231
1232                 /* -EBUSY is fine, somebody else faulted on the same PTE */
1233                 if (error == -EBUSY)
1234                         error = 0;
1235                 break;
1236         case IOMAP_UNWRITTEN:
1237         case IOMAP_HOLE:
1238                 if (!write) {
1239                         vmf_ret = dax_load_hole(mapping, entry, vmf);
1240                         goto finish_iomap;
1241                 }
1242                 /*FALLTHRU*/
1243         default:
1244                 WARN_ON_ONCE(1);
1245                 error = -EIO;
1246                 break;
1247         }
1248
1249  error_finish_iomap:
1250         vmf_ret = dax_fault_return(error) | major;
1251  finish_iomap:
1252         if (ops->iomap_end) {
1253                 int copied = PAGE_SIZE;
1254
1255                 if (vmf_ret & VM_FAULT_ERROR)
1256                         copied = 0;
1257                 /*
1258                  * The fault is done by now and there's no way back (another
1259                  * thread may already be happily using the PTE we have installed).
1260                  * Just ignore error from ->iomap_end since we cannot do much
1261                  * with it.
1262                  */
1263                 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1264         }
1265  unlock_entry:
1266         put_locked_mapping_entry(mapping, vmf->pgoff);
1267  out:
1268         trace_dax_pte_fault_done(inode, vmf, vmf_ret);
1269         return vmf_ret;
1270 }
1271
1272 #ifdef CONFIG_FS_DAX_PMD
1273 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1274                 void *entry)
1275 {
1276         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1277         unsigned long pmd_addr = vmf->address & PMD_MASK;
1278         struct inode *inode = mapping->host;
1279         struct page *zero_page;
1280         void *ret = NULL;
1281         spinlock_t *ptl;
1282         pmd_t pmd_entry;
1283
1284         zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1285
1286         if (unlikely(!zero_page))
1287                 goto fallback;
1288
1289         ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
1290                         RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
1291         if (IS_ERR(ret))
1292                 goto fallback;
1293
1294         ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1295         if (!pmd_none(*(vmf->pmd))) {
1296                 spin_unlock(ptl);
1297                 goto fallback;
1298         }
1299
1300         pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1301         pmd_entry = pmd_mkhuge(pmd_entry);
1302         set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1303         spin_unlock(ptl);
1304         trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1305         return VM_FAULT_NOPAGE;
1306
1307 fallback:
1308         trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1309         return VM_FAULT_FALLBACK;
1310 }
1311
1312 static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1313                                const struct iomap_ops *ops)
1314 {
1315         struct vm_area_struct *vma = vmf->vma;
1316         struct address_space *mapping = vma->vm_file->f_mapping;
1317         unsigned long pmd_addr = vmf->address & PMD_MASK;
1318         bool write = vmf->flags & FAULT_FLAG_WRITE;
1319         bool sync;
1320         unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1321         struct inode *inode = mapping->host;
1322         int result = VM_FAULT_FALLBACK;
1323         struct iomap iomap = { 0 };
1324         pgoff_t max_pgoff, pgoff;
1325         void *entry;
1326         loff_t pos;
1327         int error;
1328         pfn_t pfn;
1329
1330         /*
1331          * Check whether the offset isn't beyond the end of the file now. The
1332          * caller is supposed to hold locks serializing us with truncate /
1333          * punch hole, so this is a reliable test.
1334          */
1335         pgoff = linear_page_index(vma, pmd_addr);
1336         max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1337
1338         trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1339
1340         /*
1341          * Make sure that the faulting address's PMD offset (color) matches
1342          * the PMD offset from the start of the file.  This is necessary so
1343          * that a PMD range in the page table overlaps exactly with a PMD
1344          * range in the radix tree.
1345          */
1346         if ((vmf->pgoff & PG_PMD_COLOUR) !=
1347             ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1348                 goto fallback;
1349
1350         /* Fall back to PTEs if we're going to COW */
1351         if (write && !(vma->vm_flags & VM_SHARED))
1352                 goto fallback;
1353
1354         /* If the PMD would extend outside the VMA */
1355         if (pmd_addr < vma->vm_start)
1356                 goto fallback;
1357         if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1358                 goto fallback;
1359
1360         if (pgoff >= max_pgoff) {
1361                 result = VM_FAULT_SIGBUS;
1362                 goto out;
1363         }
1364
1365         /* If the PMD would extend beyond the file size */
1366         if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
1367                 goto fallback;
1368
1369         /*
1370          * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
1371          * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
1372          * is already in the tree, for instance), it will return -EEXIST and
1373          * we just fall back to 4k entries.
1374          */
1375         entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1376         if (IS_ERR(entry))
1377                 goto fallback;
1378
1379         /*
1380          * It is possible, particularly with mixed reads & writes to private
1381          * mappings, that we have raced with a PTE fault that overlaps with
1382          * the PMD we need to set up.  If so just return and the fault will be
1383          * retried.
1384          */
1385         if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1386                         !pmd_devmap(*vmf->pmd)) {
1387                 result = 0;
1388                 goto unlock_entry;
1389         }
1390
1391         /*
1392          * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1393          * setting up a mapping, so really we're using iomap_begin() as a way
1394          * to look up our filesystem block.
1395          */
1396         pos = (loff_t)pgoff << PAGE_SHIFT;
1397         error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1398         if (error)
1399                 goto unlock_entry;
1400
1401         if (iomap.offset + iomap.length < pos + PMD_SIZE)
1402                 goto finish_iomap;
1403
1404         sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1405
1406         switch (iomap.type) {
1407         case IOMAP_MAPPED:
1408                 error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1409                 if (error < 0)
1410                         goto finish_iomap;
1411
1412                 entry = dax_insert_mapping_entry(mapping, vmf, entry,
1413                                                 dax_iomap_sector(&iomap, pos),
1414                                                 RADIX_DAX_PMD, write && !sync);
1415                 if (IS_ERR(entry))
1416                         goto finish_iomap;
1417
1418                 /*
1419                  * If we are doing a synchronous page fault and the inode needs fsync,
1420                  * we can insert the PMD into the page tables only after that happens.
1421                  * Skip the insertion for now and return the pfn so that the caller
1422                  * can insert it after fsync is done.
1423                  */
1424                 if (sync) {
1425                         if (WARN_ON_ONCE(!pfnp))
1426                                 goto finish_iomap;
1427                         *pfnp = pfn;
1428                         result = VM_FAULT_NEEDDSYNC;
1429                         goto finish_iomap;
1430                 }
1431
1432                 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1433                 result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
1434                                             write);
1435                 break;
1436         case IOMAP_UNWRITTEN:
1437         case IOMAP_HOLE:
1438                 if (WARN_ON_ONCE(write))
1439                         break;
1440                 result = dax_pmd_load_hole(vmf, &iomap, entry);
1441                 break;
1442         default:
1443                 WARN_ON_ONCE(1);
1444                 break;
1445         }
1446
1447  finish_iomap:
1448         if (ops->iomap_end) {
1449                 int copied = PMD_SIZE;
1450
1451                 if (result == VM_FAULT_FALLBACK)
1452                         copied = 0;
1453                 /*
1454                  * The fault is done by now and there's no way back (another
1455                  * thread may already be happily using the PMD we have installed).
1456                  * Just ignore errors from ->iomap_end since we cannot do much
1457                  * with them.
1458                  */
1459                 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1460                                 &iomap);
1461         }
1462  unlock_entry:
1463         put_locked_mapping_entry(mapping, pgoff);
1464  fallback:
1465         if (result == VM_FAULT_FALLBACK) {
1466                 split_huge_pmd(vma, vmf->pmd, vmf->address);
1467                 count_vm_event(THP_FAULT_FALLBACK);
1468         }
1469 out:
1470         trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1471         return result;
1472 }
1473 #else
1474 static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1475                                const struct iomap_ops *ops)
1476 {
1477         return VM_FAULT_FALLBACK;
1478 }
1479 #endif /* CONFIG_FS_DAX_PMD */
1480
1481 /**
1482  * dax_iomap_fault - handle a page fault on a DAX file
1483  * @vmf: The description of the fault
1484  * @pe_size: Size of the page to fault in
1485  * @pfnp: PFN to insert for synchronous faults if fsync is required
1486  * @iomap_errp: Storage for detailed error code in case of error
1487  * @ops: Iomap ops passed from the file system
1488  *
1489  * When a page fault occurs, filesystems may call this helper in
1490  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1491  * has done all the necessary locking for the page fault to proceed
1492  * successfully.
1493  */
1494 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1495                     pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1496 {
1497         switch (pe_size) {
1498         case PE_SIZE_PTE:
1499                 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1500         case PE_SIZE_PMD:
1501                 return dax_iomap_pmd_fault(vmf, pfnp, ops);
1502         default:
1503                 return VM_FAULT_FALLBACK;
1504         }
1505 }
1506 EXPORT_SYMBOL_GPL(dax_iomap_fault);
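/*
 * Hedged usage sketch, not taken from any in-tree filesystem: a minimal
 * ->huge_fault handler forwarding DAX faults here.  myfs_dax_huge_fault and
 * myfs_iomap_ops are placeholder names, and a real caller must also take its
 * own locks serializing against truncate and hole punching, as required by
 * the kernel-doc above.  Handling of VM_FAULT_NEEDDSYNC for MAP_SYNC
 * mappings is shown in the sketch following dax_finish_sync_fault() below.
 */
static int myfs_dax_huge_fault(struct vm_fault *vmf,
			       enum page_entry_size pe_size)
{
	pfn_t pfn;

	/* pfn is only filled in for synchronous faults (VM_FAULT_NEEDDSYNC). */
	return dax_iomap_fault(vmf, pe_size, &pfn, NULL, &myfs_iomap_ops);
}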
1507
1508 /**
1509  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1510  * @vmf: The description of the fault
1511  * @pe_size: Size of entry to be inserted
1512  * @pfn: PFN to insert
1513  *
1514  * This function inserts a writeable PTE or PMD entry into the page tables for
1515  * an mmapped DAX file.  It also takes care of marking the corresponding radix
1516  * tree entry dirty.
1517  */
1518 static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
1519                                   enum page_entry_size pe_size,
1520                                   pfn_t pfn)
1521 {
1522         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1523         void *entry, **slot;
1524         pgoff_t index = vmf->pgoff;
1525         int vmf_ret, error;
1526
1527         spin_lock_irq(&mapping->tree_lock);
1528         entry = get_unlocked_mapping_entry(mapping, index, &slot);
1529         /* Did we race with someone splitting entry or so? */
1530         if (!entry ||
1531             (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
1532             (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
1533                 put_unlocked_mapping_entry(mapping, index, entry);
1534                 spin_unlock_irq(&mapping->tree_lock);
1535                 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1536                                                       VM_FAULT_NOPAGE);
1537                 return VM_FAULT_NOPAGE;
1538         }
1539         radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
1540         entry = lock_slot(mapping, slot);
1541         spin_unlock_irq(&mapping->tree_lock);
1542         switch (pe_size) {
1543         case PE_SIZE_PTE:
1544                 error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1545                 vmf_ret = dax_fault_return(error);
1546                 break;
1547 #ifdef CONFIG_FS_DAX_PMD
1548         case PE_SIZE_PMD:
1549                 vmf_ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1550                         pfn, true);
1551                 break;
1552 #endif
1553         default:
1554                 vmf_ret = VM_FAULT_FALLBACK;
1555         }
1556         put_locked_mapping_entry(mapping, index);
1557         trace_dax_insert_pfn_mkwrite(mapping->host, vmf, vmf_ret);
1558         return vmf_ret;
1559 }
1560
1561 /**
1562  * dax_finish_sync_fault - finish synchronous page fault
1563  * @vmf: The description of the fault
1564  * @pe_size: Size of entry to be inserted
1565  * @pfn: PFN to insert
1566  *
1567  * This function ensures that the file range touched by the page fault is
1568  * stored persistently on the media and then inserts the appropriate page
1569  * table entry.
1570  */
1571 int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1572                           pfn_t pfn)
1573 {
1574         int err;
1575         loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1576         size_t len = 0;
1577
1578         if (pe_size == PE_SIZE_PTE)
1579                 len = PAGE_SIZE;
1580         else if (pe_size == PE_SIZE_PMD)
1581                 len = PMD_SIZE;
1582         else
1583                 WARN_ON_ONCE(1);
1584         err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1585         if (err)
1586                 return VM_FAULT_SIGBUS;
1587         return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
1588 }
1589 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
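/*
 * Hedged sketch of the synchronous-fault round trip, reusing the placeholder
 * names from the sketch after dax_iomap_fault() above: when dax_iomap_fault()
 * returns VM_FAULT_NEEDDSYNC it has only filled in the pfn, and the caller
 * completes the fault here once the metadata backing the range is durable.
 */
static int myfs_dax_sync_huge_fault(struct vm_fault *vmf,
				    enum page_entry_size pe_size)
{
	pfn_t pfn;
	int result;

	result = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &myfs_iomap_ops);
	if (result & VM_FAULT_NEEDDSYNC)
		/* Flush the faulted range to media, then map pfn writeable. */
		result = dax_finish_sync_fault(vmf, pe_size, pfn);
	return result;
}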