/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

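/*
 * Map a page_entry_size to the matching page-table order: 0 for a PTE,
 * PMD_ORDER for a PMD, PUD_ORDER for a PUD; ~0 flags an unknown size.
 */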
static inline unsigned int pe_order(enum page_entry_size pe_size)
{
        if (pe_size == PE_SIZE_PTE)
                return PAGE_SHIFT - PAGE_SHIFT;
        if (pe_size == PE_SIZE_PMD)
                return PMD_SHIFT - PAGE_SHIFT;
        if (pe_size == PE_SIZE_PUD)
                return PUD_SHIFT - PAGE_SHIFT;
        return ~0;
}

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (i.e. low bits) within a PMD of a page offset.  */
#define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR       (PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER       (PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
        int i;

        for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
                init_waitqueue_head(wait_table + i);
        return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT       (4)
#define DAX_LOCKED      (1UL << 0)
#define DAX_PMD         (1UL << 1)
#define DAX_ZERO_PAGE   (1UL << 2)
#define DAX_EMPTY       (1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
        return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
        return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

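/*
 * Example (illustrative): a locked PMD entry for pfn 0x1234 is the value
 * entry xa_mk_value((0x1234UL << DAX_SHIFT) | DAX_PMD | DAX_LOCKED);
 * dax_to_pfn() just reverses the shift, discarding the flag bits.
 */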
static bool dax_is_locked(void *entry)
{
        return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
        if (xa_to_value(entry) & DAX_PMD)
                return PMD_ORDER;
        return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
        return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
        return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
        return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
        return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
        struct xarray *xa;
        pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
        wait_queue_entry_t wait;
        struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
                void *entry, struct exceptional_entry_key *key)
{
        unsigned long hash;
        unsigned long index = xas->xa_index;

        /*
         * If 'entry' is a PMD, align the 'index' that we use for the wait
         * queue to the start of that PMD.  This ensures that all offsets in
         * the range covered by the PMD map to the same bit lock.
         */
        if (dax_is_pmd_entry(entry))
                index &= ~PG_PMD_COLOUR;
        key->xa = xas->xa;
        key->entry_start = index;

        hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
        return wait_table + hash;
}

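/*
 * Wake-function used on the DAX wait queues: wake a waiter only if its key
 * (xarray, PMD-aligned index) matches the entry that is being unlocked.
 */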
static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
                unsigned int mode, int sync, void *keyp)
{
        struct exceptional_entry_key *key = keyp;
        struct wait_exceptional_entry_queue *ewait =
                container_of(wait, struct wait_exceptional_entry_queue, wait);

        if (key->xa != ewait->key.xa ||
            key->entry_start != ewait->key.entry_start)
                return 0;
        return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
{
        struct exceptional_entry_key key;
        wait_queue_head_t *wq;

        wq = dax_entry_waitqueue(xas, entry, &key);

        /*
         * Checking for locked entry and prepare_to_wait_exclusive() happens
         * under the i_pages lock, ditto for entry handling in our callers.
         * So at this point all tasks that could have seen our entry locked
         * must be in the waitqueue and the following check will see them.
         */
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas)
{
        void *entry;
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        for (;;) {
                entry = xas_find_conflict(xas);
                if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
                                !dax_is_locked(entry))
                        return entry;

                wq = dax_entry_waitqueue(xas, entry, &ewait.key);
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                          TASK_UNINTERRUPTIBLE);
                xas_unlock_irq(xas);
                xas_reset(xas);
                schedule();
                finish_wait(wq, &ewait.wait);
                xas_lock_irq(xas);
        }
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages).
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        wq = dax_entry_waitqueue(xas, entry, &ewait.key);
        /*
         * Unlike get_unlocked_entry() there is no guarantee that this
         * path ever successfully retrieves an unlocked entry before an
         * inode dies. Perform a non-exclusive wait in case this path
         * never successfully performs its own wake up.
         */
        prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
        xas_unlock_irq(xas);
        schedule();
        finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
        /* If we were the only waiter woken, wake the next one */
        if (entry)
                dax_wake_entry(xas, entry, false);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
        void *old;

        BUG_ON(dax_is_locked(entry));
        xas_reset(xas);
        xas_lock_irq(xas);
        old = xas_store(xas, entry);
        xas_unlock_irq(xas);
        BUG_ON(!dax_is_locked(old));
        dax_wake_entry(xas, entry, false);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
        unsigned long v = xa_to_value(entry);
        return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static unsigned long dax_entry_size(void *entry)
{
        if (dax_is_zero_entry(entry))
                return 0;
        else if (dax_is_empty_entry(entry))
                return 0;
        else if (dax_is_pmd_entry(entry))
                return PMD_SIZE;
        else
                return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
        return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
        for (pfn = dax_to_pfn(entry); \
                        pfn < dax_end_pfn(entry); pfn++)
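/*
 * For a PMD entry this visits PMD_SIZE / PAGE_SIZE consecutive pfns (512
 * with 4K pages and 2M PMDs); zero and empty entries have an entry size of
 * 0, so the loop body never runs for them.
 */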

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
                struct vm_area_struct *vma, unsigned long address)
{
        unsigned long size = dax_entry_size(entry), pfn, index;
        int i = 0;

        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
                return;

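        /* Align down so the pages of a PMD entry get consecutive ->index. */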
        index = linear_page_index(vma, address & ~(size - 1));
        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);

                WARN_ON_ONCE(page->mapping);
                page->mapping = mapping;
                page->index = index + i++;
        }
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
                bool trunc)
{
        unsigned long pfn;

        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
                return;

        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);

                WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
                WARN_ON_ONCE(page->mapping && page->mapping != mapping);
                page->mapping = NULL;
                page->index = 0;
        }
}

static struct page *dax_busy_page(void *entry)
{
        unsigned long pfn;

        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);

                if (page_ref_count(page) > 1)
                        return page;
        }
        return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
        XA_STATE(xas, NULL, 0);
        void *entry;

        /* Ensure page->mapping isn't freed while we look at it */
        rcu_read_lock();
        for (;;) {
                struct address_space *mapping = READ_ONCE(page->mapping);

                entry = NULL;
                if (!mapping || !dax_mapping(mapping))
                        break;

                /*
                 * In the device-dax case there's no need to lock, a
                 * struct dev_pagemap pin is sufficient to keep the
                 * inode alive, and we assume we have dev_pagemap pin
                 * otherwise we would not have a valid pfn_to_page()
                 * translation.
                 */
                entry = (void *)~0UL;
                if (S_ISCHR(mapping->host->i_mode))
                        break;

                xas.xa = &mapping->i_pages;
                xas_lock_irq(&xas);
                if (mapping != page->mapping) {
                        xas_unlock_irq(&xas);
                        continue;
                }
                xas_set(&xas, page->index);
                entry = xas_load(&xas);
                if (dax_is_locked(entry)) {
                        rcu_read_unlock();
                        wait_entry_unlocked(&xas, entry);
                        rcu_read_lock();
                        continue;
                }
                dax_lock_entry(&xas, entry);
                xas_unlock_irq(&xas);
                break;
        }
        rcu_read_unlock();
        return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
        struct address_space *mapping = page->mapping;
        XA_STATE(xas, &mapping->i_pages, page->index);

        if (S_ISCHR(mapping->host->i_mode))
                return;

        dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
                struct address_space *mapping, unsigned long size_flag)
{
        unsigned long index = xas->xa_index;
        bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
        void *entry;

retry:
        xas_lock_irq(xas);
        entry = get_unlocked_entry(xas);

        if (entry) {
                if (!xa_is_value(entry)) {
                        xas_set_err(xas, EIO);
                        goto out_unlock;
                }

                if (size_flag & DAX_PMD) {
                        if (dax_is_pte_entry(entry)) {
                                put_unlocked_entry(xas, entry);
                                goto fallback;
                        }
                } else { /* trying to grab a PTE entry */
                        if (dax_is_pmd_entry(entry) &&
                            (dax_is_zero_entry(entry) ||
                             dax_is_empty_entry(entry))) {
                                pmd_downgrade = true;
                        }
                }
        }

        if (pmd_downgrade) {
                /*
                 * Make sure 'entry' remains valid while we drop
                 * the i_pages lock.
                 */
                dax_lock_entry(xas, entry);

                /*
                 * Besides huge zero pages the only other thing that gets
                 * downgraded are empty entries which don't need to be
                 * unmapped.
                 */
                if (dax_is_zero_entry(entry)) {
                        xas_unlock_irq(xas);
                        unmap_mapping_pages(mapping,
                                        xas->xa_index & ~PG_PMD_COLOUR,
                                        PG_PMD_NR, false);
                        xas_reset(xas);
                        xas_lock_irq(xas);
                }

                dax_disassociate_entry(entry, mapping, false);
                xas_store(xas, NULL);   /* undo the PMD join */
                dax_wake_entry(xas, entry, true);
                mapping->nrexceptional--;
                entry = NULL;
                xas_set(xas, index);
        }

        if (entry) {
                dax_lock_entry(xas, entry);
        } else {
                entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
                dax_lock_entry(xas, entry);
                if (xas_error(xas))
                        goto out_unlock;
                mapping->nrexceptional++;
        }

out_unlock:
        xas_unlock_irq(xas);
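        /*
         * If the store failed for lack of memory, xas_nomem() allocates the
         * memory it needed and returns true, telling us to retry the whole
         * lookup from the top.
         */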
        if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
                goto retry;
        if (xas->xa_node == XA_ERROR(-ENOMEM))
                return xa_mk_internal(VM_FAULT_OOM);
        if (xas_error(xas))
                return xa_mk_internal(VM_FAULT_SIGBUS);
        return entry;
fallback:
        xas_unlock_irq(xas);
        return xa_mk_internal(VM_FAULT_FALLBACK);
}

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
        XA_STATE(xas, &mapping->i_pages, 0);
        void *entry;
        unsigned int scanned = 0;
        struct page *page = NULL;

        /*
         * In the 'limited' case get_user_pages() for dax is disabled.
         */
        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
                return NULL;

        if (!dax_mapping(mapping) || !mapping_mapped(mapping))
                return NULL;

        /*
         * If we race get_user_pages_fast() here either we'll see the
         * elevated page count in the iteration and wait, or
         * get_user_pages_fast() will see that the page it took a reference
         * against is no longer mapped in the page tables and bail to the
         * get_user_pages() slow path.  The slow path is protected by
         * pte_lock() and pmd_lock(). New references are not taken without
         * holding those locks, and unmap_mapping_range() will not zero the
         * pte or pmd without holding the respective lock, so we are
         * guaranteed to either see new references or prevent new
         * references from being established.
         */
        unmap_mapping_range(mapping, 0, 0, 1);

        xas_lock_irq(&xas);
        xas_for_each(&xas, entry, ULONG_MAX) {
                if (WARN_ON_ONCE(!xa_is_value(entry)))
                        continue;
                if (unlikely(dax_is_locked(entry)))
                        entry = get_unlocked_entry(&xas);
                if (entry)
                        page = dax_busy_page(entry);
                put_unlocked_entry(&xas, entry);
                if (page)
                        break;
                if (++scanned % XA_CHECK_SCHED)
                        continue;

                xas_pause(&xas);
                xas_unlock_irq(&xas);
                cond_resched();
                xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);
        return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_entry(struct address_space *mapping,
                                          pgoff_t index, bool trunc)
{
        XA_STATE(xas, &mapping->i_pages, index);
        int ret = 0;
        void *entry;

        xas_lock_irq(&xas);
        entry = get_unlocked_entry(&xas);
        if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
                goto out;
        if (!trunc &&
            (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
             xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
                goto out;
        dax_disassociate_entry(entry, mapping, trunc);
        xas_store(&xas, NULL);
        mapping->nrexceptional--;
        ret = 1;
out:
        put_unlocked_entry(&xas, entry);
        xas_unlock_irq(&xas);
        return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
        int ret = __dax_invalidate_entry(mapping, index, true);

        /*
         * This gets called from truncate / punch_hole path. As such, the caller
         * must hold locks protecting against concurrent modifications of the
         * page cache (usually fs-private i_mmap_sem for writing). Since the
         * caller has seen a DAX entry for this index, we better find it
         * at that index as well...
         */
        WARN_ON_ONCE(!ret);
        return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
                                      pgoff_t index)
{
        return __dax_invalidate_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
                sector_t sector, size_t size, struct page *to,
                unsigned long vaddr)
{
        void *vto, *kaddr;
        pgoff_t pgoff;
        long rc;
        int id;

        rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
        if (rc)
                return rc;

        id = dax_read_lock();
        rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
        if (rc < 0) {
                dax_read_unlock(id);
                return rc;
        }
        vto = kmap_atomic(to);
        copy_user_page(vto, (void __force *)kaddr, vaddr, to);
        kunmap_atomic(vto);
        dax_read_unlock(id);
        return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
                struct address_space *mapping, struct vm_fault *vmf,
                void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
        void *new_entry = dax_make_entry(pfn, flags);

        if (dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
                unsigned long index = xas->xa_index;
                /* we are replacing a zero page with block mapping */
                if (dax_is_pmd_entry(entry))
                        unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
                                        PG_PMD_NR, false);
                else /* pte entry */
                        unmap_mapping_pages(mapping, index, 1, false);
        }

        xas_reset(xas);
        xas_lock_irq(xas);
        if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
                dax_disassociate_entry(entry, mapping, false);
                dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
        }

        if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                /*
                 * Only swap our new entry into the page cache if the current
                 * entry is a zero page or an empty entry.  If a normal PTE or
                 * PMD entry is already in the cache, we leave it alone.  This
                 * means that if we are trying to insert a PTE and the
                 * existing entry is a PMD, we will just leave the PMD in the
                 * tree and dirty it if necessary.
                 */
                void *old = dax_lock_entry(xas, new_entry);
                WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
                                        DAX_LOCKED));
                entry = new_entry;
        } else {
                xas_load(xas);  /* Walk the xa_state */
        }

        if (dirty)
                xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

        xas_unlock_irq(xas);
        return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
        return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
                unsigned long pfn)
{
        struct vm_area_struct *vma;
        pte_t pte, *ptep = NULL;
        pmd_t *pmdp = NULL;
        spinlock_t *ptl;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
                struct mmu_notifier_range range;
                unsigned long address;

                cond_resched();

                if (!(vma->vm_flags & VM_SHARED))
                        continue;

                address = pgoff_address(index, vma);

                /*
                 * Note because we provide range to follow_pte_pmd it will
                 * call mmu_notifier_invalidate_range_start() on our behalf
                 * before taking any lock.
                 */
                if (follow_pte_pmd(vma->vm_mm, address, &range,
                                   &ptep, &pmdp, &ptl))
                        continue;

                /*
                 * No need to call mmu_notifier_invalidate_range() as we are
                 * downgrading page table protection not changing it to point
                 * to a new page.
                 *
                 * See Documentation/vm/mmu_notifier.rst
                 */
                if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
                        pmd_t pmd;

                        if (pfn != pmd_pfn(*pmdp))
                                goto unlock_pmd;
                        if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
                                goto unlock_pmd;

                        flush_cache_page(vma, address, pfn);
                        pmd = pmdp_huge_clear_flush(vma, address, pmdp);
                        pmd = pmd_wrprotect(pmd);
                        pmd = pmd_mkclean(pmd);
                        set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
                        spin_unlock(ptl);
                } else {
                        if (pfn != pte_pfn(*ptep))
                                goto unlock_pte;
                        if (!pte_dirty(*ptep) && !pte_write(*ptep))
                                goto unlock_pte;

                        flush_cache_page(vma, address, pfn);
                        pte = ptep_clear_flush(vma, address, ptep);
                        pte = pte_wrprotect(pte);
                        pte = pte_mkclean(pte);
                        set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
                        pte_unmap_unlock(ptep, ptl);
                }

                mmu_notifier_invalidate_range_end(&range);
        }
        i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
                struct address_space *mapping, void *entry)
{
        unsigned long pfn, index, count;
        long ret = 0;

        /*
         * A page got tagged dirty in DAX mapping? Something is seriously
         * wrong.
         */
        if (WARN_ON(!xa_is_value(entry)))
                return -EIO;

        if (unlikely(dax_is_locked(entry))) {
                void *old_entry = entry;

                entry = get_unlocked_entry(xas);

                /* Entry got punched out / reallocated? */
                if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
                        goto put_unlocked;
                /*
                 * Entry got reallocated elsewhere? No need to writeback.
                 * We have to compare pfns as we must not bail out due to
                 * difference in lockbit or entry type.
                 */
                if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
                        goto put_unlocked;
                if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
                                        dax_is_zero_entry(entry))) {
                        ret = -EIO;
                        goto put_unlocked;
                }

                /* Another fsync thread may have already done this entry */
                if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
                        goto put_unlocked;
        }

        /* Lock the entry to serialize with page faults */
        dax_lock_entry(xas, entry);

        /*
         * We can clear the tag now but we have to be careful so that concurrent
         * dax_writeback_one() calls for the same index cannot finish before we
         * actually flush the caches. This is achieved as the calls will look
         * at the entry only under the i_pages lock and once they do that
         * they will see the entry locked and wait for it to unlock.
         */
        xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
        xas_unlock_irq(xas);

        /*
         * If dax_writeback_mapping_range() was given a wbc->range_start
         * in the middle of a PMD, the 'index' we use needs to be
         * aligned to the start of the PMD.
         * This allows us to flush for PMD_SIZE and not have to worry about
         * partial PMD writebacks.
         */
        pfn = dax_to_pfn(entry);
        count = 1UL << dax_entry_order(entry);
        index = xas->xa_index & ~(count - 1);

        dax_entry_mkclean(mapping, index, pfn);
        dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
        /*
         * After we have flushed the cache, we can clear the dirty tag. There
         * cannot be new dirty data in the pfn after the flush has completed as
         * the pfn mappings are writeprotected and fault waits for mapping
         * entry lock.
         */
        xas_reset(xas);
        xas_lock_irq(xas);
        xas_store(xas, entry);
        xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
        dax_wake_entry(xas, entry, false);

        trace_dax_writeback_one(mapping->host, index, count);
        return ret;

 put_unlocked:
        put_unlocked_entry(xas, entry);
        return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc)
{
        XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
        struct inode *inode = mapping->host;
        pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
        struct dax_device *dax_dev;
        void *entry;
        int ret = 0;
        unsigned int scanned = 0;

        if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
                return -EIO;

        if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
                return 0;

        dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
        if (!dax_dev)
                return -EIO;

        trace_dax_writeback_range(inode, xas.xa_index, end_index);

        tag_pages_for_writeback(mapping, xas.xa_index, end_index);

        xas_lock_irq(&xas);
        xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
                ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
                if (ret < 0) {
                        mapping_set_error(mapping, ret);
                        break;
                }
                if (++scanned % XA_CHECK_SCHED)
                        continue;

                xas_pause(&xas);
                xas_unlock_irq(&xas);
                cond_resched();
                xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);
        put_dax(dax_dev);
        trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
        return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

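/* Convert a file position to a 512-byte sector on the backing device. */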
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
        return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
                         pfn_t *pfnp)
{
        const sector_t sector = dax_iomap_sector(iomap, pos);
        pgoff_t pgoff;
        int id, rc;
        long length;

        rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
        if (rc)
                return rc;
        id = dax_read_lock();
        length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
                                   NULL, pfnp);
        if (length < 0) {
                rc = length;
                goto out;
        }
        rc = -EINVAL;
        if (PFN_PHYS(length) < size)
                goto out;
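        /* The pfn must be aligned to the mapping size (e.g. PMD-aligned). */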
        if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
                goto out;
        /* For larger pages we need devmap */
        if (length > 1 && !pfn_t_devmap(*pfnp))
                goto out;
        rc = 0;
out:
        dax_read_unlock(id);
        return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
                struct address_space *mapping, void **entry,
                struct vm_fault *vmf)
{
        struct inode *inode = mapping->host;
        unsigned long vaddr = vmf->address;
        pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
        vm_fault_t ret;

        *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
                        DAX_ZERO_PAGE, false);

        ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
        trace_dax_load_hole(inode, vmf, ret);
        return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
                                 unsigned int offset, unsigned int length)
{
        unsigned short sector_size = bdev_logical_block_size(bdev);

        if (!IS_ALIGNED(offset, sector_size))
                return false;
        if (!IS_ALIGNED(length, sector_size))
                return false;

        return true;
}

int __dax_zero_page_range(struct block_device *bdev,
                struct dax_device *dax_dev, sector_t sector,
                unsigned int offset, unsigned int size)
{
        if (dax_range_is_aligned(bdev, offset, size)) {
                sector_t start_sector = sector + (offset >> 9);

                return blkdev_issue_zeroout(bdev, start_sector,
                                size >> 9, GFP_NOFS, 0);
        } else {
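                /* Sub-sector range: zero through the direct map and flush. */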
                pgoff_t pgoff;
                long rc, id;
                void *kaddr;

                rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
                if (rc)
                        return rc;

                id = dax_read_lock();
                rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
                if (rc < 0) {
                        dax_read_unlock(id);
                        return rc;
                }
                memset(kaddr + offset, 0, size);
                dax_flush(dax_dev, kaddr + offset, size);
                dax_read_unlock(id);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        struct block_device *bdev = iomap->bdev;
        struct dax_device *dax_dev = iomap->dax_dev;
        struct iov_iter *iter = data;
        loff_t end = pos + length, done = 0;
        ssize_t ret = 0;
        size_t xfer;
        int id;

        if (iov_iter_rw(iter) == READ) {
                end = min(end, i_size_read(inode));
                if (pos >= end)
                        return 0;

                if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
                        return iov_iter_zero(min(length, end - pos), iter);
        }

        if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
                return -EIO;

        /*
         * Write can allocate blocks for an area which has a hole page mapped
         * into page tables. We have to tear down these mappings so that data
         * written by write(2) is visible in mmap.
         */
        if (iomap->flags & IOMAP_F_NEW) {
                invalidate_inode_pages2_range(inode->i_mapping,
                                              pos >> PAGE_SHIFT,
                                              (end - 1) >> PAGE_SHIFT);
        }

        id = dax_read_lock();
        while (pos < end) {
                unsigned offset = pos & (PAGE_SIZE - 1);
                const size_t size = ALIGN(length + offset, PAGE_SIZE);
                const sector_t sector = dax_iomap_sector(iomap, pos);
                ssize_t map_len;
                pgoff_t pgoff;
                void *kaddr;

                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
                if (ret)
                        break;

                map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
                                &kaddr, NULL);
                if (map_len < 0) {
                        ret = map_len;
                        break;
                }

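                /* dax_direct_access() returned pages; convert to bytes. */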
                map_len = PFN_PHYS(map_len);
                kaddr += offset;
                map_len -= offset;
                if (map_len > end - pos)
                        map_len = end - pos;

                /*
                 * The userspace address for the memory copy has already been
                 * validated via access_ok() in either vfs_read() or
                 * vfs_write(), depending on which operation we are doing.
                 */
                if (iov_iter_rw(iter) == WRITE)
                        xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
                                        map_len, iter);
                else
                        xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
                                        map_len, iter);

                pos += xfer;
                length -= xfer;
                done += xfer;

                if (xfer == 0)
                        ret = -EFAULT;
                if (xfer < map_len)
                        break;
        }
        dax_read_unlock(id);

        return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:       The control block for this I/O
 * @iter:       The addresses to do I/O from or to
 * @ops:        iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = mapping->host;
        loff_t pos = iocb->ki_pos, ret = 0, done = 0;
        unsigned flags = 0;

        if (iov_iter_rw(iter) == WRITE) {
                lockdep_assert_held_exclusive(&inode->i_rwsem);
                flags |= IOMAP_WRITE;
        } else {
                lockdep_assert_held(&inode->i_rwsem);
        }

        while (iov_iter_count(iter)) {
                ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
                                iter, dax_iomap_actor);
                if (ret <= 0)
                        break;
                pos += ret;
                done += ret;
        }

        iocb->ki_pos += done;
        return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

static vm_fault_t dax_fault_return(int error)
{
        if (error == 0)
                return VM_FAULT_NOPAGE;
        return vmf_error(error);
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
                struct vm_area_struct *vma, struct iomap *iomap)
{
        return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
                && (iomap->flags & IOMAP_F_DIRTY);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
                               int *iomap_errp, const struct iomap_ops *ops)
{
        struct vm_area_struct *vma = vmf->vma;
        struct address_space *mapping = vma->vm_file->f_mapping;
        XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
        struct inode *inode = mapping->host;
        unsigned long vaddr = vmf->address;
        loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
        struct iomap iomap = { 0 };
        unsigned flags = IOMAP_FAULT;
        int error, major = 0;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
        bool sync;
        vm_fault_t ret = 0;
        void *entry;
        pfn_t pfn;

        trace_dax_pte_fault(inode, vmf, ret);
        /*
         * Check whether offset isn't beyond end of file now. Caller is supposed
         * to hold locks serializing us with truncate / punch hole so this is
         * a reliable test.
         */
        if (pos >= i_size_read(inode)) {
                ret = VM_FAULT_SIGBUS;
                goto out;
        }

        if (write && !vmf->cow_page)
                flags |= IOMAP_WRITE;

        entry = grab_mapping_entry(&xas, mapping, 0);
        if (xa_is_internal(entry)) {
                ret = xa_to_internal(entry);
                goto out;
        }

        /*
         * It is possible, particularly with mixed reads & writes to private
         * mappings, that we have raced with a PMD fault that overlaps with
         * the PTE we need to set up.  If so just return and the fault will be
         * retried.
         */
        if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
                ret = VM_FAULT_NOPAGE;
                goto unlock_entry;
        }

        /*
         * Note that we don't bother to use iomap_apply here: DAX requires
         * the file system block size to be equal to the page size, which means
1288          * that we never have to deal with more than a single extent here.
1289          */
1290         error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1291         if (iomap_errp)
1292                 *iomap_errp = error;
1293         if (error) {
1294                 ret = dax_fault_return(error);
1295                 goto unlock_entry;
1296         }
1297         if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1298                 error = -EIO;   /* fs corruption? */
1299                 goto error_finish_iomap;
1300         }
1301
1302         if (vmf->cow_page) {
1303                 sector_t sector = dax_iomap_sector(&iomap, pos);
1304
1305                 switch (iomap.type) {
1306                 case IOMAP_HOLE:
1307                 case IOMAP_UNWRITTEN:
1308                         clear_user_highpage(vmf->cow_page, vaddr);
1309                         break;
1310                 case IOMAP_MAPPED:
1311                         error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1312                                         sector, PAGE_SIZE, vmf->cow_page, vaddr);
1313                         break;
1314                 default:
1315                         WARN_ON_ONCE(1);
1316                         error = -EIO;
1317                         break;
1318                 }
1319
1320                 if (error)
1321                         goto error_finish_iomap;
1322
1323                 __SetPageUptodate(vmf->cow_page);
1324                 ret = finish_fault(vmf);
1325                 if (!ret)
1326                         ret = VM_FAULT_DONE_COW;
1327                 goto finish_iomap;
1328         }
1329
1330         sync = dax_fault_is_synchronous(flags, vma, &iomap);
1331
1332         switch (iomap.type) {
1333         case IOMAP_MAPPED:
1334                 if (iomap.flags & IOMAP_F_NEW) {
1335                         count_vm_event(PGMAJFAULT);
1336                         count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1337                         major = VM_FAULT_MAJOR;
1338                 }
1339                 error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1340                 if (error < 0)
1341                         goto error_finish_iomap;
1342
1343                 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1344                                                  0, write && !sync);
1345
1346                 /*
1347                  * If we are doing synchronous page fault and inode needs fsync,
1348                  * we can insert PTE into page tables only after that happens.
1349                  * Skip insertion for now and return the pfn so that caller can
1350                  * insert it after fsync is done.
1351                  */
1352                 if (sync) {
1353                         if (WARN_ON_ONCE(!pfnp)) {
1354                                 error = -EIO;
1355                                 goto error_finish_iomap;
1356                         }
1357                         *pfnp = pfn;
1358                         ret = VM_FAULT_NEEDDSYNC | major;
1359                         goto finish_iomap;
1360                 }
1361                 trace_dax_insert_mapping(inode, vmf, entry);
1362                 if (write)
1363                         ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
1364                 else
1365                         ret = vmf_insert_mixed(vma, vaddr, pfn);
1366
1367                 goto finish_iomap;
1368         case IOMAP_UNWRITTEN:
1369         case IOMAP_HOLE:
1370                 if (!write) {
1371                         ret = dax_load_hole(&xas, mapping, &entry, vmf);
1372                         goto finish_iomap;
1373                 }
1374                 /*FALLTHRU*/
1375         default:
1376                 WARN_ON_ONCE(1);
1377                 error = -EIO;
1378                 break;
1379         }
1380
1381  error_finish_iomap:
1382         ret = dax_fault_return(error);
1383  finish_iomap:
1384         if (ops->iomap_end) {
1385                 int copied = PAGE_SIZE;
1386
1387                 if (ret & VM_FAULT_ERROR)
1388                         copied = 0;
1389                 /*
1390                  * The fault is done by now and there's no way back (other
1391                  * thread may be already happily using PTE we have installed).
1392                  * Just ignore error from ->iomap_end since we cannot do much
1393                  * with it.
1394                  */
1395                 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1396         }
1397  unlock_entry:
1398         dax_unlock_entry(&xas, entry);
1399  out:
1400         trace_dax_pte_fault_done(inode, vmf, ret);
1401         return ret | major;
1402 }
1403
1404 #ifdef CONFIG_FS_DAX_PMD
1405 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1406                 struct iomap *iomap, void **entry)
1407 {
1408         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1409         unsigned long pmd_addr = vmf->address & PMD_MASK;
1410         struct inode *inode = mapping->host;
1411         struct page *zero_page;
1412         spinlock_t *ptl;
1413         pmd_t pmd_entry;
1414         pfn_t pfn;
1415
1416         zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1417
1418         if (unlikely(!zero_page))
1419                 goto fallback;
1420
1421         pfn = page_to_pfn_t(zero_page);
1422         *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1423                         DAX_PMD | DAX_ZERO_PAGE, false);
1424
1425         ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1426         if (!pmd_none(*(vmf->pmd))) {
1427                 spin_unlock(ptl);
1428                 goto fallback;
1429         }
1430
1431         pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1432         pmd_entry = pmd_mkhuge(pmd_entry);
1433         set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1434         spin_unlock(ptl);
1435         trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1436         return VM_FAULT_NOPAGE;
1437
1438 fallback:
1439         trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1440         return VM_FAULT_FALLBACK;
1441 }
1442
1443 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1444                                const struct iomap_ops *ops)
1445 {
1446         struct vm_area_struct *vma = vmf->vma;
1447         struct address_space *mapping = vma->vm_file->f_mapping;
1448         XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1449         unsigned long pmd_addr = vmf->address & PMD_MASK;
1450         bool write = vmf->flags & FAULT_FLAG_WRITE;
1451         bool sync;
1452         unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1453         struct inode *inode = mapping->host;
1454         vm_fault_t result = VM_FAULT_FALLBACK;
1455         struct iomap iomap = { 0 };
1456         pgoff_t max_pgoff;
1457         void *entry;
1458         loff_t pos;
1459         int error;
1460         pfn_t pfn;
1461
1462         /*
1463          * Check whether offset isn't beyond end of file now. Caller is
1464          * supposed to hold locks serializing us with truncate / punch hole so
1465          * this is a reliable test.
1466          */
1467         max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1468
1469         trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1470
1471         /*
1472          * Make sure that the faulting address's PMD offset (color) matches
1473          * the PMD offset from the start of the file.  This is necessary so
1474          * that a PMD range in the page table overlaps exactly with a PMD
1475          * range in the page cache.
1476          */
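        /*
         * Worked example (illustrative; assumes 4K pages and 2M PMDs, so
         * PG_PMD_COLOUR == 511): file page 1 mmap()ed at a 2M-aligned
         * address has colour 1 in the file but colour 0 in the page
         * tables, so no single PMD can describe it and we must fall back.
         */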
1477         if ((vmf->pgoff & PG_PMD_COLOUR) !=
1478             ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1479                 goto fallback;
1480
1481         /* Fall back to PTEs if we're going to COW */
1482         if (write && !(vma->vm_flags & VM_SHARED))
1483                 goto fallback;
1484
1485         /* If the PMD would extend outside the VMA */
1486         if (pmd_addr < vma->vm_start)
1487                 goto fallback;
1488         if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1489                 goto fallback;
1490
1491         if (xas.xa_index >= max_pgoff) {
1492                 result = VM_FAULT_SIGBUS;
1493                 goto out;
1494         }
1495
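        /*
         * xas.xa_index | PG_PMD_COLOUR is the index of the last page that
         * a PMD entry rooted at xas.xa_index would cover:
         */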
1496         /* If the PMD would extend beyond the file size */
1497         if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
1498                 goto fallback;
1499
1500         /*
1501          * grab_mapping_entry() will make sure we get an empty PMD entry,
1502          * a zero PMD entry or a DAX PMD entry.  If it can't (because a PTE
1503          * entry is already in the array, for instance), it will return
1504          * VM_FAULT_FALLBACK wrapped in an internal entry.
1505          */
1506         entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
1507         if (xa_is_internal(entry)) {
1508                 result = xa_to_internal(entry);
1509                 goto fallback;
1510         }
1511
1512         /*
1513          * It is possible, particularly with mixed reads & writes to private
1514          * mappings, that we have raced with a PTE fault that overlaps with
1515          * the PMD we need to set up.  If so just return and the fault will be
1516          * retried.
1517          */
1518         if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1519                         !pmd_devmap(*vmf->pmd)) {
1520                 result = 0;
1521                 goto unlock_entry;
1522         }
1523
1524         /*
1525          * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1526          * setting up a mapping, so really we're using iomap_begin() as a way
1527          * to look up our filesystem block.
1528          */
1529         pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1530         error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1531         if (error)
1532                 goto unlock_entry;
1533
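        /* The extent returned by ->iomap_begin must cover the whole PMD. */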
1534         if (iomap.offset + iomap.length < pos + PMD_SIZE)
1535                 goto finish_iomap;
1536
1537         sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1538
1539         switch (iomap.type) {
1540         case IOMAP_MAPPED:
1541                 error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1542                 if (error < 0)
1543                         goto finish_iomap;
1544
1545                 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1546                                                 DAX_PMD, write && !sync);
1547
1548                 /*
1549                  * If we are doing a synchronous page fault and the inode
1550                  * needs fsync, we can insert the PMD into the page tables only
1551                  * after that happens. Skip the insertion for now and return the
1552                  * pfn so that the caller can insert it after fsync is done.
1553                  */
1554                 if (sync) {
1555                         if (WARN_ON_ONCE(!pfnp))
1556                                 goto finish_iomap;
1557                         *pfnp = pfn;
1558                         result = VM_FAULT_NEEDDSYNC;
1559                         goto finish_iomap;
1560                 }
1561
1562                 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1563                 result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
1564                                             write);
1565                 break;
1566         case IOMAP_UNWRITTEN:
1567         case IOMAP_HOLE:
1568                 if (WARN_ON_ONCE(write))
1569                         break;
1570                 result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
1571                 break;
1572         default:
1573                 WARN_ON_ONCE(1);
1574                 break;
1575         }
1576
1577  finish_iomap:
1578         if (ops->iomap_end) {
1579                 int copied = PMD_SIZE;
1580
1581                 if (result == VM_FAULT_FALLBACK)
1582                         copied = 0;
1583                 /*
1584                  * The fault is done by now and there's no way back (another
1585                  * thread may already be happily using the PMD we have
1586                  * installed). Just ignore any error from ->iomap_end since
1587                  * we cannot do much with it.
1588                  */
1589                 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1590                                 &iomap);
1591         }
1592  unlock_entry:
1593         dax_unlock_entry(&xas, entry);
1594  fallback:
1595         if (result == VM_FAULT_FALLBACK) {
1596                 split_huge_pmd(vma, vmf->pmd, vmf->address);
1597                 count_vm_event(THP_FAULT_FALLBACK);
1598         }
1599 out:
1600         trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1601         return result;
1602 }
1603 #else
1604 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1605                                const struct iomap_ops *ops)
1606 {
1607         return VM_FAULT_FALLBACK;
1608 }
1609 #endif /* CONFIG_FS_DAX_PMD */
1610
1611 /**
1612  * dax_iomap_fault - handle a page fault on a DAX file
1613  * @vmf: The description of the fault
1614  * @pe_size: Size of the page to fault in
1615  * @pfnp: PFN to insert for synchronous faults if fsync is required
1616  * @iomap_errp: Storage for detailed error code in case of error
1617  * @ops: Iomap ops passed from the file system
1618  *
1619  * When a page fault occurs, filesystems may call this helper in
1620  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1621  * has done all the necessary locking for the page fault to proceed
1622  * successfully.
1623  */
1624 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1625                     pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1626 {
1627         switch (pe_size) {
1628         case PE_SIZE_PTE:
1629                 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1630         case PE_SIZE_PMD:
1631                 return dax_iomap_pmd_fault(vmf, pfnp, ops);
1632         default:
1633                 return VM_FAULT_FALLBACK;
1634         }
1635 }
1636 EXPORT_SYMBOL_GPL(dax_iomap_fault);
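
/*
 * Illustrative sketch only (not part of this file): a filesystem's fault
 * handler typically forwards to dax_iomap_fault() with its own iomap ops;
 * "my_iomap_ops" below is a placeholder name:
 *
 *	static vm_fault_t my_filemap_fault(struct vm_fault *vmf)
 *	{
 *		return dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL,
 *				&my_iomap_ops);
 *	}
 */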
1637
1638 /*
1639  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1640  * @vmf: The description of the fault
1641  * @pfn: PFN to insert
1642  * @order: Order of entry to insert.
1643  *
1644  * This function inserts a writeable PTE or PMD entry into the page tables
1645  * for an mmapped DAX file.  It also marks the page cache entry as dirty.
1646  */
1647 static vm_fault_t
1648 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1649 {
1650         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1651         XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1652         void *entry;
1653         vm_fault_t ret;
1654
1655         xas_lock_irq(&xas);
1656         entry = get_unlocked_entry(&xas);
1657         /* Did we race with someone splitting the entry, or similar? */
1658         if (!entry ||
1659             (order == 0 && !dax_is_pte_entry(entry)) ||
1660             (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
1661                 put_unlocked_entry(&xas, entry);
1662                 xas_unlock_irq(&xas);
1663                 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1664                                                       VM_FAULT_NOPAGE);
1665                 return VM_FAULT_NOPAGE;
1666         }
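        /*
         * We are about to make the mapping writeable, so tag the entry
         * dirty: a subsequent flush/fsync must write it back.
         */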
1667         xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1668         dax_lock_entry(&xas, entry);
1669         xas_unlock_irq(&xas);
1670         if (order == 0)
1671                 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1672 #ifdef CONFIG_FS_DAX_PMD
1673         else if (order == PMD_ORDER)
1674                 ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1675                         pfn, true);
1676 #endif
1677         else
1678                 ret = VM_FAULT_FALLBACK;
1679         dax_unlock_entry(&xas, entry);
1680         trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1681         return ret;
1682 }
1683
1684 /**
1685  * dax_finish_sync_fault - finish synchronous page fault
1686  * @vmf: The description of the fault
1687  * @pe_size: Size of entry to be inserted
1688  * @pfn: PFN to insert
1689  *
1690  * This function ensures that the file range touched by the page fault is
1691  * stored persistently on the media and then inserts the appropriate page
1692  * table entry.
1693  */
1694 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1695                 enum page_entry_size pe_size, pfn_t pfn)
1696 {
1697         int err;
1698         loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1699         unsigned int order = pe_order(pe_size);
1700         size_t len = PAGE_SIZE << order;
1701
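        /*
         * Flush exactly the range backing this entry; the final argument
         * requests datasync semantics.
         */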
1702         err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1703         if (err)
1704                 return VM_FAULT_SIGBUS;
1705         return dax_insert_pfn_mkwrite(vmf, pfn, order);
1706 }
1707 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
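
/*
 * Illustrative sketch only (not part of this file): for MAP_SYNC mappings a
 * fault handler combines the two exported helpers roughly like this, with
 * "my_iomap_ops" again a placeholder:
 *
 *	static vm_fault_t my_filemap_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &my_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 */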