1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2015 Red Hat, Inc.
9 #include <linux/sched/signal.h>
10 #include <linux/pagemap.h>
11 #include <linux/rmap.h>
12 #include <linux/swap.h>
13 #include <linux/swapops.h>
14 #include <linux/userfaultfd_k.h>
15 #include <linux/mmu_notifier.h>
16 #include <linux/hugetlb.h>
17 #include <linux/shmem_fs.h>
18 #include <asm/tlbflush.h>
22 static __always_inline
23 bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
25 /* Make sure that the dst range is fully within dst_vma. */
26 if (dst_end > dst_vma->vm_end)
30 * Check that the vma is registered in uffd; this is required to
31 * enforce the VM_MAYWRITE check done at uffd registration time.
34 if (!dst_vma->vm_userfaultfd_ctx.ctx)
40 static __always_inline
41 struct vm_area_struct *find_vma_and_prepare_anon(struct mm_struct *mm,
44 struct vm_area_struct *vma;
46 mmap_assert_locked(mm);
47 vma = vma_lookup(mm, addr);
49 vma = ERR_PTR(-ENOENT);
50 else if (!(vma->vm_flags & VM_SHARED) &&
51 unlikely(anon_vma_prepare(vma)))
52 vma = ERR_PTR(-ENOMEM);
57 #ifdef CONFIG_PER_VMA_LOCK
59 * lock_vma() - Lookup and lock vma corresponding to @address.
60 * @mm: mm to search vma in.
61 * @address: address that the vma should contain.
63 * Should be called without holding mmap_lock. vma should be unlocked after use.
66 * Return: A locked vma containing @address, -ENOENT if no vma is found, or
67 * -ENOMEM if anon_vma couldn't be allocated.
69 static struct vm_area_struct *lock_vma(struct mm_struct *mm,
70 unsigned long address)
72 struct vm_area_struct *vma;
74 vma = lock_vma_under_rcu(mm, address);
77 * lock_vma_under_rcu() only checks anon_vma for private
78 * anonymous mappings. But we need to ensure it is assigned in
79 * private file-backed vmas as well.
81 if (!(vma->vm_flags & VM_SHARED) && unlikely(!vma->anon_vma))
88 vma = find_vma_and_prepare_anon(mm, address);
91 * We cannot use vma_start_read() as it may fail due to a
92 * false-locked result (see comment in vma_start_read()). We
93 * can avoid that by directly locking vm_lock under
94 * mmap_lock, which guarantees that nobody can lock the
95 * vma for write (vma_start_write()) under us.
97 down_read(&vma->vm_lock->lock);
100 mmap_read_unlock(mm);
104 static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
105 unsigned long dst_start,
108 struct vm_area_struct *dst_vma;
110 dst_vma = lock_vma(dst_mm, dst_start);
111 if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))
114 vma_end_read(dst_vma);
115 return ERR_PTR(-ENOENT);
118 static void uffd_mfill_unlock(struct vm_area_struct *vma)
125 static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
126 unsigned long dst_start,
129 struct vm_area_struct *dst_vma;
131 mmap_read_lock(dst_mm);
132 dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
136 if (validate_dst_vma(dst_vma, dst_start + len))
139 dst_vma = ERR_PTR(-ENOENT);
141 mmap_read_unlock(dst_mm);
145 static void uffd_mfill_unlock(struct vm_area_struct *vma)
147 mmap_read_unlock(vma->vm_mm);
151 /* Check if dst_addr is outside of the file's size. Must be called with ptl held. */
152 static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
153 unsigned long dst_addr)
156 pgoff_t offset, max_off;
158 if (!dst_vma->vm_file)
161 inode = dst_vma->vm_file->f_inode;
162 offset = linear_page_index(dst_vma, dst_addr);
163 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
164 return offset >= max_off;
168 * Install PTEs to map dst_addr (within dst_vma) to page.
170 * This function handles both MFILL_ATOMIC_COPY and MFILL_ATOMIC_CONTINUE, for
171 * both shmem and anon, and for both shared and private VMAs.
173 int mfill_atomic_install_pte(pmd_t *dst_pmd,
174 struct vm_area_struct *dst_vma,
175 unsigned long dst_addr, struct page *page,
176 bool newly_allocated, uffd_flags_t flags)
179 struct mm_struct *dst_mm = dst_vma->vm_mm;
180 pte_t _dst_pte, *dst_pte;
181 bool writable = dst_vma->vm_flags & VM_WRITE;
182 bool vm_shared = dst_vma->vm_flags & VM_SHARED;
183 bool page_in_cache = page_mapping(page);
187 _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
188 _dst_pte = pte_mkdirty(_dst_pte);
189 if (page_in_cache && !vm_shared)
192 _dst_pte = pte_mkwrite(_dst_pte, dst_vma);
193 if (flags & MFILL_ATOMIC_WP)
194 _dst_pte = pte_mkuffd_wp(_dst_pte);
197 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
201 if (mfill_file_over_size(dst_vma, dst_addr)) {
208 * We allow overwriting a pte marker: consider when both MISSING|WP are
209 * registered, we first wr-protect a none pte which has no page cache
210 * page backing it, then access the page.
212 if (!pte_none_mostly(ptep_get(dst_pte)))
215 folio = page_folio(page);
217 /* Usually, cache pages are already added to LRU */
219 folio_add_lru(folio);
220 folio_add_file_rmap_pte(folio, page, dst_vma);
222 folio_add_new_anon_rmap(folio, dst_vma, dst_addr);
223 folio_add_lru_vma(folio, dst_vma);
227 * Must happen after rmap, as mm_counter() checks mapping (via
228 * PageAnon()), which is set by __page_set_anon_rmap().
230 inc_mm_counter(dst_mm, mm_counter(folio));
232 set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
234 /* No need to invalidate - it was non-present before */
235 update_mmu_cache(dst_vma, dst_addr, dst_pte);
238 pte_unmap_unlock(dst_pte, ptl);
243 static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
244 struct vm_area_struct *dst_vma,
245 unsigned long dst_addr,
246 unsigned long src_addr,
248 struct folio **foliop)
256 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
261 kaddr = kmap_local_folio(folio, 0);
263 * The read mmap_lock is held here. Despite the
264 * mmap_lock being read-recursive, a deadlock is still
265 * possible if a writer has taken a lock. For example:
267 * process A thread 1 takes read lock on own mmap_lock
268 * process A thread 2 calls mmap, blocks taking write lock
269 * process B thread 1 takes page fault, read lock on own mmap lock
270 * process B thread 2 calls mmap, blocks taking write lock
271 * process A thread 1 blocks taking read lock on process B
272 * process B thread 1 blocks taking read lock on process A
274 * Disable page faults to prevent potential deadlock
275 * and retry the copy outside the mmap_lock.
278 ret = copy_from_user(kaddr, (const void __user *) src_addr,
283 /* fallback to copy_from_user outside mmap_lock */
287 /* don't free the page */
291 flush_dcache_folio(folio);
298 * The memory barrier inside __folio_mark_uptodate makes sure that
299 * preceding stores to the page contents become visible before
300 * the set_pte_at() write.
302 __folio_mark_uptodate(folio);
305 if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
308 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
309 &folio->page, true, flags);
319 static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
320 struct vm_area_struct *dst_vma,
321 unsigned long dst_addr)
326 folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
330 if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
334 * The memory barrier inside __folio_mark_uptodate makes sure that
335 * zeroing out the folio becomes visible before mapping the page
336 * using set_pte_at(). See do_anonymous_page().
338 __folio_mark_uptodate(folio);
340 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
341 &folio->page, true, 0);
351 static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
352 struct vm_area_struct *dst_vma,
353 unsigned long dst_addr)
355 pte_t _dst_pte, *dst_pte;
359 if (mm_forbids_zeropage(dst_vma->vm_mm))
360 return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
362 _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
363 dst_vma->vm_page_prot));
365 dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
368 if (mfill_file_over_size(dst_vma, dst_addr)) {
373 if (!pte_none(ptep_get(dst_pte)))
375 set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
376 /* No need to invalidate - it was non-present before */
377 update_mmu_cache(dst_vma, dst_addr, dst_pte);
380 pte_unmap_unlock(dst_pte, ptl);
385 /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
386 static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
387 struct vm_area_struct *dst_vma,
388 unsigned long dst_addr,
391 struct inode *inode = file_inode(dst_vma->vm_file);
392 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
397 ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
398 /* Our caller expects us to return -EFAULT if we failed to find the folio */
408 page = folio_file_page(folio, pgoff);
409 if (PageHWPoison(page)) {
414 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
429 /* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
430 static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
431 struct vm_area_struct *dst_vma,
432 unsigned long dst_addr,
436 struct mm_struct *dst_mm = dst_vma->vm_mm;
437 pte_t _dst_pte, *dst_pte;
440 _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
442 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
446 if (mfill_file_over_size(dst_vma, dst_addr)) {
452 /* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */
453 if (!pte_none(ptep_get(dst_pte)))
456 set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
458 /* No need to invalidate - it was non-present before */
459 update_mmu_cache(dst_vma, dst_addr, dst_pte);
462 pte_unmap_unlock(dst_pte, ptl);
467 static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
473 pgd = pgd_offset(mm, address);
474 p4d = p4d_alloc(mm, pgd, address);
477 pud = pud_alloc(mm, p4d, address);
481 * Note that we didn't run this because the pmd was
482 * missing; the *pmd may already be established and, in
483 * turn, it may also be a trans_huge_pmd.
485 return pmd_alloc(mm, pud, address);
488 #ifdef CONFIG_HUGETLB_PAGE
490 * mfill_atomic processing for HUGETLB vmas. Note that this routine is
491 * called with either vma-lock or mmap_lock held; it will release the lock
494 static __always_inline ssize_t mfill_atomic_hugetlb(
495 struct userfaultfd_ctx *ctx,
496 struct vm_area_struct *dst_vma,
497 unsigned long dst_start,
498 unsigned long src_start,
502 struct mm_struct *dst_mm = dst_vma->vm_mm;
505 unsigned long src_addr, dst_addr;
508 unsigned long vma_hpagesize;
511 struct address_space *mapping;
514 * There is no default zero huge page for all huge page sizes as
515 * supported by hugetlb. A PMD_SIZE huge page may exist as used
516 * by THP. Since we cannot reliably insert a zero page, this
517 * feature is not supported.
519 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
520 up_read(&ctx->map_changing_lock);
521 uffd_mfill_unlock(dst_vma);
525 src_addr = src_start;
526 dst_addr = dst_start;
529 vma_hpagesize = vma_kernel_pagesize(dst_vma);
532 * Validate alignment based on huge page size
535 if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
540 * On routine entry dst_vma is set. If we had to drop mmap_lock and
541 * retry, dst_vma will be set to NULL and we must look it up again.
544 dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
545 if (IS_ERR(dst_vma)) {
546 err = PTR_ERR(dst_vma);
551 if (!is_vm_hugetlb_page(dst_vma))
555 if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
559 * If memory mappings are changing because of a non-cooperative
560 * operation (e.g. mremap) running in parallel, bail out and
561 * request the user to retry later.
563 down_read(&ctx->map_changing_lock);
565 if (atomic_read(&ctx->mmap_changing))
569 while (src_addr < src_start + len) {
570 BUG_ON(dst_addr >= dst_start + len);
573 * Serialize via vma_lock and hugetlb_fault_mutex.
574 * vma_lock ensures the dst_pte remains valid even
575 * in the case of shared pmds. The fault mutex prevents
576 * races with other faulting threads.
578 idx = linear_page_index(dst_vma, dst_addr);
579 mapping = dst_vma->vm_file->f_mapping;
580 hash = hugetlb_fault_mutex_hash(mapping, idx);
581 mutex_lock(&hugetlb_fault_mutex_table[hash]);
582 hugetlb_vma_lock_read(dst_vma);
585 dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
587 hugetlb_vma_unlock_read(dst_vma);
588 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
592 if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
593 !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
595 hugetlb_vma_unlock_read(dst_vma);
596 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
600 err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
601 src_addr, flags, &folio);
603 hugetlb_vma_unlock_read(dst_vma);
604 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
608 if (unlikely(err == -ENOENT)) {
609 up_read(&ctx->map_changing_lock);
610 uffd_mfill_unlock(dst_vma);
613 err = copy_folio_from_user(folio,
614 (const void __user *)src_addr, true);
626 dst_addr += vma_hpagesize;
627 src_addr += vma_hpagesize;
628 copied += vma_hpagesize;
630 if (fatal_signal_pending(current))
638 up_read(&ctx->map_changing_lock);
640 uffd_mfill_unlock(dst_vma);
646 BUG_ON(!copied && !err);
647 return copied ? copied : err;
649 #else /* !CONFIG_HUGETLB_PAGE */
650 /* fail at build time if gcc attempts to use this */
651 extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
652 struct vm_area_struct *dst_vma,
653 unsigned long dst_start,
654 unsigned long src_start,
657 #endif /* CONFIG_HUGETLB_PAGE */
659 static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
660 struct vm_area_struct *dst_vma,
661 unsigned long dst_addr,
662 unsigned long src_addr,
664 struct folio **foliop)
668 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
669 return mfill_atomic_pte_continue(dst_pmd, dst_vma,
671 } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
672 return mfill_atomic_pte_poison(dst_pmd, dst_vma,
677 * The normal page fault path for a shmem will invoke the
678 * fault, fill the hole in the file and COW it right away. The
679 * result generates plain anonymous memory. So when we are
680 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
681 * generate anonymous memory directly without actually filling
682 * the hole. For the MAP_PRIVATE case the robustness check
683 * only happens in the pagetable (to verify it's still none)
684 * and not in the radix tree.
686 if (!(dst_vma->vm_flags & VM_SHARED)) {
687 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
688 err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
692 err = mfill_atomic_pte_zeropage(dst_pmd,
695 err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
703 static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
704 unsigned long dst_start,
705 unsigned long src_start,
709 struct mm_struct *dst_mm = ctx->mm;
710 struct vm_area_struct *dst_vma;
713 unsigned long src_addr, dst_addr;
718 * Sanitize the command parameters:
720 BUG_ON(dst_start & ~PAGE_MASK);
721 BUG_ON(len & ~PAGE_MASK);
723 /* Does the address range wrap, or is the span zero-sized? */
724 BUG_ON(src_start + len <= src_start);
725 BUG_ON(dst_start + len <= dst_start);
727 src_addr = src_start;
728 dst_addr = dst_start;
733 * Make sure the vma is not shared and that the dst range is
734 * both valid and fully within a single existing vma.
736 dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
737 if (IS_ERR(dst_vma)) {
738 err = PTR_ERR(dst_vma);
743 * If memory mappings are changing because of a non-cooperative
744 * operation (e.g. mremap) running in parallel, bail out and
745 * request the user to retry later.
747 down_read(&ctx->map_changing_lock);
749 if (atomic_read(&ctx->mmap_changing))
754 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
755 * it will overwrite vm_ops, so vma_is_anonymous must return false.
757 if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
758 dst_vma->vm_flags & VM_SHARED))
762 * validate 'mode' now that we know the dst_vma: don't allow
763 * a wrprotect copy if the userfaultfd didn't register as WP.
765 if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
769 * If this is a HUGETLB vma, pass off to the appropriate routine
771 if (is_vm_hugetlb_page(dst_vma))
772 return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
773 src_start, len, flags);
775 if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
777 if (!vma_is_shmem(dst_vma) &&
778 uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
781 while (src_addr < src_start + len) {
784 BUG_ON(dst_addr >= dst_start + len);
786 dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
787 if (unlikely(!dst_pmd)) {
792 dst_pmdval = pmdp_get_lockless(dst_pmd);
794 * If the dst_pmd is mapped as THP, don't
795 * override it and just be strict.
797 if (unlikely(pmd_trans_huge(dst_pmdval))) {
801 if (unlikely(pmd_none(dst_pmdval)) &&
802 unlikely(__pte_alloc(dst_mm, dst_pmd))) {
806 /* If a huge pmd materialized from under us, fail */
807 if (unlikely(pmd_trans_huge(*dst_pmd))) {
812 BUG_ON(pmd_none(*dst_pmd));
813 BUG_ON(pmd_trans_huge(*dst_pmd));
815 err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
816 src_addr, flags, &folio);
819 if (unlikely(err == -ENOENT)) {
822 up_read(&ctx->map_changing_lock);
823 uffd_mfill_unlock(dst_vma);
826 kaddr = kmap_local_folio(folio, 0);
827 err = copy_from_user(kaddr,
828 (const void __user *) src_addr,
835 flush_dcache_folio(folio);
841 dst_addr += PAGE_SIZE;
842 src_addr += PAGE_SIZE;
845 if (fatal_signal_pending(current))
853 up_read(&ctx->map_changing_lock);
854 uffd_mfill_unlock(dst_vma);
860 BUG_ON(!copied && !err);
861 return copied ? copied : err;
864 ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
865 unsigned long src_start, unsigned long len,
868 return mfill_atomic(ctx, dst_start, src_start, len,
869 uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
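/*
 * Illustrative userspace sketch only (not part of this file, never compiled):
 * one way a monitor thread might drive the UFFDIO_COPY path above after
 * reading a UFFD_EVENT_PAGEFAULT message. The fd, buffer and address names
 * are assumptions made up for the example; struct uffdio_copy and the ioctl
 * are the real UAPI from <linux/userfaultfd.h>.
 */
#if 0
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Resolve a MISSING fault at @dst by copying one page from @src. */
static int uffd_copy_page(int uffd, unsigned long dst, unsigned long src,
			  unsigned long page_size)
{
	struct uffdio_copy copy = {
		.dst = dst & ~(page_size - 1),	/* dst must be page aligned */
		.src = src,
		.len = page_size,
		.mode = 0,	/* or UFFDIO_COPY_MODE_WP / _DONTWAKE */
	};

	if (ioctl(uffd, UFFDIO_COPY, &copy) == 0)
		return 0;
	/*
	 * On failure, copy.copy mirrors the "copied ? copied : err" return of
	 * mfill_atomic(): the bytes copied so far, or a negative errno such
	 * as -EAGAIN while a non-cooperative event is changing the mappings.
	 */
	return copy.copy < 0 ? (int)copy.copy : -errno;
}
#endif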
872 ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
876 return mfill_atomic(ctx, start, 0, len,
877 uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
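/*
 * Illustrative userspace sketch only (not part of this file): the matching
 * call for the zeropage path. Names are assumptions for the example. As the
 * comment in mfill_atomic_hugetlb() above explains, there is no reliable zero
 * huge page, so this ioctl is not supported on hugetlb VMAs.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int uffd_zero_page(int uffd, unsigned long dst, unsigned long page_size)
{
	struct uffdio_zeropage zp = {
		.range = { .start = dst, .len = page_size },
		.mode  = 0,	/* or UFFDIO_ZEROPAGE_MODE_DONTWAKE */
	};

	/* On failure, zp.zeropage holds bytes zeroed or a negative errno. */
	return ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
}
#endif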
880 ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
881 unsigned long len, uffd_flags_t flags)
885 * A caller might reasonably assume that UFFDIO_CONTINUE contains an
886 * smp_wmb() to ensure that any writes to the about-to-be-mapped page by
887 * the thread doing the UFFDIO_CONTINUE are guaranteed to be visible to
888 * subsequent loads from the page through the newly mapped address range.
892 return mfill_atomic(ctx, start, 0, len,
893 uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
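/*
 * Illustrative userspace sketch only (not part of this file): minor-fault
 * resolution as seen from userspace. The page cache page is written first,
 * here through an assumed second, shared mapping of the same shmem file, and
 * only then is UFFDIO_CONTINUE issued; per the ordering comment above, those
 * writes are guaranteed to be visible through the newly mapped range. Helper
 * and parameter names are invented for the example.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int uffd_continue_page(int uffd, void *cache_copy, const void *data,
			      unsigned long fault_addr, unsigned long page_size)
{
	struct uffdio_continue cont = {
		.range = { .start = fault_addr & ~(page_size - 1),
			   .len   = page_size },
		.mode  = 0,	/* or UFFDIO_CONTINUE_MODE_DONTWAKE */
	};

	/* Fill the backing page cache page through the other mapping... */
	memcpy(cache_copy, data, page_size);
	/* ...then ask the kernel to map that existing page at fault_addr. */
	if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == 0)
		return 0;
	/* On failure, cont.mapped holds bytes mapped or a negative errno. */
	return (int)cont.mapped;
}
#endif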
896 ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
897 unsigned long len, uffd_flags_t flags)
899 return mfill_atomic(ctx, start, 0, len,
900 uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
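/*
 * Illustrative userspace sketch only (not part of this file): how a VM
 * monitor might use the poison path above, e.g. so that a guest page lost to
 * a memory error keeps raising SIGBUS after postcopy migration. Names are
 * assumptions; struct uffdio_poison is the real UAPI.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int uffd_poison_page(int uffd, unsigned long addr, unsigned long page_size)
{
	struct uffdio_poison poison = {
		.range = { .start = addr, .len = page_size },
		.mode  = 0,	/* or UFFDIO_POISON_MODE_DONTWAKE */
	};

	/* On failure, poison.updated holds bytes handled or a negative errno. */
	return ioctl(uffd, UFFDIO_POISON, &poison);
}
#endif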
903 long uffd_wp_range(struct vm_area_struct *dst_vma,
904 unsigned long start, unsigned long len, bool enable_wp)
906 unsigned int mm_cp_flags;
907 struct mmu_gather tlb;
910 VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
911 "The address range exceeds VMA boundary.\n");
913 mm_cp_flags = MM_CP_UFFD_WP;
915 mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;
918 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
919 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
920 * to be write-protected as default whenever protection changes.
921 * Try upgrading write permissions manually.
923 if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
924 mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
925 tlb_gather_mmu(&tlb, dst_vma->vm_mm);
926 ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
927 tlb_finish_mmu(&tlb);
932 int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
933 unsigned long len, bool enable_wp)
935 struct mm_struct *dst_mm = ctx->mm;
936 unsigned long end = start + len;
937 unsigned long _start, _end;
938 struct vm_area_struct *dst_vma;
939 unsigned long page_mask;
941 VMA_ITERATOR(vmi, dst_mm, start);
944 * Sanitize the command parameters:
946 BUG_ON(start & ~PAGE_MASK);
947 BUG_ON(len & ~PAGE_MASK);
949 /* Does the address range wrap, or is the span zero-sized? */
950 BUG_ON(start + len <= start);
952 mmap_read_lock(dst_mm);
955 * If memory mappings are changing because of a non-cooperative
956 * operation (e.g. mremap) running in parallel, bail out and
957 * request the user to retry later.
959 down_read(&ctx->map_changing_lock);
961 if (atomic_read(&ctx->mmap_changing))
965 for_each_vma_range(vmi, dst_vma, end) {
967 if (!userfaultfd_wp(dst_vma)) {
972 if (is_vm_hugetlb_page(dst_vma)) {
974 page_mask = vma_kernel_pagesize(dst_vma) - 1;
975 if ((start & page_mask) || (len & page_mask))
979 _start = max(dst_vma->vm_start, start);
980 _end = min(dst_vma->vm_end, end);
982 err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);
984 /* Return 0 on success, <0 on failure */
990 up_read(&ctx->map_changing_lock);
991 mmap_read_unlock(dst_mm);
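/*
 * Illustrative userspace sketch only (not part of this file): toggling uffd
 * write protection over a range, assuming it was registered with
 * UFFDIO_REGISTER_MODE_WP (possibly together with MISSING, the combination
 * discussed in mfill_atomic_install_pte() above). Helper and variable names
 * are invented for the example.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Toggle uffd-wp on [start, start + len); start and len are page aligned. */
static int uffd_set_wp(int uffd, unsigned long start, unsigned long len,
		       int enable)
{
	struct uffdio_writeprotect wp = {
		.range = { .start = start, .len = len },
		.mode  = enable ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	/* Resolving (mode == 0) also wakes waiting faulters by default. */
	return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}
#endif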
996 void double_pt_lock(spinlock_t *ptl1,
1001 spinlock_t *ptl_tmp;
1004 /* exchange ptl1 and ptl2 */
1009 /* lock in virtual address order to avoid lock inversion */
1012 spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING);
1017 void double_pt_unlock(spinlock_t *ptl1,
1030 static int move_present_pte(struct mm_struct *mm,
1031 struct vm_area_struct *dst_vma,
1032 struct vm_area_struct *src_vma,
1033 unsigned long dst_addr, unsigned long src_addr,
1034 pte_t *dst_pte, pte_t *src_pte,
1035 pte_t orig_dst_pte, pte_t orig_src_pte,
1036 spinlock_t *dst_ptl, spinlock_t *src_ptl,
1037 struct folio *src_folio)
1041 double_pt_lock(dst_ptl, src_ptl);
1043 if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
1044 !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
1048 if (folio_test_large(src_folio) ||
1049 folio_maybe_dma_pinned(src_folio) ||
1050 !PageAnonExclusive(&src_folio->page)) {
1055 orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
1056 /* Folio got pinned from under us. Put it back and fail the move. */
1057 if (folio_maybe_dma_pinned(src_folio)) {
1058 set_pte_at(mm, src_addr, src_pte, orig_src_pte);
1063 folio_move_anon_rmap(src_folio, dst_vma);
1064 WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
1066 orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
1067 /* Follow mremap() behavior and treat the entry as dirty after the move */
1068 orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);
1070 set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
1072 double_pt_unlock(dst_ptl, src_ptl);
1076 static int move_swap_pte(struct mm_struct *mm,
1077 unsigned long dst_addr, unsigned long src_addr,
1078 pte_t *dst_pte, pte_t *src_pte,
1079 pte_t orig_dst_pte, pte_t orig_src_pte,
1080 spinlock_t *dst_ptl, spinlock_t *src_ptl)
1082 if (!pte_swp_exclusive(orig_src_pte))
1085 double_pt_lock(dst_ptl, src_ptl);
1087 if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
1088 !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
1089 double_pt_unlock(dst_ptl, src_ptl);
1093 orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
1094 set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
1095 double_pt_unlock(dst_ptl, src_ptl);
1100 static int move_zeropage_pte(struct mm_struct *mm,
1101 struct vm_area_struct *dst_vma,
1102 struct vm_area_struct *src_vma,
1103 unsigned long dst_addr, unsigned long src_addr,
1104 pte_t *dst_pte, pte_t *src_pte,
1105 pte_t orig_dst_pte, pte_t orig_src_pte,
1106 spinlock_t *dst_ptl, spinlock_t *src_ptl)
1110 double_pt_lock(dst_ptl, src_ptl);
1111 if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
1112 !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
1113 double_pt_unlock(dst_ptl, src_ptl);
1117 zero_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
1118 dst_vma->vm_page_prot));
1119 ptep_clear_flush(src_vma, src_addr, src_pte);
1120 set_pte_at(mm, dst_addr, dst_pte, zero_pte);
1121 double_pt_unlock(dst_ptl, src_ptl);
1128 * The mmap_lock for reading is held by the caller. Just move the page
1129 * from src_pmd to dst_pmd if possible, and return 0 if the page was
1130 * successfully moved.
1132 static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
1133 struct vm_area_struct *dst_vma,
1134 struct vm_area_struct *src_vma,
1135 unsigned long dst_addr, unsigned long src_addr,
1139 pte_t orig_src_pte, orig_dst_pte;
1140 pte_t src_folio_pte;
1141 spinlock_t *src_ptl, *dst_ptl;
1142 pte_t *src_pte = NULL;
1143 pte_t *dst_pte = NULL;
1145 struct folio *src_folio = NULL;
1146 struct anon_vma *src_anon_vma = NULL;
1147 struct mmu_notifier_range range;
1150 flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE);
1151 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1152 src_addr, src_addr + PAGE_SIZE);
1153 mmu_notifier_invalidate_range_start(&range);
1155 dst_pte = pte_offset_map_nolock(mm, dst_pmd, dst_addr, &dst_ptl);
1157 /* Retry if a huge pmd materialized from under us */
1158 if (unlikely(!dst_pte)) {
1163 src_pte = pte_offset_map_nolock(mm, src_pmd, src_addr, &src_ptl);
1166 * We held the mmap_lock for reading so MADV_DONTNEED
1167 * can zap transparent huge pages under us, or the
1168 * transparent huge page fault can establish new
1169 * transparent huge pages under us.
1171 if (unlikely(!src_pte)) {
1176 /* Sanity checks before the operation */
1177 if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) ||
1178 WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
1184 orig_dst_pte = ptep_get(dst_pte);
1185 spin_unlock(dst_ptl);
1186 if (!pte_none(orig_dst_pte)) {
1192 orig_src_pte = ptep_get(src_pte);
1193 spin_unlock(src_ptl);
1194 if (pte_none(orig_src_pte)) {
1195 if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
1197 else /* nothing to do to move a hole */
1202 /* If the PTE changed after we locked the folio then start over */
1203 if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
1208 if (pte_present(orig_src_pte)) {
1209 if (is_zero_pfn(pte_pfn(orig_src_pte))) {
1210 err = move_zeropage_pte(mm, dst_vma, src_vma,
1211 dst_addr, src_addr, dst_pte, src_pte,
1212 orig_dst_pte, orig_src_pte,
1218 * Pin and lock both the source folio and anon_vma. Since we are in
1219 * an RCU read section, we can't block, so on contention we have to
1220 * unmap the ptes, obtain the lock and retry.
1223 struct folio *folio;
1226 * Pin the page while holding the lock to be sure the
1227 * page isn't freed under us
1230 if (!pte_same(orig_src_pte, ptep_get(src_pte))) {
1231 spin_unlock(src_ptl);
1236 folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
1237 if (!folio || !PageAnonExclusive(&folio->page)) {
1238 spin_unlock(src_ptl);
1245 src_folio_pte = orig_src_pte;
1246 spin_unlock(src_ptl);
1248 if (!folio_trylock(src_folio)) {
1249 pte_unmap(&orig_src_pte);
1250 pte_unmap(&orig_dst_pte);
1251 src_pte = dst_pte = NULL;
1252 /* now we can block and wait */
1253 folio_lock(src_folio);
1257 if (WARN_ON_ONCE(!folio_test_anon(src_folio))) {
1263 /* at this point we have src_folio locked */
1264 if (folio_test_large(src_folio)) {
1265 /* split_folio() can block */
1266 pte_unmap(&orig_src_pte);
1267 pte_unmap(&orig_dst_pte);
1268 src_pte = dst_pte = NULL;
1269 err = split_folio(src_folio);
1272 /* have to reacquire the folio after it got split */
1273 folio_unlock(src_folio);
1274 folio_put(src_folio);
1279 if (!src_anon_vma) {
1281 * folio_referenced walks the anon_vma chain
1282 * without the folio lock. Serialize against it with
1283 * the anon_vma lock; the folio lock is not enough.
1285 src_anon_vma = folio_get_anon_vma(src_folio);
1286 if (!src_anon_vma) {
1287 /* page was unmapped from under us */
1291 if (!anon_vma_trylock_write(src_anon_vma)) {
1292 pte_unmap(&orig_src_pte);
1293 pte_unmap(&orig_dst_pte);
1294 src_pte = dst_pte = NULL;
1295 /* now we can block and wait */
1296 anon_vma_lock_write(src_anon_vma);
1301 err = move_present_pte(mm, dst_vma, src_vma,
1302 dst_addr, src_addr, dst_pte, src_pte,
1303 orig_dst_pte, orig_src_pte,
1304 dst_ptl, src_ptl, src_folio);
1306 entry = pte_to_swp_entry(orig_src_pte);
1307 if (non_swap_entry(entry)) {
1308 if (is_migration_entry(entry)) {
1309 pte_unmap(&orig_src_pte);
1310 pte_unmap(&orig_dst_pte);
1311 src_pte = dst_pte = NULL;
1312 migration_entry_wait(mm, src_pmd, src_addr);
1319 err = move_swap_pte(mm, dst_addr, src_addr,
1321 orig_dst_pte, orig_src_pte,
1327 anon_vma_unlock_write(src_anon_vma);
1328 put_anon_vma(src_anon_vma);
1331 folio_unlock(src_folio);
1332 folio_put(src_folio);
1338 mmu_notifier_invalidate_range_end(&range);
1343 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1344 static inline bool move_splits_huge_pmd(unsigned long dst_addr,
1345 unsigned long src_addr,
1346 unsigned long src_end)
1348 return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) ||
1349 src_end - src_addr < HPAGE_PMD_SIZE;
1352 static inline bool move_splits_huge_pmd(unsigned long dst_addr,
1353 unsigned long src_addr,
1354 unsigned long src_end)
1356 /* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
1361 static inline bool vma_move_compatible(struct vm_area_struct *vma)
1363 return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB |
1364 VM_MIXEDMAP | VM_SHADOW_STACK));
1367 static int validate_move_areas(struct userfaultfd_ctx *ctx,
1368 struct vm_area_struct *src_vma,
1369 struct vm_area_struct *dst_vma)
1371 /* Only allow moving if both have the same access and protection */
1372 if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
1373 pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
1376 /* Only allow moving if both are mlocked or both aren't */
1377 if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
1381 * For now, we keep it simple and only move between writable VMAs.
1382 * Access flags are equal, therefore checking only the source is enough.
1384 if (!(src_vma->vm_flags & VM_WRITE))
1387 /* Check if vma flags indicate content which can be moved */
1388 if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
1391 /* Ensure dst_vma is registered in the uffd we are operating on */
1392 if (!dst_vma->vm_userfaultfd_ctx.ctx ||
1393 dst_vma->vm_userfaultfd_ctx.ctx != ctx)
1396 /* Only allow moving across anonymous vmas */
1397 if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
1403 static __always_inline
1404 int find_vmas_mm_locked(struct mm_struct *mm,
1405 unsigned long dst_start,
1406 unsigned long src_start,
1407 struct vm_area_struct **dst_vmap,
1408 struct vm_area_struct **src_vmap)
1410 struct vm_area_struct *vma;
1412 mmap_assert_locked(mm);
1413 vma = find_vma_and_prepare_anon(mm, dst_start);
1415 return PTR_ERR(vma);
1418 /* Skip finding src_vma if src_start is in dst_vma */
1419 if (src_start >= vma->vm_start && src_start < vma->vm_end)
1422 vma = vma_lookup(mm, src_start);
1430 #ifdef CONFIG_PER_VMA_LOCK
1431 static int uffd_move_lock(struct mm_struct *mm,
1432 unsigned long dst_start,
1433 unsigned long src_start,
1434 struct vm_area_struct **dst_vmap,
1435 struct vm_area_struct **src_vmap)
1437 struct vm_area_struct *vma;
1440 vma = lock_vma(mm, dst_start);
1442 return PTR_ERR(vma);
1446 * Skip finding src_vma if src_start is in dst_vma. This also ensures
1447 * that we don't lock the same vma twice.
1449 if (src_start >= vma->vm_start && src_start < vma->vm_end) {
1455 * Using lock_vma() to get src_vma can lead to the following deadlock:
1459 * Thread1: vma_start_read(dst_vma)
1460 * Thread2: mmap_write_lock(mm)
1461 * Thread2: vma_start_write(src_vma)
1462 * Thread1: vma_start_read(src_vma) fails, falls back to
1463 * Thread1: mmap_read_lock(mm), which blocks on Thread2
1464 * Thread2: vma_start_write(dst_vma), which blocks on Thread1
1466 *src_vmap = lock_vma_under_rcu(mm, src_start);
1467 if (likely(*src_vmap))
1470 /* Undo any locking and retry in mmap_lock critical section */
1471 vma_end_read(*dst_vmap);
1474 err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
1477 * See comment in lock_vma() as to why not using
1478 * vma_start_read() here.
1480 down_read(&(*dst_vmap)->vm_lock->lock);
1481 if (*dst_vmap != *src_vmap)
1482 down_read_nested(&(*src_vmap)->vm_lock->lock,
1483 SINGLE_DEPTH_NESTING);
1485 mmap_read_unlock(mm);
1489 static void uffd_move_unlock(struct vm_area_struct *dst_vma,
1490 struct vm_area_struct *src_vma)
1492 vma_end_read(src_vma);
1493 if (src_vma != dst_vma)
1494 vma_end_read(dst_vma);
1499 static int uffd_move_lock(struct mm_struct *mm,
1500 unsigned long dst_start,
1501 unsigned long src_start,
1502 struct vm_area_struct **dst_vmap,
1503 struct vm_area_struct **src_vmap)
1508 err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
1510 mmap_read_unlock(mm);
1514 static void uffd_move_unlock(struct vm_area_struct *dst_vma,
1515 struct vm_area_struct *src_vma)
1517 mmap_assert_locked(src_vma->vm_mm);
1518 mmap_read_unlock(dst_vma->vm_mm);
1523 * move_pages - move arbitrary anonymous pages of an existing vma
1524 * @ctx: pointer to the userfaultfd context
1525 * @dst_start: start of the destination virtual memory range
1526 * @src_start: start of the source virtual memory range
1527 * @len: length of the virtual memory range
1528 * @mode: flags from uffdio_move.mode
1530 * It will either use the mmap_lock in read mode or per-vma locks
1532 * move_pages() remaps arbitrary anonymous pages atomically with zero
1533 * copy. It only works on non-shared anonymous pages because those can
1534 * be relocated without generating non-linear anon_vmas in the rmap
1537 * It provides a zero copy mechanism to handle userspace page faults.
1538 * The source vma pages should have mapcount == 1, which can be
1539 * enforced by using madvise(MADV_DONTFORK) on src vma.
1541 * The thread receiving the page during the userland page fault
1542 * will receive the faulting page in the source vma through the network,
1543 * storage or any other I/O device (MADV_DONTFORK in the source vma
1544 * avoids move_pages() to fail with -EBUSY if the process forks before
1545 * move_pages() is called), then it will call move_pages() to map the
1546 * page in the faulting address in the destination vma.
1548 * This userfaultfd command works purely via pagetables, so it's the
1549 * most efficient way to move physical non-shared anonymous pages
1550 * across different virtual addresses. Unlike mremap()/mmap()/munmap()
1551 * it does not create any new vmas. The mapping in the destination
1552 * address is atomic.
1554 * It only works if the vma protection bits are identical from the
1555 * source and destination vma.
1557 * It can remap non-shared anonymous pages within the same vma too.
1559 * If the source virtual memory range has any unmapped holes, or if
1560 * the destination virtual memory range is not a whole unmapped hole,
1561 * move_pages() will fail respectively with -ENOENT or -EEXIST. This
1562 * provides a very strict behavior to avoid any chance of memory
1563 * corruption going unnoticed if there are userland race conditions.
1564 * Only one thread should resolve the userland page fault at any given
1565 * time for any given faulting address. This means that if two threads
1566 * try to both call move_pages() on the same destination address at the
1567 * same time, the second thread will get an explicit error from this
1570 * The command retval will return "len" if successful. The command
1571 * however can be interrupted by fatal signals or errors. If
1572 * interrupted it will return the number of bytes successfully
1573 * remapped before the interruption if any, or the negative error if
1574 * none. It will never return zero. Either it will return an error or
1575 * an amount of bytes successfully moved. If the retval reports a
1576 * "short" remap, the move_pages() command should be repeated by
1577 * userland with src+retval, dst+retval, len-retval if it wants to know
1578 * about the error that interrupted it.
1580 * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to
1581 * prevent -ENOENT errors from materializing if there are holes in the
1582 * source virtual range that is being remapped. The holes will be
1583 * accounted as successfully remapped in the retval of the
1584 * command. This is mostly useful to remap naturally hugepage-aligned
1585 * virtual regions without knowing if there are transparent hugepages
1586 * in the regions or not, but preventing the risk of having to split
1587 * the hugepmd during the remap.
1589 * If there's any rmap walk that is taking the anon_vma locks without
1590 * first obtaining the folio lock (the only current instance is
1591 * folio_referenced), they will have to verify if the folio->mapping
1592 * has changed after taking the anon_vma lock. If it changed they
1593 * should release the lock and retry obtaining a new anon_vma, because
1594 * it means the anon_vma was changed by move_pages() before the lock
1595 * could be obtained. This is the only additional complexity added to
1596 * the rmap code to provide this anonymous page remapping functionality.
1598 ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
1599 unsigned long src_start, unsigned long len, __u64 mode)
1601 struct mm_struct *mm = ctx->mm;
1602 struct vm_area_struct *src_vma, *dst_vma;
1603 unsigned long src_addr, dst_addr;
1604 pmd_t *src_pmd, *dst_pmd;
1608 /* Sanitize the command parameters. */
1609 if (WARN_ON_ONCE(src_start & ~PAGE_MASK) ||
1610 WARN_ON_ONCE(dst_start & ~PAGE_MASK) ||
1611 WARN_ON_ONCE(len & ~PAGE_MASK))
1614 /* Does the address range wrap, or is the span zero-sized? */
1615 if (WARN_ON_ONCE(src_start + len <= src_start) ||
1616 WARN_ON_ONCE(dst_start + len <= dst_start))
1619 err = uffd_move_lock(mm, dst_start, src_start, &dst_vma, &src_vma);
1623 /* Re-check after taking map_changing_lock */
1625 down_read(&ctx->map_changing_lock);
1626 if (likely(atomic_read(&ctx->mmap_changing)))
1629 * Make sure the vma is not shared and that the src and dst remap
1630 * ranges are both valid and fully within a single existing
1634 if (src_vma->vm_flags & VM_SHARED)
1636 if (src_start + len > src_vma->vm_end)
1639 if (dst_vma->vm_flags & VM_SHARED)
1641 if (dst_start + len > dst_vma->vm_end)
1644 err = validate_move_areas(ctx, src_vma, dst_vma);
1648 for (src_addr = src_start, dst_addr = dst_start;
1649 src_addr < src_start + len;) {
1652 unsigned long step_size;
1655 * The below works because an anonymous area would not have a
1656 * transparent huge PUD. If file-backed support is added,
1657 * that case would need to be handled here.
1659 src_pmd = mm_find_pmd(mm, src_addr);
1660 if (unlikely(!src_pmd)) {
1661 if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
1665 src_pmd = mm_alloc_pmd(mm, src_addr);
1666 if (unlikely(!src_pmd)) {
1671 dst_pmd = mm_alloc_pmd(mm, dst_addr);
1672 if (unlikely(!dst_pmd)) {
1677 dst_pmdval = pmdp_get_lockless(dst_pmd);
1679 * If the dst_pmd is mapped as THP, don't override it and just
1680 * be strict. If dst_pmd changes into THP after this check, the
1681 * move_pages_huge_pmd() will detect the change and retry
1682 * while move_pages_pte() will detect the change and fail.
1684 if (unlikely(pmd_trans_huge(dst_pmdval))) {
1689 ptl = pmd_trans_huge_lock(src_pmd, src_vma);
1691 if (pmd_devmap(*src_pmd)) {
1697 /* Check if we can move the pmd without splitting it. */
1698 if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
1699 !pmd_none(dst_pmdval)) {
1700 struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
1702 if (!folio || (!is_huge_zero_page(&folio->page) &&
1703 !PageAnonExclusive(&folio->page))) {
1710 split_huge_pmd(src_vma, src_pmd, src_addr);
1711 /* The folio will be split by move_pages_pte() */
1715 err = move_pages_huge_pmd(mm, dst_pmd, src_pmd,
1716 dst_pmdval, dst_vma, src_vma,
1717 dst_addr, src_addr);
1718 step_size = HPAGE_PMD_SIZE;
1720 if (pmd_none(*src_pmd)) {
1721 if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
1725 if (unlikely(__pte_alloc(mm, src_pmd))) {
1731 if (unlikely(pte_alloc(mm, dst_pmd))) {
1736 err = move_pages_pte(mm, dst_pmd, src_pmd,
1738 dst_addr, src_addr, mode);
1739 step_size = PAGE_SIZE;
1744 if (fatal_signal_pending(current)) {
1745 /* Do not override an error */
1746 if (!err || err == -EAGAIN)
1757 /* Proceed to the next page */
1758 dst_addr += step_size;
1759 src_addr += step_size;
1764 up_read(&ctx->map_changing_lock);
1765 uffd_move_unlock(dst_vma, src_vma);
1767 VM_WARN_ON(moved < 0);
1768 VM_WARN_ON(err > 0);
1769 VM_WARN_ON(!moved && !err);
1770 return moved ? moved : err;
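/*
 * Illustrative userspace sketch only (not part of this file): a caller of
 * UFFDIO_MOVE following the retval contract documented above move_pages()
 * (repeat with src+retval, dst+retval, len-retval after a short move). The fd
 * and address names are assumptions for the example; struct uffdio_move and
 * its mode flags are the real UAPI.
 */
#if 0
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int uffd_move_range(int uffd, unsigned long dst, unsigned long src,
			   unsigned long len)
{
	while (len) {
		struct uffdio_move mv = {
			.dst  = dst,
			.src  = src,
			.len  = len,
			.mode = UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES,
		};

		if (ioctl(uffd, UFFDIO_MOVE, &mv) == 0)
			return 0;
		/*
		 * mv.move holds the bytes moved so far or a negative errno;
		 * zero means the ioctl failed before moving anything (errno
		 * has the reason), so don't loop forever on it.
		 */
		if (mv.move <= 0)
			return mv.move ? (int)mv.move : -errno;
		dst += mv.move;
		src += mv.move;
		len -= mv.move;
	}
	return 0;
}
#endif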