userfaultfd: mcopy_atomic: introduce mfill_atomic_pte helper
author	Mike Rapoport <rppt@linux.vnet.ibm.com>
	Wed, 6 Sep 2017 23:23:06 +0000 (16:23 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 7 Sep 2017 00:27:28 +0000 (17:27 -0700)
Shuffle the code a bit to improve readability: pull the per-PTE dispatch
between mcopy_atomic_pte(), mfill_zeropage_pte() and
shmem_mcopy_atomic_pte() out of the copy loop in __mcopy_atomic() and
into a new mfill_atomic_pte() helper.
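For context, a minimal, hypothetical userspace sketch of the two
resolution paths that end up in the branches above (the function name
resolve_fault() and its parameters are invented for illustration;
error handling and the usual UFFDIO_API/UFFDIO_REGISTER setup are
elided): UFFDIO_COPY reaches mcopy_atomic_pte() on anonymous memory
(shmem_mcopy_atomic_pte() on shmem), while UFFDIO_ZEROPAGE reaches
mfill_zeropage_pte() and, per the code below, fails with -EINVAL on
non-anonymous VMAs.

	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>

	static void resolve_fault(int uffd, unsigned long addr,
				  void *src_page, unsigned long page_size,
				  int zeropage)
	{
		addr &= ~(page_size - 1);	/* page-align the fault address */

		if (!zeropage) {
			/* copy a prepared page in: mcopy_atomic_pte() path */
			struct uffdio_copy copy = {
				.dst = addr,
				.src = (unsigned long)src_page,
				.len = page_size,
				.mode = 0,
			};
			ioctl(uffd, UFFDIO_COPY, &copy);
		} else {
			/* install the zero page: mfill_zeropage_pte() path */
			struct uffdio_zeropage zero = {
				.range = { .start = addr, .len = page_size },
				.mode = 0,
			};
			ioctl(uffd, UFFDIO_ZEROPAGE, &zero);
		}
	}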

Link: http://lkml.kernel.org/r/1497939652-16528-5-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/userfaultfd.c

index 8bcb501bce60b84f8bbc3c79cb2790bae2daa86a..48c015c80120bf969ef33ebdfc6f232fd7613224 100644 (file)
@@ -371,6 +371,34 @@ extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                                      bool zeropage);
 #endif /* CONFIG_HUGETLB_PAGE */
 
+static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
+                                               pmd_t *dst_pmd,
+                                               struct vm_area_struct *dst_vma,
+                                               unsigned long dst_addr,
+                                               unsigned long src_addr,
+                                               struct page **page,
+                                               bool zeropage)
+{
+       ssize_t err;
+
+       if (vma_is_anonymous(dst_vma)) {
+               if (!zeropage)
+                       err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
+                                              dst_addr, src_addr, page);
+               else
+                       err = mfill_zeropage_pte(dst_mm, dst_pmd,
+                                                dst_vma, dst_addr);
+       } else {
+               err = -EINVAL; /* if zeropage is true return -EINVAL */
+               if (likely(!zeropage))
+                       err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
+                                                    dst_vma, dst_addr,
+                                                    src_addr, page);
+       }
+
+       return err;
+}
+
 static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
                                              unsigned long dst_start,
                                              unsigned long src_start,
@@ -487,22 +515,8 @@ retry:
                BUG_ON(pmd_none(*dst_pmd));
                BUG_ON(pmd_trans_huge(*dst_pmd));
 
-               if (vma_is_anonymous(dst_vma)) {
-                       if (!zeropage)
-                               err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
-                                                      dst_addr, src_addr,
-                                                      &page);
-                       else
-                               err = mfill_zeropage_pte(dst_mm, dst_pmd,
-                                                        dst_vma, dst_addr);
-               } else {
-                       err = -EINVAL; /* if zeropage is true return -EINVAL */
-                       if (likely(!zeropage))
-                               err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
-                                                            dst_vma, dst_addr,
-                                                            src_addr, &page);
-               }
-
+               err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+                                      src_addr, &page, zeropage);
                cond_resched();
 
                if (unlikely(err == -EFAULT)) {
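A note on the design choice: mfill_atomic_pte() is marked
__always_inline, as is __mcopy_atomic() itself, whose callers pass
zeropage as a compile-time constant. After inlining, the compiler can
fold the zeropage branches away entirely, so the helper adds no cost on
the per-page fast path. The two entry points look roughly like this in
mm/userfaultfd.c (reproduced from memory for illustration; check the
tree for the exact signatures):

	ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
			     unsigned long src_start, unsigned long len)
	{
		/* zeropage == false: UFFDIO_COPY path */
		return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
	}

	ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
			       unsigned long len)
	{
		/* zeropage == true: UFFDIO_ZEROPAGE path, no source page */
		return __mcopy_atomic(dst_mm, start, 0, len, true);
	}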