Merge branch 'master' into mm-hotfixes-stable
author Andrew Morton <akpm@linux-foundation.org>
Sun, 16 Oct 2022 23:06:53 +0000 (16:06 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 16 Oct 2022 23:06:53 +0000 (16:06 -0700)
fs/nilfs2/inode.c
fs/nilfs2/segment.c
include/linux/hugetlb.h
mm/damon/core.c
mm/gup.c
mm/hugetlb.c

index 67f63cfeade5c4867155c9b0f0939b9785d95c99..232dd7b6cca14ba973fce7351d28579801942db9 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -328,6 +328,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
        struct inode *inode;
        struct nilfs_inode_info *ii;
        struct nilfs_root *root;
+       struct buffer_head *bh;
        int err = -ENOMEM;
        ino_t ino;
 
@@ -343,11 +344,25 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
        ii->i_state = BIT(NILFS_I_NEW);
        ii->i_root = root;
 
-       err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
+       err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
        if (unlikely(err))
                goto failed_ifile_create_inode;
        /* reference count of i_bh inherits from nilfs_mdt_read_block() */
 
+       if (unlikely(ino < NILFS_USER_INO)) {
+               nilfs_warn(sb,
+                          "inode bitmap is inconsistent for reserved inodes");
+               do {
+                       brelse(bh);
+                       err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
+                       if (unlikely(err))
+                               goto failed_ifile_create_inode;
+               } while (ino < NILFS_USER_INO);
+
+               nilfs_info(sb, "repaired inode bitmap for reserved inodes");
+       }
+       ii->i_bh = bh;
+
        atomic64_inc(&root->inodes_count);
        inode_init_owner(&init_user_ns, inode, dir, mode);
        inode->i_ino = ino;
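
A note on why the repair loop above terminates and actually repairs anything: nilfs_ifile_create_inode() marks each inode number it returns as allocated in the ifile bitmap, so reserved numbers that a corrupted bitmap wrongly reported as free are claimed and simply left allocated; brelse() only drops the buffer reference, not the bitmap bit. A minimal userspace sketch of the pattern, with a hypothetical first-free allocator (alloc_slot() and NRESERVED stand in for nilfs_ifile_create_inode() and NILFS_USER_INO):

#include <stdbool.h>
#include <stdio.h>

#define NRESERVED 16			/* stand-in for NILFS_USER_INO */
#define NSLOTS    64

static bool bitmap[NSLOTS];		/* false = free (possibly wrongly so) */

/* Hypothetical first-free allocator; marks the slot as used. */
static int alloc_slot(void)
{
	for (int i = 0; i < NSLOTS; i++) {
		if (!bitmap[i]) {
			bitmap[i] = true;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	int ino = alloc_slot();

	if (ino >= 0 && ino < NRESERVED) {
		/* mirrors do { ... } while (ino < NILFS_USER_INO) */
		do {
			ino = alloc_slot();
		} while (ino >= 0 && ino < NRESERVED);
		printf("repaired: reserved slots left marked in use\n");
	}
	printf("allocated ino %d\n", ino);
	return 0;
}
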
@@ -440,6 +455,8 @@ int nilfs_read_inode_common(struct inode *inode,
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+       if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
+               return -EIO; /* this inode is for metadata and corrupted */
        if (inode->i_nlink == 0)
                return -ESTALE; /* this inode is deleted */
 
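
The added check orders corruption detection ahead of the pre-existing deleted-inode check, so a metadata file inode with the wrong type surfaces as -EIO instead of falling through as merely stale (-ESTALE). A compilable distillation of that ordering, with hypothetical flattened arguments in place of the real inode fields:

#include <errno.h>
#include <sys/stat.h>

/* Hypothetical condensation of the two checks above. */
static int validate_inode(mode_t mode, unsigned int nlink, int is_metadata)
{
	if (is_metadata && !S_ISREG(mode))
		return -EIO;	/* metadata inode must be a regular file */
	if (nlink == 0)
		return -ESTALE;	/* inode was deleted */
	return 0;
}

int main(void)
{
	/* A directory-mode metadata inode must be flagged as corrupted. */
	return validate_inode(S_IFDIR, 1, 1) == -EIO ? 0 : 1;
}
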
index 0afe0832c7547174e17b4efa6b39b51d0c131ee6..5276ab525f010c2a29f0a290a9b201c904fd9ced 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2786,10 +2786,9 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
        inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
 
        err = nilfs_segctor_start_thread(nilfs->ns_writer);
-       if (err) {
-               kfree(nilfs->ns_writer);
-               nilfs->ns_writer = NULL;
-       }
+       if (unlikely(err))
+               nilfs_detach_log_writer(sb);
+
        return err;
 }
 
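
The fix above replaces an open-coded partial cleanup (kfree() plus a NULL assignment) with a call to the full teardown routine, so the setup-failure path and normal detach share one code path and cannot drift apart as teardown grows. A sketch of the idiom with hypothetical names (not the nilfs2 functions themselves):

#include <stdlib.h>

struct log_writer { int started; };
struct fs_ctx { struct log_writer *writer; };

/* The one true teardown path; every cleanup step lives here. */
static void detach_log_writer(struct fs_ctx *fs)
{
	free(fs->writer);
	fs->writer = NULL;
}

static int start_thread(struct log_writer *w) { (void)w; return -1; /* stub failure */ }

static int attach_log_writer(struct fs_ctx *fs)
{
	int err;

	fs->writer = calloc(1, sizeof(*fs->writer));
	if (!fs->writer)
		return -1;

	err = start_thread(fs->writer);
	if (err)
		detach_log_writer(fs);	/* reuse teardown, don't re-implement it */
	return err;
}

int main(void)
{
	struct fs_ctx fs = { 0 };

	(void)attach_log_writer(&fs);	/* stubbed start fails; teardown runs */
	return fs.writer != NULL;	/* 0: writer was detached as expected */
}
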
index 3ec981a0d8b3a5b37f5cbd632f618ab139b36f48..67c88b82fc32d4acd3f3c78accf4c42737a0995a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -207,8 +207,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-                               pmd_t *pmd, int flags);
+struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
+                                int flags);
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
 struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
@@ -312,8 +312,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
        return NULL;
 }
 
-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
-                               unsigned long address, pmd_t *pmd, int flags)
+static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
+                               unsigned long address, int flags)
 {
        return NULL;
 }
index 7d25dc582fe34427d2063793f5667e9fe3a0e423..4cbe7867b547c0783a3a8e1a1a29707dd5b0bf5e 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -313,6 +313,7 @@ struct damon_target *damon_new_target(void)
        t->pid = NULL;
        t->nr_regions = 0;
        INIT_LIST_HEAD(&t->regions_list);
+       INIT_LIST_HEAD(&t->list);
 
        return t;
 }
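
The one-line DAMON fix above matters because a target can be destroyed before it is ever linked into a context: the kernel's list_del() rewrites the node's neighbours through ->prev and ->next, which hold garbage on a node that was never initialized, while a self-linked node (what INIT_LIST_HEAD() produces) unlinks harmlessly. A userspace sketch of that failure mode, with minimal re-implementations assumed to mirror <linux/list.h> semantics:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* Mirrors the kernel list_del(): dereferences both neighbours. */
static void list_del(struct list_head *e)
{
	e->next->prev = e->prev;	/* garbage pointer if uninitialized */
	e->prev->next = e->next;
}

int main(void)
{
	struct list_head target;

	INIT_LIST_HEAD(&target);	/* what damon_new_target() now does */
	list_del(&target);		/* safe: self-linked, unlinks to itself */
	puts("ok");
	return 0;
}
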
index 00926abb44263b72b09872405bcaa93f344ae41b..251cb6a10bc0d67def35caff446a072284ebd811 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -530,6 +530,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
        if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
                         (FOLL_PIN | FOLL_GET)))
                return ERR_PTR(-EINVAL);
+
+       /*
+        * Handle PTE-level hugetlb mappings, such as contiguous-PTE
+        * hugetlb on the ARM64 architecture.
+        */
+       if (is_vm_hugetlb_page(vma)) {
+               page = follow_huge_pmd_pte(vma, address, flags);
+               if (page)
+                       return page;
+               return no_page_table(vma, flags);
+       }
+
 retry:
        if (unlikely(pmd_bad(*pmd)))
                return no_page_table(vma, flags);
@@ -662,7 +674,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
        if (pmd_none(pmdval))
                return no_page_table(vma, flags);
        if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
-               page = follow_huge_pmd(mm, address, pmd, flags);
+               page = follow_huge_pmd_pte(vma, address, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
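
Some numbers behind the contiguous-PTE comment in the first hunk: on ARM64 with 4 KiB base pages, 16 contiguous PTEs map one 64 KiB hugetlb page, far smaller than the 2 MiB a PMD covers, so the page-table walk for such a mapping bottoms out at PTE level and has to be intercepted in follow_page_pte() before the generic PTE path misreads it. A runnable check of the arithmetic (constants assumed for the 4 KiB-granule configuration):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;			/* 4 KiB base page */
	unsigned long cont_ptes = 16;			/* ARM64 CONT_PTES */
	unsigned long ptes_per_pmd = 512;

	printf("CONT-PTE hugetlb page: %lu KiB\n",
	       cont_ptes * page_size / 1024);		/* 64 KiB */
	printf("PMD coverage:          %lu KiB\n",
	       ptes_per_pmd * page_size / 1024);	/* 2048 KiB = 2 MiB */
	return 0;
}
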
index 0bdfc7e1c933f59c8084f9b4288201450c3d90cb..9564bf817e6a8d92999d2653304abf406a3f96d2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6946,12 +6946,13 @@ follow_huge_pd(struct vm_area_struct *vma,
 }
 
 struct page * __weak
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-               pmd_t *pmd, int flags)
+follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
 {
+       struct hstate *h = hstate_vma(vma);
+       struct mm_struct *mm = vma->vm_mm;
        struct page *page = NULL;
        spinlock_t *ptl;
-       pte_t pte;
+       pte_t *ptep, pte;
 
        /*
         * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
@@ -6961,17 +6962,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                return NULL;
 
 retry:
-       ptl = pmd_lockptr(mm, pmd);
-       spin_lock(ptl);
-       /*
-        * make sure that the address range covered by this pmd is not
-        * unmapped from other threads.
-        */
-       if (!pmd_huge(*pmd))
-               goto out;
-       pte = huge_ptep_get((pte_t *)pmd);
+       ptep = huge_pte_offset(mm, address, huge_page_size(h));
+       if (!ptep)
+               return NULL;
+
+       ptl = huge_pte_lock(h, mm, ptep);
+       pte = huge_ptep_get(ptep);
        if (pte_present(pte)) {
-               page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+               page = pte_page(pte) +
+                       ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
                /*
                 * try_grab_page() should always succeed here, because: a) we
                 * hold the pmd (ptl) lock, and b) we've just checked that the
@@ -6987,7 +6986,7 @@ retry:
        } else {
                if (is_hugetlb_entry_migration(pte)) {
                        spin_unlock(ptl);
-                       __migration_entry_wait_huge((pte_t *)pmd, ptl);
+                       __migration_entry_wait_huge(ptep, ptl);
                        goto retry;
                }
                /*
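
Worked example of the subpage computation introduced above, page = pte_page(pte) + ((address & ~huge_page_mask(h)) >> PAGE_SHIFT): for a 64 KiB hugetlb page the mask keeps the low 16 bits of the faulting address, and shifting by PAGE_SHIFT turns that byte offset into an index into the compound page's 4 KiB subpages. A runnable check (sizes assumed: 64 KiB huge page, 4 KiB base pages):

#include <stdio.h>

int main(void)
{
	unsigned long huge_mask = ~((1UL << 16) - 1);	/* 64 KiB huge page */
	unsigned long page_shift = 12;			/* 4 KiB base pages */
	unsigned long address = 0x100000UL + 0x4000UL;	/* 16 KiB into the page */

	unsigned long subpage = (address & ~huge_mask) >> page_shift;
	printf("subpage index: %lu\n", subpage);	/* prints 4 */
	return 0;
}
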