mm/truncate.c: fix THP handling in invalidate_mapping_pages()
[sfrench/cifs-2.6.git] / mm / truncate.c
index 83a059e8cd1de2c950dfc4ad790fcf1140df34b6..2330223841fbbdf40c4e50764a11a557d9c7b426 100644 (file)
@@ -67,17 +67,14 @@ static void truncate_exceptional_entry(struct address_space *mapping,
 
 /*
  * Invalidate exceptional entry if easily possible. This handles exceptional
- * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
- * clean entries.
+ * entries for invalidate_inode_pages().
  */
 static int invalidate_exceptional_entry(struct address_space *mapping,
                                        pgoff_t index, void *entry)
 {
-       /* Handled by shmem itself */
-       if (shmem_mapping(mapping))
+       /* Handled by shmem itself, or for DAX we do nothing. */
+       if (shmem_mapping(mapping) || dax_mapping(mapping))
                return 1;
-       if (dax_mapping(mapping))
-               return dax_invalidate_mapping_entry(mapping, index);
        clear_shadow_entry(mapping, index, entry);
        return 1;
 }
@@ -533,9 +530,15 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                        } else if (PageTransHuge(page)) {
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
-                               /* 'end' is in the middle of THP */
-                               if (index ==  round_down(end, HPAGE_PMD_NR))
+                               /*
+                                * 'end' is in the middle of THP. Don't
+                                * invalidate the page as the part outside of
+                                * 'end' could be still useful.
+                                */
+                               if (index > end) {
+                                       unlock_page(page);
                                        continue;
+                               }
                        }
 
                        ret = invalidate_inode_page(page);
@@ -689,7 +692,17 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                cond_resched();
                index++;
        }
-
+       /*
+        * For DAX we invalidate page tables after invalidating radix tree.  We
+        * could invalidate page tables while invalidating each entry however
+        * that would be expensive. And doing range unmapping beforehand doesn't
+        * work, as we have no cheap way to tell whether a radix tree entry was
+        * remapped again later.
+        */
+       if (dax_mapping(mapping)) {
+               unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
+                                   (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
+       }
 out:
        cleancache_invalidate_inode(mapping);
        return ret;