mm: fix data corruption due to stale mmap reads
[sfrench/cifs-2.6.git] / mm / truncate.c
index 83a059e8cd1de2c950dfc4ad790fcf1140df34b6..6479ed2afc53fb9dd8d9719051ea77e7a5b200af 100644 (file)
@@ -67,17 +67,14 @@ static void truncate_exceptional_entry(struct address_space *mapping,
 
 /*
  * Invalidate exceptional entry if easily possible. This handles exceptional
- * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
- * clean entries.
+ * entries for invalidate_inode_pages().
  */
 static int invalidate_exceptional_entry(struct address_space *mapping,
                                        pgoff_t index, void *entry)
 {
-       /* Handled by shmem itself */
-       if (shmem_mapping(mapping))
+       /* Handled by shmem itself, or for DAX we do nothing. */
+       if (shmem_mapping(mapping) || dax_mapping(mapping))
                return 1;
-       if (dax_mapping(mapping))
-               return dax_invalidate_mapping_entry(mapping, index);
        clear_shadow_entry(mapping, index, entry);
        return 1;
 }
@@ -689,7 +686,17 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                cond_resched();
                index++;
        }
-
+       /*
+        * For DAX we invalidate page tables after invalidating radix tree.  We
+        * could invalidate page tables while invalidating each entry however
+        * that would be expensive. And doing range unmapping beforehand
+        * doesn't work, as we have no cheap way to tell whether a radix tree
+        * entry didn't get remapped later.
+        */
+       if (dax_mapping(mapping)) {
+               unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
+                                   (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
+       }
 out:
        cleancache_invalidate_inode(mapping);
        return ret;