mm: Convert truncate to XArray
author Matthew Wilcox <willy@infradead.org>
Sun, 26 Nov 2017 03:52:46 +0000 (22:52 -0500)
committer Matthew Wilcox <willy@infradead.org>
Sun, 21 Oct 2018 14:46:37 +0000 (10:46 -0400)
This is essentially xa_cmpxchg() with the locking handled above us,
and it doesn't have to handle replacing a NULL entry.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
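
For context, a sketch of the equivalence the message describes. Under the
i_pages lock (already held by the caller), the new xas_load()/xas_store()
pair behaves like a compare-and-exchange of the shadow entry with NULL.
xa_cmpxchg() itself is not usable here because it takes the xa_lock
internally and has no way to register the workingset_update_node callback
that xas_set_update() attaches. The snippet below is illustrative only,
not part of the patch:

	/*
	 * Illustrative only: what __clear_shadow_entry() amounts to if
	 * the caller's locking and the node-update callback are set
	 * aside.  Storing NULL never allocates, so the GFP argument is
	 * irrelevant (0 is passed).
	 */
	if (xa_cmpxchg(&mapping->i_pages, index, entry, NULL, 0) == entry)
		mapping->nrexceptional--;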
mm/truncate.c

index ed778555c9f36ed0ce0e05b5c4f1e057ed986a53..45d68e90b7037669fce5b50849c4d5964451c2d4 100644
 static inline void __clear_shadow_entry(struct address_space *mapping,
                                pgoff_t index, void *entry)
 {
-       struct radix_tree_node *node;
-       void **slot;
+       XA_STATE(xas, &mapping->i_pages, index);
 
-       if (!__radix_tree_lookup(&mapping->i_pages, index, &node, &slot))
+       xas_set_update(&xas, workingset_update_node);
+       if (xas_load(&xas) != entry)
                return;
-       if (*slot != entry)
-               return;
-       __radix_tree_replace(&mapping->i_pages, node, slot, NULL,
-                            workingset_update_node);
+       xas_store(&xas, NULL);
        mapping->nrexceptional--;
 }
 
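The "locking handled above us" lives in the caller; in mm/truncate.c of
this era the wrapper looks roughly like the sketch below (based on the
surrounding code, not part of this hunk):

	static void clear_shadow_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
	{
		xa_lock_irq(&mapping->i_pages);
		__clear_shadow_entry(mapping, index, entry);
		xa_unlock_irq(&mapping->i_pages);
	}
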
@@ -738,10 +735,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                index++;
        }
        /*
-        * For DAX we invalidate page tables after invalidating radix tree.  We
+        * For DAX we invalidate page tables after invalidating page cache.  We
         * could invalidate page tables while invalidating each entry however
         * that would be expensive. And doing range unmapping before doesn't
-        * work as we have no cheap way to find whether radix tree entry didn't
+        * work as we have no cheap way to find whether page cache entry didn't
         * get remapped later.
         */
        if (dax_mapping(mapping)) {
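
The hunk ends inside the dax_mapping() branch. For completeness, a sketch
of how the branch continues in kernels of this period, assuming the
unmap_mapping_pages() form introduced earlier in 4.16 (not part of this
patch):

		/* Unmap page tables for the whole invalidated range in
		 * one pass, after the page cache entries are gone. */
		unmap_mapping_pages(mapping, start, end - start + 1, false);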