Add virt_to_head_page and consolidate code in slab and slub
author    Christoph Lameter <clameter@sgi.com>
          Sun, 6 May 2007 21:49:41 +0000 (14:49 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
          Mon, 7 May 2007 19:12:54 +0000 (12:12 -0700)
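virt_to_page() followed by compound_head() is a pattern that slab and
slub both open-code in several places.  Introduce virt_to_head_page()
in include/linux/mm.h to do both steps in one call, and convert the
callers.  This also moves the head-page lookup out of page_get_slab()
in slab: its callers now always hand it the head page, where the slab
metadata is stored.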
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/slab.c
mm/slub.c

index 695b90437bbc763bba0bb935793450d102f1dbf7..4670ebd1f6229222bbd6df7343362b14f661603e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -286,6 +286,12 @@ static inline void get_page(struct page *page)
        atomic_inc(&page->_count);
 }
 
+static inline struct page *virt_to_head_page(const void *x)
+{
+       struct page *page = virt_to_page(x);
+       return compound_head(page);
+}
+
 /*
  * Setup the page count before being freed into the page allocator for
  * the first time (boot or memory hotplug)
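For illustration, a minimal sketch of the consolidation (the two forms
are equivalent, since compound_head() simply returns the page itself
for a non-compound page):

	/* Open-coded pattern used by the callers before this patch: */
	struct page *page = virt_to_page(x);
	page = compound_head(page);

	/* Consolidated form using the helper added above: */
	struct page *page = virt_to_head_page(x);
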
index f4b2e22b5c616bdb7a29e33d207fdf8fbe66a1f2..3e984afc199c9f2d86bc95ee596d7c4ca253dc62 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -614,20 +614,19 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 
 static inline struct slab *page_get_slab(struct page *page)
 {
-       page = compound_head(page);
        BUG_ON(!PageSlab(page));
        return (struct slab *)page->lru.prev;
 }
 
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
-       struct page *page = virt_to_page(obj);
+       struct page *page = virt_to_head_page(obj);
        return page_get_cache(page);
 }
 
 static inline struct slab *virt_to_slab(const void *obj)
 {
-       struct page *page = virt_to_page(obj);
+       struct page *page = virt_to_head_page(obj);
        return page_get_slab(page);
 }
 
@@ -2876,7 +2875,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 
        objp -= obj_offset(cachep);
        kfree_debugcheck(objp);
-       page = virt_to_page(objp);
+       page = virt_to_head_page(objp);
 
        slabp = page_get_slab(page);
 
@@ -3100,7 +3099,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                struct slab *slabp;
                unsigned objnr;
 
-               slabp = page_get_slab(virt_to_page(objp));
+               slabp = page_get_slab(virt_to_head_page(objp));
                objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
                slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
        }
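
Note that page_get_slab() drops its compound_head() call: every caller
now resolves the head page first via virt_to_head_page(), so the
function can assume it is handed a head page.  This matters because an
object in a higher-order slab may sit on a tail page, while the slab
descriptor is stored in the head page's struct page.  A hedged sketch
of the resulting lookup path (field usage as in 2.6.21-era mm/slab.c):

	static inline struct slab *page_get_slab(struct page *page)
	{
		BUG_ON(!PageSlab(page));	/* caller passed a head page */
		return (struct slab *)page->lru.prev;
	}

	static inline struct slab *virt_to_slab(const void *obj)
	{
		return page_get_slab(virt_to_head_page(obj));
	}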
index 8fa1c6e937f5ccc475ee270115f2d7e1dd106e8b..347c11e80d8ec8c010a6ac3e9d0957c955ee037a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1323,9 +1323,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 {
        struct page * page;
 
-       page = virt_to_page(x);
-
-       page = compound_head(page);
+       page = virt_to_head_page(x);
 
        if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
                set_tracking(s, x, TRACK_FREE);
@@ -1336,7 +1334,7 @@ EXPORT_SYMBOL(kmem_cache_free);
 /* Figure out on which slab object the object resides */
 static struct page *get_object_page(const void *x)
 {
-       struct page *page = compound_head(virt_to_page(x));
+       struct page *page = virt_to_head_page(x);
 
        if (!PageSlab(page))
                return NULL;
@@ -2076,7 +2074,7 @@ void kfree(const void *x)
        if (!x)
                return;
 
-       page = compound_head(virt_to_page(x));
+       page = virt_to_head_page(x);
 
        s = page->slab;
 
@@ -2112,7 +2110,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
                return NULL;
        }
 
-       page = compound_head(virt_to_page(p));
+       page = virt_to_head_page(p);
 
        new_cache = get_slab(new_size, flags);
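
The slub conversions are mechanical: each virt_to_page() plus
compound_head() pair collapses into a single virt_to_head_page() call.
As a rough sketch of the resulting kfree() entry (page->slab
back-pointer as shown in the hunk above; the hand-off to the internal
free path is elided):

	void kfree(const void *x)
	{
		struct kmem_cache *s;
		struct page *page;

		if (!x)
			return;

		page = virt_to_head_page(x);	/* one call instead of two */
		s = page->slab;		/* head page carries the cache pointer */
		/* ... pass the object to the internal free path ... */
	}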