Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc
index 9b2d6178d06ce7259941c6aafa7b20cf81228e40..04151da399c6ce8c344409c2ce7fbf289ac645e5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -211,7 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 #define MAX_OBJECTS_PER_SLAB 65535
 
 /* Internal SLUB flags */
-#define __OBJECT_POISON 0x80000000     /* Poison object */
+#define __OBJECT_POISON                0x80000000 /* Poison object */
+#define __SYSFS_ADD_DEFERRED   0x40000000 /* Not yet visible via sysfs */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -1131,6 +1132,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
                slab_pad_check(s, page);
                for_each_object(p, s, page_address(page))
                        check_object(s, page, p, 0);
+               ClearSlabDebug(page);
        }
 
        mod_zone_page_state(page_zone(page),
@@ -1169,7 +1171,6 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
        atomic_long_dec(&n->nr_slabs);
        reset_page_mapcount(page);
-       ClearSlabDebug(page);
        __ClearPageSlab(page);
        free_slab(s, page);
 }
@@ -1656,6 +1657,7 @@ static void __always_inline slab_free(struct kmem_cache *s,
        unsigned long flags;
 
        local_irq_save(flags);
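+       /* Lockdep check: warn if the object being freed still contains a held lock */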
+       debug_check_no_locks_freed(object, s->objsize);
        if (likely(page == s->cpu_slab[smp_processor_id()] &&
                                                !SlabDebug(page))) {
                object[page->offset] = page->lockless_freelist;
@@ -1875,9 +1877,16 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
 
        BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
 
-       page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node);
+       page = new_slab(kmalloc_caches, gfpflags, node);
 
        BUG_ON(!page);
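+       /*
+        * Without GFP_THISNODE the slab may have been allocated on a
+        * different node than requested; warn and continue with the
+        * remote page rather than failing outright.
+        */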
+       if (page_to_nid(page) != node) {
+               printk(KERN_ERR "SLUB: Unable to allocate memory from "
+                               "node %d\n", node);
+               printk(KERN_ERR "SLUB: Allocating a useless per node structure "
+                               "in order to be able to continue\n");
+       }
+
        n = page->freelist;
        BUG_ON(!n);
        page->freelist = get_freepointer(kmalloc_caches, n);
@@ -2276,10 +2285,26 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
+
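+/*
+ * Work handler: register with sysfs any caches whose creation had to defer
+ * sysfs_slab_add() (flagged __SYSFS_ADD_DEFERRED), e.g. DMA kmalloc caches
+ * created from contexts that cannot sleep.
+ */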
+static void sysfs_add_func(struct work_struct *w)
+{
+       struct kmem_cache *s;
+
+       down_write(&slub_lock);
+       list_for_each_entry(s, &slab_caches, list) {
+               if (s->flags & __SYSFS_ADD_DEFERRED) {
+                       s->flags &= ~__SYSFS_ADD_DEFERRED;
+                       sysfs_slab_add(s);
+               }
+       }
+       up_write(&slub_lock);
+}
+
+static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
+
 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 {
        struct kmem_cache *s;
-       struct kmem_cache *x;
        char *text;
        size_t realsize;
 
@@ -2288,22 +2313,36 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
                return s;
 
        /* Dynamically create dma cache */
-       x = kmalloc(kmem_size, flags & ~SLUB_DMA);
-       if (!x)
-               panic("Unable to allocate memory for dma cache\n");
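+       /*
+        * DMA caches may be requested from atomic context: block on slub_lock
+        * only if the caller can sleep, otherwise try once and give up.
+        */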
+       if (flags & __GFP_WAIT)
+               down_write(&slub_lock);
+       else {
+               if (!down_write_trylock(&slub_lock))
+                       goto out;
+       }
+
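+       /* Recheck under slub_lock: another CPU may have created the cache already */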
+       if (kmalloc_caches_dma[index])
+               goto unlock_out;
 
        realsize = kmalloc_caches[index].objsize;
-       text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
-                       (unsigned int)realsize);
-       s = create_kmalloc_cache(x, text, realsize, flags);
-       down_write(&slub_lock);
-       if (!kmalloc_caches_dma[index]) {
-               kmalloc_caches_dma[index] = s;
-               up_write(&slub_lock);
-               return s;
+       text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", (unsigned int)realsize);
+       s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+
+       if (!s || !text || !kmem_cache_open(s, flags, text,
+                       realsize, ARCH_KMALLOC_MINALIGN,
+                       SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+               kfree(s);
+               kfree(text);
+               goto unlock_out;
        }
+
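+       /* Publish the cache now; sysfs registration happens later via sysfs_add_work */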
+       list_add(&s->list, &slab_caches);
+       kmalloc_caches_dma[index] = s;
+
+       schedule_work(&sysfs_add_work);
+
+unlock_out:
        up_write(&slub_lock);
-       kmem_cache_destroy(s);
+out:
        return kmalloc_caches_dma[index];
 }
 #endif
@@ -2499,15 +2538,11 @@ int kmem_cache_shrink(struct kmem_cache *s)
                                slab_unlock(page);
                                discard_slab(s, page);
                        } else {
-                               if (n->nr_partial > MAX_PARTIAL)
-                                       list_move(&page->lru,
-                                       slabs_by_inuse + page->inuse);
+                               list_move(&page->lru,
+                               slabs_by_inuse + page->inuse);
                        }
                }
 
-               if (n->nr_partial <= MAX_PARTIAL)
-                       goto out;
-
                /*
                 * Rebuild the partial list with the slabs filled up most
                 * first and the least used slabs at the end.
@@ -2515,7 +2550,6 @@ int kmem_cache_shrink(struct kmem_cache *s)
                for (i = s->objects - 1; i >= 0; i--)
                        list_splice(slabs_by_inuse + i, n->partial.prev);
 
-       out:
                spin_unlock_irqrestore(&n->list_lock, flags);
        }
 
@@ -3085,7 +3119,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
                unsigned long flags;
                struct page *page;
 
-               if (!atomic_read(&n->nr_slabs))
+               if (!atomic_long_read(&n->nr_slabs))
                        continue;
 
                spin_lock_irqsave(&n->list_lock, flags);
@@ -3220,7 +3254,7 @@ static unsigned long slab_objects(struct kmem_cache *s,
                }
 
                if (flags & SO_FULL) {
-                       int full_slabs = atomic_read(&n->nr_slabs)
+                       int full_slabs = atomic_long_read(&n->nr_slabs)
                                        - per_cpu[node]
                                        - n->nr_partial;
 
@@ -3256,7 +3290,7 @@ static int any_slab_objects(struct kmem_cache *s)
        for_each_node(node) {
                struct kmem_cache_node *n = get_node(s, node);
 
-               if (n->nr_partial || atomic_read(&n->nr_slabs))
+               if (n->nr_partial || atomic_long_read(&n->nr_slabs))
                        return 1;
        }
        return 0;