*/
#include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
return atomic_long_read(&n->nr_slabs);
}
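+/* Number of slabs currently on this node, read from the per-node counter. */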
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+{
+ return atomic_long_read(&n->nr_slabs);
+}
+
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
struct kmem_cache_node *n = get_node(s, node);
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{ return 0; }
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+ { return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
__ClearPageSlab(page);
reset_page_mapcount(page);
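+	/* Credit the freed slab pages to the caller's direct-reclaim accounting. */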
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += pages;
__free_pages(page, order);
}
return 1;
}
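+/* Number of unallocated objects in a partial slab. */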
+static int count_free(struct page *page)
+{
+ return page->objects - page->inuse;
+}
+
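+/*
+ * Walk the node's partial list under list_lock and sum get_count()
+ * over every slab page on it.
+ */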
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += get_count(page);
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
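+/*
+ * Total number of objects on this node; only tracked when
+ * CONFIG_SLUB_DEBUG is enabled, otherwise report 0.
+ */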
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	return atomic_long_read(&n->total_objects);
+#else
+	return 0;
+#endif
+}
+
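+/*
+ * Dump per-cache and per-node slab statistics when a slab allocation
+ * fails, to help diagnose the out-of-memory condition.
+ */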
+static noinline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
+{
+	int node;
+
+	printk(KERN_WARNING
+		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nid, gfpflags);
+	printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
+		"default order: %d, min order: %d\n", s->name, s->objsize,
+		s->size, oo_order(s->oo), oo_order(s->min));
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long nr_slabs;
+		unsigned long nr_objs;
+		unsigned long nr_free;
+
+		if (!n)
+			continue;
+
+		nr_free = count_partial(n, count_free);
+		nr_slabs = node_nr_slabs(n);
+		nr_objs = node_nr_objs(n);
+
+		printk(KERN_WARNING
+			" node %d: slabs: %ld, objs: %ld, free: %ld\n",
+			node, nr_slabs, nr_objs, nr_free);
+	}
+}
+
/*
* Slow path. The lockless freelist is empty or we need to perform
* debugging duties.
c->page = new;
goto load_freelist;
}
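+	/* Allocation failed: warn (rate-limited) unless the caller passed __GFP_NOWARN. */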
+	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+		slab_out_of_memory(s, gfpflags, node);
return NULL;
debug:
if (!alloc_debug_processing(s, c->page, object, addr))
{
void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
- kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
- s->objsize, s->size, gfpflags);
+ trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
return ret;
}
{
void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
- kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
- s->objsize, s->size, gfpflags, node);
+ trace_kmem_cache_alloc_node(_RET_IP_, ret,
+ s->objsize, s->size, gfpflags, node);
return ret;
}
slab_free(s, page, x, _RET_IP_);
- kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
+ trace_kmem_cache_free(_RET_IP_, x);
}
EXPORT_SYMBOL(kmem_cache_free);
* Doh this slab cannot be placed using slub_max_order.
*/
order = slab_order(size, 1, MAX_ORDER, 1);
- if (order <= MAX_ORDER)
+ if (order < MAX_ORDER)
return order;
return -ENOSYS;
}
static int __init setup_slub_max_order(char *str)
{
get_option(&str, &slub_max_order);
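+	/* Orders of MAX_ORDER or above cannot be allocated, so cap the boot parameter. */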
+	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
return 1;
}
ret = slab_alloc(s, flags, -1, _RET_IP_);
- kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
- size, s->size, flags);
+ trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
return ret;
}
if (unlikely(size > SLUB_MAX_SIZE)) {
ret = kmalloc_large_node(size, flags, node);
- kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
- _RET_IP_, ret,
- size, PAGE_SIZE << get_order(size),
- flags, node);
+ trace_kmalloc_node(_RET_IP_, ret,
+ size, PAGE_SIZE << get_order(size),
+ flags, node);
return ret;
}
ret = slab_alloc(s, flags, node, _RET_IP_);
- kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
- size, s->size, flags, node);
+ trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
return ret;
}
struct page *page;
void *object = (void *)x;
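+	/* Trace before the ZERO_OR_NULL_PTR check so every kfree() call is recorded. */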
+	trace_kfree(_RET_IP_, x);
+
if (unlikely(ZERO_OR_NULL_PTR(x)))
return;
return;
}
slab_free(page->slab, page, object, _RET_IP_);
-
- kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
}
EXPORT_SYMBOL(kfree);
ret = slab_alloc(s, gfpflags, -1, caller);
/* Honor the call site pointer we received. */
- kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
- s->size, gfpflags);
+ trace_kmalloc(caller, ret, size, s->size, gfpflags);
return ret;
}
ret = slab_alloc(s, gfpflags, node, caller);
/* Honor the call site pointer we received. */
- kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
- size, s->size, gfpflags, node);
+ trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
return ret;
}
#ifdef CONFIG_SLUB_DEBUG
-static unsigned long count_partial(struct kmem_cache_node *n,
- int (*get_count)(struct page *))
-{
- unsigned long flags;
- unsigned long x = 0;
- struct page *page;
-
- spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
- x += get_count(page);
- spin_unlock_irqrestore(&n->list_lock, flags);
- return x;
-}
-
static int count_inuse(struct page *page)
{
return page->inuse;
return page->objects;
}
-static int count_free(struct page *page)
-{
- return page->objects - page->inuse;
-}
-
static int validate_slab(struct kmem_cache *s, struct page *page,
unsigned long *map)
{