Merge branch 'linus' into tracing/kmemtrace
author Ingo Molnar <mingo@elte.hu>
Wed, 31 Dec 2008 07:14:29 +0000 (08:14 +0100)
committer Ingo Molnar <mingo@elte.hu>
Wed, 31 Dec 2008 07:14:29 +0000 (08:14 +0100)
Conflicts:
mm/slub.c

19 files changed:
Documentation/ABI/testing/debugfs-kmemtrace [new file with mode: 0644]
Documentation/kernel-parameters.txt
Documentation/sysrq.txt
Documentation/vm/kmemtrace.txt [new file with mode: 0644]
MAINTAINERS
drivers/char/sysrq.c
include/linux/kmemtrace.h [new file with mode: 0644]
include/linux/slab_def.h
include/linux/slob_def.h
include/linux/slub_def.h
init/main.c
kernel/relay.c
kernel/trace/trace_functions_graph.c
lib/Kconfig.debug
mm/Makefile
mm/kmemtrace.c [new file with mode: 0644]
mm/slab.c
mm/slob.c
mm/slub.c

diff --git a/Documentation/ABI/testing/debugfs-kmemtrace b/Documentation/ABI/testing/debugfs-kmemtrace
new file mode 100644 (file)
index 0000000..5e6a92a
--- /dev/null
@@ -0,0 +1,71 @@
+What:          /sys/kernel/debug/kmemtrace/
+Date:          July 2008
+Contact:       Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
+Description:
+
+In kmemtrace-enabled kernels, the following files are created:
+
+/sys/kernel/debug/kmemtrace/
+       cpu<n>          (0400)  Per-CPU tracing data, see below. (binary)
+       total_overruns  (0400)  Total number of bytes dropped from the
+                               cpu<n> files because of a full-buffer
+                               condition. (text)
+       abi_version     (0400)  Kernel's kmemtrace ABI version. (text)
+
+Each per-CPU file should be read according to the relay interface. That is,
+the reader should set affinity to that specific CPU and, as currently done by
+the userspace application (though there are other methods), use poll() with
+an infinite timeout before every read(). Otherwise, erroneous data may be
+read. The binary data has the following _core_ format:
+
+       Event ID        (1 byte)        Unsigned integer, one of:
+               0 - represents an allocation (KMEMTRACE_EVENT_ALLOC)
+               1 - represents a freeing of previously allocated memory
+                   (KMEMTRACE_EVENT_FREE)
+       Type ID         (1 byte)        Unsigned integer, one of:
+               0 - this is a kmalloc() / kfree()
+               1 - this is a kmem_cache_alloc() / kmem_cache_free()
+               2 - this is a __get_free_pages() et al.
+       Event size      (2 bytes)       Unsigned integer representing the
+                                       size of this event. Used to extend
+                                       kmemtrace. Discard the bytes you
+                                       don't know about.
+       Sequence number (4 bytes)       Signed integer used to reorder data
+                                       logged on SMP machines. Wraparound
+                                       must be taken into account, although
+                                       it is unlikely.
+       Caller address  (8 bytes)       Return address to the caller.
+       Pointer to mem  (8 bytes)       Pointer to target memory area. Can be
+                                       NULL, but not all such calls might be
+                                       recorded.
+
+In case of KMEMTRACE_EVENT_ALLOC events, the next fields follow:
+
+       Requested bytes (8 bytes)       Total number of requested bytes,
+                                       unsigned, must not be zero.
+       Allocated bytes (8 bytes)       Total number of actually allocated
+                                       bytes, unsigned, must not be lower
+                                       than requested bytes.
+       Requested flags (4 bytes)       GFP flags supplied by the caller.
+       Target CPU      (4 bytes)       Signed integer, valid for event id 1.
+                                       If equal to -1, target CPU is the same
+                                       as origin CPU, but the reverse might
+                                       not be true.
+
+The data is exported in the host machine's native endianness.
+
+Other event ids and type ids may be defined and added. Other fields may be
+added by increasing event size, but see below for details.
+Every modification to the ABI, including new id definitions, is
+accompanied by bumping the ABI version by one.
+
+Adding new data to the packet (features) is done at the end of the mandatory
+data:
+       Feature size    (2 bytes)
+       Feature ID      (1 byte)
+       Feature data    (Feature size - 3 bytes)
+
+
+Users:
+       kmemtrace-user - git://repo.or.cz/kmemtrace-user.git
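
As an aside, the core layout above maps directly onto a packed C struct.
The sketch below is hypothetical and not taken from kmemtrace-user:
dump_cpu(), its buffer sizing and its framing assumptions are invented for
illustration. It pins itself to the matching CPU, poll()s with an infinite
timeout before every read() as required, and uses the event size field to
skip trailing data it does not understand.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <poll.h>
	#include <sched.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Mirrors the core packet layout above; host endianness. */
	struct kmemtrace_packet {
		uint8_t  event_id;	/* 0 = ALLOC, 1 = FREE */
		uint8_t  type_id;	/* 0 = kmalloc, 1 = cache, 2 = pages */
		uint16_t event_size;	/* total packet size, used to skip */
		int32_t  seq_num;
		uint64_t call_site;
		uint64_t ptr;
	} __attribute__((packed));

	/* Pin to 'cpu', then poll()+read() its relay file. */
	static int dump_cpu(const char *path, int cpu)
	{
		struct pollfd pfd = { .events = POLLIN };
		unsigned char buf[65536];
		cpu_set_t set;
		ssize_t n;

		CPU_ZERO(&set);
		CPU_SET(cpu, &set);
		if (sched_setaffinity(0, sizeof(set), &set) < 0)
			return -1;

		pfd.fd = open(path, O_RDONLY);
		if (pfd.fd < 0)
			return -1;

		while (poll(&pfd, 1, -1) > 0 &&
		       (n = read(pfd.fd, buf, sizeof(buf))) > 0) {
			ssize_t off = 0;

			/* Assumes packets never straddle a read();
			   a real reader must buffer partial packets. */
			while (off + (ssize_t)sizeof(struct kmemtrace_packet) <= n) {
				struct kmemtrace_packet *p =
					(void *)(buf + off);

				if (p->event_size < sizeof(*p))
					break;	/* corrupt stream */
				printf("ev %u type %u seq %d ptr 0x%llx\n",
				       (unsigned)p->event_id,
				       (unsigned)p->type_id,
				       (int)p->seq_num,
				       (unsigned long long)p->ptr);
				off += p->event_size;
			}
		}
		close(pfd.fd);
		return 0;
	}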
+
index a2d8805c03d5588f317a9677add758c94051e6a6..af600c0fe0eccbc6e571535fcca99a64a24a23e9 100644 (file)
@@ -49,6 +49,7 @@ parameter is applicable:
        ISAPNP  ISA PnP code is enabled.
        ISDN    Appropriate ISDN support is enabled.
        JOY     Appropriate joystick support is enabled.
+       KMEMTRACE kmemtrace is enabled.
        LIBATA  Libata driver is enabled
        LP      Printer support is enabled.
        LOOP    Loopback device support is enabled.
@@ -1033,6 +1034,15 @@ and is between 256 and 4096 characters. It is defined in the file
                        use the HighMem zone if it exists, and the Normal
                        zone if it does not.
 
+       kmemtrace.enable=       [KNL,KMEMTRACE] Format: { yes | no }
+                               Controls whether kmemtrace is enabled
+                               at boot-time.
+
+       kmemtrace.subbufs=n     [KNL,KMEMTRACE] Overrides the number of
+                       subbufs kmemtrace's relay channel has. Set this
+                       higher than the default (KMEMTRACE_DEF_N_SUBBUFS in
+                       the code) if you experience buffer overruns.
+
        movablecore=nn[KMG]     [KNL,X86-32,IA-64,PPC,X86-64] This parameter
                        is similar to kernelcore except it specifies the
                        amount of memory used for migratable allocations.
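
For instance, a boot line that turns tracing on and raises the sub-buffer
count well above the default of 20 might read (the value is illustrative):

	kmemtrace.enable=yes kmemtrace.subbufs=80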
index 10a0263ebb3f01e832c7827cc75d7fe54b341a6f..56b53e005d189c0fe967beed267bd00793f79d0a 100644 (file)
@@ -114,6 +114,8 @@ On all -  write a character to /proc/sysrq-trigger.  e.g.:
 
 'x'    - Used by xmon interface on ppc/powerpc platforms.
 
+'z'    - Dump the ftrace buffer
+
 '0'-'9' - Sets the console log level, controlling which kernel messages
          will be printed to your console. ('0', for example, would make
           it so that only emergency messages like PANICs or OOPSes would
diff --git a/Documentation/vm/kmemtrace.txt b/Documentation/vm/kmemtrace.txt
new file mode 100644 (file)
index 0000000..a956d9b
--- /dev/null
@@ -0,0 +1,126 @@
+                       kmemtrace - Kernel Memory Tracer
+
+                         by Eduard - Gabriel Munteanu
+                            <eduard.munteanu@linux360.ro>
+
+I. Introduction
+===============
+
+kmemtrace helps kernel developers figure out two things:
+1) how different allocators (SLAB, SLUB etc.) perform
+2) how kernel code allocates memory and how much
+
+To do this, we trace every allocation and export information to userspace
+through the relay interface. We export things such as the number of requested
+bytes, the number of bytes actually allocated (i.e. including internal
+fragmentation), whether this is a slab allocation or a plain kmalloc() and so
+on.
+
+The actual analysis is performed by a userspace tool (see section III for
+details on where to get it from). It logs the data exported by the kernel,
+processes it and (as of writing this) can provide the following information:
+- the total amount of memory allocated and fragmentation per call-site
+- the amount of memory allocated and fragmentation per allocation
+- total memory allocated and fragmentation in the collected dataset
+- number of cross-CPU allocations and frees (makes sense in NUMA environments)
+
+Moreover, it can potentially find inconsistent and erroneous behavior in
+kernel code, such as using slab free functions on kmalloc'ed memory or
+allocating less memory than requested (but not truly failed allocations).
+
+kmemtrace also makes provisions for tracing on one arch and analysing the
+data on another.
+
+II. Design and goals
+====================
+
+kmemtrace was designed to handle rather large amounts of data. Thus, it uses
+the relay interface to export whatever is logged to userspace, which then
+stores it. Analysis and reporting is done asynchronously, that is, after the
+data is collected and stored. By design, it allows one to log and analyse
+on different machines and different arches.
+
+As of writing this, the ABI is not considered stable, though it might not
+change much. However, no guarantees are made about compatibility yet. When
+deemed stable, the ABI should still allow easy extension while maintaining
+backward compatibility. This is described further in Documentation/ABI.
+
+Summary of design goals:
+       - allow logging and analysis to be done across different machines
+       - be fast and anticipate usage in high-load environments (*)
+       - be reasonably extensible
+       - make it possible for GNU/Linux distributions to have kmemtrace
+       included in their repositories
+
+(*) - this is one of the reasons Pekka Enberg's original userspace data
+    analysis tool was rewritten from Perl to C (although the rewrite was
+    more than a simple conversion)
+
+
+III. Quick usage guide
+======================
+
+1) Get a kernel that supports kmemtrace and build it accordingly (i.e. enable
+CONFIG_KMEMTRACE).
+
+2) Get the userspace tool and build it:
+$ git-clone git://repo.or.cz/kmemtrace-user.git                # current repository
+$ cd kmemtrace-user/
+$ ./autogen.sh
+$ ./configure
+$ make
+
+3) Boot the kmemtrace-enabled kernel if you haven't already, preferably in the
+'single' runlevel (so that relay buffers don't fill up easily), and run
+kmemtrace:
+# Note: the '$' prompt below denotes root, not an unprivileged user.
+$ mount -t debugfs none /sys/kernel/debug
+$ mount -t proc none /proc
+$ cd path/to/kmemtrace-user/
+$ ./kmemtraced
+Wait a bit, then stop it with CTRL+C.
+$ cat /sys/kernel/debug/kmemtrace/total_overruns       # Check if we didn't
+                                                       # overrun, should
+                                                       # be zero.
+(Optionally) Run kmemtrace_check separately on each cpu[0-9]*.out file
+to check its correctness.
+$ ./kmemtrace-report
+
+Now you should have a nice and short summary of how the allocator performs.
+
+IV. FAQ and known issues
+========================
+
+Q: 'cat /sys/kernel/debug/kmemtrace/total_overruns' is non-zero, how do I fix
+this? Should I worry?
+A: If it's non-zero, kmemtrace's accuracy is affected, depending on how
+large the number is. You can fix it by booting with a larger
+'kmemtrace.subbufs=N' kernel parameter.
+---
+
+Q: kmemtrace_check reports errors, how do I fix this? Should I worry?
+A: This is a bug and should be reported. It can occur for a variety of
+reasons:
+       - possible bugs in relay code
+       - possible misuse of relay by kmemtrace
+       - timestamps being collected out of order
+Or you may fix it yourself and send us a patch.
+---
+
+Q: kmemtrace_report shows many errors, how do I fix this? Should I worry?
+A: This is a known issue and I'm working on it. These might be true errors
+in kernel code, which may have inconsistent behavior (e.g. allocating memory
+with kmem_cache_alloc() and freeing it with kfree()). Pekka Enberg pointed
+out this behavior may work with SLAB, but may fail with other allocators.
+
+It may also be due to lack of tracing in some unusual allocator functions.
+
+We don't want bug reports regarding this issue yet.
+---
+
+V. See also
+===========
+
+Documentation/kernel-parameters.txt
+Documentation/ABI/testing/debugfs-kmemtrace
+
index ceb32ee51f9de33077fcf95a3ff1845465b2a130..72d6eed3f204f8fafb4d94b2916634b54ec32c0e 100644 (file)
@@ -2573,6 +2573,12 @@ M:       jason.wessel@windriver.com
 L:     kgdb-bugreport@lists.sourceforge.net
 S:     Maintained
 
+KMEMTRACE
+P:     Eduard - Gabriel Munteanu
+M:     eduard.munteanu@linux360.ro
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+
 KPROBES
 P:     Ananth N Mavinakayanahalli
 M:     ananth@in.ibm.com
index 94966edfb44dedc340d4d81f230c43a0aec9b7bf..785a08ef9a13c77b53549a60598109ed242fc9ea 100644 (file)
@@ -283,7 +283,7 @@ static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
 }
 static struct sysrq_key_op sysrq_ftrace_dump_op = {
        .handler        = sysrq_ftrace_dump,
-       .help_msg       = "dumpZ-ftrace-buffer",
+       .help_msg       = "dump-ftrace-buffer(Z)",
        .action_msg     = "Dump ftrace buffer",
        .enable_mask    = SYSRQ_ENABLE_DUMP,
 };
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h
new file mode 100644 (file)
index 0000000..5bea8ea
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ *
+ * This file is released under GPL version 2.
+ */
+
+#ifndef _LINUX_KMEMTRACE_H
+#define _LINUX_KMEMTRACE_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/marker.h>
+
+enum kmemtrace_type_id {
+       KMEMTRACE_TYPE_KMALLOC = 0,     /* kmalloc() or kfree(). */
+       KMEMTRACE_TYPE_CACHE,           /* kmem_cache_*(). */
+       KMEMTRACE_TYPE_PAGES,           /* __get_free_pages() and friends. */
+};
+
+#ifdef CONFIG_KMEMTRACE
+
+extern void kmemtrace_init(void);
+
+static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+                                            unsigned long call_site,
+                                            const void *ptr,
+                                            size_t bytes_req,
+                                            size_t bytes_alloc,
+                                            gfp_t gfp_flags,
+                                            int node)
+{
+       trace_mark(kmemtrace_alloc, "type_id %d call_site %lu ptr %lu "
+                  "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d",
+                  type_id, call_site, (unsigned long) ptr,
+                  (unsigned long) bytes_req, (unsigned long) bytes_alloc,
+                  (unsigned long) gfp_flags, node);
+}
+
+static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+                                      unsigned long call_site,
+                                      const void *ptr)
+{
+       trace_mark(kmemtrace_free, "type_id %d call_site %lu ptr %lu",
+                  type_id, call_site, (unsigned long) ptr);
+}
+
+#else /* CONFIG_KMEMTRACE */
+
+static inline void kmemtrace_init(void)
+{
+}
+
+static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+                                            unsigned long call_site,
+                                            const void *ptr,
+                                            size_t bytes_req,
+                                            size_t bytes_alloc,
+                                            gfp_t gfp_flags,
+                                            int node)
+{
+}
+
+static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+                                      unsigned long call_site,
+                                      const void *ptr)
+{
+}
+
+#endif /* CONFIG_KMEMTRACE */
+
+static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
+                                       unsigned long call_site,
+                                       const void *ptr,
+                                       size_t bytes_req,
+                                       size_t bytes_alloc,
+                                       gfp_t gfp_flags)
+{
+       kmemtrace_mark_alloc_node(type_id, call_site, ptr,
+                                 bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_KMEMTRACE_H */
+
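Note how the !CONFIG_KMEMTRACE stubs above are empty inlines: call sites can
invoke the hooks unconditionally and the compiler discards them when tracing
is configured out. A hypothetical caller (my_alloc() is invented for
illustration; the real hooking of the slab allocators follows in the mm/
changes below) would look like:

	/* Hypothetical wrapper; my_alloc() is not a real kernel function. */
	static inline void *my_traced_alloc(size_t size, gfp_t flags)
	{
		void *ret = my_alloc(size, flags);

		/* Compiles to nothing when CONFIG_KMEMTRACE is off. */
		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
				     size, size, flags);
		return ret;
	}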
index 39c3a5eb8ebe677851fedd8c8ff6a70730d17c0b..7555ce99f6d240dede80ddec2f16729ad3a5881b 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/page.h>          /* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>         /* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
+#include <linux/kmemtrace.h>
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-static inline void *kmalloc(size_t size, gfp_t flags)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern size_t slab_buffer_size(struct kmem_cache *cachep);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
 {
+       return kmem_cache_alloc(cachep, flags);
+}
+static inline size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+       return 0;
+}
+#endif
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+       struct kmem_cache *cachep;
+       void *ret;
+
        if (__builtin_constant_p(size)) {
                int i = 0;
 
@@ -50,10 +69,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 found:
 #ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
-                       return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
-                                               flags);
+                       cachep = malloc_sizes[i].cs_dmacachep;
+               else
 #endif
-               return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
+                       cachep = malloc_sizes[i].cs_cachep;
+
+               ret = kmem_cache_alloc_notrace(cachep, flags);
+
+               kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+                                    size, slab_buffer_size(cachep), flags);
+
+               return ret;
        }
        return __kmalloc(size, flags);
 }
@@ -62,8 +88,25 @@ found:
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+                                          gfp_t flags,
+                                          int nodeid);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+                             gfp_t flags,
+                             int nodeid)
+{
+       return kmem_cache_alloc_node(cachep, flags, nodeid);
+}
+#endif
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+       struct kmem_cache *cachep;
+       void *ret;
+
        if (__builtin_constant_p(size)) {
                int i = 0;
 
@@ -84,11 +127,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 found:
 #ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
-                       return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
-                                               flags, node);
+                       cachep = malloc_sizes[i].cs_dmacachep;
+               else
 #endif
-               return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
-                                               flags, node);
+                       cachep = malloc_sizes[i].cs_cachep;
+
+               ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
+                                         ret, size, slab_buffer_size(cachep),
+                                         flags, node);
+
+               return ret;
        }
        return __kmalloc_node(size, flags, node);
 }
index 59a3fa476ab9f7afe3f407c72b616c23143324a7..0ec00b39d006471e585a11e74759ff832f6f028b 100644 (file)
@@ -3,14 +3,15 @@
 
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
+                                             gfp_t flags)
 {
        return kmem_cache_alloc_node(cachep, flags, -1);
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
        return __kmalloc_node(size, flags, node);
 }
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  * kmalloc is the normal method of allocating memory
  * in the kernel.
  */
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
        return __kmalloc_node(size, flags, -1);
 }
 
-static inline void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__kmalloc(size_t size, gfp_t flags)
 {
        return kmalloc(size, flags);
 }
index 2f5c16b1aacd3d7bd83a50d90fcba527017a0373..dc28432b5b9abf8c566a74d363b197fc3d0bca5f 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
+#include <linux/kmemtrace.h>
 
 enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
@@ -204,13 +205,31 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+       return kmem_cache_alloc(s, gfpflags);
+}
+#endif
+
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
-       return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+       unsigned int order = get_order(size);
+       void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+                            size, PAGE_SIZE << order, flags);
+
+       return ret;
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+       void *ret;
+
        if (__builtin_constant_p(size)) {
                if (size > PAGE_SIZE)
                        return kmalloc_large(size, flags);
@@ -221,7 +240,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
                        if (!s)
                                return ZERO_SIZE_PTR;
 
-                       return kmem_cache_alloc(s, flags);
+                       ret = kmem_cache_alloc_notrace(s, flags);
+
+                       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+                                            _THIS_IP_, ret,
+                                            size, s->size, flags);
+
+                       return ret;
                }
        }
        return __kmalloc(size, flags);
@@ -231,8 +256,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                                          gfp_t gfpflags,
+                                          int node);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                             gfp_t gfpflags,
+                             int node)
+{
+       return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif
+
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+       void *ret;
+
        if (__builtin_constant_p(size) &&
                size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
                        struct kmem_cache *s = kmalloc_slab(size);
@@ -240,7 +281,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
                if (!s)
                        return ZERO_SIZE_PTR;
 
-               return kmem_cache_alloc_node(s, flags, node);
+               ret = kmem_cache_alloc_node_notrace(s, flags, node);
+
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                         _THIS_IP_, ret,
+                                         size, s->size, flags, node);
+
+               return ret;
        }
        return __kmalloc_node(size, flags, node);
 }
index 2a7ce0f8e45353af2204c017c1921afdd0ba5c91..535b1d4bd5f406fdc0ce25c7a05c0b4c8ed35b97 100644 (file)
@@ -70,6 +70,7 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
+#include <linux/kmemtrace.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/smp.h>
@@ -665,6 +666,7 @@ asmlinkage void __init start_kernel(void)
        enable_debug_pagealloc();
        cpu_hotplug_init();
        kmem_cache_init();
+       kmemtrace_init();
        debug_objects_mem_init();
        idr_init_cache();
        setup_per_cpu_pageset();
index 09ac2008f77b419a5c169e41f3ddcbab6aa90746..d06450670c8671541791bb16cf41d4f7f478e110 100644 (file)
@@ -675,9 +675,7 @@ int relay_late_setup_files(struct rchan *chan,
         */
        for_each_online_cpu(i) {
                if (unlikely(!chan->buf[i])) {
-                       printk(KERN_ERR "relay_late_setup_files: CPU %u "
-                                       "has no buffer, it must have!\n", i);
-                       BUG();
+                       WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
                        err = -EINVAL;
                        break;
                }
index 4bf39fcae97afadc384b104e5d77be0120e375e3..bc7d90850be5c2977bfe4f81803d5d3f7c7bedec 100644 (file)
@@ -592,6 +592,12 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
        if (ent->flags & TRACE_FLAG_CONT)
                trace_seq_print_cont(s, iter);
 
+       /* Strip ending newline */
+       if (s->buffer[s->len - 1] == '\n') {
+               s->buffer[s->len - 1] = '\0';
+               s->len--;
+       }
+
        ret = trace_seq_printf(s, " */\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
index 2e75478e9c696bc6933ca8952c0a013dd30eb78a..aafb82dfb8aab79dfaf1f57ebbe3be44d54ce6e9 100644 (file)
@@ -835,6 +835,26 @@ config FIREWIRE_OHCI_REMOTE_DMA
 
          If unsure, say N.
 
+config KMEMTRACE
+       bool "Kernel memory tracer (kmemtrace)"
+       depends on RELAY && DEBUG_FS && MARKERS
+       help
+         kmemtrace provides tracing for slab allocator functions, such as
+         kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
+         data is then fed to the userspace application in order to analyse
+         allocation hotspots, internal fragmentation and so on, making it
+         possible to see how well an allocator performs, as well as debug
+         and profile kernel code.
+
+         A userspace application is required to make use of this. See
+         Documentation/vm/kmemtrace.txt for more information.
+
+         Saying Y will make the kernel somewhat larger and slower. However,
+         if you disable kmemtrace at run-time or boot-time, the performance
+         impact is minimal (depending on the arch the kernel is built for).
+
+         If unsure, say N.
+
 menuconfig BUILD_DOCSRC
        bool "Build targets in Documentation/ tree"
        depends on HEADERS_CHECK
index 51c27709cc7c0b6d15e682bbb6e9c07f63acabaa..c92e8af132062fca5444e4d497a9989a05c31254 100644 (file)
@@ -35,3 +35,4 @@ obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
+obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
diff --git a/mm/kmemtrace.c b/mm/kmemtrace.c
new file mode 100644 (file)
index 0000000..2a70a80
--- /dev/null
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2008 Pekka Enberg, Eduard - Gabriel Munteanu
+ *
+ * This file is released under GPL version 2.
+ */
+
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include <linux/module.h>
+#include <linux/marker.h>
+#include <linux/gfp.h>
+#include <linux/kmemtrace.h>
+
+#define KMEMTRACE_SUBBUF_SIZE          524288
+#define KMEMTRACE_DEF_N_SUBBUFS                20
+
+static struct rchan *kmemtrace_chan;
+static u32 kmemtrace_buf_overruns;
+
+static unsigned int kmemtrace_n_subbufs;
+
+/* disabled by default */
+static unsigned int kmemtrace_enabled;
+
+/*
+ * The sequence number is used for reordering kmemtrace packets
+ * in userspace, since they are logged as per-CPU data.
+ *
+ * atomic_t should always be a 32-bit signed integer. Wraparound is not
+ * likely to occur, but userspace can deal with it by expecting a certain
+ * sequence number in the next packet that will be read.
+ */
+static atomic_t kmemtrace_seq_num;
+
+#define KMEMTRACE_ABI_VERSION          1
+
+static u32 kmemtrace_abi_version __read_mostly = KMEMTRACE_ABI_VERSION;
+
+enum kmemtrace_event_id {
+       KMEMTRACE_EVENT_ALLOC = 0,
+       KMEMTRACE_EVENT_FREE,
+};
+
+struct kmemtrace_event {
+       u8              event_id;
+       u8              type_id;
+       u16             event_size;
+       s32             seq_num;
+       u64             call_site;
+       u64             ptr;
+} __attribute__ ((__packed__));
+
+struct kmemtrace_stats_alloc {
+       u64             bytes_req;
+       u64             bytes_alloc;
+       u32             gfp_flags;
+       s32             numa_node;
+} __attribute__ ((__packed__));
+
+static void kmemtrace_probe_alloc(void *probe_data, void *call_data,
+                                 const char *format, va_list *args)
+{
+       unsigned long flags;
+       struct kmemtrace_event *ev;
+       struct kmemtrace_stats_alloc *stats;
+       void *buf;
+
+       local_irq_save(flags);
+
+       buf = relay_reserve(kmemtrace_chan,
+                           sizeof(struct kmemtrace_event) +
+                           sizeof(struct kmemtrace_stats_alloc));
+       if (!buf)
+               goto failed;
+
+       /*
+        * Don't convert this to use structure initializers;
+        * C99 does not guarantee the evaluation order of the rvalues.
+        */
+
+       ev = buf;
+       ev->event_id = KMEMTRACE_EVENT_ALLOC;
+       ev->type_id = va_arg(*args, int);
+       ev->event_size = sizeof(struct kmemtrace_event) +
+                        sizeof(struct kmemtrace_stats_alloc);
+       ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num);
+       ev->call_site = va_arg(*args, unsigned long);
+       ev->ptr = va_arg(*args, unsigned long);
+
+       stats = buf + sizeof(struct kmemtrace_event);
+       stats->bytes_req = va_arg(*args, unsigned long);
+       stats->bytes_alloc = va_arg(*args, unsigned long);
+       stats->gfp_flags = va_arg(*args, unsigned long);
+       stats->numa_node = va_arg(*args, int);
+
+failed:
+       local_irq_restore(flags);
+}
+
+static void kmemtrace_probe_free(void *probe_data, void *call_data,
+                                const char *format, va_list *args)
+{
+       unsigned long flags;
+       struct kmemtrace_event *ev;
+
+       local_irq_save(flags);
+
+       ev = relay_reserve(kmemtrace_chan, sizeof(struct kmemtrace_event));
+       if (!ev)
+               goto failed;
+
+       /*
+        * Don't convert this to use structure initializers;
+        * C99 does not guarantee the evaluation order of the rvalues.
+        */
+       ev->event_id = KMEMTRACE_EVENT_FREE;
+       ev->type_id = va_arg(*args, int);
+       ev->event_size = sizeof(struct kmemtrace_event);
+       ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num);
+       ev->call_site = va_arg(*args, unsigned long);
+       ev->ptr = va_arg(*args, unsigned long);
+
+failed:
+       local_irq_restore(flags);
+}
+
+static struct dentry *
+kmemtrace_create_buf_file(const char *filename, struct dentry *parent,
+                         int mode, struct rchan_buf *buf, int *is_global)
+{
+       return debugfs_create_file(filename, mode, parent, buf,
+                                  &relay_file_operations);
+}
+
+static int kmemtrace_remove_buf_file(struct dentry *dentry)
+{
+       debugfs_remove(dentry);
+
+       return 0;
+}
+
+static int kmemtrace_subbuf_start(struct rchan_buf *buf,
+                                 void *subbuf,
+                                 void *prev_subbuf,
+                                 size_t prev_padding)
+{
+       if (relay_buf_full(buf)) {
+               /*
+                * We know it's not SMP-safe, but neither
+                * is debugfs_create_u32().
+                */
+               kmemtrace_buf_overruns++;
+               return 0;
+       }
+
+       return 1;
+}
+
+static struct rchan_callbacks relay_callbacks = {
+       .create_buf_file = kmemtrace_create_buf_file,
+       .remove_buf_file = kmemtrace_remove_buf_file,
+       .subbuf_start = kmemtrace_subbuf_start,
+};
+
+static struct dentry *kmemtrace_dir;
+static struct dentry *kmemtrace_overruns_dentry;
+static struct dentry *kmemtrace_abi_version_dentry;
+
+static struct dentry *kmemtrace_enabled_dentry;
+
+static int kmemtrace_start_probes(void)
+{
+       int err;
+
+       err = marker_probe_register("kmemtrace_alloc", "type_id %d "
+                                   "call_site %lu ptr %lu "
+                                   "bytes_req %lu bytes_alloc %lu "
+                                   "gfp_flags %lu node %d",
+                                   kmemtrace_probe_alloc, NULL);
+       if (err)
+               return err;
+       err = marker_probe_register("kmemtrace_free", "type_id %d "
+                                   "call_site %lu ptr %lu",
+                                   kmemtrace_probe_free, NULL);
+
+       return err;
+}
+
+static void kmemtrace_stop_probes(void)
+{
+       marker_probe_unregister("kmemtrace_alloc",
+                               kmemtrace_probe_alloc, NULL);
+       marker_probe_unregister("kmemtrace_free",
+                               kmemtrace_probe_free, NULL);
+}
+
+static int kmemtrace_enabled_get(void *data, u64 *val)
+{
+       *val = *((int *) data);
+
+       return 0;
+}
+
+static int kmemtrace_enabled_set(void *data, u64 val)
+{
+       u64 old_val = kmemtrace_enabled;
+
+       *((int *) data) = !!val;
+
+       if (old_val == val)
+               return 0;
+       if (val)
+               kmemtrace_start_probes();
+       else
+               kmemtrace_stop_probes();
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kmemtrace_enabled_fops,
+                       kmemtrace_enabled_get,
+                       kmemtrace_enabled_set, "%llu\n");
+
+static void kmemtrace_cleanup(void)
+{
+       if (kmemtrace_enabled_dentry)
+               debugfs_remove(kmemtrace_enabled_dentry);
+
+       kmemtrace_stop_probes();
+
+       if (kmemtrace_abi_version_dentry)
+               debugfs_remove(kmemtrace_abi_version_dentry);
+       if (kmemtrace_overruns_dentry)
+               debugfs_remove(kmemtrace_overruns_dentry);
+
+       relay_close(kmemtrace_chan);
+       kmemtrace_chan = NULL;
+
+       if (kmemtrace_dir)
+               debugfs_remove(kmemtrace_dir);
+}
+
+static int __init kmemtrace_setup_late(void)
+{
+       if (!kmemtrace_chan)
+               goto failed;
+
+       kmemtrace_dir = debugfs_create_dir("kmemtrace", NULL);
+       if (!kmemtrace_dir)
+               goto cleanup;
+
+       kmemtrace_abi_version_dentry =
+               debugfs_create_u32("abi_version", S_IRUSR,
+                                  kmemtrace_dir, &kmemtrace_abi_version);
+       kmemtrace_overruns_dentry =
+               debugfs_create_u32("total_overruns", S_IRUSR,
+                                  kmemtrace_dir, &kmemtrace_buf_overruns);
+       if (!kmemtrace_overruns_dentry || !kmemtrace_abi_version_dentry)
+               goto cleanup;
+
+       kmemtrace_enabled_dentry =
+               debugfs_create_file("enabled", S_IRUSR | S_IWUSR,
+                                   kmemtrace_dir, &kmemtrace_enabled,
+                                   &kmemtrace_enabled_fops);
+       if (!kmemtrace_enabled_dentry)
+               goto cleanup;
+
+       if (relay_late_setup_files(kmemtrace_chan, "cpu", kmemtrace_dir))
+               goto cleanup;
+
+       printk(KERN_INFO "kmemtrace: fully up.\n");
+
+       return 0;
+
+cleanup:
+       kmemtrace_cleanup();
+failed:
+       return 1;
+}
+late_initcall(kmemtrace_setup_late);
+
+static int __init kmemtrace_set_boot_enabled(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "yes"))
+               kmemtrace_enabled = 1;
+       else if (!strcmp(str, "no"))
+               kmemtrace_enabled = 0;
+       else
+               return -EINVAL;
+
+       return 0;
+}
+early_param("kmemtrace.enable", kmemtrace_set_boot_enabled);
+
+static int __init kmemtrace_set_subbufs(char *str)
+{
+       get_option(&str, &kmemtrace_n_subbufs);
+       return 0;
+}
+early_param("kmemtrace.subbufs", kmemtrace_set_subbufs);
+
+void kmemtrace_init(void)
+{
+       if (!kmemtrace_n_subbufs)
+               kmemtrace_n_subbufs = KMEMTRACE_DEF_N_SUBBUFS;
+
+       kmemtrace_chan = relay_open(NULL, NULL, KMEMTRACE_SUBBUF_SIZE,
+                                   kmemtrace_n_subbufs, &relay_callbacks,
+                                   NULL);
+       if (!kmemtrace_chan) {
+               printk(KERN_ERR "kmemtrace: could not open relay channel.\n");
+               return;
+       }
+
+       if (!kmemtrace_enabled) {
+               printk(KERN_INFO "kmemtrace: disabled. Pass "
+                       "kmemtrace.enable=yes as kernel parameter for "
+                       "boot-time tracing.\n");
+               return;
+       }
+       if (kmemtrace_start_probes()) {
+               printk(KERN_ERR "kmemtrace: could not register marker probes!\n");
+               kmemtrace_cleanup();
+               return;
+       }
+
+       printk(KERN_INFO "kmemtrace: enabled.\n");
+}
+
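The sequence-number comment in the file above leaves wraparound handling to
userspace. A wraparound-safe ordering test, the same modular-arithmetic
idiom the kernel uses for jiffies, suffices as long as the two numbers being
compared are less than 2^31 apart; a minimal sketch:

	#include <stdint.h>

	/* Nonzero if sequence number 'a' was issued before 'b'.
	   Correct across s32 wraparound while the distance between
	   the two stays below 2^31. */
	static inline int seq_before(int32_t a, int32_t b)
	{
		return (int32_t)((uint32_t)a - (uint32_t)b) < 0;
	}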
index f97e564bdf118f140e66fb8ad1978d20abf2c939..e0ad07f97f4bc7af80e1f99213bf28aba8812173 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/cpu.h>
 #include       <linux/sysctl.h>
 #include       <linux/module.h>
+#include       <linux/kmemtrace.h>
 #include       <linux/rcupdate.h>
 #include       <linux/string.h>
 #include       <linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+       return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3550,10 +3559,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-       return __cache_alloc(cachep, flags, __builtin_return_address(0));
+       void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                            obj_size(cachep), cachep->buffer_size, flags);
+
+       return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+       return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3598,23 +3620,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-       return __cache_alloc_node(cachep, flags, nodeid,
-                       __builtin_return_address(0));
+       void *ret = __cache_alloc_node(cachep, flags, nodeid,
+                                      __builtin_return_address(0));
+
+       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                                 obj_size(cachep), cachep->buffer_size,
+                                 flags, nodeid);
+
+       return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+                                   gfp_t flags,
+                                   int nodeid)
+{
+       return __cache_alloc_node(cachep, flags, nodeid,
+                                 __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
        struct kmem_cache *cachep;
+       void *ret;
 
        cachep = kmem_find_general_cachep(size, flags);
        if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                return cachep;
-       return kmem_cache_alloc_node(cachep, flags, node);
+       ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                 (unsigned long) caller, ret,
+                                 size, cachep->buffer_size, flags, node);
+
+       return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
        return __do_kmalloc_node(size, flags, node,
@@ -3647,6 +3693,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
                                          void *caller)
 {
        struct kmem_cache *cachep;
+       void *ret;
 
        /* If you want to save a few bytes .text space: replace
         * __ with kmem_.
@@ -3656,11 +3703,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
        cachep = __find_general_cachep(size, flags);
        if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                return cachep;
-       return __cache_alloc(cachep, flags, caller);
+       ret = __cache_alloc(cachep, flags, caller);
+
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+                            (unsigned long) caller, ret,
+                            size, cachep->buffer_size, flags);
+
+       return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
        return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3699,6 +3752,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
                debug_check_no_obj_freed(objp, obj_size(cachep));
        __cache_free(cachep, objp);
        local_irq_restore(flags);
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3725,6 +3780,8 @@ void kfree(const void *objp)
        debug_check_no_obj_freed(objp, obj_size(c));
        __cache_free(c, (void *)objp);
        local_irq_restore(flags);
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
index bf7e8fc3aed806542e44cc7b1d222d9e2b56dc3a..0f1a49f40690e583d98a23bcabdfbb794e4aa3a3 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <linux/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
        unsigned int *m;
        int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+       void *ret;
 
        if (size < PAGE_SIZE - align) {
                if (!size)
                        return ZERO_SIZE_PTR;
 
                m = slob_alloc(size + align, gfp, align, node);
+
                if (!m)
                        return NULL;
                *m = size;
-               return (void *)m + align;
+               ret = (void *)m + align;
+
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                         _RET_IP_, ret,
+                                         size, size + align, gfp, node);
        } else {
-               void *ret;
+               unsigned int order = get_order(size);
 
-               ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+               ret = slob_new_page(gfp | __GFP_COMP, order, node);
                if (ret) {
                        struct page *page;
                        page = virt_to_page(ret);
                        page->private = size;
                }
-               return ret;
+
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                         _RET_IP_, ret,
+                                         size, PAGE_SIZE << order, gfp, node);
        }
+
+       return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -501,6 +513,8 @@ void kfree(const void *block)
                slob_free(m, *m + align);
        } else
                put_page(&sp->page);
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -569,10 +583,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
        void *b;
 
-       if (c->size < PAGE_SIZE)
+       if (c->size < PAGE_SIZE) {
                b = slob_alloc(c->size, flags, c->align, node);
-       else
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                         _RET_IP_, b, c->size,
+                                         SLOB_UNITS(c->size) * SLOB_UNIT,
+                                         flags, node);
+       } else {
                b = slob_new_page(flags, get_order(c->size), node);
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                         _RET_IP_, b, c->size,
+                                         PAGE_SIZE << get_order(c->size),
+                                         flags, node);
+       }
 
        if (c->ctor)
                c->ctor(b);
@@ -608,6 +631,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
        } else {
                __kmem_cache_free(b, c->size);
        }
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
index 6cb7ad10785227f2b889bcff96d6610ddeabe37a..121acd17de973f14438fb6336df96f6904a01cd6 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1623,18 +1624,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-       return slab_alloc(s, gfpflags, -1, _RET_IP_);
+       void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                            s->objsize, s->size, gfpflags);
+
+       return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+       return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-       return slab_alloc(s, gfpflags, node, _RET_IP_);
+       void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                                 s->objsize, s->size, gfpflags, node);
+
+       return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                                   gfp_t gfpflags,
+                                   int node)
+{
+       return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
 * Slow path handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1742,6 +1771,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
        page = virt_to_head_page(x);
 
        slab_free(s, page, x, _RET_IP_);
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2657,6 +2688,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
        struct kmem_cache *s;
+       void *ret;
 
        if (unlikely(size > PAGE_SIZE))
                return kmalloc_large(size, flags);
@@ -2666,7 +2698,12 @@ void *__kmalloc(size_t size, gfp_t flags)
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       return slab_alloc(s, flags, -1, _RET_IP_);
+       ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                            size, s->size, flags);
+
+       return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2685,16 +2722,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
        struct kmem_cache *s;
+       void *ret;
 
-       if (unlikely(size > PAGE_SIZE))
-               return kmalloc_large_node(size, flags, node);
+       if (unlikely(size > PAGE_SIZE)) {
+               ret = kmalloc_large_node(size, flags, node);
+
+               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                         _RET_IP_, ret,
+                                         size, PAGE_SIZE << get_order(size),
+                                         flags, node);
+
+               return ret;
+       }
 
        s = get_slab(size, flags);
 
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       return slab_alloc(s, flags, node, _RET_IP_);
+       ret = slab_alloc(s, flags, node, _RET_IP_);
+
+       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                                 size, s->size, flags, node);
+
+       return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2752,6 +2803,8 @@ void kfree(const void *x)
                return;
        }
        slab_free(page->slab, page, object, _RET_IP_);
+
+       kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3221,6 +3274,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
        struct kmem_cache *s;
+       void *ret;
 
        if (unlikely(size > PAGE_SIZE))
                return kmalloc_large(size, gfpflags);
@@ -3230,13 +3284,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       return slab_alloc(s, gfpflags, -1, caller);
+       ret = slab_alloc(s, gfpflags, -1, caller);
+
+       /* Honor the call site pointer we received. */
+       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+                            s->size, gfpflags);
+
+       return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
                                        int node, unsigned long caller)
 {
        struct kmem_cache *s;
+       void *ret;
 
        if (unlikely(size > PAGE_SIZE))
                return kmalloc_large_node(size, gfpflags, node);
@@ -3246,7 +3307,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       return slab_alloc(s, gfpflags, node, caller);
+       ret = slab_alloc(s, gfpflags, node, caller);
+
+       /* Honor the call site pointer we received. */
+       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+                                 size, s->size, gfpflags, node);
+
+       return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG