kmemtrace: Core implementation.
author		Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Sun, 10 Aug 2008 17:14:03 +0000 (20:14 +0300)
committer	Pekka Enberg <penberg@cs.helsinki.fi>
Mon, 29 Dec 2008 13:34:01 +0000 (15:34 +0200)
kmemtrace provides tracing for slab allocator functions, such as kmalloc,
kfree, kmem_cache_alloc, kmem_cache_free, etc. The collected data is then
fed to a userspace application in order to analyse allocation hotspots,
internal fragmentation and so on, making it possible to see how well an
allocator performs, as well as to debug and profile kernel code.

Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Documentation/kernel-parameters.txt
MAINTAINERS
include/linux/kmemtrace.h [new file with mode: 0644]
init/main.c
lib/Kconfig.debug
mm/Makefile
mm/kmemtrace.c [new file with mode: 0644]

index e0f346d201edb70fae654c55f6be842c4465a5ff..542c2d8843db793cc7f6b999e91e387091925dab 100644 (file)
@@ -49,6 +49,7 @@ parameter is applicable:
        ISAPNP  ISA PnP code is enabled.
        ISDN    Appropriate ISDN support is enabled.
        JOY     Appropriate joystick support is enabled.
+       KMEMTRACE kmemtrace is enabled.
        LIBATA  Libata driver is enabled
        LP      Printer support is enabled.
        LOOP    Loopback device support is enabled.
@@ -1018,6 +1019,15 @@ and is between 256 and 4096 characters. It is defined in the file
                        use the HighMem zone if it exists, and the Normal
                        zone if it does not.
 
+       kmemtrace.enable=       [KNL,KMEMTRACE] Format: { yes | no }
+                               Controls whether kmemtrace is enabled
+                               at boot-time.
+
+       kmemtrace.subbufs=n     [KNL,KMEMTRACE] Overrides the number of
+                               subbufs kmemtrace's relay channel has. Set this
+                               higher than the default (KMEMTRACE_DEF_N_SUBBUFS
+                               in the code) if you experience buffer overruns.
+
        movablecore=nn[KMG]     [KNL,X86-32,IA-64,PPC,X86-64] This parameter
                        is similar to kernelcore except it specifies the
                        amount of memory used for migratable allocations.
index 618c1ef4a397502749b2d08a49e80806e99825d1..e2b3c85550510906a9900c5ad682317df8a85f8d 100644 (file)
@@ -2565,6 +2565,12 @@ M:       jason.wessel@windriver.com
 L:     kgdb-bugreport@lists.sourceforge.net
 S:     Maintained
 
+KMEMTRACE
+P:     Eduard - Gabriel Munteanu
+M:     eduard.munteanu@linux360.ro
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+
 KPROBES
 P:     Ananth N Mavinakayanahalli
 M:     ananth@in.ibm.com
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h
new file mode 100644 (file)
index 0000000..2c33201
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ *
+ * This file is released under GPL version 2.
+ */
+
+#ifndef _LINUX_KMEMTRACE_H
+#define _LINUX_KMEMTRACE_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/marker.h>
+
+enum kmemtrace_type_id {
+       KMEMTRACE_TYPE_KMALLOC = 0,     /* kmalloc() or kfree(). */
+       KMEMTRACE_TYPE_CACHE,           /* kmem_cache_*(). */
+       KMEMTRACE_TYPE_PAGES,           /* __get_free_pages() and friends. */
+};
+
+#ifdef CONFIG_KMEMTRACE
+
+extern void kmemtrace_init(void);
+
+static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+                                            unsigned long call_site,
+                                            const void *ptr,
+                                            size_t bytes_req,
+                                            size_t bytes_alloc,
+                                            gfp_t gfp_flags,
+                                            int node)
+{
+       trace_mark(kmemtrace_alloc, "type_id %d call_site %lu ptr %lu "
+                  "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d",
+                  type_id, call_site, (unsigned long) ptr,
+                  bytes_req, bytes_alloc, (unsigned long) gfp_flags, node);
+}
+
+static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+                                      unsigned long call_site,
+                                      const void *ptr)
+{
+       trace_mark(kmemtrace_free, "type_id %d call_site %lu ptr %lu",
+                  type_id, call_site, (unsigned long) ptr);
+}
+
+#else /* CONFIG_KMEMTRACE */
+
+static inline void kmemtrace_init(void)
+{
+}
+
+static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+                                            unsigned long call_site,
+                                            const void *ptr,
+                                            size_t bytes_req,
+                                            size_t bytes_alloc,
+                                            gfp_t gfp_flags,
+                                            int node)
+{
+}
+
+static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+                                      unsigned long call_site,
+                                      const void *ptr)
+{
+}
+
+#endif /* CONFIG_KMEMTRACE */
+
+static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
+                                       unsigned long call_site,
+                                       const void *ptr,
+                                       size_t bytes_req,
+                                       size_t bytes_alloc,
+                                       gfp_t gfp_flags)
+{
+       kmemtrace_mark_alloc_node(type_id, call_site, ptr,
+                                 bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_KMEMTRACE_H */
+
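
The hooks above are only the reporting side; this patch does not add any
allocator call sites itself. As a purely illustrative sketch (example_kmalloc,
example_kfree and the use of kmalloc()/ksize() as the backing allocation are
assumptions, not part of this patch), an allocator path would report its
events roughly like this:

	#include <linux/kmemtrace.h>
	#include <linux/slab.h>

	static void *example_kmalloc(size_t size, gfp_t flags)
	{
		void *ret = kmalloc(size, flags);	/* stand-in for the real allocation */

		if (ret)
			kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
					     (unsigned long) __builtin_return_address(0),
					     ret,
					     size,		/* bytes the caller requested */
					     ksize(ret),	/* bytes actually allocated */
					     flags);
		return ret;
	}

	static void example_kfree(void *ptr)
	{
		kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC,
				    (unsigned long) __builtin_return_address(0),
				    ptr);
		kfree(ptr);
	}

A NUMA-aware path would call kmemtrace_mark_alloc_node() directly and pass
the node it allocated from; kmemtrace_mark_alloc() is just the node == -1
wrapper.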
index 7e117a231af10313f1b9bd963bf404eecaf94c9e..be1fe2242a55e9a8703e3d252e9212aa9b6b9b8b 100644 (file)
@@ -69,6 +69,7 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
+#include <linux/kmemtrace.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/smp.h>
@@ -653,6 +654,7 @@ asmlinkage void __init start_kernel(void)
        enable_debug_pagealloc();
        cpu_hotplug_init();
        kmem_cache_init();
+       kmemtrace_init();
        debug_objects_mem_init();
        idr_init_cache();
        setup_per_cpu_pageset();
index b0f239e443bc0fbb11a27ee98dbaf4e641d21971..78d669b461d208921de4f028c12a6aa995acdc73 100644 (file)
@@ -803,6 +803,34 @@ config FIREWIRE_OHCI_REMOTE_DMA
 
          If unsure, say N.
 
+config KMEMTRACE
+       bool "Kernel memory tracer (kmemtrace)"
+       depends on RELAY && DEBUG_FS && MARKERS
+       help
+         kmemtrace provides tracing for slab allocator functions, such as
+         kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. The
+         collected data is then fed to a userspace application in order to
+         analyse allocation hotspots, internal fragmentation and so on,
+         making it possible to see how well an allocator performs, as well
+         as to debug and profile kernel code.
+
+         A userspace application is required to make use of it. See
+         Documentation/vm/kmemtrace.txt for more information.
+
+         Saying Y will make the kernel somewhat larger and slower. However,
+         if kmemtrace is disabled at run-time or boot-time, the performance
+         impact is minimal (depending on the arch the kernel is built for).
+
+         If unsure, say N.
+
+config KMEMTRACE_DEFAULT_ENABLED
+       bool "Enabled by default at boot"
+       depends on KMEMTRACE
+       help
+         Say Y here to enable kmemtrace at boot-time by default. Whichever
+         you choose, the behavior can be overridden by the kmemtrace.enable=
+         kernel parameter, as described in Documentation/kernel-parameters.txt.
+
 menuconfig BUILD_DOCSRC
        bool "Build targets in Documentation/ tree"
        depends on HEADERS_CHECK
index c06b45a1ff5f64cf2e007258bfdaf9af5c280d98..3782eb66d4b33a692c9b5396a080fb47c582ea1e 100644 (file)
@@ -34,3 +34,4 @@ obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
+obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
diff --git a/mm/kmemtrace.c b/mm/kmemtrace.c
new file mode 100644 (file)
index 0000000..83ad1cc
--- /dev/null
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2008 Pekka Enberg, Eduard - Gabriel Munteanu
+ *
+ * This file is released under GPL version 2.
+ */
+
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include <linux/module.h>
+#include <linux/marker.h>
+#include <linux/gfp.h>
+#include <linux/kmemtrace.h>
+
+#define KMEMTRACE_SUBBUF_SIZE          524288
+#define KMEMTRACE_DEF_N_SUBBUFS                20
+
+static struct rchan *kmemtrace_chan;
+static u32 kmemtrace_buf_overruns;
+
+static unsigned int kmemtrace_n_subbufs;
+#ifdef CONFIG_KMEMTRACE_DEFAULT_ENABLED
+static unsigned int kmemtrace_enabled = 1;
+#else
+static unsigned int kmemtrace_enabled = 0;
+#endif
+
+/*
+ * The sequence number is used for reordering kmemtrace packets
+ * in userspace, since they are logged as per-CPU data.
+ *
+ * atomic_t should always be a 32-bit signed integer. Wraparound is not
+ * likely to occur, but userspace can deal with it by expecting the next
+ * packet's sequence number to follow the one it has just read.
+ */
+static atomic_t kmemtrace_seq_num;
+
+#define KMEMTRACE_ABI_VERSION          1
+
+static u32 kmemtrace_abi_version __read_mostly = KMEMTRACE_ABI_VERSION;
+
+enum kmemtrace_event_id {
+       KMEMTRACE_EVENT_ALLOC = 0,
+       KMEMTRACE_EVENT_FREE,
+};
+
+struct kmemtrace_event {
+       u8              event_id;
+       u8              type_id;
+       u16             event_size;
+       s32             seq_num;
+       u64             call_site;
+       u64             ptr;
+} __attribute__ ((__packed__));
+
+struct kmemtrace_stats_alloc {
+       u64             bytes_req;
+       u64             bytes_alloc;
+       u32             gfp_flags;
+       s32             numa_node;
+} __attribute__ ((__packed__));
+
+static void kmemtrace_probe_alloc(void *probe_data, void *call_data,
+                                 const char *format, va_list *args)
+{
+       unsigned long flags;
+       struct kmemtrace_event *ev;
+       struct kmemtrace_stats_alloc *stats;
+       void *buf;
+
+       local_irq_save(flags);
+
+       buf = relay_reserve(kmemtrace_chan,
+                           sizeof(struct kmemtrace_event) +
+                           sizeof(struct kmemtrace_stats_alloc));
+       if (!buf)
+               goto failed;
+
+       /*
+        * Don't convert this to use structure initializers; C99 does not
+        * guarantee the evaluation order of the initializer expressions.
+        */
+
+       ev = buf;
+       ev->event_id = KMEMTRACE_EVENT_ALLOC;
+       ev->type_id = va_arg(*args, int);
+       ev->event_size = sizeof(struct kmemtrace_event) +
+                        sizeof(struct kmemtrace_stats_alloc);
+       ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num);
+       ev->call_site = va_arg(*args, unsigned long);
+       ev->ptr = va_arg(*args, unsigned long);
+
+       stats = buf + sizeof(struct kmemtrace_event);
+       stats->bytes_req = va_arg(*args, unsigned long);
+       stats->bytes_alloc = va_arg(*args, unsigned long);
+       stats->gfp_flags = va_arg(*args, unsigned long);
+       stats->numa_node = va_arg(*args, int);
+
+failed:
+       local_irq_restore(flags);
+}
+
+static void kmemtrace_probe_free(void *probe_data, void *call_data,
+                                const char *format, va_list *args)
+{
+       unsigned long flags;
+       struct kmemtrace_event *ev;
+
+       local_irq_save(flags);
+
+       ev = relay_reserve(kmemtrace_chan, sizeof(struct kmemtrace_event));
+       if (!ev)
+               goto failed;
+
+       /*
+        * Don't convert this to use structure initializers; C99 does not
+        * guarantee the evaluation order of the initializer expressions.
+        */
+       ev->event_id = KMEMTRACE_EVENT_FREE;
+       ev->type_id = va_arg(*args, int);
+       ev->event_size = sizeof(struct kmemtrace_event);
+       ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num);
+       ev->call_site = va_arg(*args, unsigned long);
+       ev->ptr = va_arg(*args, unsigned long);
+
+failed:
+       local_irq_restore(flags);
+}
+
+static struct dentry *
+kmemtrace_create_buf_file(const char *filename, struct dentry *parent,
+                         int mode, struct rchan_buf *buf, int *is_global)
+{
+       return debugfs_create_file(filename, mode, parent, buf,
+                                  &relay_file_operations);
+}
+
+static int kmemtrace_remove_buf_file(struct dentry *dentry)
+{
+       debugfs_remove(dentry);
+
+       return 0;
+}
+
+static int kmemtrace_subbuf_start(struct rchan_buf *buf,
+                                 void *subbuf,
+                                 void *prev_subbuf,
+                                 size_t prev_padding)
+{
+       if (relay_buf_full(buf)) {
+               /*
+                * We know this is not SMP-safe, but neither
+                * is debugfs_create_u32().
+                */
+               kmemtrace_buf_overruns++;
+               return 0;
+       }
+
+       return 1;
+}
+
+static struct rchan_callbacks relay_callbacks = {
+       .create_buf_file = kmemtrace_create_buf_file,
+       .remove_buf_file = kmemtrace_remove_buf_file,
+       .subbuf_start = kmemtrace_subbuf_start,
+};
+
+static struct dentry *kmemtrace_dir;
+static struct dentry *kmemtrace_overruns_dentry;
+static struct dentry *kmemtrace_abi_version_dentry;
+
+static struct dentry *kmemtrace_enabled_dentry;
+
+static int kmemtrace_start_probes(void)
+{
+       int err;
+
+       err = marker_probe_register("kmemtrace_alloc", "type_id %d "
+                                   "call_site %lu ptr %lu "
+                                   "bytes_req %lu bytes_alloc %lu "
+                                   "gfp_flags %lu node %d",
+                                   kmemtrace_probe_alloc, NULL);
+       if (err)
+               return err;
+       err = marker_probe_register("kmemtrace_free", "type_id %d "
+                                   "call_site %lu ptr %lu",
+                                   kmemtrace_probe_free, NULL);
+
+       return err;
+}
+
+static void kmemtrace_stop_probes(void)
+{
+       marker_probe_unregister("kmemtrace_alloc",
+                               kmemtrace_probe_alloc, NULL);
+       marker_probe_unregister("kmemtrace_free",
+                               kmemtrace_probe_free, NULL);
+}
+
+static int kmemtrace_enabled_get(void *data, u64 *val)
+{
+       *val = *((int *) data);
+
+       return 0;
+}
+
+static int kmemtrace_enabled_set(void *data, u64 val)
+{
+       u64 old_val = kmemtrace_enabled;
+
+       *((int *) data) = !!val;
+
+       if (old_val == !!val)
+               return 0;
+       if (val)
+               kmemtrace_start_probes();
+       else
+               kmemtrace_stop_probes();
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kmemtrace_enabled_fops,
+                       kmemtrace_enabled_get,
+                       kmemtrace_enabled_set, "%llu\n");
+
+static void kmemtrace_cleanup(void)
+{
+       if (kmemtrace_enabled_dentry)
+               debugfs_remove(kmemtrace_enabled_dentry);
+
+       kmemtrace_stop_probes();
+
+       if (kmemtrace_abi_version_dentry)
+               debugfs_remove(kmemtrace_abi_version_dentry);
+       if (kmemtrace_overruns_dentry)
+               debugfs_remove(kmemtrace_overruns_dentry);
+
+       relay_close(kmemtrace_chan);
+       kmemtrace_chan = NULL;
+
+       if (kmemtrace_dir)
+               debugfs_remove(kmemtrace_dir);
+}
+
+static int __init kmemtrace_setup_late(void)
+{
+       if (!kmemtrace_chan)
+               goto failed;
+
+       kmemtrace_dir = debugfs_create_dir("kmemtrace", NULL);
+       if (!kmemtrace_dir)
+               goto cleanup;
+
+       kmemtrace_abi_version_dentry =
+               debugfs_create_u32("abi_version", S_IRUSR,
+                                  kmemtrace_dir, &kmemtrace_abi_version);
+       kmemtrace_overruns_dentry =
+               debugfs_create_u32("total_overruns", S_IRUSR,
+                                  kmemtrace_dir, &kmemtrace_buf_overruns);
+       if (!kmemtrace_overruns_dentry || !kmemtrace_abi_version_dentry)
+               goto cleanup;
+
+       kmemtrace_enabled_dentry =
+               debugfs_create_file("enabled", S_IRUSR | S_IWUSR,
+                                   kmemtrace_dir, &kmemtrace_enabled,
+                                   &kmemtrace_enabled_fops);
+       if (!kmemtrace_enabled_dentry)
+               goto cleanup;
+
+       if (relay_late_setup_files(kmemtrace_chan, "cpu", kmemtrace_dir))
+               goto cleanup;
+
+       printk(KERN_INFO "kmemtrace: fully up.\n");
+
+       return 0;
+
+cleanup:
+       kmemtrace_cleanup();
+failed:
+       return 1;
+}
+late_initcall(kmemtrace_setup_late);
+
+static int __init kmemtrace_set_boot_enabled(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "yes"))
+               kmemtrace_enabled = 1;
+       else if (!strcmp(str, "no"))
+               kmemtrace_enabled = 0;
+       else
+               return -EINVAL;
+
+       return 0;
+}
+early_param("kmemtrace.enable", kmemtrace_set_boot_enabled);
+
+static int __init kmemtrace_set_subbufs(char *str)
+{
+       get_option(&str, &kmemtrace_n_subbufs);
+       return 0;
+}
+early_param("kmemtrace.subbufs", kmemtrace_set_subbufs);
+
+void kmemtrace_init(void)
+{
+       if (!kmemtrace_enabled)
+               return;
+
+       if (!kmemtrace_n_subbufs)
+               kmemtrace_n_subbufs = KMEMTRACE_DEF_N_SUBBUFS;
+
+       kmemtrace_chan = relay_open(NULL, NULL, KMEMTRACE_SUBBUF_SIZE,
+                                   kmemtrace_n_subbufs, &relay_callbacks,
+                                   NULL);
+       if (unlikely(!kmemtrace_chan)) {
+               printk(KERN_ERR "kmemtrace: could not open relay channel.\n");
+               return;
+       }
+
+       if (unlikely(kmemtrace_start_probes()))
+               goto probe_fail;
+
+       printk(KERN_INFO "kmemtrace: early init successful.\n");
+
+       return;
+
+probe_fail:
+       printk(KERN_ERR "kmemtrace: could not register marker probes!\n");
+       kmemtrace_cleanup();
+}
+
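
On the consumer side, relay_late_setup_files() above exposes one "cpuN" relay
file per CPU under the "kmemtrace" debugfs directory, containing the packed
binary records defined at the top of this file. A minimal userspace reader for
a single CPU stream, assuming debugfs is mounted at /sys/kernel/debug (the
path and the reader itself are illustrative, not part of this patch), could
look roughly like this:

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace mirrors of the packed structures in mm/kmemtrace.c. */
	struct kmemtrace_event {
		uint8_t  event_id;	/* KMEMTRACE_EVENT_ALLOC or _FREE */
		uint8_t  type_id;	/* KMEMTRACE_TYPE_KMALLOC, _CACHE, _PAGES */
		uint16_t event_size;	/* size of this record, stats included */
		int32_t  seq_num;	/* for merging the per-CPU streams */
		uint64_t call_site;
		uint64_t ptr;
	} __attribute__ ((__packed__));

	struct kmemtrace_stats_alloc {
		uint64_t bytes_req;
		uint64_t bytes_alloc;
		uint32_t gfp_flags;
		int32_t  numa_node;
	} __attribute__ ((__packed__));

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/kmemtrace/cpu0", "rb");
		struct kmemtrace_event ev;
		struct kmemtrace_stats_alloc stats;

		if (!f)
			return 1;

		while (fread(&ev, sizeof(ev), 1, f) == 1) {
			printf("seq %d %s ptr 0x%llx call_site 0x%llx\n",
			       ev.seq_num,
			       ev.event_id ? "free " : "alloc",
			       (unsigned long long) ev.ptr,
			       (unsigned long long) ev.call_site);

			/* Alloc events carry a trailing stats block. */
			if (ev.event_size > sizeof(ev)) {
				if (fread(&stats, sizeof(stats), 1, f) != 1)
					break;
				printf("  req %llu alloc %llu node %d\n",
				       (unsigned long long) stats.bytes_req,
				       (unsigned long long) stats.bytes_alloc,
				       stats.numa_node);
			}
		}

		fclose(f);
		return 0;
	}

A real consumer would read all the "cpuN" files and merge the streams by
seq_num; the total_overruns file next to them reports how many sub-buffers
were dropped, and abi_version identifies the record format.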