Merge commit 'v2.6.35-rc3' into perf/core
author		Ingo Molnar <mingo@elte.hu>
		Fri, 18 Jun 2010 08:53:12 +0000 (10:53 +0200)
committer	Ingo Molnar <mingo@elte.hu>
		Fri, 18 Jun 2010 08:53:19 +0000 (10:53 +0200)
Merge reason: Go from -rc1 base to -rc3 base, merge in fixes.

113 files changed:
Documentation/ABI/testing/debugfs-kmemtrace [deleted file]
Documentation/trace/kmemtrace.txt [deleted file]
MAINTAINERS
Makefile
arch/alpha/include/asm/local64.h [new file with mode: 0644]
arch/arm/include/asm/local64.h [new file with mode: 0644]
arch/arm/kernel/perf_event.c
arch/avr32/include/asm/local64.h [new file with mode: 0644]
arch/blackfin/include/asm/local64.h [new file with mode: 0644]
arch/cris/include/asm/local64.h [new file with mode: 0644]
arch/frv/include/asm/local64.h [new file with mode: 0644]
arch/frv/kernel/local64.h [new file with mode: 0644]
arch/h8300/include/asm/local64.h [new file with mode: 0644]
arch/ia64/include/asm/local64.h [new file with mode: 0644]
arch/m32r/include/asm/local64.h [new file with mode: 0644]
arch/m68k/include/asm/local64.h [new file with mode: 0644]
arch/microblaze/include/asm/local64.h [new file with mode: 0644]
arch/mips/include/asm/local64.h [new file with mode: 0644]
arch/mn10300/include/asm/local64.h [new file with mode: 0644]
arch/parisc/include/asm/local64.h [new file with mode: 0644]
arch/powerpc/include/asm/local64.h [new file with mode: 0644]
arch/powerpc/include/asm/perf_event.h
arch/powerpc/kernel/misc.S
arch/powerpc/kernel/perf_event.c
arch/s390/include/asm/local64.h [new file with mode: 0644]
arch/score/include/asm/local64.h [new file with mode: 0644]
arch/sh/include/asm/local64.h [new file with mode: 0644]
arch/sh/kernel/perf_event.c
arch/sparc/include/asm/local64.h [new file with mode: 0644]
arch/sparc/include/asm/perf_event.h
arch/sparc/kernel/helpers.S
arch/sparc/kernel/perf_event.c
arch/x86/include/asm/local64.h [new file with mode: 0644]
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/stacktrace.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack.h [deleted file]
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/stacktrace.c
arch/xtensa/include/asm/local64.h [new file with mode: 0644]
fs/exec.c
include/asm-generic/local64.h [new file with mode: 0644]
include/linux/ftrace_event.h
include/linux/kmemtrace.h [deleted file]
include/linux/perf_event.h
include/linux/slab_def.h
include/linux/slub_def.h
include/trace/boot.h [deleted file]
include/trace/ftrace.h
init/main.c
kernel/perf_event.c
kernel/sched.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/ftrace.c
kernel/trace/kmemtrace.c [deleted file]
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_boot.c [deleted file]
kernel/trace/trace_clock.c
kernel/trace/trace_entries.h
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_functions.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_stack.c
kernel/trace/trace_sysprof.c
mm/mmap.c
mm/slab.c
mm/slob.c
mm/slub.c
scripts/package/Makefile
tools/perf/.gitignore
tools/perf/Documentation/perf-probe.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/perf-top.txt
tools/perf/MANIFEST [new file with mode: 0644]
tools/perf/Makefile
tools/perf/builtin-annotate.c
tools/perf/builtin-buildid-cache.c
tools/perf/builtin-diff.c
tools/perf/builtin-probe.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/feature-tests.mak [new file with mode: 0644]
tools/perf/perf-archive.sh
tools/perf/perf.c
tools/perf/util/build-id.c
tools/perf/util/cache.h
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/config.c
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/debug.c
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/probe-finder.c
tools/perf/util/session.c
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/util.h

diff --git a/Documentation/ABI/testing/debugfs-kmemtrace b/Documentation/ABI/testing/debugfs-kmemtrace
deleted file mode 100644 (file)
index 5e6a92a..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-What:          /sys/kernel/debug/kmemtrace/
-Date:          July 2008
-Contact:       Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
-Description:
-
-In kmemtrace-enabled kernels, the following files are created:
-
-/sys/kernel/debug/kmemtrace/
-       cpu<n>          (0400)  Per-CPU tracing data, see below. (binary)
-       total_overruns  (0400)  Total number of bytes which were dropped from
-                               cpu<n> files because of full buffer condition,
-                               non-binary. (text)
-       abi_version     (0400)  Kernel's kmemtrace ABI version. (text)
-
-Each per-CPU file should be read according to the relay interface. That is,
-the reader should set affinity to that specific CPU and, as currently done by
-the userspace application (though there are other methods), use poll() with
-an infinite timeout before every read(). Otherwise, erroneous data may be
-read. The binary data has the following _core_ format:
-
-       Event ID        (1 byte)        Unsigned integer, one of:
-               0 - represents an allocation (KMEMTRACE_EVENT_ALLOC)
-               1 - represents a freeing of previously allocated memory
-                   (KMEMTRACE_EVENT_FREE)
-       Type ID         (1 byte)        Unsigned integer, one of:
-               0 - this is a kmalloc() / kfree()
-               1 - this is a kmem_cache_alloc() / kmem_cache_free()
-               2 - this is a __get_free_pages() et al.
-       Event size      (2 bytes)       Unsigned integer representing the
-                                       size of this event. Used to extend
-                                       kmemtrace. Discard the bytes you
-                                       don't know about.
-       Sequence number (4 bytes)       Signed integer used to reorder data
-                                       logged on SMP machines. Wraparound
-                                       must be taken into account, although
-                                       it is unlikely.
-       Caller address  (8 bytes)       Return address to the caller.
-       Pointer to mem  (8 bytes)       Pointer to target memory area. Can be
-                                       NULL, but not all such calls might be
-                                       recorded.
-
-In case of KMEMTRACE_EVENT_ALLOC events, the next fields follow:
-
-       Requested bytes (8 bytes)       Total number of requested bytes,
-                                       unsigned, must not be zero.
-       Allocated bytes (8 bytes)       Total number of actually allocated
-                                       bytes, unsigned, must not be lower
-                                       than requested bytes.
-       Requested flags (4 bytes)       GFP flags supplied by the caller.
-       Target CPU      (4 bytes)       Signed integer, valid for event id 1.
-                                       If equal to -1, target CPU is the same
-                                       as origin CPU, but the reverse might
-                                       not be true.
-
-The data is made available in the same endianness the machine has.
-
-Other event ids and type ids may be defined and added. Other fields may be
-added by increasing event size, but see below for details.
-Every modification to the ABI, including new id definitions, is followed
-by bumping the ABI version by one.
-
-Adding new data to the packet (features) is done at the end of the mandatory
-data:
-       Feature size    (2 byte)
-       Feature ID      (1 byte)
-       Feature data    (Feature size - 3 bytes)
-
-
-Users:
-       kmemtrace-user - git://repo.or.cz/kmemtrace-user.git
-
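
For reference, the core record layout described above maps naturally onto
packed C structures. A minimal sketch, with illustrative names that are not
part of the ABI (the kernel exports the data in host byte order):

#include <stdint.h>

/* Core header common to every kmemtrace event. */
struct kmemtrace_core {
	uint8_t  event_id;	/* 0 = ALLOC, 1 = FREE */
	uint8_t  type_id;	/* 0 = kmalloc/kfree, 1 = kmem_cache_*, 2 = pages */
	uint16_t event_size;	/* total event size; skip unknown trailing bytes */
	int32_t  seq;		/* reorder key across CPUs; may wrap */
	uint64_t caller;	/* return address of the caller */
	uint64_t ptr;		/* target memory area; may be 0 */
} __attribute__((packed));

/* Extra payload present only for ALLOC (event_id == 0) events. */
struct kmemtrace_alloc {
	uint64_t bytes_req;	/* requested bytes; must not be zero */
	uint64_t bytes_alloc;	/* allocated bytes; >= bytes_req */
	uint32_t gfp_flags;	/* GFP flags supplied by the caller */
	int32_t  cpu;		/* signed; -1 = same as origin CPU */
} __attribute__((packed));
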
diff --git a/Documentation/trace/kmemtrace.txt b/Documentation/trace/kmemtrace.txt
deleted file mode 100644 (file)
index 6308735..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-                       kmemtrace - Kernel Memory Tracer
-
-                         by Eduard - Gabriel Munteanu
-                            <eduard.munteanu@linux360.ro>
-
-I. Introduction
-===============
-
-kmemtrace helps kernel developers figure out two things:
-1) how different allocators (SLAB, SLUB etc.) perform
-2) how kernel code allocates memory and how much
-
-To do this, we trace every allocation and export information to the userspace
-through the relay interface. We export things such as the number of requested
-bytes, the number of bytes actually allocated (i.e. including internal
-fragmentation), whether this is a slab allocation or a plain kmalloc() and so
-on.
-
-The actual analysis is performed by a userspace tool (see section III for
-details on where to get it from). It logs the data exported by the kernel,
-processes it and (as of writing this) can provide the following information:
-- the total amount of memory allocated and fragmentation per call-site
-- the amount of memory allocated and fragmentation per allocation
-- total memory allocated and fragmentation in the collected dataset
-- number of cross-CPU allocation and frees (makes sense in NUMA environments)
-
-Moreover, it can potentially find inconsistent and erroneous behavior in
-kernel code, such as using slab free functions on kmalloc'ed memory or
-allocating less memory than requested (but not truly failed allocations).
-
-kmemtrace also makes provisions for tracing on one arch and analysing the
-data on another.
-
-II. Design and goals
-====================
-
-kmemtrace was designed to handle rather large amounts of data. Thus, it uses
-the relay interface to export whatever is logged to userspace, which then
-stores it. Analysis and reporting is done asynchronously, that is, after the
-data is collected and stored. By design, it allows one to log and analyse
-on different machines and different arches.
-
-As of writing this, the ABI is not considered stable, though it might not
-change much. However, no guarantees are made about compatibility yet. When
-deemed stable, the ABI should still allow easy extension while maintaining
-backward compatibility. This is described further in Documentation/ABI.
-
-Summary of design goals:
-       - allow logging and analysis to be done across different machines
-       - be fast and anticipate usage in high-load environments (*)
-       - be reasonably extensible
-       - make it possible for GNU/Linux distributions to have kmemtrace
-       included in their repositories
-
-(*) - one of the reasons Pekka Enberg's original userspace data analysis
-    tool's code was rewritten from Perl to C (although this is more than a
-    simple conversion)
-
-
-III. Quick usage guide
-======================
-
-1) Get a kernel that supports kmemtrace and build it accordingly (i.e. enable
-CONFIG_KMEMTRACE).
-
-2) Get the userspace tool and build it:
-$ git clone git://repo.or.cz/kmemtrace-user.git                # current repository
-$ cd kmemtrace-user/
-$ ./autogen.sh
-$ ./configure
-$ make
-
-3) Boot the kmemtrace-enabled kernel if you haven't, preferably in the
-'single' runlevel (so that relay buffers don't fill up easily), and run
-kmemtrace:
-# '$' does not mean user, but root here.
-$ mount -t debugfs none /sys/kernel/debug
-$ mount -t proc none /proc
-$ cd path/to/kmemtrace-user/
-$ ./kmemtraced
-Wait a bit, then stop it with CTRL+C.
-$ cat /sys/kernel/debug/kmemtrace/total_overruns       # Check if we didn't
-                                                       # overrun, should
-                                                       # be zero.
-(Optionally, run kmemtrace_check separately on each cpu[0-9]*.out file to
-               check its correctness.)
-$ ./kmemtrace-report
-
-Now you should have a nice and short summary of how the allocator performs.
-
-IV. FAQ and known issues
-========================
-
-Q: 'cat /sys/kernel/debug/kmemtrace/total_overruns' is non-zero, how do I fix
-this? Should I worry?
-A: If it's non-zero, this affects kmemtrace's accuracy, depending on how
-large the number is. You can fix it by supplying a higher
-'kmemtrace.subbufs=N' kernel parameter.
----
-
-Q: kmemtrace_check reports errors, how do I fix this? Should I worry?
-A: This is a bug and should be reported. It can occur for a variety of
-reasons:
-       - possible bugs in relay code
-       - possible misuse of relay by kmemtrace
-       - timestamps being collected out of order
-Or you may fix it yourself and send us a patch.
----
-
-Q: kmemtrace_report shows many errors, how do I fix this? Should I worry?
-A: This is a known issue and I'm working on it. These might be true errors
-in kernel code, which may have inconsistent behavior (e.g. allocating memory
-with kmem_cache_alloc() and freeing it with kfree()). Pekka Enberg pointed
-out this behavior may work with SLAB, but may fail with other allocators.
-
-It may also be due to lack of tracing in some unusual allocator functions.
-
-We don't want bug reports regarding this issue yet.
----
-
-V. See also
-===========
-
-Documentation/kernel-parameters.txt
-Documentation/ABI/testing/debugfs-kmemtrace
-
index 6d119c98b89bfe8bc55da0594243153280f1c005..b7ae5fb9f8bcb29126c66741b4d55b9294f7f32c 100644 (file)
@@ -3368,13 +3368,6 @@ F:       include/linux/kmemleak.h
 F:     mm/kmemleak.c
 F:     mm/kmemleak-test.c
 
-KMEMTRACE
-M:     Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
-S:     Maintained
-F:     Documentation/trace/kmemtrace.txt
-F:     include/linux/kmemtrace.h
-F:     kernel/trace/kmemtrace.c
-
 KPROBES
 M:     Ananth N Mavinakayanahalli <ananth@in.ibm.com>
 M:     Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
index d49d96c35ce5055c2f46ed8a9f3c35feef5c9eb2..865718f4bceade5c87d9cf793f8d402f2bbba519 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -414,7 +414,7 @@ endif
 no-dot-config-targets := clean mrproper distclean \
                         cscope TAGS tags help %docs check% \
                         include/linux/version.h headers_% \
-                        kernelrelease kernelversion
+                        kernelrelease kernelversion %src-pkg
 
 config-targets := 0
 mixed-targets  := 0
@@ -1228,6 +1228,8 @@ distclean: mrproper
 # rpm target kept for backward compatibility
 package-dir    := $(srctree)/scripts/package
 
+%src-pkg: FORCE
+       $(Q)$(MAKE) $(build)=$(package-dir) $@
 %pkg: include/config/kernel.release FORCE
        $(Q)$(MAKE) $(build)=$(package-dir) $@
 rpm: include/config/kernel.release FORCE
diff --git a/arch/alpha/include/asm/local64.h b/arch/alpha/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/arm/include/asm/local64.h b/arch/arm/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
index c45768614c8aae8cb794cee41b5da85cc7136a2e..5b7cfafc0720550e7bab62eb9d1ab2e912029fb4 100644 (file)
@@ -164,20 +164,20 @@ armpmu_event_set_period(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
 {
-       s64 left = atomic64_read(&hwc->period_left);
+       s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;
 
        if (unlikely(left <= -period)) {
                left = period;
-               atomic64_set(&hwc->period_left, left);
+               local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
 
        if (unlikely(left <= 0)) {
                left += period;
-               atomic64_set(&hwc->period_left, left);
+               local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
@@ -185,7 +185,7 @@ armpmu_event_set_period(struct perf_event *event,
        if (left > (s64)armpmu->max_period)
                left = armpmu->max_period;
 
-       atomic64_set(&hwc->prev_count, (u64)-left);
+       local64_set(&hwc->prev_count, (u64)-left);
 
        armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
 
@@ -204,18 +204,18 @@ armpmu_event_update(struct perf_event *event,
        s64 delta;
 
 again:
-       prev_raw_count = atomic64_read(&hwc->prev_count);
+       prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(idx);
 
-       if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+       if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;
 
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;
 
-       atomic64_add(delta, &event->count);
-       atomic64_sub(delta, &hwc->period_left);
+       local64_add(delta, &event->count);
+       local64_sub(delta, &hwc->period_left);
 
        return new_raw_count;
 }
@@ -478,7 +478,7 @@ __hw_perf_event_init(struct perf_event *event)
        if (!hwc->sample_period) {
                hwc->sample_period  = armpmu->max_period;
                hwc->last_period    = hwc->sample_period;
-               atomic64_set(&hwc->period_left, hwc->sample_period);
+               local64_set(&hwc->period_left, hwc->sample_period);
        }
 
        err = 0;
diff --git a/arch/avr32/include/asm/local64.h b/arch/avr32/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/blackfin/include/asm/local64.h b/arch/blackfin/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/cris/include/asm/local64.h b/arch/cris/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/frv/include/asm/local64.h b/arch/frv/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/frv/kernel/local64.h b/arch/frv/kernel/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/h8300/include/asm/local64.h b/arch/h8300/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/ia64/include/asm/local64.h b/arch/ia64/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/m32r/include/asm/local64.h b/arch/m32r/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/m68k/include/asm/local64.h b/arch/m68k/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/microblaze/include/asm/local64.h b/arch/microblaze/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/mips/include/asm/local64.h b/arch/mips/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/mn10300/include/asm/local64.h b/arch/mn10300/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/parisc/include/asm/local64.h b/arch/parisc/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/powerpc/include/asm/local64.h b/arch/powerpc/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
index e6d4ce69b126eef07a463fc144ef2020ed53ad05..5c16b891d501f8f3943a10311d1020da608843d1 100644 (file)
 #ifdef CONFIG_FSL_EMB_PERF_EVENT
 #include <asm/perf_event_fsl_emb.h>
 #endif
+
+#ifdef CONFIG_PERF_EVENTS
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+
+#define perf_arch_fetch_caller_regs(regs, __ip)                        \
+       do {                                                    \
+               (regs)->nip = __ip;                             \
+               (regs)->gpr[1] = *(unsigned long *)__get_SP();  \
+               asm volatile("mfmsr %0" : "=r" ((regs)->msr));  \
+       } while (0)
+#endif
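
For context, the arch macro added above is invoked from the generic
perf_fetch_caller_regs() helper in include/linux/perf_event.h, whose
reworked, skip-less form (part of this same merge; the hunk reworking it is
visible, truncated, near the end of this diff) boils down to roughly:

static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
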
index 22e507c8a5566d1f4dfcfe9db67482e3c0e83e35..2d29752cbe169ea9398fbd1381cd9eff64ec28c8 100644 (file)
@@ -127,29 +127,3 @@ _GLOBAL(__setup_cpu_power7)
 _GLOBAL(__restore_cpu_power7)
        /* place holder */
        blr
-
-/*
- * Get a minimal set of registers for our caller's nth caller.
- * r3 = regs pointer, r5 = n.
- *
- * We only get R1 (stack pointer), NIP (next instruction pointer)
- * and LR (link register).  These are all we can get in the
- * general case without doing complicated stack unwinding, but
- * fortunately they are enough to do a stack backtrace, which
- * is all we need them for.
- */
-_GLOBAL(perf_arch_fetch_caller_regs)
-       mr      r6,r1
-       cmpwi   r5,0
-       mflr    r4
-       ble     2f
-       mtctr   r5
-1:     PPC_LL  r6,0(r6)
-       bdnz    1b
-       PPC_LL  r4,PPC_LR_STKOFF(r6)
-2:     PPC_LL  r7,0(r6)
-       PPC_LL  r7,PPC_LR_STKOFF(r7)
-       PPC_STL r6,GPR1-STACK_FRAME_OVERHEAD(r3)
-       PPC_STL r4,_NIP-STACK_FRAME_OVERHEAD(r3)
-       PPC_STL r7,_LINK-STACK_FRAME_OVERHEAD(r3)
-       blr
index 43b83c35cf54b0dfb07bc5abef8ee558d5f3cd9c..af1d9a7c65d1f769689cf84330dfbd5f19e8e6ee 100644 (file)
@@ -410,15 +410,15 @@ static void power_pmu_read(struct perf_event *event)
         * Therefore we treat them like NMIs.
         */
        do {
-               prev = atomic64_read(&event->hw.prev_count);
+               prev = local64_read(&event->hw.prev_count);
                barrier();
                val = read_pmc(event->hw.idx);
-       } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
+       } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
        /* The counters are only 32 bits wide */
        delta = (val - prev) & 0xfffffffful;
-       atomic64_add(delta, &event->count);
-       atomic64_sub(delta, &event->hw.period_left);
+       local64_add(delta, &event->count);
+       local64_sub(delta, &event->hw.period_left);
 }
 
 /*
@@ -444,10 +444,10 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
                if (!event->hw.idx)
                        continue;
                val = (event->hw.idx == 5) ? pmc5 : pmc6;
-               prev = atomic64_read(&event->hw.prev_count);
+               prev = local64_read(&event->hw.prev_count);
                event->hw.idx = 0;
                delta = (val - prev) & 0xfffffffful;
-               atomic64_add(delta, &event->count);
+               local64_add(delta, &event->count);
        }
 }
 
@@ -462,7 +462,7 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
                event = cpuhw->limited_counter[i];
                event->hw.idx = cpuhw->limited_hwidx[i];
                val = (event->hw.idx == 5) ? pmc5 : pmc6;
-               atomic64_set(&event->hw.prev_count, val);
+               local64_set(&event->hw.prev_count, val);
                perf_event_update_userpage(event);
        }
 }
@@ -666,11 +666,11 @@ void hw_perf_enable(void)
                }
                val = 0;
                if (event->hw.sample_period) {
-                       left = atomic64_read(&event->hw.period_left);
+                       left = local64_read(&event->hw.period_left);
                        if (left < 0x80000000L)
                                val = 0x80000000L - left;
                }
-               atomic64_set(&event->hw.prev_count, val);
+               local64_set(&event->hw.prev_count, val);
                event->hw.idx = idx;
                write_pmc(idx, val);
                perf_event_update_userpage(event);
@@ -754,7 +754,7 @@ static int power_pmu_enable(struct perf_event *event)
         * skip the schedulability test here, it will be performed
         * at commit time (->commit_txn) as a whole
         */
-       if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
+       if (cpuhw->group_flag & PERF_EVENT_TXN)
                goto nocheck;
 
        if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
@@ -842,8 +842,8 @@ static void power_pmu_unthrottle(struct perf_event *event)
        if (left < 0x80000000L)
                val = 0x80000000L - left;
        write_pmc(event->hw.idx, val);
-       atomic64_set(&event->hw.prev_count, val);
-       atomic64_set(&event->hw.period_left, left);
+       local64_set(&event->hw.prev_count, val);
+       local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);
        perf_enable();
        local_irq_restore(flags);
@@ -858,7 +858,7 @@ void power_pmu_start_txn(const struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-       cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+       cpuhw->group_flag |= PERF_EVENT_TXN;
        cpuhw->n_txn_start = cpuhw->n_events;
 }
 
@@ -871,7 +871,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-       cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+       cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -897,6 +897,7 @@ int power_pmu_commit_txn(const struct pmu *pmu)
        for (i = cpuhw->n_txn_start; i < n; ++i)
                cpuhw->event[i]->hw.config = cpuhw->events[i];
 
+       cpuhw->group_flag &= ~PERF_EVENT_TXN;
        return 0;
 }
 
@@ -1108,7 +1109,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        event->hw.config = events[n];
        event->hw.event_base = cflags[n];
        event->hw.last_period = event->hw.sample_period;
-       atomic64_set(&event->hw.period_left, event->hw.last_period);
+       local64_set(&event->hw.period_left, event->hw.last_period);
 
        /*
         * See if we need to reserve the PMU.
@@ -1146,16 +1147,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        int record = 0;
 
        /* we don't have to worry about interrupts here */
-       prev = atomic64_read(&event->hw.prev_count);
+       prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
-       atomic64_add(delta, &event->count);
+       local64_add(delta, &event->count);
 
        /*
         * See if the total period for this event has expired,
         * and update for the next period.
         */
        val = 0;
-       left = atomic64_read(&event->hw.period_left) - delta;
+       left = local64_read(&event->hw.period_left) - delta;
        if (period) {
                if (left <= 0) {
                        left += period;
@@ -1193,8 +1194,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        }
 
        write_pmc(event->hw.idx, val);
-       atomic64_set(&event->hw.prev_count, val);
-       atomic64_set(&event->hw.period_left, left);
+       local64_set(&event->hw.prev_count, val);
+       local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);
 }
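
The hunk above (mirrored in the sparc and x86 PMUs below) makes a successful
->commit_txn() clear PERF_EVENT_TXN itself, so that a ->cancel_txn() issued
afterwards no longer rolls back an already-committed group. A hedged sketch
of the calling convention, loosely modelled on the core's group scheduling;
event_sched_in() is a hypothetical stand-in for the per-event enable path:

#include <linux/perf_event.h>

static int sched_in_group(const struct pmu *pmu, struct perf_event *group)
{
	struct perf_event *event;

	pmu->start_txn(pmu);			/* sets PERF_EVENT_TXN */

	list_for_each_entry(event, &group->sibling_list, group_entry)
		if (event_sched_in(event))	/* per-event test skipped in txn */
			goto fail;

	/*
	 * One schedulability test for the whole group; on success the PMU
	 * clears PERF_EVENT_TXN itself, making a later cancel_txn() a no-op.
	 */
	if (!pmu->commit_txn(pmu))
		return 0;
fail:
	pmu->cancel_txn(pmu);			/* roll back partial enables */
	return -EAGAIN;
}
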
 
diff --git a/arch/s390/include/asm/local64.h b/arch/s390/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/score/include/asm/local64.h b/arch/score/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/sh/include/asm/local64.h b/arch/sh/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
index 81b6de41ae5d1c3bfb87c340acee5291a499a036..7a3dc356725839f2cf8579491efd8d02ba11b483 100644 (file)
@@ -185,10 +185,10 @@ static void sh_perf_event_update(struct perf_event *event,
         * this is the simplest approach for maintaining consistency.
         */
 again:
-       prev_raw_count = atomic64_read(&hwc->prev_count);
+       prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = sh_pmu->read(idx);
 
-       if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+       if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;
 
@@ -203,7 +203,7 @@ again:
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;
 
-       atomic64_add(delta, &event->count);
+       local64_add(delta, &event->count);
 }
 
 static void sh_pmu_disable(struct perf_event *event)
diff --git a/arch/sparc/include/asm/local64.h b/arch/sparc/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
index 7e2669894ce8346152ba1910e4122f954dd7a1a2..74c4e0cd889c700a0a901937bd716366fb882c10 100644 (file)
@@ -6,7 +6,15 @@ extern void set_perf_event_pending(void);
 #define        PERF_EVENT_INDEX_OFFSET 0
 
 #ifdef CONFIG_PERF_EVENTS
+#include <asm/ptrace.h>
+
 extern void init_hw_perf_events(void);
+
+extern void
+__perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+
+#define perf_arch_fetch_caller_regs(pt_regs, ip)       \
+       __perf_arch_fetch_caller_regs(pt_regs, ip, 1);
 #else
 static inline void init_hw_perf_events(void)   { }
 #endif
index 92090cc9e82937a2daf6730dca89668ce9e04869..682fee06a16b044e28275935f45fba73accee3a5 100644 (file)
@@ -47,9 +47,9 @@ stack_trace_flush:
        .size           stack_trace_flush,.-stack_trace_flush
 
 #ifdef CONFIG_PERF_EVENTS
-       .globl          perf_arch_fetch_caller_regs
-       .type           perf_arch_fetch_caller_regs,#function
-perf_arch_fetch_caller_regs:
+       .globl          __perf_arch_fetch_caller_regs
+       .type           __perf_arch_fetch_caller_regs,#function
+__perf_arch_fetch_caller_regs:
        /* We always read the %pstate into %o5 since we will use
         * that to construct a fake %tstate to store into the regs.
         */
index 0ec92c8861dd3a68fe0465b71ccf8007ad6e40a7..8a6660da8e080486b86d5e3056f36d5e65210160 100644 (file)
@@ -572,18 +572,18 @@ static u64 sparc_perf_event_update(struct perf_event *event,
        s64 delta;
 
 again:
-       prev_raw_count = atomic64_read(&hwc->prev_count);
+       prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = read_pmc(idx);
 
-       if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+       if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;
 
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;
 
-       atomic64_add(delta, &event->count);
-       atomic64_sub(delta, &hwc->period_left);
+       local64_add(delta, &event->count);
+       local64_sub(delta, &hwc->period_left);
 
        return new_raw_count;
 }
@@ -591,27 +591,27 @@ again:
 static int sparc_perf_event_set_period(struct perf_event *event,
                                       struct hw_perf_event *hwc, int idx)
 {
-       s64 left = atomic64_read(&hwc->period_left);
+       s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;
 
        if (unlikely(left <= -period)) {
                left = period;
-               atomic64_set(&hwc->period_left, left);
+               local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
 
        if (unlikely(left <= 0)) {
                left += period;
-               atomic64_set(&hwc->period_left, left);
+               local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        if (left > MAX_PERIOD)
                left = MAX_PERIOD;
 
-       atomic64_set(&hwc->prev_count, (u64)-left);
+       local64_set(&hwc->prev_count, (u64)-left);
 
        write_pmc(idx, (u64)(-left) & 0xffffffff);
 
@@ -1005,7 +1005,7 @@ static int sparc_pmu_enable(struct perf_event *event)
         * skip the schedulability test here, it will be performed
         * at commit time (->commit_txn) as a whole
         */
-       if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+       if (cpuc->group_flag & PERF_EVENT_TXN)
                goto nocheck;
 
        if (check_excludes(cpuc->event, n0, 1))
@@ -1087,7 +1087,7 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (!hwc->sample_period) {
                hwc->sample_period = MAX_PERIOD;
                hwc->last_period = hwc->sample_period;
-               atomic64_set(&hwc->period_left, hwc->sample_period);
+               local64_set(&hwc->period_left, hwc->sample_period);
        }
 
        return 0;
@@ -1102,7 +1102,7 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-       cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+       cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
 /*
@@ -1114,7 +1114,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-       cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+       cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -1137,6 +1137,7 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
        if (sparc_check_constraints(cpuc->event, cpuc->events, n))
                return -EAGAIN;
 
+       cpuc->group_flag &= ~PERF_EVENT_TXN;
        return 0;
 }
 
diff --git a/arch/x86/include/asm/local64.h b/arch/x86/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
index 254883d0c7e088424ed983154613b4ad889435e1..6e742cc4251b49b2474830107da0378951c874be 100644 (file)
@@ -68,8 +68,9 @@ union cpuid10_eax {
 
 union cpuid10_edx {
        struct {
-               unsigned int num_counters_fixed:4;
-               unsigned int reserved:28;
+               unsigned int num_counters_fixed:5;
+               unsigned int bit_width_fixed:8;
+               unsigned int reserved:19;
        } split;
        unsigned int full;
 };
@@ -140,6 +141,19 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs)  perf_misc_flags(regs)
 
+#include <asm/stacktrace.h>
+
+/*
+ * We abuse bit 3 of the flags to pass exact information, see perf_misc_flags
+ * and the comment with PERF_EFLAGS_EXACT.
+ */
+#define perf_arch_fetch_caller_regs(regs, __ip)                {       \
+       (regs)->ip = (__ip);                                    \
+       (regs)->bp = caller_frame_pointer();                    \
+       (regs)->cs = __KERNEL_CS;                               \
+       regs->flags = 0;                                        \
+}
+
 #else
 static inline void init_hw_perf_events(void)           { }
 static inline void perf_events_lapic_init(void)        { }
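
The widened cpuid10_edx layout above is filled from CPUID leaf 0xA; below is
a minimal sketch of the probe, mirroring what the x86 perf init code does,
with names local to this example:

#include <linux/kernel.h>
#include <asm/processor.h>	/* cpuid() */

/* Report the fixed-counter geometry advertised by CPUID.0xA:EDX. */
static void probe_fixed_counters(void)
{
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	unsigned int ebx, ecx;

	cpuid(10, &eax.full, &ebx, &ecx, &edx.full);

	pr_info("fixed counters: %d, each %d bits wide\n",
		edx.split.num_counters_fixed,
		edx.split.bit_width_fixed);
}
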
index 4dab78edbad9ff7315a8c30aebeb734f4e87da1e..2b16a2ad23dc6b9647028c0808f8f45b094e74ac 100644 (file)
@@ -1,6 +1,13 @@
+/*
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ */
+
 #ifndef _ASM_X86_STACKTRACE_H
 #define _ASM_X86_STACKTRACE_H
 
+#include <linux/uaccess.h>
+
 extern int kstack_depth_to_print;
 
 struct thread_info;
@@ -42,4 +49,46 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data);
 
+#ifdef CONFIG_X86_32
+#define STACKSLOTS_PER_LINE 8
+#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)
+#else
+#define STACKSLOTS_PER_LINE 4
+#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
+#endif
+
+extern void
+show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *stack, unsigned long bp, char *log_lvl);
+
+extern void
+show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *sp, unsigned long bp, char *log_lvl);
+
+extern unsigned int code_bytes;
+
+/* The form of the top of the frame on the stack */
+struct stack_frame {
+       struct stack_frame *next_frame;
+       unsigned long return_address;
+};
+
+struct stack_frame_ia32 {
+    u32 next_frame;
+    u32 return_address;
+};
+
+static inline unsigned long caller_frame_pointer(void)
+{
+       struct stack_frame *frame;
+
+       get_bp(frame);
+
+#ifdef CONFIG_FRAME_POINTER
+       frame = frame->next_frame;
+#endif
+
+       return (unsigned long)frame;
+}
+
 #endif /* _ASM_X86_STACKTRACE_H */
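
caller_frame_pointer() replaces the skip-counting rewind_frame_pointer()
(removed from dumpstack.h below): rather than rewinding n frames it returns
just the caller's frame, and the perf code now accounts for skipping at the
call site. An illustrative sketch of how such a frame pointer chains,
assuming CONFIG_FRAME_POINTER; a production walker would also validate each
pointer the way the old probe_kernel_address() loop did:

#include <linux/kernel.h>	/* __kernel_text_address(), printk() */
#include <asm/stacktrace.h>	/* struct stack_frame */

/* Walk the frame-pointer chain from bp, printing return addresses. */
static void walk_frame_chain(unsigned long bp, int max)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (frame && max--) {
		unsigned long ret = frame->return_address;

		if (!__kernel_text_address(ret))
			break;			/* chain left kernel text */
		printk(KERN_DEBUG "  %pS\n", (void *)ret);
		frame = frame->next_frame;	/* saved frame of the caller */
	}
}
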
index 5db5b7d65a180f6a7f0c2cb970d63e04129add2a..f2da20fda02ddf6fcd449a88ba399fe4ed44af2a 100644 (file)
@@ -220,6 +220,7 @@ struct x86_pmu {
                                                 struct perf_event *event);
        struct event_constraint *event_constraints;
        void            (*quirks)(void);
+       int             perfctr_second_write;
 
        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
@@ -295,10 +296,10 @@ x86_perf_event_update(struct perf_event *event)
         * count to the generic event atomically:
         */
 again:
-       prev_raw_count = atomic64_read(&hwc->prev_count);
+       prev_raw_count = local64_read(&hwc->prev_count);
        rdmsrl(hwc->event_base + idx, new_raw_count);
 
-       if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+       if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
                goto again;
 
@@ -313,8 +314,8 @@ again:
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;
 
-       atomic64_add(delta, &event->count);
-       atomic64_sub(delta, &hwc->period_left);
+       local64_add(delta, &event->count);
+       local64_sub(delta, &hwc->period_left);
 
        return new_raw_count;
 }
@@ -438,7 +439,7 @@ static int x86_setup_perfctr(struct perf_event *event)
        if (!hwc->sample_period) {
                hwc->sample_period = x86_pmu.max_period;
                hwc->last_period = hwc->sample_period;
-               atomic64_set(&hwc->period_left, hwc->sample_period);
+               local64_set(&hwc->period_left, hwc->sample_period);
        } else {
                /*
                 * If we have a PMU initialized but no APIC
@@ -885,7 +886,7 @@ static int
 x86_perf_event_set_period(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
-       s64 left = atomic64_read(&hwc->period_left);
+       s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0, idx = hwc->idx;
 
@@ -897,14 +898,14 @@ x86_perf_event_set_period(struct perf_event *event)
         */
        if (unlikely(left <= -period)) {
                left = period;
-               atomic64_set(&hwc->period_left, left);
+               local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
 
        if (unlikely(left <= 0)) {
                left += period;
-               atomic64_set(&hwc->period_left, left);
+               local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
@@ -923,10 +924,19 @@ x86_perf_event_set_period(struct perf_event *event)
         * The hw event starts counting from this event offset,
         * mark it to be able to extract future deltas:
         */
-       atomic64_set(&hwc->prev_count, (u64)-left);
+       local64_set(&hwc->prev_count, (u64)-left);
 
-       wrmsrl(hwc->event_base + idx,
+       wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
+
+       /*
+        * Due to an erratum on certain CPUs we need
+        * a second write to be sure the register
+        * is updated properly
+        */
+       if (x86_pmu.perfctr_second_write) {
+               wrmsrl(hwc->event_base + idx,
                        (u64)(-left) & x86_pmu.cntval_mask);
+       }
 
        perf_event_update_userpage(event);
 
@@ -969,7 +979,7 @@ static int x86_pmu_enable(struct perf_event *event)
         * skip the schedulability test here, it will be performed
         * at commit time (->commit_txn) as a whole
         */
-       if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+       if (cpuc->group_flag & PERF_EVENT_TXN)
                goto out;
 
        ret = x86_pmu.schedule_events(cpuc, n, assign);
@@ -1096,7 +1106,7 @@ static void x86_pmu_disable(struct perf_event *event)
         * The events never got scheduled and ->cancel_txn will truncate
         * the event_list.
         */
-       if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+       if (cpuc->group_flag & PERF_EVENT_TXN)
                return;
 
        x86_pmu_stop(event);
@@ -1388,7 +1398,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-       cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
+       cpuc->group_flag |= PERF_EVENT_TXN;
        cpuc->n_txn = 0;
 }
 
@@ -1401,7 +1411,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-       cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
+       cpuc->group_flag &= ~PERF_EVENT_TXN;
        /*
         * Truncate the collected events.
         */
@@ -1435,11 +1445,7 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
-       /*
-        * Clear out the txn count so that ->cancel_txn() which gets
-        * run after ->commit_txn() doesn't undo things.
-        */
-       cpuc->n_txn = 0;
+       cpuc->group_flag &= ~PERF_EVENT_TXN;
 
        return 0;
 }
@@ -1607,8 +1613,6 @@ static const struct stacktrace_ops backtrace_ops = {
        .walk_stack             = print_context_stack_bp,
 };
 
-#include "../dumpstack.h"
-
 static void
 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
@@ -1730,22 +1734,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
        return entry;
 }
 
-void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
-{
-       regs->ip = ip;
-       /*
-        * perf_arch_fetch_caller_regs adds another call, we need to increment
-        * the skip level
-        */
-       regs->bp = rewind_frame_pointer(skip + 1);
-       regs->cs = __KERNEL_CS;
-       /*
-        * We abuse bit 3 to pass exact information, see perf_misc_flags
-        * and the comment with PERF_EFLAGS_EXACT.
-        */
-       regs->flags = 0;
-}
-
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
        unsigned long ip;
index ae85d69644d182f31c9711eba6c5ddb21862bcec..9286e736a70af948d43ed9756e65fabbba2dc991 100644 (file)
@@ -829,6 +829,15 @@ static __initconst const struct x86_pmu p4_pmu = {
        .max_period             = (1ULL << 39) - 1,
        .hw_config              = p4_hw_config,
        .schedule_events        = p4_pmu_schedule_events,
+       /*
+        * This handles erratum N15 in Intel doc 249199-029:
+        * the counter may not be updated correctly on write,
+        * so we need a second write operation to do the trick
+        * (the official workaround didn't work).
+        *
+        * The second-write idea is taken from OProfile code.
+        */
+       .perfctr_second_write   = 1,
 };
 
 static __init int p4_pmu_init(void)
index c89a386930b7f4d9bc9c74dc0fa0151056860493..6e8752c1bd5241fc9e7e63ee088f06c84d0526fb 100644 (file)
@@ -18,7 +18,6 @@
 
 #include <asm/stacktrace.h>
 
-#include "dumpstack.h"
 
 int panic_on_unrecovered_nmi;
 int panic_on_io_nmi;
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
deleted file mode 100644 (file)
index e1a93be..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
- */
-
-#ifndef DUMPSTACK_H
-#define DUMPSTACK_H
-
-#ifdef CONFIG_X86_32
-#define STACKSLOTS_PER_LINE 8
-#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)
-#else
-#define STACKSLOTS_PER_LINE 4
-#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
-#endif
-
-#include <linux/uaccess.h>
-
-extern void
-show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *stack, unsigned long bp, char *log_lvl);
-
-extern void
-show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *sp, unsigned long bp, char *log_lvl);
-
-extern unsigned int code_bytes;
-
-/* The form of the top of the frame on the stack */
-struct stack_frame {
-       struct stack_frame *next_frame;
-       unsigned long return_address;
-};
-
-struct stack_frame_ia32 {
-    u32 next_frame;
-    u32 return_address;
-};
-
-static inline unsigned long rewind_frame_pointer(int n)
-{
-       struct stack_frame *frame;
-
-       get_bp(frame);
-
-#ifdef CONFIG_FRAME_POINTER
-       while (n--) {
-               if (probe_kernel_address(&frame->next_frame, frame))
-                       break;
-       }
-#endif
-
-       return (unsigned long)frame;
-}
-
-#endif /* DUMPSTACK_H */
index 11540a189d9311e6a0fb4c44e685fc82799fdd0e..0f6376ffa2d9b6da338a6145c38f8b3307fa6e36 100644 (file)
@@ -16,8 +16,6 @@
 
 #include <asm/stacktrace.h>
 
-#include "dumpstack.h"
-
 
 void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
index 272c9f1f05f31bf20492dfb6be35bed716be26b1..57a21f11c791b38a2b88559349cd041935e262da 100644 (file)
@@ -16,7 +16,6 @@
 
 #include <asm/stacktrace.h>
 
-#include "dumpstack.h"
 
 #define N_EXCEPTION_STACKS_END \
                (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)
index 922eefbb3f6c72b7b511791b083b916040b9f95e..b53c525368a75cf07489b0327de138bfab5b16d5 100644 (file)
@@ -23,11 +23,16 @@ static int save_stack_stack(void *data, char *name)
        return 0;
 }
 
-static void save_stack_address(void *data, unsigned long addr, int reliable)
+static void
+__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
 {
        struct stack_trace *trace = data;
+#ifdef CONFIG_FRAME_POINTER
        if (!reliable)
                return;
+#endif
+       if (nosched && in_sched_functions(addr))
+               return;
        if (trace->skip > 0) {
                trace->skip--;
                return;
@@ -36,20 +41,15 @@ static void save_stack_address(void *data, unsigned long addr, int reliable)
                trace->entries[trace->nr_entries++] = addr;
 }
 
+static void save_stack_address(void *data, unsigned long addr, int reliable)
+{
+       return __save_stack_address(data, addr, reliable, false);
+}
+
 static void
 save_stack_address_nosched(void *data, unsigned long addr, int reliable)
 {
-       struct stack_trace *trace = (struct stack_trace *)data;
-       if (!reliable)
-               return;
-       if (in_sched_functions(addr))
-               return;
-       if (trace->skip > 0) {
-               trace->skip--;
-               return;
-       }
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = addr;
+       return __save_stack_address(data, addr, reliable, true);
 }
 
 static const struct stacktrace_ops save_stack_ops = {
@@ -96,12 +96,13 @@ EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
 
 /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
 
-struct stack_frame {
+struct stack_frame_user {
        const void __user       *next_fp;
        unsigned long           ret_addr;
 };
 
-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+static int
+copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
 {
        int ret;
 
@@ -126,7 +127,7 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
                trace->entries[trace->nr_entries++] = regs->ip;
 
        while (trace->nr_entries < trace->max_entries) {
-               struct stack_frame frame;
+               struct stack_frame_user frame;
 
                frame.next_fp = NULL;
                frame.ret_addr = 0;
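
After the consolidation above, both callbacks funnel through
__save_stack_address() and only differ in the nosched flag; the external
interface is unchanged. For reference, a typical consumer looks like this
(buffer size and skip count are arbitrary):

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void dump_current_backtrace(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 1,	/* drop this function itself */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}
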
diff --git a/arch/xtensa/include/asm/local64.h b/arch/xtensa/include/asm/local64.h
new file mode 100644 (file)
index 0000000..36c93b5
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
index e19de6a80339b3ceeae267283962f271ac6c5e55..97d91a03fb1339c03f2b78c42a0010d0266b1fbd 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -653,6 +653,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
        else
                stack_base = vma->vm_start - stack_expand;
 #endif
+       current->mm->start_stack = bprm->p;
        ret = expand_stack(vma, stack_base);
        if (ret)
                ret = -EFAULT;
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
new file mode 100644 (file)
index 0000000..02ac760
--- /dev/null
@@ -0,0 +1,96 @@
+#ifndef _ASM_GENERIC_LOCAL64_H
+#define _ASM_GENERIC_LOCAL64_H
+
+#include <linux/percpu.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic64_t.  Which is
+ * rather pointless.  The whole point behind local64_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU.  local64_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+#if BITS_PER_LONG == 64
+
+#include <asm/local.h>
+
+typedef struct {
+       local_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)        { LOCAL_INIT(i) }
+
+#define local64_read(l)                local_read(&(l)->a)
+#define local64_set(l,i)       local_set((&(l)->a),(i))
+#define local64_inc(l)         local_inc(&(l)->a)
+#define local64_dec(l)         local_dec(&(l)->a)
+#define local64_add(i,l)       local_add((i),(&(l)->a))
+#define local64_sub(i,l)       local_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)  local_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)     local_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)        local_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc.  Some archs can optimize this case well. */
+#define __local64_inc(l)       local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)       local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)     local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)     local64_set((l), local64_read(l) - (i))
+
+#else /* BITS_PER_LONG != 64 */
+
+#include <asm/atomic.h>
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct {
+       atomic64_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)        { ATOMIC_LONG_INIT(i) }
+
+#define local64_read(l)                atomic64_read(&(l)->a)
+#define local64_set(l,i)       atomic64_set((&(l)->a),(i))
+#define local64_inc(l)         atomic64_inc(&(l)->a)
+#define local64_dec(l)         atomic64_dec(&(l)->a)
+#define local64_add(i,l)       atomic64_add((i),(&(l)->a))
+#define local64_sub(i,l)       atomic64_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)  atomic64_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)     atomic64_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)        atomic64_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc.  Some archs can optimize this case well. */
+#define __local64_inc(l)       local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)       local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)     local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)     local64_set((l), local64_read(l) - (i))
+
+#endif /* BITS_PER_LONG != 64 */
+
+#endif /* _ASM_GENERIC_LOCAL64_H */
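
This header is the primitive behind the atomic64_t -> local64_t conversions
throughout this merge: perf hardware event counts are only ever updated from
the owning CPU (possibly from NMI context on it), so on 64-bit they can use
the cheaper local_t operations. The canonical update loop, as it now appears
in the PMU backends above; read_hw_counter() is a stand-in for the
arch-specific counter read:

/* Sketch of the per-event update pattern used by the PMU drivers. */
static u64 event_update(struct perf_event *event, struct hw_perf_event *hwc)
{
	u64 prev, new;
	s64 delta;

again:
	prev = local64_read(&hwc->prev_count);
	new  = read_hw_counter();

	/* An NMI may have updated prev_count under us; retry if so. */
	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	delta = new - prev;
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new;
}
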
index 3167f2df4126c12e195be536c687c577212da651..0af31cd335d6b412a1aa4baef6faf42e19ea3763 100644 (file)
@@ -257,8 +257,7 @@ static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
                       u64 count, struct pt_regs *regs, void *head)
 {
-       perf_tp_event(addr, count, raw_data, size, regs, head);
-       perf_swevent_put_recursion_context(rctx);
+       perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
 }
 #endif
 
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h
deleted file mode 100644 (file)
index b616d39..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2008 Eduard - Gabriel Munteanu
- *
- * This file is released under GPL version 2.
- */
-
-#ifndef _LINUX_KMEMTRACE_H
-#define _LINUX_KMEMTRACE_H
-
-#ifdef __KERNEL__
-
-#include <trace/events/kmem.h>
-
-#ifdef CONFIG_KMEMTRACE
-extern void kmemtrace_init(void);
-#else
-static inline void kmemtrace_init(void)
-{
-}
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_KMEMTRACE_H */
-
index 5d0266d94985c65acbd8b13a41961964cdde4a72..63b5aa5dce6948359476c5b75dba228454d1997c 100644 (file)
@@ -214,8 +214,9 @@ struct perf_event_attr {
                                 *  See also PERF_RECORD_MISC_EXACT_IP
                                 */
                                precise_ip     :  2, /* skid constraint       */
+                               mmap_data      :  1, /* non-exec mmap data    */
 
-                               __reserved_1   : 47;
+                               __reserved_1   : 46;
 
        union {
                __u32           wakeup_events;    /* wakeup every n events */
@@ -461,6 +462,7 @@ enum perf_callchain_context {
 
 #ifdef CONFIG_PERF_EVENTS
 # include <asm/perf_event.h>
+# include <asm/local64.h>
 #endif
 
 struct perf_guest_info_callbacks {
@@ -535,10 +537,10 @@ struct hw_perf_event {
                struct arch_hw_breakpoint       info;
 #endif
        };
-       atomic64_t                      prev_count;
+       local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
-       atomic64_t                      period_left;
+       local64_t                       period_left;
        u64                             interrupts;
 
        u64                             freq_time_stamp;
@@ -548,7 +550,10 @@ struct hw_perf_event {
 
 struct perf_event;
 
-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
 
 /**
  * struct pmu - generic performance monitoring unit
@@ -562,14 +567,28 @@ struct pmu {
        void (*unthrottle)              (struct perf_event *event);
 
        /*
-        * group events scheduling is treated as a transaction,
-        * add group events as a whole and perform one schedulability test.
-        * If test fails, roll back the whole group
+        * Group events scheduling is treated as a transaction, add group
+        * events as a whole and perform one schedulability test. If the test
+        * fails, roll back the whole group
         */
 
+       /*
+        * Start the transaction; after this, ->enable() doesn't need
+        * to do schedulability tests.
+        */
        void (*start_txn)       (const struct pmu *pmu);
-       void (*cancel_txn)      (const struct pmu *pmu);
+       /*
+        * If ->start_txn() disabled the ->enable() schedulability test
+        * then ->commit_txn() is required to perform one. On success
+        * the transaction is closed. On error the transaction is kept
+        * open until ->cancel_txn() is called.
+        */
        int  (*commit_txn)      (const struct pmu *pmu);
+       /*
+        * Will cancel the transaction; assumes ->disable() is called
+        * for each successful ->enable() during the transaction.
+        */
+       void (*cancel_txn)      (const struct pmu *pmu);
 };
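
Taken together, the three hooks give group scheduling the shape below; a condensed sketch of group_sched_in() from kernel/perf_event.c (arguments and the no-transaction fallback elided):

static int group_sched_in_sketch(struct pmu *pmu, struct perf_event *leader)
{
	struct perf_event *event;

	pmu->start_txn(pmu);		/* ->enable() may skip per-event tests */

	if (event_sched_in(leader))	/* arguments elided */
		goto group_error;

	list_for_each_entry(event, &leader->sibling_list, group_entry)
		if (event_sched_in(event))
			goto group_error;

	if (!pmu->commit_txn(pmu))	/* the one schedulability test */
		return 0;		/* success also closes the transaction */

group_error:
	/* ->disable() whatever was enabled, then close the transaction */
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}
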
 
 /**
@@ -584,7 +603,9 @@ enum perf_event_active_state {
 
 struct file;
 
-struct perf_mmap_data {
+#define PERF_BUFFER_WRITABLE           0x01
+
+struct perf_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
@@ -650,7 +671,8 @@ struct perf_event {
 
        enum perf_event_active_state    state;
        unsigned int                    attach_state;
-       atomic64_t                      count;
+       local64_t                       count;
+       atomic64_t                      child_count;
 
        /*
         * These are the total time in nanoseconds that the event
@@ -709,7 +731,7 @@ struct perf_event {
        atomic_t                        mmap_count;
        int                             mmap_locked;
        struct user_struct              *mmap_user;
-       struct perf_mmap_data           *data;
+       struct perf_buffer              *buffer;
 
        /* poll related */
        wait_queue_head_t               waitq;
@@ -807,7 +829,7 @@ struct perf_cpu_context {
 
 struct perf_output_handle {
        struct perf_event               *event;
-       struct perf_mmap_data           *data;
+       struct perf_buffer              *buffer;
        unsigned long                   wakeup;
        unsigned long                   size;
        void                            *addr;
@@ -910,8 +932,10 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
-extern void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+#ifndef perf_arch_fetch_caller_regs
+static inline void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+#endif
 
 /*
  * Take a snapshot of the regs. Skip ip and frame pointer to
@@ -921,31 +945,11 @@ perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
  * - bp for callchains
  * - eflags, for future purposes, just in case
  */
-static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
-       unsigned long ip;
-
        memset(regs, 0, sizeof(*regs));
 
-       switch (skip) {
-       case 1 :
-               ip = CALLER_ADDR0;
-               break;
-       case 2 :
-               ip = CALLER_ADDR1;
-               break;
-       case 3 :
-               ip = CALLER_ADDR2;
-               break;
-       case 4:
-               ip = CALLER_ADDR3;
-               break;
-       /* No need to support further for now */
-       default:
-               ip = 0;
-       }
-
-       return perf_arch_fetch_caller_regs(regs, ip, skip);
+       perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
 static inline void
@@ -955,21 +959,14 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
                struct pt_regs hot_regs;
 
                if (!regs) {
-                       perf_fetch_caller_regs(&hot_regs, 1);
+                       perf_fetch_caller_regs(&hot_regs);
                        regs = &hot_regs;
                }
                __perf_sw_event(event_id, nr, nmi, regs, addr);
        }
 }
 
-extern void __perf_event_mmap(struct vm_area_struct *vma);
-
-static inline void perf_event_mmap(struct vm_area_struct *vma)
-{
-       if (vma->vm_flags & VM_EXEC)
-               __perf_event_mmap(vma);
-}
-
+extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -1001,7 +998,7 @@ static inline bool perf_paranoid_kernel(void)
 extern void perf_event_init(void);
 extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
-                         struct hlist_head *head);
+                         struct hlist_head *head, int rctx);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 1812dac8c496b8694d18b45c372b86c411cf0400..1acfa73ce2ac4597559c729301c2c6479ea49c7e 100644
@@ -14,7 +14,8 @@
 #include <asm/page.h>          /* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>         /* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
-#include <linux/kmemtrace.h>
+
+#include <trace/events/kmem.h>
 
 #ifndef ARCH_KMALLOC_MINALIGN
 /*
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4ba59cfc1f7562c0bb8cb900c4f8d444554d25d5..6447a723ecb170b9c3fd4476e8a1c1016b2b69de 100644
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemtrace.h>
 #include <linux/kmemleak.h>
 
+#include <trace/events/kmem.h>
+
 enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
        ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
diff --git a/include/trace/boot.h b/include/trace/boot.h
deleted file mode 100644
index 088ea08..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _LINUX_TRACE_BOOT_H
-#define _LINUX_TRACE_BOOT_H
-
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/init.h>
-
-/*
- * Structure which defines the trace of an initcall
- * while it is called.
- * You don't have to fill the func field since it is
- * only used internally by the tracer.
- */
-struct boot_trace_call {
-       pid_t                   caller;
-       char                    func[KSYM_SYMBOL_LEN];
-};
-
-/*
- * Structure which defines the trace of an initcall
- * while it returns.
- */
-struct boot_trace_ret {
-       char                    func[KSYM_SYMBOL_LEN];
-       int                             result;
-       unsigned long long      duration;               /* nsecs */
-};
-
-#ifdef CONFIG_BOOT_TRACER
-/* Append the traces on the ring-buffer */
-extern void trace_boot_call(struct boot_trace_call *bt, initcall_t fn);
-extern void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn);
-
-/* Tells the tracer that smp_pre_initcall is finished.
- * So we can start the tracing
- */
-extern void start_boot_trace(void);
-
-/* Resume the tracing of other necessary events
- * such as sched switches
- */
-extern void enable_boot_trace(void);
-
-/* Suspend this tracing. Actually, only sched_switches tracing have
- * to be suspended. Initcalls doesn't need it.)
- */
-extern void disable_boot_trace(void);
-#else
-static inline
-void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) { }
-
-static inline
-void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) { }
-
-static inline void start_boot_trace(void) { }
-static inline void enable_boot_trace(void) { }
-static inline void disable_boot_trace(void) { }
-#endif /* CONFIG_BOOT_TRACER */
-
-#endif /* __LINUX_TRACE_BOOT_H */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 5a64905d7278a47fb683a0aceb63cef029dd467b..fc013a8201e972431969db800771a7584832dbe6 100644
@@ -705,7 +705,7 @@ perf_trace_##call(void *__data, proto)                                      \
        int __data_size;                                                \
        int rctx;                                                       \
                                                                        \
-       perf_fetch_caller_regs(&__regs, 1);                             \
+       perf_fetch_caller_regs(&__regs);                                \
                                                                        \
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
diff --git a/init/main.c b/init/main.c
index 3bdb152f412f32b8edeeb1036a24f934988b5be2..e2a2bf3a169f5f2bddc503917eb6c32aa2385774 100644
 #include <linux/ftrace.h>
 #include <linux/async.h>
 #include <linux/kmemcheck.h>
-#include <linux/kmemtrace.h>
 #include <linux/sfi.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
-#include <trace/boot.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -653,7 +651,6 @@ asmlinkage void __init start_kernel(void)
 #endif
        page_cgroup_init();
        enable_debug_pagealloc();
-       kmemtrace_init();
        kmemleak_init();
        debug_objects_mem_init();
        idr_init_cache();
@@ -715,38 +712,33 @@ int initcall_debug;
 core_param(initcall_debug, initcall_debug, bool, 0644);
 
 static char msgbuf[64];
-static struct boot_trace_call call;
-static struct boot_trace_ret ret;
 
 int do_one_initcall(initcall_t fn)
 {
        int count = preempt_count();
        ktime_t calltime, delta, rettime;
+       unsigned long long duration;
+       int ret;
 
        if (initcall_debug) {
-               call.caller = task_pid_nr(current);
-               printk("calling  %pF @ %i\n", fn, call.caller);
+               printk("calling  %pF @ %i\n", fn, task_pid_nr(current));
                calltime = ktime_get();
-               trace_boot_call(&call, fn);
-               enable_boot_trace();
        }
 
-       ret.result = fn();
+       ret = fn();
 
        if (initcall_debug) {
-               disable_boot_trace();
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
-               ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
-               trace_boot_ret(&ret, fn);
-               printk("initcall %pF returned %d after %Ld usecs\n", fn,
-                       ret.result, ret.duration);
+               duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+               printk("initcall %pF returned %d after %lld usecs\n", fn,
+                       ret, duration);
        }
 
        msgbuf[0] = 0;
 
-       if (ret.result && ret.result != -ENODEV && initcall_debug)
-               sprintf(msgbuf, "error code %d ", ret.result);
+       if (ret && ret != -ENODEV && initcall_debug)
+               sprintf(msgbuf, "error code %d ", ret);
 
        if (preempt_count() != count) {
                strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
@@ -760,7 +752,7 @@ int do_one_initcall(initcall_t fn)
                printk("initcall %pF returned with %s\n", fn, msgbuf);
        }
 
-       return ret.result;
+       return ret;
 }
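
One survivor worth a second look: the duration math divides nanoseconds by 1024 (>> 10) rather than 1000, a deliberately cheap approximation that undershoots real microseconds by about 2.4%. For example:

/*
 *   delta = 5,000,000 ns:  duration = 5000000 >> 10 = 4882 "usecs"
 *   (an exact conversion would print 5000)
 */
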
 
 
@@ -880,7 +872,6 @@ static int __init kernel_init(void * unused)
        smp_prepare_cpus(setup_max_cpus);
 
        do_pre_smp_initcalls();
-       start_boot_trace();
 
        smp_init();
        sched_init_smp();
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index ff86c558af4c28dd4c9a7ea92cc592b70bee6d7d..c772a3d4000d85a1fc0691626d3ff26c90bb1c12 100644
@@ -675,7 +675,6 @@ group_sched_in(struct perf_event *group_event,
        struct perf_event *event, *partial_group = NULL;
        const struct pmu *pmu = group_event->pmu;
        bool txn = false;
-       int ret;
 
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
@@ -703,14 +702,8 @@ group_sched_in(struct perf_event *group_event,
                }
        }
 
-       if (!txn)
-               return 0;
-
-       ret = pmu->commit_txn(pmu);
-       if (!ret) {
-               pmu->cancel_txn(pmu);
+       if (!txn || !pmu->commit_txn(pmu))
                return 0;
-       }
 
 group_error:
        /*
@@ -1155,9 +1148,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
         * In order to keep per-task stats reliable we need to flip the event
         * values when we flip the contexts.
         */
-       value = atomic64_read(&next_event->count);
-       value = atomic64_xchg(&event->count, value);
-       atomic64_set(&next_event->count, value);
+       value = local64_read(&next_event->count);
+       value = local64_xchg(&event->count, value);
+       local64_set(&next_event->count, value);
 
        swap(event->total_time_enabled, next_event->total_time_enabled);
        swap(event->total_time_running, next_event->total_time_running);
@@ -1547,10 +1540,10 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 
        hwc->sample_period = sample_period;
 
-       if (atomic64_read(&hwc->period_left) > 8*sample_period) {
+       if (local64_read(&hwc->period_left) > 8*sample_period) {
                perf_disable();
                perf_event_stop(event);
-               atomic64_set(&hwc->period_left, 0);
+               local64_set(&hwc->period_left, 0);
                perf_event_start(event);
                perf_enable();
        }
@@ -1591,7 +1584,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
                perf_disable();
                event->pmu->read(event);
-               now = atomic64_read(&event->count);
+               now = local64_read(&event->count);
                delta = now - hwc->freq_count_stamp;
                hwc->freq_count_stamp = now;
 
@@ -1743,6 +1736,11 @@ static void __perf_event_read(void *info)
        event->pmu->read(event);
 }
 
+static inline u64 perf_event_count(struct perf_event *event)
+{
+       return local64_read(&event->count) + atomic64_read(&event->child_count);
+}
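
The split is deliberate: event->count is only written from the owning context (PMU interrupt, swevent path), so it can be a cheap local64_t, while counts folded in from exiting children arrive from other CPUs and stay a conventional atomic64_t. The two writer sides, sketched (not verbatim kernel code):

/* owning CPU only: NMI-safe, no LOCK prefix on 64-bit */
static void count_on_owning_cpu(struct perf_event *event, u64 nr)
{
	local64_add(nr, &event->count);
}

/* any CPU, rare: a child's total folded into the parent at exit */
static void fold_child_count(struct perf_event *parent, u64 child_val)
{
	atomic64_add(child_val, &parent->child_count);
}
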
+
 static u64 perf_event_read(struct perf_event *event)
 {
        /*
@@ -1762,7 +1760,7 @@ static u64 perf_event_read(struct perf_event *event)
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
 
-       return atomic64_read(&event->count);
+       return perf_event_count(event);
 }
 
 /*
@@ -1883,7 +1881,7 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void perf_pending_sync(struct perf_event *event);
-static void perf_mmap_data_put(struct perf_mmap_data *data);
+static void perf_buffer_put(struct perf_buffer *buffer);
 
 static void free_event(struct perf_event *event)
 {
@@ -1891,7 +1889,7 @@ static void free_event(struct perf_event *event)
 
        if (!event->parent) {
                atomic_dec(&nr_events);
-               if (event->attr.mmap)
+               if (event->attr.mmap || event->attr.mmap_data)
                        atomic_dec(&nr_mmap_events);
                if (event->attr.comm)
                        atomic_dec(&nr_comm_events);
@@ -1899,9 +1897,9 @@ static void free_event(struct perf_event *event)
                        atomic_dec(&nr_task_events);
        }
 
-       if (event->data) {
-               perf_mmap_data_put(event->data);
-               event->data = NULL;
+       if (event->buffer) {
+               perf_buffer_put(event->buffer);
+               event->buffer = NULL;
        }
 
        if (event->destroy)
@@ -2126,13 +2124,13 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 static unsigned int perf_poll(struct file *file, poll_table *wait)
 {
        struct perf_event *event = file->private_data;
-       struct perf_mmap_data *data;
+       struct perf_buffer *buffer;
        unsigned int events = POLL_HUP;
 
        rcu_read_lock();
-       data = rcu_dereference(event->data);
-       if (data)
-               events = atomic_xchg(&data->poll, 0);
+       buffer = rcu_dereference(event->buffer);
+       if (buffer)
+               events = atomic_xchg(&buffer->poll, 0);
        rcu_read_unlock();
 
        poll_wait(file, &event->waitq, wait);
@@ -2143,7 +2141,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 static void perf_event_reset(struct perf_event *event)
 {
        (void)perf_event_read(event);
-       atomic64_set(&event->count, 0);
+       local64_set(&event->count, 0);
        perf_event_update_userpage(event);
 }
 
@@ -2342,14 +2340,14 @@ static int perf_event_index(struct perf_event *event)
 void perf_event_update_userpage(struct perf_event *event)
 {
        struct perf_event_mmap_page *userpg;
-       struct perf_mmap_data *data;
+       struct perf_buffer *buffer;
 
        rcu_read_lock();
-       data = rcu_dereference(event->data);
-       if (!data)
+       buffer = rcu_dereference(event->buffer);
+       if (!buffer)
                goto unlock;
 
-       userpg = data->user_page;
+       userpg = buffer->user_page;
 
        /*
         * Disable preemption so as to not let the corresponding user-space
@@ -2359,9 +2357,9 @@ void perf_event_update_userpage(struct perf_event *event)
        ++userpg->lock;
        barrier();
        userpg->index = perf_event_index(event);
-       userpg->offset = atomic64_read(&event->count);
+       userpg->offset = perf_event_count(event);
        if (event->state == PERF_EVENT_STATE_ACTIVE)
-               userpg->offset -= atomic64_read(&event->hw.prev_count);
+               userpg->offset -= local64_read(&event->hw.prev_count);
 
        userpg->time_enabled = event->total_time_enabled +
                        atomic64_read(&event->child_total_time_enabled);
@@ -2376,6 +2374,25 @@ unlock:
        rcu_read_unlock();
 }
 
+static unsigned long perf_data_size(struct perf_buffer *buffer);
+
+static void
+perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
+{
+       long max_size = perf_data_size(buffer);
+
+       if (watermark)
+               buffer->watermark = min(max_size, watermark);
+
+       if (!buffer->watermark)
+               buffer->watermark = max_size / 2;
+
+       if (flags & PERF_BUFFER_WRITABLE)
+               buffer->writable = 1;
+
+       atomic_set(&buffer->refcount, 1);
+}
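
A worked example of the watermark defaulting, assuming PAGE_SIZE == 4096, an 8-page data buffer, and attr.wakeup_watermark == 0:

/*
 *   max_size          = 8 * 4096     = 32768 bytes
 *   buffer->watermark = max_size / 2 = 16384 bytes
 *
 * i.e. with no explicit watermark the consumer is woken roughly every
 * time half the buffer fills; an explicit wakeup_watermark is clamped
 * to the buffer size.
 */
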
+
 #ifndef CONFIG_PERF_USE_VMALLOC
 
 /*
@@ -2383,15 +2400,15 @@ unlock:
  */
 
 static struct page *
-perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
+perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
 {
-       if (pgoff > data->nr_pages)
+       if (pgoff > buffer->nr_pages)
                return NULL;
 
        if (pgoff == 0)
-               return virt_to_page(data->user_page);
+               return virt_to_page(buffer->user_page);
 
-       return virt_to_page(data->data_pages[pgoff - 1]);
+       return virt_to_page(buffer->data_pages[pgoff - 1]);
 }
 
 static void *perf_mmap_alloc_page(int cpu)
@@ -2407,42 +2424,44 @@ static void *perf_mmap_alloc_page(int cpu)
        return page_address(page);
 }
 
-static struct perf_mmap_data *
-perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
+static struct perf_buffer *
+perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
-       struct perf_mmap_data *data;
+       struct perf_buffer *buffer;
        unsigned long size;
        int i;
 
-       size = sizeof(struct perf_mmap_data);
+       size = sizeof(struct perf_buffer);
        size += nr_pages * sizeof(void *);
 
-       data = kzalloc(size, GFP_KERNEL);
-       if (!data)
+       buffer = kzalloc(size, GFP_KERNEL);
+       if (!buffer)
                goto fail;
 
-       data->user_page = perf_mmap_alloc_page(event->cpu);
-       if (!data->user_page)
+       buffer->user_page = perf_mmap_alloc_page(cpu);
+       if (!buffer->user_page)
                goto fail_user_page;
 
        for (i = 0; i < nr_pages; i++) {
-               data->data_pages[i] = perf_mmap_alloc_page(event->cpu);
-               if (!data->data_pages[i])
+               buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
+               if (!buffer->data_pages[i])
                        goto fail_data_pages;
        }
 
-       data->nr_pages = nr_pages;
+       buffer->nr_pages = nr_pages;
+
+       perf_buffer_init(buffer, watermark, flags);
 
-       return data;
+       return buffer;
 
 fail_data_pages:
        for (i--; i >= 0; i--)
-               free_page((unsigned long)data->data_pages[i]);
+               free_page((unsigned long)buffer->data_pages[i]);
 
-       free_page((unsigned long)data->user_page);
+       free_page((unsigned long)buffer->user_page);
 
 fail_user_page:
-       kfree(data);
+       kfree(buffer);
 
 fail:
        return NULL;
@@ -2456,17 +2475,17 @@ static void perf_mmap_free_page(unsigned long addr)
        __free_page(page);
 }
 
-static void perf_mmap_data_free(struct perf_mmap_data *data)
+static void perf_buffer_free(struct perf_buffer *buffer)
 {
        int i;
 
-       perf_mmap_free_page((unsigned long)data->user_page);
-       for (i = 0; i < data->nr_pages; i++)
-               perf_mmap_free_page((unsigned long)data->data_pages[i]);
-       kfree(data);
+       perf_mmap_free_page((unsigned long)buffer->user_page);
+       for (i = 0; i < buffer->nr_pages; i++)
+               perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
+       kfree(buffer);
 }
 
-static inline int page_order(struct perf_mmap_data *data)
+static inline int page_order(struct perf_buffer *buffer)
 {
        return 0;
 }
@@ -2479,18 +2498,18 @@ static inline int page_order(struct perf_mmap_data *data)
  * Required for architectures that have d-cache aliasing issues.
  */
 
-static inline int page_order(struct perf_mmap_data *data)
+static inline int page_order(struct perf_buffer *buffer)
 {
-       return data->page_order;
+       return buffer->page_order;
 }
 
 static struct page *
-perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
+perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
 {
-       if (pgoff > (1UL << page_order(data)))
+       if (pgoff > (1UL << page_order(buffer)))
                return NULL;
 
-       return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
+       return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
 }
 
 static void perf_mmap_unmark_page(void *addr)
@@ -2500,57 +2519,59 @@ static void perf_mmap_unmark_page(void *addr)
        page->mapping = NULL;
 }
 
-static void perf_mmap_data_free_work(struct work_struct *work)
+static void perf_buffer_free_work(struct work_struct *work)
 {
-       struct perf_mmap_data *data;
+       struct perf_buffer *buffer;
        void *base;
        int i, nr;
 
-       data = container_of(work, struct perf_mmap_data, work);
-       nr = 1 << page_order(data);
+       buffer = container_of(work, struct perf_buffer, work);
+       nr = 1 << page_order(buffer);
 
-       base = data->user_page;
+       base = buffer->user_page;
        for (i = 0; i < nr + 1; i++)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));
 
        vfree(base);
-       kfree(data);
+       kfree(buffer);
 }
 
-static void perf_mmap_data_free(struct perf_mmap_data *data)
+static void perf_buffer_free(struct perf_buffer *buffer)
 {
-       schedule_work(&data->work);
+       schedule_work(&buffer->work);
 }
 
-static struct perf_mmap_data *
-perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
+static struct perf_buffer *
+perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
-       struct perf_mmap_data *data;
+       struct perf_buffer *buffer;
        unsigned long size;
        void *all_buf;
 
-       size = sizeof(struct perf_mmap_data);
+       size = sizeof(struct perf_buffer);
        size += sizeof(void *);
 
-       data = kzalloc(size, GFP_KERNEL);
-       if (!data)
+       buffer = kzalloc(size, GFP_KERNEL);
+       if (!buffer)
                goto fail;
 
-       INIT_WORK(&data->work, perf_mmap_data_free_work);
+       INIT_WORK(&buffer->work, perf_buffer_free_work);
 
        all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
        if (!all_buf)
                goto fail_all_buf;
 
-       data->user_page = all_buf;
-       data->data_pages[0] = all_buf + PAGE_SIZE;
-       data->page_order = ilog2(nr_pages);
-       data->nr_pages = 1;
+       buffer->user_page = all_buf;
+       buffer->data_pages[0] = all_buf + PAGE_SIZE;
+       buffer->page_order = ilog2(nr_pages);
+       buffer->nr_pages = 1;
+
+       perf_buffer_init(buffer, watermark, flags);
 
-       return data;
+       return buffer;
 
 fail_all_buf:
-       kfree(data);
+       kfree(buffer);
 
 fail:
        return NULL;
@@ -2558,15 +2579,15 @@ fail:
 
 #endif
 
-static unsigned long perf_data_size(struct perf_mmap_data *data)
+static unsigned long perf_data_size(struct perf_buffer *buffer)
 {
-       return data->nr_pages << (PAGE_SHIFT + page_order(data));
+       return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
 }
 
 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct perf_event *event = vma->vm_file->private_data;
-       struct perf_mmap_data *data;
+       struct perf_buffer *buffer;
        int ret = VM_FAULT_SIGBUS;
 
        if (vmf->flags & FAULT_FLAG_MKWRITE) {
@@ -2576,14 +2597,14 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        rcu_read_lock();
-       data = rcu_dereference(event->data);
-       if (!data)
+       buffer = rcu_dereference(event->buffer);
+       if (!buffer)
                goto unlock;
 
        if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
                goto unlock;
 
-       vmf->page = perf_mmap_to_page(data, vmf->pgoff);
+       vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
        if (!vmf->page)
                goto unlock;
 
@@ -2598,52 +2619,35 @@ unlock:
        return ret;
 }
 
-static void
-perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
-{
-       long max_size = perf_data_size(data);
-
-       if (event->attr.watermark) {
-               data->watermark = min_t(long, max_size,
-                                       event->attr.wakeup_watermark);
-       }
-
-       if (!data->watermark)
-               data->watermark = max_size / 2;
-
-       atomic_set(&data->refcount, 1);
-       rcu_assign_pointer(event->data, data);
-}
-
-static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
+static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
 {
-       struct perf_mmap_data *data;
+       struct perf_buffer *buffer;
 
-       data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
-       perf_mmap_data_free(data);
+       buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
+       perf_buffer_free(buffer);
 }
 
-static struct perf_mmap_data *perf_mmap_data_get(struct perf_event *event)
+static struct perf_buffer *perf_buffer_get(struct perf_event *event)
 {
-       struct perf_mmap_data *data;
+       struct perf_buffer *buffer;
 
        rcu_read_lock();
-       data = rcu_dereference(event->data);
-       if (data) {
-               if (!atomic_inc_not_zero(&data->refcount))
-                       data = NULL;
+       buffer = rcu_dereference(event->buffer);
+       if (buffer) {
+               if (!atomic_inc_not_zero(&buffer->refcount))
+                       buffer = NULL;
        }
        rcu_read_unlock();
 
-       return data;
+       return buffer;
 }
 
-static void perf_mmap_data_put(struct perf_mmap_data *data)
+static void perf_buffer_put(struct perf_buffer *buffer)
 {
-       if (!atomic_dec_and_test(&data->refcount))
+       if (!atomic_dec_and_test(&buffer->refcount))
                return;
 
-       call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
+       call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
 }
 
 static void perf_mmap_open(struct vm_area_struct *vma)
@@ -2658,16 +2662,16 @@ static void perf_mmap_close(struct vm_area_struct *vma)
        struct perf_event *event = vma->vm_file->private_data;
 
        if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-               unsigned long size = perf_data_size(event->data);
+               unsigned long size = perf_data_size(event->buffer);
                struct user_struct *user = event->mmap_user;
-               struct perf_mmap_data *data = event->data;
+               struct perf_buffer *buffer = event->buffer;
 
                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
                vma->vm_mm->locked_vm -= event->mmap_locked;
-               rcu_assign_pointer(event->data, NULL);
+               rcu_assign_pointer(event->buffer, NULL);
                mutex_unlock(&event->mmap_mutex);
 
-               perf_mmap_data_put(data);
+               perf_buffer_put(buffer);
                free_uid(user);
        }
 }
@@ -2685,11 +2689,11 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
        unsigned long user_locked, user_lock_limit;
        struct user_struct *user = current_user();
        unsigned long locked, lock_limit;
-       struct perf_mmap_data *data;
+       struct perf_buffer *buffer;
        unsigned long vma_size;
        unsigned long nr_pages;
        long user_extra, extra;
-       int ret = 0;
+       int ret = 0, flags = 0;
 
        /*
         * Don't allow mmap() of inherited per-task counters. This would
@@ -2706,7 +2710,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
        nr_pages = (vma_size / PAGE_SIZE) - 1;
 
        /*
-        * If we have data pages ensure they're a power-of-two number, so we
+        * If we have buffer pages, ensure they're a power-of-two number, so we
         * can do bitmasks instead of modulo.
         */
        if (nr_pages != 0 && !is_power_of_2(nr_pages))
@@ -2720,9 +2724,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
        WARN_ON_ONCE(event->ctx->parent_ctx);
        mutex_lock(&event->mmap_mutex);
-       if (event->data) {
-               if (event->data->nr_pages == nr_pages)
-                       atomic_inc(&event->data->refcount);
+       if (event->buffer) {
+               if (event->buffer->nr_pages == nr_pages)
+                       atomic_inc(&event->buffer->refcount);
                else
                        ret = -EINVAL;
                goto unlock;
@@ -2752,17 +2756,18 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                goto unlock;
        }
 
-       WARN_ON(event->data);
+       WARN_ON(event->buffer);
+
+       if (vma->vm_flags & VM_WRITE)
+               flags |= PERF_BUFFER_WRITABLE;
 
-       data = perf_mmap_data_alloc(event, nr_pages);
-       if (!data) {
+       buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
+                                  event->cpu, flags);
+       if (!buffer) {
                ret = -ENOMEM;
                goto unlock;
        }
-
-       perf_mmap_data_init(event, data);
-       if (vma->vm_flags & VM_WRITE)
-               event->data->writable = 1;
+       rcu_assign_pointer(event->buffer, buffer);
 
        atomic_long_add(user_extra, &user->locked_vm);
        event->mmap_locked = extra;
@@ -2941,11 +2946,6 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
        return NULL;
 }
 
-__weak
-void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
-{
-}
-
 
 /*
  * We assume there is only KVM supporting the callbacks.
@@ -2971,15 +2971,15 @@ EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
 /*
  * Output
  */
-static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
+static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
                              unsigned long offset, unsigned long head)
 {
        unsigned long mask;
 
-       if (!data->writable)
+       if (!buffer->writable)
                return true;
 
-       mask = perf_data_size(data) - 1;
+       mask = perf_data_size(buffer) - 1;
 
        offset = (offset - tail) & mask;
        head   = (head   - tail) & mask;
@@ -2992,7 +2992,7 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
 
 static void perf_output_wakeup(struct perf_output_handle *handle)
 {
-       atomic_set(&handle->data->poll, POLL_IN);
+       atomic_set(&handle->buffer->poll, POLL_IN);
 
        if (handle->nmi) {
                handle->event->pending_wakeup = 1;
@@ -3012,45 +3012,45 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
  */
 static void perf_output_get_handle(struct perf_output_handle *handle)
 {
-       struct perf_mmap_data *data = handle->data;
+       struct perf_buffer *buffer = handle->buffer;
 
        preempt_disable();
-       local_inc(&data->nest);
-       handle->wakeup = local_read(&data->wakeup);
+       local_inc(&buffer->nest);
+       handle->wakeup = local_read(&buffer->wakeup);
 }
 
 static void perf_output_put_handle(struct perf_output_handle *handle)
 {
-       struct perf_mmap_data *data = handle->data;
+       struct perf_buffer *buffer = handle->buffer;
        unsigned long head;
 
 again:
-       head = local_read(&data->head);
+       head = local_read(&buffer->head);
 
        /*
         * IRQ/NMI can happen here, which means we can miss a head update.
         */
 
-       if (!local_dec_and_test(&data->nest))
+       if (!local_dec_and_test(&buffer->nest))
                goto out;
 
        /*
         * Publish the known good head. Rely on the full barrier implied
-        * by atomic_dec_and_test() order the data->head read and this
+        * by atomic_dec_and_test() to order the buffer->head read and this
         * write.
         */
-       data->user_page->data_head = head;
+       buffer->user_page->data_head = head;
 
        /*
         * Now check if we missed an update, rely on the (compiler)
-        * barrier in atomic_dec_and_test() to re-read data->head.
+        * barrier in atomic_dec_and_test() to re-read buffer->head.
         */
-       if (unlikely(head != local_read(&data->head))) {
-               local_inc(&data->nest);
+       if (unlikely(head != local_read(&buffer->head))) {
+               local_inc(&buffer->nest);
                goto again;
        }
 
-       if (handle->wakeup != local_read(&data->wakeup))
+       if (handle->wakeup != local_read(&buffer->wakeup))
                perf_output_wakeup(handle);
 
  out:
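
The re-check loop closes a specific window; a sketch of the interleaving it handles (same-CPU NMI writer, timeline reads top to bottom):

/*
 *   task writer                          NMI writer
 *   -----------                          ----------
 *   head = local_read(&buffer->head)
 *                                        local_inc(&buffer->nest)
 *                                        ... writes records, moves head ...
 *                                        local_dec_and_test()  -> nonzero
 *   local_dec_and_test()  -> zero
 *   user_page->data_head = head          (stale: misses the NMI's records)
 *   head != local_read(&buffer->head)    -> caught, loop, publish again
 */
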
@@ -3070,12 +3070,12 @@ __always_inline void perf_output_copy(struct perf_output_handle *handle,
                buf += size;
                handle->size -= size;
                if (!handle->size) {
-                       struct perf_mmap_data *data = handle->data;
+                       struct perf_buffer *buffer = handle->buffer;
 
                        handle->page++;
-                       handle->page &= data->nr_pages - 1;
-                       handle->addr = data->data_pages[handle->page];
-                       handle->size = PAGE_SIZE << page_order(data);
+                       handle->page &= buffer->nr_pages - 1;
+                       handle->addr = buffer->data_pages[handle->page];
+                       handle->size = PAGE_SIZE << page_order(buffer);
                }
        } while (len);
 }
@@ -3084,7 +3084,7 @@ int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size,
                      int nmi, int sample)
 {
-       struct perf_mmap_data *data;
+       struct perf_buffer *buffer;
        unsigned long tail, offset, head;
        int have_lost;
        struct {
@@ -3100,19 +3100,19 @@ int perf_output_begin(struct perf_output_handle *handle,
        if (event->parent)
                event = event->parent;
 
-       data = rcu_dereference(event->data);
-       if (!data)
+       buffer = rcu_dereference(event->buffer);
+       if (!buffer)
                goto out;
 
-       handle->data    = data;
+       handle->buffer  = buffer;
        handle->event   = event;
        handle->nmi     = nmi;
        handle->sample  = sample;
 
-       if (!data->nr_pages)
+       if (!buffer->nr_pages)
                goto out;
 
-       have_lost = local_read(&data->lost);
+       have_lost = local_read(&buffer->lost);
        if (have_lost)
                size += sizeof(lost_event);
 
@@ -3124,30 +3124,30 @@ int perf_output_begin(struct perf_output_handle *handle,
                 * tail pointer. So that all reads will be completed before the
                 * write is issued.
                 */
-               tail = ACCESS_ONCE(data->user_page->data_tail);
+               tail = ACCESS_ONCE(buffer->user_page->data_tail);
                smp_rmb();
-               offset = head = local_read(&data->head);
+               offset = head = local_read(&buffer->head);
                head += size;
-               if (unlikely(!perf_output_space(data, tail, offset, head)))
+               if (unlikely(!perf_output_space(buffer, tail, offset, head)))
                        goto fail;
-       } while (local_cmpxchg(&data->head, offset, head) != offset);
+       } while (local_cmpxchg(&buffer->head, offset, head) != offset);
 
-       if (head - local_read(&data->wakeup) > data->watermark)
-               local_add(data->watermark, &data->wakeup);
+       if (head - local_read(&buffer->wakeup) > buffer->watermark)
+               local_add(buffer->watermark, &buffer->wakeup);
 
-       handle->page = offset >> (PAGE_SHIFT + page_order(data));
-       handle->page &= data->nr_pages - 1;
-       handle->size = offset & ((PAGE_SIZE << page_order(data)) - 1);
-       handle->addr = data->data_pages[handle->page];
+       handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
+       handle->page &= buffer->nr_pages - 1;
+       handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
+       handle->addr = buffer->data_pages[handle->page];
        handle->addr += handle->size;
-       handle->size = (PAGE_SIZE << page_order(data)) - handle->size;
+       handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
 
        if (have_lost) {
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
                lost_event.header.size = sizeof(lost_event);
                lost_event.id          = event->id;
-               lost_event.lost        = local_xchg(&data->lost, 0);
+               lost_event.lost        = local_xchg(&buffer->lost, 0);
 
                perf_output_put(handle, lost_event);
        }
@@ -3155,7 +3155,7 @@ int perf_output_begin(struct perf_output_handle *handle,
        return 0;
 
 fail:
-       local_inc(&data->lost);
+       local_inc(&buffer->lost);
        perf_output_put_handle(handle);
 out:
        rcu_read_unlock();
@@ -3166,15 +3166,15 @@ out:
 void perf_output_end(struct perf_output_handle *handle)
 {
        struct perf_event *event = handle->event;
-       struct perf_mmap_data *data = handle->data;
+       struct perf_buffer *buffer = handle->buffer;
 
        int wakeup_events = event->attr.wakeup_events;
 
        if (handle->sample && wakeup_events) {
-               int events = local_inc_return(&data->events);
+               int events = local_inc_return(&buffer->events);
                if (events >= wakeup_events) {
-                       local_sub(wakeup_events, &data->events);
-                       local_inc(&data->wakeup);
+                       local_sub(wakeup_events, &buffer->events);
+                       local_inc(&buffer->wakeup);
                }
        }
 
@@ -3211,7 +3211,7 @@ static void perf_output_read_one(struct perf_output_handle *handle,
        u64 values[4];
        int n = 0;
 
-       values[n++] = atomic64_read(&event->count);
+       values[n++] = perf_event_count(event);
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
                values[n++] = event->total_time_enabled +
                        atomic64_read(&event->child_total_time_enabled);
@@ -3248,7 +3248,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
        if (leader != event)
                leader->pmu->read(leader);
 
-       values[n++] = atomic64_read(&leader->count);
+       values[n++] = perf_event_count(leader);
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(leader);
 
@@ -3260,7 +3260,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
                if (sub != event)
                        sub->pmu->read(sub);
 
-               values[n++] = atomic64_read(&sub->count);
+               values[n++] = perf_event_count(sub);
                if (read_format & PERF_FORMAT_ID)
                        values[n++] = primary_event_id(sub);
 
@@ -3491,7 +3491,7 @@ perf_event_read_event(struct perf_event *event,
 /*
  * task tracking -- fork/exit
  *
- * enabled by: attr.comm | attr.mmap | attr.task
+ * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
  */
 
 struct perf_task_event {
@@ -3541,7 +3541,8 @@ static int perf_event_task_match(struct perf_event *event)
        if (event->cpu != -1 && event->cpu != smp_processor_id())
                return 0;
 
-       if (event->attr.comm || event->attr.mmap || event->attr.task)
+       if (event->attr.comm || event->attr.mmap ||
+           event->attr.mmap_data || event->attr.task)
                return 1;
 
        return 0;
@@ -3766,7 +3767,8 @@ static void perf_event_mmap_output(struct perf_event *event,
 }
 
 static int perf_event_mmap_match(struct perf_event *event,
-                                  struct perf_mmap_event *mmap_event)
+                                  struct perf_mmap_event *mmap_event,
+                                  int executable)
 {
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
@@ -3774,19 +3776,21 @@ static int perf_event_mmap_match(struct perf_event *event,
        if (event->cpu != -1 && event->cpu != smp_processor_id())
                return 0;
 
-       if (event->attr.mmap)
+       if ((!executable && event->attr.mmap_data) ||
+           (executable && event->attr.mmap))
                return 1;
 
        return 0;
 }
 
 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
-                                 struct perf_mmap_event *mmap_event)
+                                 struct perf_mmap_event *mmap_event,
+                                 int executable)
 {
        struct perf_event *event;
 
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-               if (perf_event_mmap_match(event, mmap_event))
+               if (perf_event_mmap_match(event, mmap_event, executable))
                        perf_event_mmap_output(event, mmap_event);
        }
 }
@@ -3830,6 +3834,14 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
                if (!vma->vm_mm) {
                        name = strncpy(tmp, "[vdso]", sizeof(tmp));
                        goto got_name;
+               } else if (vma->vm_start <= vma->vm_mm->start_brk &&
+                               vma->vm_end >= vma->vm_mm->brk) {
+                       name = strncpy(tmp, "[heap]", sizeof(tmp));
+                       goto got_name;
+               } else if (vma->vm_start <= vma->vm_mm->start_stack &&
+                               vma->vm_end >= vma->vm_mm->start_stack) {
+                       name = strncpy(tmp, "[stack]", sizeof(tmp));
+                       goto got_name;
                }
 
                name = strncpy(tmp, "//anon", sizeof(tmp));
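
With mmap_data in play, anonymous regions now get synthesized names so samples landing in them remain attributable. Illustrative record names (addresses made up):

/*
 *   PERF_RECORD_MMAP  0x00602000  [heap]    the brk region
 *   PERF_RECORD_MMAP  0x7ffd1000  [stack]   the task's main stack
 *   PERF_RECORD_MMAP  0x7f3a2000  //anon    any other anonymous mapping
 */
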
@@ -3846,17 +3858,17 @@ got_name:
 
        rcu_read_lock();
        cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
+       perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
        ctx = rcu_dereference(current->perf_event_ctxp);
        if (ctx)
-               perf_event_mmap_ctx(ctx, mmap_event);
+               perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC);
        put_cpu_var(perf_cpu_context);
        rcu_read_unlock();
 
        kfree(buf);
 }
 
-void __perf_event_mmap(struct vm_area_struct *vma)
+void perf_event_mmap(struct vm_area_struct *vma)
 {
        struct perf_mmap_event mmap_event;
 
@@ -4018,14 +4030,14 @@ static u64 perf_swevent_set_period(struct perf_event *event)
        hwc->last_period = hwc->sample_period;
 
 again:
-       old = val = atomic64_read(&hwc->period_left);
+       old = val = local64_read(&hwc->period_left);
        if (val < 0)
                return 0;
 
        nr = div64_u64(period + val, period);
        offset = nr * period;
        val -= offset;
-       if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
+       if (local64_cmpxchg(&hwc->period_left, old, val) != old)
                goto again;
 
        return nr;
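
A worked example of the period arithmetic, assuming sample_period == 1000 and period_left == 2500 on entry:

/*
 *   nr     = (1000 + 2500) / 1000 = 3      overflow periods elapsed
 *   offset = 3 * 1000             = 3000
 *   val    = 2500 - 3000          = -500   new period_left
 *
 * i.e. three overflows are accounted and the counter is re-armed; the
 * local64_cmpxchg() retries if an NMI raced and moved period_left
 * underneath us.
 */
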
@@ -4064,7 +4076,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
 {
        struct hw_perf_event *hwc = &event->hw;
 
-       atomic64_add(nr, &event->count);
+       local64_add(nr, &event->count);
 
        if (!regs)
                return;
@@ -4075,7 +4087,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
        if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
                return perf_swevent_overflow(event, 1, nmi, data, regs);
 
-       if (atomic64_add_negative(nr, &hwc->period_left))
+       if (local64_add_negative(nr, &hwc->period_left))
                return;
 
        perf_swevent_overflow(event, 0, nmi, data, regs);
@@ -4213,14 +4225,12 @@ int perf_swevent_get_recursion_context(void)
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-void perf_swevent_put_recursion_context(int rctx)
+inline void perf_swevent_put_recursion_context(int rctx)
 {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        barrier();
        cpuctx->recursion[rctx]--;
 }
-EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
-
 
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
                            struct pt_regs *regs, u64 addr)
@@ -4368,8 +4378,8 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
        u64 now;
 
        now = cpu_clock(cpu);
-       prev = atomic64_xchg(&event->hw.prev_count, now);
-       atomic64_add(now - prev, &event->count);
+       prev = local64_xchg(&event->hw.prev_count, now);
+       local64_add(now - prev, &event->count);
 }
 
 static int cpu_clock_perf_event_enable(struct perf_event *event)
@@ -4377,7 +4387,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        int cpu = raw_smp_processor_id();
 
-       atomic64_set(&hwc->prev_count, cpu_clock(cpu));
+       local64_set(&hwc->prev_count, cpu_clock(cpu));
        perf_swevent_start_hrtimer(event);
 
        return 0;
@@ -4409,9 +4419,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
        u64 prev;
        s64 delta;
 
-       prev = atomic64_xchg(&event->hw.prev_count, now);
+       prev = local64_xchg(&event->hw.prev_count, now);
        delta = now - prev;
-       atomic64_add(delta, &event->count);
+       local64_add(delta, &event->count);
 }
 
 static int task_clock_perf_event_enable(struct perf_event *event)
@@ -4421,7 +4431,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
 
        now = event->ctx->time;
 
-       atomic64_set(&hwc->prev_count, now);
+       local64_set(&hwc->prev_count, now);
 
        perf_swevent_start_hrtimer(event);
 
@@ -4601,7 +4611,7 @@ static int perf_tp_event_match(struct perf_event *event,
 }
 
 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
-                  struct pt_regs *regs, struct hlist_head *head)
+                  struct pt_regs *regs, struct hlist_head *head, int rctx)
 {
        struct perf_sample_data data;
        struct perf_event *event;
@@ -4615,12 +4625,12 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
        perf_sample_data_init(&data, addr);
        data.raw = &raw;
 
-       rcu_read_lock();
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                if (perf_tp_event_match(event, &data, regs))
                        perf_swevent_add(event, count, 1, &data, regs);
        }
-       rcu_read_unlock();
+
+       perf_swevent_put_recursion_context(rctx);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
 
@@ -4864,7 +4874,7 @@ perf_event_alloc(struct perf_event_attr *attr,
                hwc->sample_period = 1;
        hwc->last_period = hwc->sample_period;
 
-       atomic64_set(&hwc->period_left, hwc->sample_period);
+       local64_set(&hwc->period_left, hwc->sample_period);
 
        /*
         * we currently do not support PERF_FORMAT_GROUP on inherited events
@@ -4913,7 +4923,7 @@ done:
 
        if (!event->parent) {
                atomic_inc(&nr_events);
-               if (event->attr.mmap)
+               if (event->attr.mmap || event->attr.mmap_data)
                        atomic_inc(&nr_mmap_events);
                if (event->attr.comm)
                        atomic_inc(&nr_comm_events);
@@ -5007,7 +5017,7 @@ err_size:
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-       struct perf_mmap_data *data = NULL, *old_data = NULL;
+       struct perf_buffer *buffer = NULL, *old_buffer = NULL;
        int ret = -EINVAL;
 
        if (!output_event)
@@ -5037,19 +5047,19 @@ set:
 
        if (output_event) {
                /* get the buffer we want to redirect to */
-               data = perf_mmap_data_get(output_event);
-               if (!data)
+               buffer = perf_buffer_get(output_event);
+               if (!buffer)
                        goto unlock;
        }
 
-       old_data = event->data;
-       rcu_assign_pointer(event->data, data);
+       old_buffer = event->buffer;
+       rcu_assign_pointer(event->buffer, buffer);
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
 
-       if (old_data)
-               perf_mmap_data_put(old_data);
+       if (old_buffer)
+               perf_buffer_put(old_buffer);
 out:
        return ret;
 }
@@ -5298,7 +5308,7 @@ inherit_event(struct perf_event *parent_event,
                hwc->sample_period = sample_period;
                hwc->last_period   = sample_period;
 
-               atomic64_set(&hwc->period_left, sample_period);
+               local64_set(&hwc->period_left, sample_period);
        }
 
        child_event->overflow_handler = parent_event->overflow_handler;
@@ -5359,12 +5369,12 @@ static void sync_child_event(struct perf_event *child_event,
        if (child_event->attr.inherit_stat)
                perf_event_read_event(child_event, child);
 
-       child_val = atomic64_read(&child_event->count);
+       child_val = perf_event_count(child_event);
 
        /*
         * Add back the child's count to the parent's count:
         */
-       atomic64_add(child_val, &parent_event->count);
+       atomic64_add(child_val, &parent_event->child_count);
        atomic64_add(child_event->total_time_enabled,
                     &parent_event->child_total_time_enabled);
        atomic64_add(child_event->total_time_running,
diff --git a/kernel/sched.c b/kernel/sched.c
index f8b8996228dd7d8de9ee49a845a193921f1b8a5d..3c5d34a4e93271ace2197987ef6bf555f94f53cb 100644
@@ -3717,7 +3717,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched preempt_schedule(void)
+asmlinkage void __sched notrace preempt_schedule(void)
 {
        struct thread_info *ti = current_thread_info();
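
The notrace here is one half of a pair with the kernel/trace changes further down: if the function tracer's own preempt_enable() could call a traced preempt_schedule(), the tracer would recurse into itself. Sketched:

/*
 *   ftrace handler
 *     -> preempt_enable()            re-enables preemption on exit
 *       -> preempt_schedule()        traced again if not notrace
 *         -> ftrace handler          recursion
 *
 * With preempt_schedule() notrace (and the _notrace preempt-count
 * helpers below keeping the counter updates invisible too), plain
 * preempt_disable_notrace()/preempt_enable_notrace() becomes safe
 * inside tracer callbacks.
 */
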
 
@@ -3729,9 +3729,9 @@ asmlinkage void __sched preempt_schedule(void)
                return;
 
        do {
-               add_preempt_count(PREEMPT_ACTIVE);
+               add_preempt_count_notrace(PREEMPT_ACTIVE);
                schedule();
-               sub_preempt_count(PREEMPT_ACTIVE);
+               sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
                /*
                 * Check again in case we missed a preemption opportunity
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8b1797c4545b41c00cca7b3e5e268cf8b8f0e164..f669092fdead5fecbf70a885431773f1f0e51621 100644
@@ -229,23 +229,6 @@ config FTRACE_SYSCALLS
        help
          Basic tracer to catch the syscall entry and exit events.
 
-config BOOT_TRACER
-       bool "Trace boot initcalls"
-       select GENERIC_TRACER
-       select CONTEXT_SWITCH_TRACER
-       help
-         This tracer helps developers to optimize boot times: it records
-         the timings of the initcalls and traces key events and the identity
-         of tasks that can cause boot delays, such as context-switches.
-
-         Its aim is to be parsed by the scripts/bootgraph.pl tool to
-         produce pretty graphics about boot inefficiencies, giving a visual
-         representation of the delays during initcalls - but the raw
-         /debug/tracing/trace text output is readable too.
-
-         You must pass in initcall_debug and ftrace=initcall to the kernel
-         command line to enable this on bootup.
-
 config TRACE_BRANCH_PROFILING
        bool
        select GENERIC_TRACER
@@ -371,26 +354,6 @@ config STACK_TRACER
 
          Say N if unsure.
 
-config KMEMTRACE
-       bool "Trace SLAB allocations"
-       select GENERIC_TRACER
-       help
-         kmemtrace provides tracing for slab allocator functions, such as
-         kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
-         data is then fed to the userspace application in order to analyse
-         allocation hotspots, internal fragmentation and so on, making it
-         possible to see how well an allocator performs, as well as debug
-         and profile kernel code.
-
-         This requires an userspace application to use. See
-         Documentation/trace/kmemtrace.txt for more information.
-
-         Saying Y will make the kernel somewhat larger and slower. However,
-         if you disable kmemtrace at run-time or boot-time, the performance
-         impact is minimal (depending on the arch the kernel is built for).
-
-         If unsure, say N.
-
 config WORKQUEUE_TRACER
        bool "Trace workqueues"
        select GENERIC_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index ffb1a5b0550e3f071c568695a386a10ea84290b8..469a1c7555a5d32f39da433e8df64a490aaa72e7 100644
@@ -38,10 +38,8 @@ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
 obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
-obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
-obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
 obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
 obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
 ifeq ($(CONFIG_BLOCK),y)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6d2cb14f9449083c9a2e78f507b9c1255c8e7ca2..0d88ce9b9fb8828c9a81fdffcd47763ae5cc2543 100644
@@ -1883,7 +1883,6 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
        struct hlist_head *hhd;
        struct hlist_node *n;
        unsigned long key;
-       int resched;
 
        key = hash_long(ip, FTRACE_HASH_BITS);
 
@@ -1897,12 +1896,12 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
         * period. This syncs the hash iteration and freeing of items
         * on the hash. rcu_read_lock is too dangerous here.
         */
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
        hlist_for_each_entry_rcu(entry, n, hhd, node) {
                if (entry->ip == ip)
                        entry->ops->func(ip, parent_ip, &entry->data);
        }
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_probe_ops __read_mostly =
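
For reference, the removed helper pair worked roughly like this (a from-memory sketch of the old helpers in kernel/trace/trace.h, not part of this diff):

/*
 *   static inline int ftrace_preempt_disable(void)
 *   {
 *           int resched = need_resched();   was NEED_RESCHED already set?
 *           preempt_disable_notrace();
 *           return resched;
 *   }
 *
 *   static inline void ftrace_preempt_enable(int resched)
 *   {
 *           if (resched)
 *                   preempt_enable_no_resched_notrace();
 *           else
 *                   preempt_enable_notrace();
 *   }
 *
 * It avoided entering schedule() from a tracer invoked in contexts where
 * scheduling would recurse or deadlock; with preempt_schedule() now
 * notrace, the plain _notrace pair is sufficient.
 */
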
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
deleted file mode 100644
index bbfc1bb..0000000
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * Memory allocator tracing
- *
- * Copyright (C) 2008 Eduard - Gabriel Munteanu
- * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
- * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
- */
-
-#include <linux/tracepoint.h>
-#include <linux/seq_file.h>
-#include <linux/debugfs.h>
-#include <linux/dcache.h>
-#include <linux/fs.h>
-
-#include <linux/kmemtrace.h>
-
-#include "trace_output.h"
-#include "trace.h"
-
-/* Select an alternative, minimalistic output than the original one */
-#define TRACE_KMEM_OPT_MINIMAL 0x1
-
-static struct tracer_opt kmem_opts[] = {
-       /* Default disable the minimalistic output */
-       { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
-       { }
-};
-
-static struct tracer_flags kmem_tracer_flags = {
-       .val                    = 0,
-       .opts                   = kmem_opts
-};
-
-static struct trace_array *kmemtrace_array;
-
-/* Trace allocations */
-static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
-                                  unsigned long call_site,
-                                  const void *ptr,
-                                  size_t bytes_req,
-                                  size_t bytes_alloc,
-                                  gfp_t gfp_flags,
-                                  int node)
-{
-       struct ftrace_event_call *call = &event_kmem_alloc;
-       struct trace_array *tr = kmemtrace_array;
-       struct kmemtrace_alloc_entry *entry;
-       struct ring_buffer_event *event;
-
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
-       if (!event)
-               return;
-
-       entry = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, 0, 0);
-
-       entry->ent.type         = TRACE_KMEM_ALLOC;
-       entry->type_id          = type_id;
-       entry->call_site        = call_site;
-       entry->ptr              = ptr;
-       entry->bytes_req        = bytes_req;
-       entry->bytes_alloc      = bytes_alloc;
-       entry->gfp_flags        = gfp_flags;
-       entry->node             = node;
-
-       if (!filter_check_discard(call, entry, tr->buffer, event))
-               ring_buffer_unlock_commit(tr->buffer, event);
-
-       trace_wake_up();
-}
-
-static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
-                                 unsigned long call_site,
-                                 const void *ptr)
-{
-       struct ftrace_event_call *call = &event_kmem_free;
-       struct trace_array *tr = kmemtrace_array;
-       struct kmemtrace_free_entry *entry;
-       struct ring_buffer_event *event;
-
-       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
-       if (!event)
-               return;
-       entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, 0, 0);
-
-       entry->ent.type         = TRACE_KMEM_FREE;
-       entry->type_id          = type_id;
-       entry->call_site        = call_site;
-       entry->ptr              = ptr;
-
-       if (!filter_check_discard(call, entry, tr->buffer, event))
-               ring_buffer_unlock_commit(tr->buffer, event);
-
-       trace_wake_up();
-}
-
-static void kmemtrace_kmalloc(void *ignore,
-                             unsigned long call_site,
-                             const void *ptr,
-                             size_t bytes_req,
-                             size_t bytes_alloc,
-                             gfp_t gfp_flags)
-{
-       kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
-                       bytes_req, bytes_alloc, gfp_flags, -1);
-}
-
-static void kmemtrace_kmem_cache_alloc(void *ignore,
-                                      unsigned long call_site,
-                                      const void *ptr,
-                                      size_t bytes_req,
-                                      size_t bytes_alloc,
-                                      gfp_t gfp_flags)
-{
-       kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
-                       bytes_req, bytes_alloc, gfp_flags, -1);
-}
-
-static void kmemtrace_kmalloc_node(void *ignore,
-                                  unsigned long call_site,
-                                  const void *ptr,
-                                  size_t bytes_req,
-                                  size_t bytes_alloc,
-                                  gfp_t gfp_flags,
-                                  int node)
-{
-       kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
-                       bytes_req, bytes_alloc, gfp_flags, node);
-}
-
-static void kmemtrace_kmem_cache_alloc_node(void *ignore,
-                                           unsigned long call_site,
-                                           const void *ptr,
-                                           size_t bytes_req,
-                                           size_t bytes_alloc,
-                                           gfp_t gfp_flags,
-                                           int node)
-{
-       kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
-                       bytes_req, bytes_alloc, gfp_flags, node);
-}
-
-static void
-kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr)
-{
-       kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
-}
-
-static void kmemtrace_kmem_cache_free(void *ignore,
-                                     unsigned long call_site, const void *ptr)
-{
-       kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
-}
-
-static int kmemtrace_start_probes(void)
-{
-       int err;
-
-       err = register_trace_kmalloc(kmemtrace_kmalloc, NULL);
-       if (err)
-               return err;
-       err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
-       if (err)
-               return err;
-       err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
-       if (err)
-               return err;
-       err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
-       if (err)
-               return err;
-       err = register_trace_kfree(kmemtrace_kfree, NULL);
-       if (err)
-               return err;
-       err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);
-
-       return err;
-}
-
-static void kmemtrace_stop_probes(void)
-{
-       unregister_trace_kmalloc(kmemtrace_kmalloc, NULL);
-       unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
-       unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
-       unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
-       unregister_trace_kfree(kmemtrace_kfree, NULL);
-       unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);
-}
-
-static int kmem_trace_init(struct trace_array *tr)
-{
-       kmemtrace_array = tr;
-
-       tracing_reset_online_cpus(tr);
-
-       kmemtrace_start_probes();
-
-       return 0;
-}
-
-static void kmem_trace_reset(struct trace_array *tr)
-{
-       kmemtrace_stop_probes();
-}
-
-static void kmemtrace_headers(struct seq_file *s)
-{
-       /* Don't need headers for the original kmemtrace output */
-       if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
-               return;
-
-       seq_printf(s, "#\n");
-       seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
-                       "      POINTER         NODE    CALLER\n");
-       seq_printf(s, "# FREE   |      |     |       |       "
-                       "       |   |            |        |\n");
-       seq_printf(s, "# |\n\n");
-}
-
-/*
- * The following functions give the original output from kmemtrace,
- * plus the origin CPU, since reordering occurs in-kernel now.
- */
-
-#define KMEMTRACE_USER_ALLOC   0
-#define KMEMTRACE_USER_FREE    1
-
-struct kmemtrace_user_event {
-       u8                      event_id;
-       u8                      type_id;
-       u16                     event_size;
-       u32                     cpu;
-       u64                     timestamp;
-       unsigned long           call_site;
-       unsigned long           ptr;
-};
-
-struct kmemtrace_user_event_alloc {
-       size_t                  bytes_req;
-       size_t                  bytes_alloc;
-       unsigned                gfp_flags;
-       int                     node;
-};
-
-static enum print_line_t
-kmemtrace_print_alloc(struct trace_iterator *iter, int flags,
-                     struct trace_event *event)
-{
-       struct trace_seq *s = &iter->seq;
-       struct kmemtrace_alloc_entry *entry;
-       int ret;
-
-       trace_assign_type(entry, iter->ent);
-
-       ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu "
-           "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
-           entry->type_id, (void *)entry->call_site, (unsigned long)entry->ptr,
-           (unsigned long)entry->bytes_req, (unsigned long)entry->bytes_alloc,
-           (unsigned long)entry->gfp_flags, entry->node);
-
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-       return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t
-kmemtrace_print_free(struct trace_iterator *iter, int flags,
-                    struct trace_event *event)
-{
-       struct trace_seq *s = &iter->seq;
-       struct kmemtrace_free_entry *entry;
-       int ret;
-
-       trace_assign_type(entry, iter->ent);
-
-       ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n",
-                              entry->type_id, (void *)entry->call_site,
-                              (unsigned long)entry->ptr);
-
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-       return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t
-kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags,
-                          struct trace_event *event)
-{
-       struct trace_seq *s = &iter->seq;
-       struct kmemtrace_alloc_entry *entry;
-       struct kmemtrace_user_event *ev;
-       struct kmemtrace_user_event_alloc *ev_alloc;
-
-       trace_assign_type(entry, iter->ent);
-
-       ev = trace_seq_reserve(s, sizeof(*ev));
-       if (!ev)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       ev->event_id            = KMEMTRACE_USER_ALLOC;
-       ev->type_id             = entry->type_id;
-       ev->event_size          = sizeof(*ev) + sizeof(*ev_alloc);
-       ev->cpu                 = iter->cpu;
-       ev->timestamp           = iter->ts;
-       ev->call_site           = entry->call_site;
-       ev->ptr                 = (unsigned long)entry->ptr;
-
-       ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
-       if (!ev_alloc)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       ev_alloc->bytes_req     = entry->bytes_req;
-       ev_alloc->bytes_alloc   = entry->bytes_alloc;
-       ev_alloc->gfp_flags     = entry->gfp_flags;
-       ev_alloc->node          = entry->node;
-
-       return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t
-kmemtrace_print_free_user(struct trace_iterator *iter, int flags,
-                         struct trace_event *event)
-{
-       struct trace_seq *s = &iter->seq;
-       struct kmemtrace_free_entry *entry;
-       struct kmemtrace_user_event *ev;
-
-       trace_assign_type(entry, iter->ent);
-
-       ev = trace_seq_reserve(s, sizeof(*ev));
-       if (!ev)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       ev->event_id            = KMEMTRACE_USER_FREE;
-       ev->type_id             = entry->type_id;
-       ev->event_size          = sizeof(*ev);
-       ev->cpu                 = iter->cpu;
-       ev->timestamp           = iter->ts;
-       ev->call_site           = entry->call_site;
-       ev->ptr                 = (unsigned long)entry->ptr;
-
-       return TRACE_TYPE_HANDLED;
-}
-
-/* The two other following provide a more minimalistic output */
-static enum print_line_t
-kmemtrace_print_alloc_compress(struct trace_iterator *iter)
-{
-       struct kmemtrace_alloc_entry *entry;
-       struct trace_seq *s = &iter->seq;
-       int ret;
-
-       trace_assign_type(entry, iter->ent);
-
-       /* Alloc entry */
-       ret = trace_seq_printf(s, "  +      ");
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       /* Type */
-       switch (entry->type_id) {
-       case KMEMTRACE_TYPE_KMALLOC:
-               ret = trace_seq_printf(s, "K   ");
-               break;
-       case KMEMTRACE_TYPE_CACHE:
-               ret = trace_seq_printf(s, "C   ");
-               break;
-       case KMEMTRACE_TYPE_PAGES:
-               ret = trace_seq_printf(s, "P   ");
-               break;
-       default:
-               ret = trace_seq_printf(s, "?   ");
-       }
-
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       /* Requested */
-       ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       /* Allocated */
-       ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       /* Flags
-        * TODO: would be better to see the name of the GFP flag names
-        */
-       ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       /* Pointer to allocated */
-       ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       /* Node and call site*/
-       ret = trace_seq_printf(s, "%4d   %pf\n", entry->node,
-                                                (void *)entry->call_site);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t
-kmemtrace_print_free_compress(struct trace_iterator *iter)
-{
-       struct kmemtrace_free_entry *entry;
-       struct trace_seq *s = &iter->seq;
-       int ret;
-
-       trace_assign_type(entry, iter->ent);
-
-       /* Free entry */
-       ret = trace_seq_printf(s, "  -      ");
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       /* Type */
-       switch (entry->type_id) {
-       case KMEMTRACE_TYPE_KMALLOC:
-               ret = trace_seq_printf(s, "K     ");
-               break;
-       case KMEMTRACE_TYPE_CACHE:
-               ret = trace_seq_printf(s, "C     ");
-               break;
-       case KMEMTRACE_TYPE_PAGES:
-               ret = trace_seq_printf(s, "P     ");
-               break;
-       default:
-               ret = trace_seq_printf(s, "?     ");
-       }
-
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       /* Skip requested/allocated/flags */
-       ret = trace_seq_printf(s, "                       ");
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       /* Pointer to allocated */
-       ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       /* Skip node and print call site*/
-       ret = trace_seq_printf(s, "       %pf\n", (void *)entry->call_site);
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-
-       return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
-{
-       struct trace_entry *entry = iter->ent;
-
-       if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
-               return TRACE_TYPE_UNHANDLED;
-
-       switch (entry->type) {
-       case TRACE_KMEM_ALLOC:
-               return kmemtrace_print_alloc_compress(iter);
-       case TRACE_KMEM_FREE:
-               return kmemtrace_print_free_compress(iter);
-       default:
-               return TRACE_TYPE_UNHANDLED;
-       }
-}
-
-static struct trace_event_functions kmem_trace_alloc_funcs = {
-       .trace                  = kmemtrace_print_alloc,
-       .binary                 = kmemtrace_print_alloc_user,
-};
-
-static struct trace_event kmem_trace_alloc = {
-       .type                   = TRACE_KMEM_ALLOC,
-       .funcs                  = &kmem_trace_alloc_funcs,
-};
-
-static struct trace_event_functions kmem_trace_free_funcs = {
-       .trace                  = kmemtrace_print_free,
-       .binary                 = kmemtrace_print_free_user,
-};
-
-static struct trace_event kmem_trace_free = {
-       .type                   = TRACE_KMEM_FREE,
-       .funcs                  = &kmem_trace_free_funcs,
-};
-
-static struct tracer kmem_tracer __read_mostly = {
-       .name                   = "kmemtrace",
-       .init                   = kmem_trace_init,
-       .reset                  = kmem_trace_reset,
-       .print_line             = kmemtrace_print_line,
-       .print_header           = kmemtrace_headers,
-       .flags                  = &kmem_tracer_flags
-};
-
-void kmemtrace_init(void)
-{
-       /* earliest opportunity to start kmem tracing */
-}
-
-static int __init init_kmem_tracer(void)
-{
-       if (!register_ftrace_event(&kmem_trace_alloc)) {
-               pr_warning("Warning: could not register kmem events\n");
-               return 1;
-       }
-
-       if (!register_ftrace_event(&kmem_trace_free)) {
-               pr_warning("Warning: could not register kmem events\n");
-               return 1;
-       }
-
-       if (register_tracer(&kmem_tracer) != 0) {
-               pr_warning("Warning: could not register the kmem tracer\n");
-               return 1;
-       }
-
-       return 0;
-}
-device_initcall(init_kmem_tracer);
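kmemtrace attached its probes to the generic slab tracepoints (see kmemtrace_start_probes() above), and those tracepoints outlive the tracer: they are exported through <trace/events/kmem.h>, which the mm/slob.c hunk below now includes directly. A minimal sketch of a hypothetical module attaching a probe the same way; the probe signature mirrors the deleted kmemtrace_kmalloc():

	#include <linux/module.h>
	#include <trace/events/kmem.h>

	static void my_kmalloc_probe(void *ignore, unsigned long call_site,
				     const void *ptr, size_t bytes_req,
				     size_t bytes_alloc, gfp_t gfp_flags)
	{
		pr_info("kmalloc: %zu/%zu bytes at %pf\n",
			bytes_req, bytes_alloc, (void *)call_site);
	}

	static int __init my_probe_init(void)
	{
		/* Same register/unregister pairing as kmemtrace used. */
		return register_trace_kmalloc(my_kmalloc_probe, NULL);
	}

	static void __exit my_probe_exit(void)
	{
		unregister_trace_kmalloc(my_kmalloc_probe, NULL);
	}

	module_init(my_probe_init);
	module_exit(my_probe_exit);
	MODULE_LICENSE("GPL");
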
index 1da7b6ea8b85d70dde15b50369202cb68acab0bf..28d0615a513f1bf322b679e4ce347fa9b007449d 100644 (file)
@@ -2242,8 +2242,6 @@ static void trace_recursive_unlock(void)
 
 #endif
 
-static DEFINE_PER_CPU(int, rb_need_resched);
-
 /**
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
@@ -2264,13 +2262,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
-       int cpu, resched;
+       int cpu;
 
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return NULL;
 
        /* If we are tracing schedule, we don't want to recurse */
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
 
        if (atomic_read(&buffer->record_disabled))
                goto out_nocheck;
@@ -2295,21 +2293,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
        if (!event)
                goto out;
 
-       /*
-        * Need to store resched state on this cpu.
-        * Only the first needs to.
-        */
-
-       if (preempt_count() == 1)
-               per_cpu(rb_need_resched, cpu) = resched;
-
        return event;
 
  out:
        trace_recursive_unlock();
 
  out_nocheck:
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
        return NULL;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
@@ -2355,13 +2345,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
        trace_recursive_unlock();
 
-       /*
-        * Only the last preempt count needs to restore preemption.
-        */
-       if (preempt_count() == 1)
-               ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-       else
-               preempt_enable_no_resched_notrace();
+       preempt_enable_notrace();
 
        return 0;
 }
@@ -2469,13 +2453,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 
        trace_recursive_unlock();
 
-       /*
-        * Only the last preempt count needs to restore preemption.
-        */
-       if (preempt_count() == 1)
-               ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-       else
-               preempt_enable_no_resched_notrace();
+       preempt_enable_notrace();
 
 }
 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
@@ -2501,12 +2479,12 @@ int ring_buffer_write(struct ring_buffer *buffer,
        struct ring_buffer_event *event;
        void *body;
        int ret = -EBUSY;
-       int cpu, resched;
+       int cpu;
 
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return -EBUSY;
 
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
 
        if (atomic_read(&buffer->record_disabled))
                goto out;
@@ -2536,7 +2514,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
        ret = 0;
  out:
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
 
        return ret;
 }
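With the per-cpu rb_need_resched bookkeeping removed, all of the preemption handling lives inside the reserve/commit pair itself, and nested commits no longer need to know which level they are at. A minimal producer sketch of the resulting pattern (hypothetical my_entry payload; this is the same shape the deleted kmemtrace_alloc() used):

	static void my_write_event(struct ring_buffer *buffer)
	{
		struct ring_buffer_event *event;
		struct my_entry *entry;		/* hypothetical payload */

		event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
		if (!event)
			return;		/* disabled, recursing, or full */
		entry = ring_buffer_event_data(event);
		/* ... fill *entry ... */
		ring_buffer_unlock_commit(buffer, event);
	}
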
index 086d3631680505f20d0820371c50b43f4056e316..8683dec6946b8d2aa8f64e0cd288de1713d72d82 100644 (file)
@@ -1404,7 +1404,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        struct bprint_entry *entry;
        unsigned long flags;
        int disable;
-       int resched;
        int cpu, len = 0, size, pc;
 
        if (unlikely(tracing_selftest_running || tracing_disabled))
@@ -1414,7 +1413,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        pause_graph_tracing();
 
        pc = preempt_count();
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
 
@@ -1452,7 +1451,7 @@ out_unlock:
 
 out:
        atomic_dec_return(&data->disabled);
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
        unpause_graph_tracing();
 
        return len;
@@ -4597,9 +4596,6 @@ __init static int tracer_alloc_buffers(void)
 
        register_tracer(&nop_trace);
        current_trace = &nop_trace;
-#ifdef CONFIG_BOOT_TRACER
-       register_tracer(&boot_tracer);
-#endif
        /* All seems OK, enable tracing */
        tracing_disabled = 0;
 
index 2cd96399463f88d51a5683802cc34f74343f881f..01ce088c1cdfeaf0b5b1ad3944bb9ec70012d597 100644 (file)
@@ -9,10 +9,7 @@
 #include <linux/mmiotrace.h>
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
-#include <trace/boot.h>
-#include <linux/kmemtrace.h>
 #include <linux/hw_breakpoint.h>
-
 #include <linux/trace_seq.h>
 #include <linux/ftrace_event.h>
 
@@ -29,26 +26,15 @@ enum trace_type {
        TRACE_MMIO_RW,
        TRACE_MMIO_MAP,
        TRACE_BRANCH,
-       TRACE_BOOT_CALL,
-       TRACE_BOOT_RET,
        TRACE_GRAPH_RET,
        TRACE_GRAPH_ENT,
        TRACE_USER_STACK,
-       TRACE_KMEM_ALLOC,
-       TRACE_KMEM_FREE,
        TRACE_BLK,
        TRACE_KSYM,
 
        __TRACE_LAST_TYPE,
 };
 
-enum kmemtrace_type_id {
-       KMEMTRACE_TYPE_KMALLOC = 0,     /* kmalloc() or kfree(). */
-       KMEMTRACE_TYPE_CACHE,           /* kmem_cache_*(). */
-       KMEMTRACE_TYPE_PAGES,           /* __get_free_pages() and friends. */
-};
-
-extern struct tracer boot_tracer;
 
 #undef __field
 #define __field(type, item)            type    item;
@@ -209,17 +195,11 @@ extern void __ftrace_bad_type(void);
                          TRACE_MMIO_RW);                               \
                IF_ASSIGN(var, ent, struct trace_mmiotrace_map,         \
                          TRACE_MMIO_MAP);                              \
-               IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
-               IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
                IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
                IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,      \
                          TRACE_GRAPH_ENT);             \
                IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,      \
                          TRACE_GRAPH_RET);             \
-               IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,       \
-                         TRACE_KMEM_ALLOC);    \
-               IF_ASSIGN(var, ent, struct kmemtrace_free_entry,        \
-                         TRACE_KMEM_FREE);     \
                IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
                __ftrace_bad_type();                                    \
        } while (0)
@@ -628,54 +608,6 @@ enum trace_iterator_flags {
 
 extern struct tracer nop_trace;
 
-/**
- * ftrace_preempt_disable - disable preemption scheduler safe
- *
- * When tracing can happen inside the scheduler, there exists
- * cases that the tracing might happen before the need_resched
- * flag is checked. If this happens and the tracer calls
- * preempt_enable (after a disable), a schedule might take place
- * causing an infinite recursion.
- *
- * To prevent this, we read the need_resched flag before
- * disabling preemption. When we want to enable preemption we
- * check the flag, if it is set, then we call preempt_enable_no_resched.
- * Otherwise, we call preempt_enable.
- *
- * The rational for doing the above is that if need_resched is set
- * and we have yet to reschedule, we are either in an atomic location
- * (where we do not need to check for scheduling) or we are inside
- * the scheduler and do not want to resched.
- */
-static inline int ftrace_preempt_disable(void)
-{
-       int resched;
-
-       resched = need_resched();
-       preempt_disable_notrace();
-
-       return resched;
-}
-
-/**
- * ftrace_preempt_enable - enable preemption scheduler safe
- * @resched: the return value from ftrace_preempt_disable
- *
- * This is a scheduler safe way to enable preemption and not miss
- * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we are either inside an atomic or
- * are inside the scheduler (we would have already scheduled
- * otherwise). In this case, we do not want to call normal
- * preempt_enable, but preempt_enable_no_resched instead.
- */
-static inline void ftrace_preempt_enable(int resched)
-{
-       if (resched)
-               preempt_enable_no_resched_notrace();
-       else
-               preempt_enable_notrace();
-}
-
 #ifdef CONFIG_BRANCH_TRACER
 extern int enable_branch_tracing(struct trace_array *tr);
 extern void disable_branch_tracing(void);
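The helpers removed above sampled need_resched() before disabling preemption so that the matching enable could skip the reschedule check when called from inside the scheduler. This series converts every call site to the plain notrace primitives; the mechanical shape of that conversion, as a sketch:

	/* Before, with the removed helpers: */
	int resched;

	resched = ftrace_preempt_disable();
	/* ... emit trace data ... */
	ftrace_preempt_enable(resched);

	/* After: no saved state to thread through the call site. */
	preempt_disable_notrace();
	/* ... emit trace data ... */
	preempt_enable_notrace();
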
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
deleted file mode 100644 (file)
index c21d5f3..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * ring buffer based initcalls tracer
- *
- * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <linux/ftrace.h>
-#include <linux/kallsyms.h>
-#include <linux/time.h>
-
-#include "trace.h"
-#include "trace_output.h"
-
-static struct trace_array *boot_trace;
-static bool pre_initcalls_finished;
-
-/* Tells the boot tracer that the pre_smp_initcalls are finished.
- * So we are ready .
- * It doesn't enable sched events tracing however.
- * You have to call enable_boot_trace to do so.
- */
-void start_boot_trace(void)
-{
-       pre_initcalls_finished = true;
-}
-
-void enable_boot_trace(void)
-{
-       if (boot_trace && pre_initcalls_finished)
-               tracing_start_sched_switch_record();
-}
-
-void disable_boot_trace(void)
-{
-       if (boot_trace && pre_initcalls_finished)
-               tracing_stop_sched_switch_record();
-}
-
-static int boot_trace_init(struct trace_array *tr)
-{
-       boot_trace = tr;
-
-       if (!tr)
-               return 0;
-
-       tracing_reset_online_cpus(tr);
-
-       tracing_sched_switch_assign_trace(tr);
-       return 0;
-}
-
-static enum print_line_t
-initcall_call_print_line(struct trace_iterator *iter)
-{
-       struct trace_entry *entry = iter->ent;
-       struct trace_seq *s = &iter->seq;
-       struct trace_boot_call *field;
-       struct boot_trace_call *call;
-       u64 ts;
-       unsigned long nsec_rem;
-       int ret;
-
-       trace_assign_type(field, entry);
-       call = &field->boot_call;
-       ts = iter->ts;
-       nsec_rem = do_div(ts, NSEC_PER_SEC);
-
-       ret = trace_seq_printf(s, "[%5ld.%09ld] calling  %s @ %i\n",
-                       (unsigned long)ts, nsec_rem, call->func, call->caller);
-
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-       else
-               return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t
-initcall_ret_print_line(struct trace_iterator *iter)
-{
-       struct trace_entry *entry = iter->ent;
-       struct trace_seq *s = &iter->seq;
-       struct trace_boot_ret *field;
-       struct boot_trace_ret *init_ret;
-       u64 ts;
-       unsigned long nsec_rem;
-       int ret;
-
-       trace_assign_type(field, entry);
-       init_ret = &field->boot_ret;
-       ts = iter->ts;
-       nsec_rem = do_div(ts, NSEC_PER_SEC);
-
-       ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
-                       "returned %d after %llu msecs\n",
-                       (unsigned long) ts,
-                       nsec_rem,
-                       init_ret->func, init_ret->result, init_ret->duration);
-
-       if (!ret)
-               return TRACE_TYPE_PARTIAL_LINE;
-       else
-               return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t initcall_print_line(struct trace_iterator *iter)
-{
-       struct trace_entry *entry = iter->ent;
-
-       switch (entry->type) {
-       case TRACE_BOOT_CALL:
-               return initcall_call_print_line(iter);
-       case TRACE_BOOT_RET:
-               return initcall_ret_print_line(iter);
-       default:
-               return TRACE_TYPE_UNHANDLED;
-       }
-}
-
-struct tracer boot_tracer __read_mostly =
-{
-       .name           = "initcall",
-       .init           = boot_trace_init,
-       .reset          = tracing_reset_online_cpus,
-       .print_line     = initcall_print_line,
-};
-
-void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
-{
-       struct ftrace_event_call *call = &event_boot_call;
-       struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
-       struct trace_boot_call *entry;
-       struct trace_array *tr = boot_trace;
-
-       if (!tr || !pre_initcalls_finished)
-               return;
-
-       /* Get its name now since this function could
-        * disappear because it is in the .init section.
-        */
-       sprint_symbol(bt->func, (unsigned long)fn);
-       preempt_disable();
-
-       buffer = tr->buffer;
-       event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL,
-                                         sizeof(*entry), 0, 0);
-       if (!event)
-               goto out;
-       entry   = ring_buffer_event_data(event);
-       entry->boot_call = *bt;
-       if (!filter_check_discard(call, entry, buffer, event))
-               trace_buffer_unlock_commit(buffer, event, 0, 0);
- out:
-       preempt_enable();
-}
-
-void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
-{
-       struct ftrace_event_call *call = &event_boot_ret;
-       struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
-       struct trace_boot_ret *entry;
-       struct trace_array *tr = boot_trace;
-
-       if (!tr || !pre_initcalls_finished)
-               return;
-
-       sprint_symbol(bt->func, (unsigned long)fn);
-       preempt_disable();
-
-       buffer = tr->buffer;
-       event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET,
-                                         sizeof(*entry), 0, 0);
-       if (!event)
-               goto out;
-       entry   = ring_buffer_event_data(event);
-       entry->boot_ret = *bt;
-       if (!filter_check_discard(call, entry, buffer, event))
-               trace_buffer_unlock_commit(buffer, event, 0, 0);
- out:
-       preempt_enable();
-}
index 9d589d8dcd1aa7e9470b6868980a20c4aeaf6160..52fda6c04ac3e1f3ee2ff5f79dc95cd5bea53152 100644 (file)
 u64 notrace trace_clock_local(void)
 {
        u64 clock;
-       int resched;
 
        /*
         * sched_clock() is an architecture implemented, fast, scalable,
         * lockless clock. It is not guaranteed to be coherent across
         * CPUs, nor across CPU idle events.
         */
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
        clock = sched_clock();
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
 
        return clock;
 }
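Because sched_clock() is not coherent across CPUs, a delta between two trace_clock_local() reads is only meaningful if both reads happen on the same CPU. A sketch of a caller honoring that constraint (hypothetical helper; not part of this patch):

	static u64 my_timed_section(void)
	{
		u64 t0, t1;

		preempt_disable_notrace();	/* keep both reads on one CPU */
		t0 = trace_clock_local();
		/* ... section being timed ... */
		t1 = trace_clock_local();
		preempt_enable_notrace();

		return t1 - t0;		/* local-CPU elapsed nanoseconds */
	}
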
index dc008c1240da54ae8ebc6fcccb204ad205897e96..13abc157dbaf47d1a4af1eedddd9a993d92a5e92 100644 (file)
@@ -271,33 +271,6 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
                 __entry->map_id, __entry->opcode)
 );
 
-FTRACE_ENTRY(boot_call, trace_boot_call,
-
-       TRACE_BOOT_CALL,
-
-       F_STRUCT(
-               __field_struct( struct boot_trace_call, boot_call       )
-               __field_desc(   pid_t,  boot_call,      caller          )
-               __array_desc(   char,   boot_call,      func,   KSYM_SYMBOL_LEN)
-       ),
-
-       F_printk("%d  %s", __entry->caller, __entry->func)
-);
-
-FTRACE_ENTRY(boot_ret, trace_boot_ret,
-
-       TRACE_BOOT_RET,
-
-       F_STRUCT(
-               __field_struct( struct boot_trace_ret,  boot_ret        )
-               __array_desc(   char,   boot_ret,       func,   KSYM_SYMBOL_LEN)
-               __field_desc(   int,    boot_ret,       result          )
-               __field_desc(   unsigned long, boot_ret, duration       )
-       ),
-
-       F_printk("%s %d %lx",
-                __entry->func, __entry->result, __entry->duration)
-);
 
 #define TRACE_FUNC_SIZE 30
 #define TRACE_FILE_SIZE 20
@@ -318,41 +291,6 @@ FTRACE_ENTRY(branch, trace_branch,
                 __entry->func, __entry->file, __entry->correct)
 );
 
-FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
-
-       TRACE_KMEM_ALLOC,
-
-       F_STRUCT(
-               __field(        enum kmemtrace_type_id, type_id         )
-               __field(        unsigned long,          call_site       )
-               __field(        const void *,           ptr             )
-               __field(        size_t,                 bytes_req       )
-               __field(        size_t,                 bytes_alloc     )
-               __field(        gfp_t,                  gfp_flags       )
-               __field(        int,                    node            )
-       ),
-
-       F_printk("type:%u call_site:%lx ptr:%p req:%zi alloc:%zi"
-                " flags:%x node:%d",
-                __entry->type_id, __entry->call_site, __entry->ptr,
-                __entry->bytes_req, __entry->bytes_alloc,
-                __entry->gfp_flags, __entry->node)
-);
-
-FTRACE_ENTRY(kmem_free, kmemtrace_free_entry,
-
-       TRACE_KMEM_FREE,
-
-       F_STRUCT(
-               __field(        enum kmemtrace_type_id, type_id         )
-               __field(        unsigned long,          call_site       )
-               __field(        const void *,           ptr             )
-       ),
-
-       F_printk("type:%u call_site:%lx ptr:%p",
-                __entry->type_id, __entry->call_site, __entry->ptr)
-);
-
 FTRACE_ENTRY(ksym_trace, ksym_trace_entry,
 
        TRACE_KSYM,
index e6f65887842c9a91649550daee07c593c818f9b5..4799d7047eb0f0e40c8c91f53cf244b6a08642e6 100644 (file)
@@ -9,8 +9,6 @@
 #include <linux/kprobes.h>
 #include "trace.h"
 
-EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
-
 static char *perf_trace_buf[4];
 
 /*
index 53cffc0b08014db76d9e87da0d8bbc3c9768bd49..a594f9a7ee3de0bb6dddd64e46d5fa71542ceaf6 100644 (file)
@@ -1524,12 +1524,11 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
-       int resched;
        int cpu;
        int pc;
 
        pc = preempt_count();
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
@@ -1551,7 +1550,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 
  out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __initdata  =
index b3f3776b0cd64002605f6c3d2f2dc9528f2b9932..16aee4d44e8fd3e121c2dc4910a120968d01d5d8 100644 (file)
@@ -54,14 +54,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
-       int cpu, resched;
+       int cpu;
        int pc;
 
        if (unlikely(!ftrace_function_enabled))
                return;
 
        pc = preempt_count();
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
@@ -71,7 +71,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
                trace_function(tr, ip, parent_ip, flags, pc);
 
        atomic_dec(&data->disabled);
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
 }
 
 static void
index 0e73bc2ef8c55a0b8a47ffe19d7b1aad3577de2b..c9fd5bd02036d85db3474d4b6cc5b1761f4e572a 100644 (file)
@@ -46,7 +46,6 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
-       int resched;
        int cpu;
        int pc;
 
@@ -54,7 +53,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
                return;
 
        pc = preempt_count();
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
 
        cpu = raw_smp_processor_id();
        if (cpu != wakeup_current_cpu)
@@ -74,7 +73,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
  out:
        atomic_dec(&data->disabled);
  out_enable:
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
index f4bc9b27de5fd13d131dd13b204b1e5432a4283a..056468eae7cfce7a76bf5c2364f71c722adeb325 100644 (file)
@@ -110,12 +110,12 @@ static inline void check_stack(void)
 static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
-       int cpu, resched;
+       int cpu;
 
        if (unlikely(!ftrace_enabled || stack_trace_disabled))
                return;
 
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
 
        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable by this cpu */
@@ -127,7 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
index a7974a552ca90689f3ff358f96b6eb2622c5748c..c080956f4d8eb96b3a6e03c4e879598de9303830 100644 (file)
@@ -33,12 +33,13 @@ static DEFINE_MUTEX(sample_timer_lock);
  */
 static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);
 
-struct stack_frame {
+struct stack_frame_user {
        const void __user       *next_fp;
        unsigned long           return_address;
 };
 
-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+static int
+copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
 {
        int ret;
 
@@ -125,7 +126,7 @@ trace_kernel(struct pt_regs *regs, struct trace_array *tr,
 static void timer_notify(struct pt_regs *regs, int cpu)
 {
        struct trace_array_cpu *data;
-       struct stack_frame frame;
+       struct stack_frame_user frame;
        struct trace_array *tr;
        const void __user *fp;
        int is_user;
index 456ec6f278897a560bd915e6d84297ac3de7ddef..e38e910cb75673c528116e9e0a114fb0c8f3b8cb 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1734,8 +1734,10 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
                error = acct_stack_growth(vma, size, grow);
-               if (!error)
+               if (!error) {
                        vma->vm_end = address;
+                       perf_event_mmap(vma);
+               }
        }
        anon_vma_unlock(vma);
        return error;
@@ -1781,6 +1783,7 @@ static int expand_downwards(struct vm_area_struct *vma,
                if (!error) {
                        vma->vm_start = address;
                        vma->vm_pgoff -= grow;
+                       perf_event_mmap(vma);
                }
        }
        anon_vma_unlock(vma);
@@ -2208,6 +2211,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        vma->vm_page_prot = vm_get_page_prot(flags);
        vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
+       perf_event_mmap(vma);
        mm->total_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED) {
                if (!mlock_vma_pages_range(vma, addr, addr + len))
index e49f8f46f46d6878dba427dae10fc9b5fb153a9f..47360c3e5abde0bd27e0ce418bc7a8d1c3ab5b57 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/cpu.h>
 #include       <linux/sysctl.h>
 #include       <linux/module.h>
-#include       <linux/kmemtrace.h>
 #include       <linux/rcupdate.h>
 #include       <linux/string.h>
 #include       <linux/uaccess.h>
index 23631e2bb57af43c5cb577a75323cc9768798016..a82ab5811bd928e4c419be3233bf7e7e0249b9f9 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
-#include <linux/kmemtrace.h>
 #include <linux/kmemleak.h>
+
+#include <trace/events/kmem.h>
+
 #include <asm/atomic.h>
 
 /*
index 578f68f3c51f76ed3c2ff21b5ff2545d2ab64c0d..7bb7940f4eeea2a686937d69795c3acb95077297 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -17,7 +17,6 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <linux/kmemtrace.h>
 #include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
index 62fcc3a7f4d35f4ae29c959baac684f154c9aafd..18513b0191db2cfeb2e208e2ddf66c96af6089e1 100644 (file)
@@ -111,13 +111,38 @@ tar%pkg: FORCE
 clean-dirs += $(objtree)/tar-install/
 
 
+# perf-pkg - generate a source tarball with perf source
+# ---------------------------------------------------------------------------
+
+perf-tar=perf-$(KERNELVERSION)
+
+quiet_cmd_perf_tar = TAR
+      cmd_perf_tar = \
+git archive --prefix=$(perf-tar)/ HEAD^{tree}                       \
+       $$(cat $(srctree)/tools/perf/MANIFEST) -o $(perf-tar).tar;  \
+mkdir -p $(perf-tar);                                               \
+git rev-parse HEAD > $(perf-tar)/HEAD;                              \
+tar rf $(perf-tar).tar $(perf-tar)/HEAD;                            \
+rm -r $(perf-tar);                                                  \
+$(if $(findstring tar-src,$@),,                                     \
+$(if $(findstring bz2,$@),bzip2,                                    \
+$(if $(findstring gz,$@),gzip,                                      \
+$(error unknown target $@)))                                       \
+       -f -9 $(perf-tar).tar)
+
+perf-%pkg: FORCE
+       $(call cmd,perf_tar)
+
 # Help text displayed when executing 'make help'
 # ---------------------------------------------------------------------------
 help: FORCE
-       @echo '  rpm-pkg         - Build both source and binary RPM kernel packages'
-       @echo '  binrpm-pkg      - Build only the binary kernel package'
-       @echo '  deb-pkg         - Build the kernel as an deb package'
-       @echo '  tar-pkg         - Build the kernel as an uncompressed tarball'
-       @echo '  targz-pkg       - Build the kernel as a gzip compressed tarball'
-       @echo '  tarbz2-pkg      - Build the kernel as a bzip2 compressed tarball'
+       @echo '  rpm-pkg             - Build both source and binary RPM kernel packages'
+       @echo '  binrpm-pkg          - Build only the binary kernel package'
+       @echo '  deb-pkg             - Build the kernel as a deb package'
+       @echo '  tar-pkg             - Build the kernel as an uncompressed tarball'
+       @echo '  targz-pkg           - Build the kernel as a gzip compressed tarball'
+       @echo '  tarbz2-pkg          - Build the kernel as a bzip2 compressed tarball'
+       @echo '  perf-tar-src-pkg    - Build $(perf-tar).tar source tarball'
+       @echo '  perf-targz-src-pkg  - Build $(perf-tar).tar.gz source tarball'
+       @echo '  perf-tarbz2-src-pkg - Build $(perf-tar).tar.bz2 source tarball'
 
index e1d60d780784b8dd5ecedb42760a7802478276c1..cb43289e447f9d880b32ebadf173fa0f01aca166 100644 (file)
@@ -18,3 +18,5 @@ perf-archive
 tags
 TAGS
 cscope*
+config.mak
+config.mak.autogen
index 94a258c96a440b091616264fb2a7d0fd8b515967..ea531d9d975ce23f6f75d8ca61bf7fc38c3915ea 100644 (file)
@@ -31,6 +31,10 @@ OPTIONS
 --vmlinux=PATH::
        Specify vmlinux path which has debuginfo (Dwarf binary).
 
+-s::
+--source=PATH::
+       Specify path to kernel source.
+
 -v::
 --verbose::
         Be more verbose (show parsed arguments, etc).
index 34e255fc3e2f14d0a559efd207b6def3abfdea9e..3ee27dccfde97d6c58f562bd890a6df9c6026d25 100644 (file)
@@ -103,6 +103,19 @@ OPTIONS
 --raw-samples::
 Collect raw sample records from all opened counters (default for tracepoint counters).
 
+-C::
+--cpu::
+Collect samples only on the list of CPUs provided. Multiple CPUs can be provided as a
+comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2.
+In per-thread mode with inheritance mode on (default), samples are captured only when
+the thread executes on the designated CPUs. Default is to monitor all CPUs.
+
+-N::
+--no-buildid-cache::
+Do not update the buildid cache. This saves some overhead in situations
+where the information in the perf.data file (which includes buildids)
+is sufficient.
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-list[1]
index 909fa766fa1cfa014041e41ddb76d29555c2469c..4b3a2d46b4378607f5195d12328646f5b1d7a638 100644 (file)
@@ -46,6 +46,13 @@ OPTIONS
 -B::
         print large numbers with thousands' separators according to locale
 
+-C::
+--cpu=::
+Count only on the list of CPUs provided. Multiple CPUs can be provided as a
+comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2.
+In per-thread mode, this option is ignored. The -a option is still necessary
+to activate system-wide monitoring. Default is to count on all CPUs.
+
 EXAMPLES
 --------
 
index 785b9fc32a46f45559a77c193ed56fdf9bfc7702..1f9687663f2a9cd62d6cff9f597395523b3da931 100644 (file)
@@ -25,9 +25,11 @@ OPTIONS
 --count=<count>::
        Event period to sample.
 
--C <cpu>::
---CPU=<cpu>::
-       CPU to profile.
+-C <cpu-list>::
+--cpu=<cpu-list>::
+Monitor only on the list of CPUs provided. Multiple CPUs can be provided as a
+comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2.
+Default is to monitor all CPUs.
 
 -d <seconds>::
 --delay=<seconds>::
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
new file mode 100644 (file)
index 0000000..8c7fc0c
--- /dev/null
@@ -0,0 +1,12 @@
+tools/perf
+include/linux/perf_event.h
+include/linux/rbtree.h
+include/linux/list.h
+include/linux/hash.h
+include/linux/stringify.h
+lib/rbtree.c
+include/linux/swab.h
+arch/*/include/asm/unistd*.h
+include/linux/poison.h
+include/linux/magic.h
+include/linux/hw_breakpoint.h
index 3d8f31ed771df18770b1e31361485d39400b5476..6aa2fe323db1ec63424cb785e576d2914ee44f2b 100644 (file)
@@ -285,14 +285,10 @@ else
        QUIET_STDERR = ">/dev/null 2>&1"
 endif
 
-BITBUCKET = "/dev/null"
+-include feature-tests.mak
 
-ifneq ($(shell sh -c "(echo '\#include <stdio.h>'; echo 'int main(void) { return puts(\"hi\"); }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) "$(QUIET_STDERR)" && echo y"), y)
-       BITBUCKET = .perf.dev.null
-endif
-
-ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o $(BITBUCKET) "$(QUIET_STDERR)" && echo y"), y)
-  CFLAGS := $(CFLAGS) -fstack-protector-all
+ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y)
+       CFLAGS := $(CFLAGS) -fstack-protector-all
 endif
 
 
@@ -508,7 +504,8 @@ PERFLIBS = $(LIB_FILE)
 -include config.mak
 
 ifndef NO_DWARF
-ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo '\#include <version.h>'; echo '\#ifndef _ELFUTILS_PREREQ'; echo '\#error'; echo '\#endif'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+FLAGS_DWARF=$(ALL_CFLAGS) -I/usr/include/elfutils -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS)
+ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y)
        msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
        NO_DWARF := 1
 endif # Dwarf support
@@ -536,16 +533,18 @@ ifneq ($(OUTPUT),)
        BASIC_CFLAGS += -I$(OUTPUT)
 endif
 
-ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
-ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
-       msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
+FLAGS_LIBELF=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS)
+ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF)),y)
+       FLAGS_GLIBC=$(ALL_CFLAGS) $(ALL_LDFLAGS)
+       ifneq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC)),y)
+               msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
+       else
+               msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel);
+       endif
 endif
 
-       ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
-               BASIC_CFLAGS += -DLIBELF_NO_MMAP
-       endif
-else
-       msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
+ifneq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_COMMON)),y)
+       BASIC_CFLAGS += -DLIBELF_NO_MMAP
 endif
 
 ifndef NO_DWARF
@@ -561,41 +560,47 @@ endif # NO_DWARF
 ifdef NO_NEWT
        BASIC_CFLAGS += -DNO_NEWT_SUPPORT
 else
-ifneq ($(shell sh -c "(echo '\#include <newt.h>'; echo 'int main(void) { newtInit(); newtCls(); return newtFinished(); }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -lnewt -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
-       msg := $(warning newt not found, disables TUI support. Please install newt-devel or libnewt-dev);
-       BASIC_CFLAGS += -DNO_NEWT_SUPPORT
-else
-       # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
-       BASIC_CFLAGS += -I/usr/include/slang
-       EXTLIBS += -lnewt -lslang
-       LIB_OBJS += $(OUTPUT)util/newt.o
-endif
-endif # NO_NEWT
-
-ifndef NO_LIBPERL
-PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null`
-PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+       FLAGS_NEWT=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lnewt
+       ifneq ($(call try-cc,$(SOURCE_NEWT),$(FLAGS_NEWT)),y)
+               msg := $(warning newt not found, disables TUI support. Please install newt-devel or libnewt-dev);
+               BASIC_CFLAGS += -DNO_NEWT_SUPPORT
+       else
+               # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
+               BASIC_CFLAGS += -I/usr/include/slang
+               EXTLIBS += -lnewt -lslang
+               LIB_OBJS += $(OUTPUT)util/newt.o
+       endif
 endif
 
-ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; echo 'int main(void) { perl_alloc(); return 0; }') | $(CC) -x c - $(PERL_EMBED_CCOPTS) -o $(BITBUCKET) $(PERL_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
+ifdef NO_LIBPERL
        BASIC_CFLAGS += -DNO_LIBPERL
 else
-       ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
-       LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
-       LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
-endif
+       PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null`
+       PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+       PERL_EMBED_FLAGS=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
 
-ifndef NO_LIBPYTHON
-PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null`
-PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
+       ifneq ($(call try-cc,$(SOURCE_PERL_EMBED),$(FLAGS_PERL_EMBED)),y)
+               BASIC_CFLAGS += -DNO_LIBPERL
+       else
+               ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
+               LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
+               LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
+       endif
 endif
 
-ifneq ($(shell sh -c "(echo '\#include <Python.h>'; echo 'int main(void) { Py_Initialize(); return 0; }') | $(CC) -x c - $(PYTHON_EMBED_CCOPTS) -o $(BITBUCKET) $(PYTHON_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
+ifdef NO_LIBPYTHON
        BASIC_CFLAGS += -DNO_LIBPYTHON
 else
-       ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS)
-       LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
-       LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
+       PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null`
+       PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
+       FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
+       ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y)
+               BASIC_CFLAGS += -DNO_LIBPYTHON
+       else
+               ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS)
+               LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
+               LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
+       endif
 endif
 
 ifdef NO_DEMANGLE
@@ -604,20 +609,23 @@ else ifdef HAVE_CPLUS_DEMANGLE
        EXTLIBS += -liberty
        BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
 else
-       has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y")
-
+       FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd
+       has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD))
        ifeq ($(has_bfd),y)
                EXTLIBS += -lbfd
        else
-               has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y")
+               FLAGS_BFD_IBERTY=$(FLAGS_BFD) -liberty
+               has_bfd_iberty := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY))
                ifeq ($(has_bfd_iberty),y)
                        EXTLIBS += -lbfd -liberty
                else
-                       has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y")
+                       FLAGS_BFD_IBERTY_Z=$(FLAGS_BFD_IBERTY) -lz
+                       has_bfd_iberty_z := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY_Z))
                        ifeq ($(has_bfd_iberty_z),y)
                                EXTLIBS += -lbfd -liberty -lz
                        else
-                               has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y")
+                               FLAGS_CPLUS_DEMANGLE=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -liberty
+                               has_cplus_demangle := $(call try-cc,$(SOURCE_CPLUS_DEMANGLE),$(FLAGS_CPLUS_DEMANGLE))
                                ifeq ($(has_cplus_demangle),y)
                                        EXTLIBS += -liberty
                                        BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
@@ -865,7 +873,7 @@ export TAR INSTALL DESTDIR SHELL_PATH
 
 SHELL = $(SHELL_PATH)
 
-all:: .perf.dev.null shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) $(OUTPUT)PERF-BUILD-OPTIONS
+all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) $(OUTPUT)PERF-BUILD-OPTIONS
 ifneq (,$X)
        $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';)
 endif
@@ -1195,11 +1203,6 @@ clean:
 .PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS
 .PHONY: .FORCE-PERF-BUILD-OPTIONS
 
-.perf.dev.null:
-               touch .perf.dev.null
-
-.INTERMEDIATE: .perf.dev.null
-
 ### Make sure built-ins do not have dups and listed in perf.c
 #
 check-builtins::
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 96db5248e99569914c62427a3466aa9b91559530..fd20670ce986bae016759aa61bd01c272bd7f47f 100644 (file)
@@ -61,11 +61,9 @@ static int hists__add_entry(struct hists *self, struct addr_location *al)
 static int process_sample_event(event_t *event, struct perf_session *session)
 {
        struct addr_location al;
+       struct sample_data data;
 
-       dump_printf("(IP, %d): %d: %#Lx\n", event->header.misc,
-                   event->ip.pid, event->ip.ip);
-
-       if (event__preprocess_sample(event, session, &al, NULL) < 0) {
+       if (event__preprocess_sample(event, session, &al, &data, NULL) < 0) {
                pr_warning("problem processing %d event, skipping it.\n",
                           event->header.type);
                return -1;
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index f8e3d1852029b65c9c2efbdf5b6315f6f9e9d492..29ad20e6791969625303ed25ec5321f0ddd2c528 100644 (file)
@@ -78,8 +78,7 @@ static int __cmd_buildid_cache(void)
        struct str_node *pos;
        char debugdir[PATH_MAX];
 
-       snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"),
-                DEBUG_CACHE_DIR);
+       snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
 
        if (add_name_list_str) {
                list = strlist__new(true, add_name_list_str);
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index a6e2fdc7a04e16613087a4c65e70d832ab903505..39e6627ebb96a05ba361a8534f43cd2ae701c594 100644 (file)
@@ -35,10 +35,7 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
        struct addr_location al;
        struct sample_data data = { .period = 1, };
 
-       dump_printf("(IP, %d): %d: %#Lx\n", event->header.misc,
-                   event->ip.pid, event->ip.ip);
-
-       if (event__preprocess_sample(event, session, &al, NULL) < 0) {
+       if (event__preprocess_sample(event, session, &al, &data, NULL) < 0) {
                pr_warning("problem processing %d event, skipping it.\n",
                           event->header.type);
                return -1;
@@ -47,8 +44,6 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
        if (al.filtered || al.sym == NULL)
                return 0;
 
-       event__parse_sample(event, session->sample_type, &data);
-
        if (hists__add_entry(&session->hists, &al, data.period)) {
                pr_warning("problem incrementing symbol period, skipping event\n");
                return -1;
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index e4a4da32a56864a7c1d95ec44a5b603f4f541e92..54551867e7e086234fc525afe790789800cdaece 100644 (file)
@@ -182,6 +182,8 @@ static const struct option options[] = {
                     "Show source code lines.", opt_show_lines),
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                   "file", "vmlinux pathname"),
+       OPT_STRING('s', "source", &symbol_conf.source_prefix,
+                  "directory", "path to kernel source"),
 #endif
        OPT__DRY_RUN(&probe_event_dry_run),
        OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index dc3435e18bdeb18cfdcb8e2a08a21a98cc695a2d..86b1c3b6264e71790b4113a1a50a1654abcdbcfd 100644 (file)
@@ -49,7 +49,6 @@ static int                    group                           =      0;
 static int                     realtime_prio                   =      0;
 static bool                    raw_samples                     =  false;
 static bool                    system_wide                     =  false;
-static int                     profile_cpu                     =     -1;
 static pid_t                   target_pid                      =     -1;
 static pid_t                   target_tid                      =     -1;
 static pid_t                   *all_tids                       =      NULL;
@@ -61,6 +60,7 @@ static bool                   call_graph                      =  false;
 static bool                    inherit_stat                    =  false;
 static bool                    no_samples                      =  false;
 static bool                    sample_address                  =  false;
+static bool                    no_buildid                      =  false;
 
 static long                    samples                         =      0;
 static u64                     bytes_written                   =      0;
@@ -74,6 +74,7 @@ static int                    file_new                        =      1;
 static off_t                   post_processing_offset;
 
 static struct perf_session     *session;
+static const char              *cpu_list;
 
 struct mmap_data {
        int                     counter;
@@ -268,12 +269,17 @@ static void create_counter(int counter, int cpu)
        if (inherit_stat)
                attr->inherit_stat = 1;
 
-       if (sample_address)
+       if (sample_address) {
                attr->sample_type       |= PERF_SAMPLE_ADDR;
+               attr->mmap_data = track;
+       }
 
        if (call_graph)
                attr->sample_type       |= PERF_SAMPLE_CALLCHAIN;
 
+       if (system_wide)
+               attr->sample_type       |= PERF_SAMPLE_CPU;
+
        if (raw_samples) {
                attr->sample_type       |= PERF_SAMPLE_TIME;
                attr->sample_type       |= PERF_SAMPLE_RAW;
@@ -300,7 +306,7 @@ try_again:
                                die("Permission error - are you root?\n"
                                        "\t Consider tweaking"
                                        " /proc/sys/kernel/perf_event_paranoid.\n");
-                       else if (err ==  ENODEV && profile_cpu != -1) {
+                       else if (err ==  ENODEV && cpu_list) {
                                die("No such device - did you specify"
                                        " an out-of-range profile CPU?\n");
                        }
@@ -622,10 +628,15 @@ static int __cmd_record(int argc, const char **argv)
                close(child_ready_pipe[0]);
        }
 
-       if ((!system_wide && no_inherit) || profile_cpu != -1) {
-               open_counters(profile_cpu);
+       nr_cpus = read_cpu_map(cpu_list);
+       if (nr_cpus < 1) {
+               perror("failed to collect number of CPUs\n");
+               return -1;
+       }
+
+       if (!system_wide && no_inherit && !cpu_list) {
+               open_counters(-1);
        } else {
-               nr_cpus = read_cpu_map();
                for (i = 0; i < nr_cpus; i++)
                        open_counters(cpumap[i]);
        }
@@ -704,7 +715,7 @@ static int __cmd_record(int argc, const char **argv)
        if (perf_guest)
                perf_session__process_machines(session, event__synthesize_guest_os);
 
-       if (!system_wide && profile_cpu == -1)
+       if (!system_wide)
                event__synthesize_thread(target_tid, process_synthesized_event,
                                         session);
        else
@@ -794,8 +805,8 @@ static const struct option options[] = {
                            "system-wide collection from all CPUs"),
        OPT_BOOLEAN('A', "append", &append_file,
                            "append to the output file to do incremental profiling"),
-       OPT_INTEGER('C', "profile_cpu", &profile_cpu,
-                           "CPU to profile on"),
+       OPT_STRING('C', "cpu", &cpu_list, "cpu",
+                   "list of cpus to monitor"),
        OPT_BOOLEAN('f', "force", &force,
                        "overwrite existing data file (deprecated)"),
        OPT_U64('c', "count", &user_interval, "event period to sample"),
@@ -815,6 +826,8 @@ static const struct option options[] = {
                    "Sample addresses"),
        OPT_BOOLEAN('n', "no-samples", &no_samples,
                    "don't sample"),
+       OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid,
+                   "do not update the buildid cache"),
        OPT_END()
 };
 
@@ -825,7 +838,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
        argc = parse_options(argc, argv, options, record_usage,
                            PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc && target_pid == -1 && target_tid == -1 &&
-               !system_wide && profile_cpu == -1)
+               !system_wide && !cpu_list)
                usage_with_options(record_usage, options);
 
        if (force && append_file) {
@@ -839,6 +852,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
        }
 
        symbol__init();
+       if (no_buildid)
+               disable_buildid_cache();
 
        if (!nr_counters) {
                nr_counters     = 1;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 3592057829648a8d3f8ac909f93a10a729640b68..371a3c995806ade919deb62a95dcc0ffa5329a91 100644 (file)
@@ -155,30 +155,7 @@ static int process_sample_event(event_t *event, struct perf_session *session)
        struct addr_location al;
        struct perf_event_attr *attr;
 
-       event__parse_sample(event, session->sample_type, &data);
-
-       dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
-                   data.pid, data.tid, data.ip, data.period);
-
-       if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
-               unsigned int i;
-
-               dump_printf("... chain: nr:%Lu\n", data.callchain->nr);
-
-               if (!ip_callchain__valid(data.callchain, event)) {
-                       pr_debug("call-chain problem with event, "
-                                "skipping it.\n");
-                       return 0;
-               }
-
-               if (dump_trace) {
-                       for (i = 0; i < data.callchain->nr; i++)
-                               dump_printf("..... %2d: %016Lx\n",
-                                           i, data.callchain->ips[i]);
-               }
-       }
-
-       if (event__preprocess_sample(event, session, &al, NULL) < 0) {
+       if (event__preprocess_sample(event, session, &al, &data, NULL) < 0) {
                fprintf(stderr, "problem processing %d event, skipping it.\n",
                        event->header.type);
                return -1;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 9a39ca3c3ac4fe77f5ed23c82b8dc8cc973b9f51..a6b4d44f950246e27d4cb6b0bc3e6d5afd27adcc 100644 (file)
@@ -69,7 +69,7 @@ static struct perf_event_attr default_attrs[] = {
 };
 
 static bool                    system_wide                     =  false;
-static unsigned int            nr_cpus                         =  0;
+static int                     nr_cpus                         =  0;
 static int                     run_idx                         =  0;
 
 static int                     run_count                       =  1;
@@ -82,6 +82,7 @@ static int                    thread_num                      =  0;
 static pid_t                   child_pid                       = -1;
 static bool                    null_run                        =  false;
 static bool                    big_num                         =  false;
+static const char              *cpu_list;
 
 
 static int                     *fd[MAX_NR_CPUS][MAX_COUNTERS];
@@ -158,7 +159,7 @@ static int create_perf_stat_counter(int counter)
                                    PERF_FORMAT_TOTAL_TIME_RUNNING;
 
        if (system_wide) {
-               unsigned int cpu;
+               int cpu;
 
                for (cpu = 0; cpu < nr_cpus; cpu++) {
                        fd[cpu][counter][0] = sys_perf_event_open(attr,
@@ -208,7 +209,7 @@ static inline int nsec_counter(int counter)
 static void read_counter(int counter)
 {
        u64 count[3], single_count[3];
-       unsigned int cpu;
+       int cpu;
        size_t res, nv;
        int scaled;
        int i, thread;
@@ -542,6 +543,8 @@ static const struct option options[] = {
                    "null run - dont start any counters"),
        OPT_BOOLEAN('B', "big-num", &big_num,
                    "print large numbers with thousands\' separators"),
+       OPT_STRING('C', "cpu", &cpu_list, "cpu",
+                   "list of cpus to monitor in system-wide"),
        OPT_END()
 };
 
@@ -566,10 +569,13 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
        }
 
        if (system_wide)
-               nr_cpus = read_cpu_map();
+               nr_cpus = read_cpu_map(cpu_list);
        else
                nr_cpus = 1;
 
+       if (nr_cpus < 1)
+               usage_with_options(stat_usage, options);
+
        if (target_pid != -1) {
                target_tid = target_pid;
                thread_num = find_all_tid(target_pid, &all_tids);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index a66f4272b994b546b0c2a811b0f1f613a4100a9a..1e8e92e317b900fa4765427b80de8bd8c0cce534 100644 (file)
@@ -102,6 +102,7 @@ struct sym_entry            *sym_filter_entry_sched         =   NULL;
 static int                     sym_pcnt_filter                 =      5;
 static int                     sym_counter                     =      0;
 static int                     display_weighted                =     -1;
+static const char              *cpu_list;
 
 /*
  * Symbols
@@ -982,6 +983,7 @@ static void event__process_sample(const event_t *self,
        u64 ip = self->ip.ip;
        struct sym_entry *syme;
        struct addr_location al;
+       struct sample_data data;
        struct machine *machine;
        u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
@@ -1024,7 +1026,8 @@ static void event__process_sample(const event_t *self,
        if (self->header.misc & PERF_RECORD_MISC_EXACT_IP)
                exact_samples++;
 
-       if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
+       if (event__preprocess_sample(self, session, &al, &data,
+                                    symbol_filter) < 0 ||
            al.filtered)
                return;
 
@@ -1351,8 +1354,8 @@ static const struct option options[] = {
                    "profile events on existing thread id"),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                            "system-wide collection from all CPUs"),
-       OPT_INTEGER('C', "CPU", &profile_cpu,
-                   "CPU to profile on"),
+       OPT_STRING('C', "cpu", &cpu_list, "cpu",
+                   "list of cpus to monitor"),
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                   "file", "vmlinux pathname"),
        OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
@@ -1428,10 +1431,10 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
                return -ENOMEM;
 
        /* CPU and PID are mutually exclusive */
-       if (target_tid > 0 && profile_cpu != -1) {
+       if (target_tid > 0 && cpu_list) {
                printf("WARNING: PID switch overriding CPU\n");
                sleep(1);
-               profile_cpu = -1;
+               cpu_list = NULL;
        }
 
        if (!nr_counters)
@@ -1469,10 +1472,13 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
                attrs[counter].sample_period = default_interval;
        }
 
-       if (target_tid != -1 || profile_cpu != -1)
+       if (target_tid != -1)
                nr_cpus = 1;
        else
-               nr_cpus = read_cpu_map();
+               nr_cpus = read_cpu_map(cpu_list);
+
+       if (nr_cpus < 1)
+               usage_with_options(top_usage, options);
 
        get_term_dimensions(&winsize);
        if (print_entries == 0) {
diff --git a/tools/perf/feature-tests.mak b/tools/perf/feature-tests.mak
new file mode 100644 (file)
index 0000000..ddb68e6
--- /dev/null
@@ -0,0 +1,119 @@
+define SOURCE_HELLO
+#include <stdio.h>
+int main(void)
+{
+       return puts(\"hi\");
+}
+endef
+
+ifndef NO_DWARF
+define SOURCE_DWARF
+#include <dwarf.h>
+#include <libdw.h>
+#include <version.h>
+#ifndef _ELFUTILS_PREREQ
+#error
+#endif
+
+int main(void)
+{
+       Dwarf *dbg = dwarf_begin(0, DWARF_C_READ);
+       return (long)dbg;
+}
+endef
+endif
+
+define SOURCE_LIBELF
+#include <libelf.h>
+
+int main(void)
+{
+       Elf *elf = elf_begin(0, ELF_C_READ, 0);
+       return (long)elf;
+}
+endef
+
+define SOURCE_GLIBC
+#include <gnu/libc-version.h>
+
+int main(void)
+{
+       const char *version = gnu_get_libc_version();
+       return (long)version;
+}
+endef
+
+define SOURCE_ELF_MMAP
+#include <libelf.h>
+int main(void)
+{
+       Elf *elf = elf_begin(0, ELF_C_READ_MMAP, 0);
+       return (long)elf;
+}
+endef
+
+ifndef NO_NEWT
+define SOURCE_NEWT
+#include <newt.h>
+
+int main(void)
+{
+       newtInit();
+       newtCls();
+       return newtFinished();
+}
+endef
+endif
+
+ifndef NO_LIBPERL
+define SOURCE_PERL_EMBED
+#include <EXTERN.h>
+#include <perl.h>
+
+int main(void)
+{
+perl_alloc();
+return 0;
+}
+endef
+endif
+
+ifndef NO_LIBPYTHON
+define SOURCE_PYTHON_EMBED
+#include <Python.h>
+
+int main(void)
+{
+       Py_Initialize();
+       return 0;
+}
+endef
+endif
+
+define SOURCE_BFD
+#include <bfd.h>
+
+int main(void)
+{
+       bfd_demangle(0, 0, 0);
+       return 0;
+}
+endef
+
+define SOURCE_CPLUS_DEMANGLE
+extern char *cplus_demangle(const char *, int);
+
+int main(void)
+{
+       cplus_demangle(0, 0);
+       return 0;
+}
+endef
+
+# try-cc
+# Usage: option = $(call try-cc, source-to-build, cc-options)
+try-cc = $(shell sh -c                                           \
+       'TMP="$(TMPOUT).$$$$";                                    \
+        echo "$(1)" |                                            \
+        $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \
+        rm -f "$$TMP"')
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh
index 2e7a4f417e2065c336614f853554357146bb73ce..677e59d62a8dc3ae0475ab31d8c78acaeca50e51 100644 (file)
@@ -7,7 +7,17 @@ if [ $# -ne 0 ] ; then
        PERF_DATA=$1
 fi
 
-DEBUGDIR=~/.debug/
+#
+# PERF_BUILDID_DIR environment variable set by perf
+# path to buildid directory, default to $HOME/.debug
+#
+if [ -z $PERF_BUILDID_DIR ]; then
+       PERF_BUILDID_DIR=~/.debug/
+else
+        # append / to make substitutions work
+        PERF_BUILDID_DIR=$PERF_BUILDID_DIR/
+fi
+
 BUILDIDS=$(mktemp /tmp/perf-archive-buildids.XXXXXX)
 NOBUILDID=0000000000000000000000000000000000000000
 
@@ -22,13 +32,13 @@ MANIFEST=$(mktemp /tmp/perf-archive-manifest.XXXXXX)
 
 cut -d ' ' -f 1 $BUILDIDS | \
 while read build_id ; do
-       linkname=$DEBUGDIR.build-id/${build_id:0:2}/${build_id:2}
+       linkname=$PERF_BUILDID_DIR.build-id/${build_id:0:2}/${build_id:2}
        filename=$(readlink -f $linkname)
-       echo ${linkname#$DEBUGDIR} >> $MANIFEST
-       echo ${filename#$DEBUGDIR} >> $MANIFEST
+       echo ${linkname#$PERF_BUILDID_DIR} >> $MANIFEST
+       echo ${filename#$PERF_BUILDID_DIR} >> $MANIFEST
 done
 
-tar cfj $PERF_DATA.tar.bz2 -C $DEBUGDIR -T $MANIFEST
+tar cfj $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
 rm -f $MANIFEST $BUILDIDS
 echo -e "Now please run:\n"
 echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n"
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 6e4871191138970e57ea0dbeedf185eabb4ce0c1..cdd6c03f1e14c132e550b85e07b22e7621a710d2 100644 (file)
@@ -458,6 +458,8 @@ int main(int argc, const char **argv)
        handle_options(&argv, &argc, NULL);
        commit_pager_choice();
        set_debugfs_path();
+       set_buildid_dir();
+
        if (argc > 0) {
                if (!prefixcmp(argv[0], "--"))
                        argv[0] += 2;
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 70c5cf87d020d9d44750da480face3820d2ded15..5c26e2d314af1e243d8afe04fcd98ec17a80b740 100644 (file)
@@ -43,19 +43,17 @@ struct perf_event_ops build_id__mark_dso_hit_ops = {
 char *dso__build_id_filename(struct dso *self, char *bf, size_t size)
 {
        char build_id_hex[BUILD_ID_SIZE * 2 + 1];
-       const char *home;
 
        if (!self->has_build_id)
                return NULL;
 
        build_id__sprintf(self->build_id, sizeof(self->build_id), build_id_hex);
-       home = getenv("HOME");
        if (bf == NULL) {
-               if (asprintf(&bf, "%s/%s/.build-id/%.2s/%s", home,
-                            DEBUG_CACHE_DIR, build_id_hex, build_id_hex + 2) < 0)
+               if (asprintf(&bf, "%s/.build-id/%.2s/%s", buildid_dir,
+                            build_id_hex, build_id_hex + 2) < 0)
                        return NULL;
        } else
-               snprintf(bf, size, "%s/%s/.build-id/%.2s/%s", home,
-                        DEBUG_CACHE_DIR, build_id_hex, build_id_hex + 2);
+               snprintf(bf, size, "%s/.build-id/%.2s/%s", buildid_dir,
+                        build_id_hex, build_id_hex + 2);
        return bf;
 }
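
[Editor's note] dso__build_id_filename() now roots the cache at the configurable buildid_dir instead of a hard-coded $HOME/DEBUG_CACHE_DIR. The on-disk layout splits the 40-character build-id hex into a two-character fan-out directory plus the remainder, mirroring git's object store. A minimal sketch of that path construction; the directory and hex value below are made up:

	/* Sketch of the cache path built above; buildid_dir and the
	 * build-id hex are illustrative values only. */
	#include <stdio.h>

	int main(void)
	{
		const char *buildid_dir = "/home/user/.debug";
		const char *hex = "a94f1e852029b65c9c2efbdf5b6315f6f9e9d492";

		/* same format string as the asprintf/snprintf calls above */
		printf("%s/.build-id/%.2s/%s\n", buildid_dir, hex, hex + 2);
		return 0;
	}
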
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 65fe664fddf698e67aa0639169665e64fffbdf61..27e9ebe4076e0efbf4117a46f2dbbcc74a1dcc3e 100644 (file)
@@ -23,6 +23,7 @@ extern int perf_config(config_fn_t fn, void *);
 extern int perf_config_int(const char *, const char *);
 extern int perf_config_bool(const char *, const char *);
 extern int config_error_nonbool(const char *);
+extern const char *perf_config_dirname(const char *, const char *);
 
 /* pager.c */
 extern void setup_pager(void);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 62b69ad4aa735bc0ec146277661e30f9c0e3d4e9..e63c997d6c1b8ada3d338f386d35a45c5d658e5a 100644 (file)
@@ -18,7 +18,7 @@
 #include "util.h"
 #include "callchain.h"
 
-bool ip_callchain__valid(struct ip_callchain *chain, event_t *event)
+bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
 {
        unsigned int chain_size = event->header.size;
        chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 1ca73e4a2723997099973963f8477bdbbafcc909..809850fb75fbcf4d27f92fee8c9f8ea0ae5df320 100644 (file)
@@ -60,5 +60,5 @@ int register_callchain_param(struct callchain_param *param);
 int append_chain(struct callchain_node *root, struct ip_callchain *chain,
                 struct map_symbol *syms);
 
-bool ip_callchain__valid(struct ip_callchain *chain, event_t *event);
+bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event);
 #endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index dabe892d0e5374ad8689040e6865a642e161f752..e02d78cae70f688fc0fc32957d078cf45796ed75 100644 (file)
 
 #define MAXNAME (256)
 
+#define DEBUG_CACHE_DIR ".debug"
+
+
+char buildid_dir[MAXPATHLEN]; /* root dir for buildid, binary cache */
+
 static FILE *config_file;
 static const char *config_file_name;
 static int config_linenr;
@@ -127,7 +132,7 @@ static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
                        break;
                if (!iskeychar(c))
                        break;
-               name[len++] = tolower(c);
+               name[len++] = c;
                if (len >= MAXNAME)
                        return -1;
        }
@@ -327,6 +332,13 @@ int perf_config_bool(const char *name, const char *value)
        return !!perf_config_bool_or_int(name, value, &discard);
 }
 
+const char *perf_config_dirname(const char *name, const char *value)
+{
+       if (!name)
+               return NULL;
+       return value;
+}
+
 static int perf_default_core_config(const char *var __used, const char *value __used)
 {
        /* Add other config variables here and to Documentation/config.txt. */
@@ -428,3 +440,53 @@ int config_error_nonbool(const char *var)
 {
        return error("Missing value for '%s'", var);
 }
+
+struct buildid_dir_config {
+       char *dir;
+};
+
+static int buildid_dir_command_config(const char *var, const char *value,
+                                     void *data)
+{
+       struct buildid_dir_config *c = data;
+       const char *v;
+
+       /* same dir for all commands */
+       if (!prefixcmp(var, "buildid.") && !strcmp(var + 8, "dir")) {
+               v = perf_config_dirname(var, value);
+               if (!v)
+                       return -1;
+               strncpy(c->dir, v, MAXPATHLEN-1);
+               c->dir[MAXPATHLEN-1] = '\0';
+       }
+       return 0;
+}
+
+static void check_buildid_dir_config(void)
+{
+       struct buildid_dir_config c;
+       c.dir = buildid_dir;
+       perf_config(buildid_dir_command_config, &c);
+}
+
+void set_buildid_dir(void)
+{
+       buildid_dir[0] = '\0';
+
+       /* try config file */
+       check_buildid_dir_config();
+
+       /* default to $HOME/.debug */
+       if (buildid_dir[0] == '\0') {
+               char *v = getenv("HOME");
+               if (v) {
+                       snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s",
+                                v, DEBUG_CACHE_DIR);
+               } else {
+                       strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1);
+               }
+               buildid_dir[MAXPATHLEN-1] = '\0';
+       }
+       /* for communicating with external commands */
+       setenv("PERF_BUILDID_DIR", buildid_dir, 1);
+}
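
[Editor's note] set_buildid_dir() resolves the cache root in a fixed order: the perfconfig key buildid.dir, then $HOME/.debug, then a bare .debug as a last resort, and exports the result as PERF_BUILDID_DIR for child commands such as perf-archive. A standalone sketch of the key match done by buildid_dir_command_config(), with a hypothetical config value in the comments:

	#include <stdio.h>
	#include <string.h>

	/* Self-contained equivalent of the !prefixcmp(var, "buildid.")
	 * && !strcmp(var + 8, "dir") test above. */
	static int is_buildid_dir_key(const char *var)
	{
		return !strncmp(var, "buildid.", 8) && !strcmp(var + 8, "dir");
	}

	int main(void)
	{
		/* A ~/.perfconfig entry such as
		 *	[buildid]
		 *		dir = /var/cache/perf-buildids   (hypothetical path)
		 * reaches the config callback as var = "buildid.dir". */
		printf("%d %d\n", is_buildid_dir_key("buildid.dir"),
		       is_buildid_dir_key("buildid.directory"));	/* "1 0" */
		return 0;
	}
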
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 4e01490e51e549c1be5b3e5d640683f65a765f7f..0f9b8d7a7d7e7d62f38c48ec8846251beb532918 100644 (file)
@@ -20,7 +20,7 @@ static int default_cpu_map(void)
        return nr_cpus;
 }
 
-int read_cpu_map(void)
+static int read_all_cpu_map(void)
 {
        FILE *onlnf;
        int nr_cpus = 0;
@@ -57,3 +57,58 @@ int read_cpu_map(void)
 
        return default_cpu_map();
 }
+
+int read_cpu_map(const char *cpu_list)
+{
+       unsigned long start_cpu, end_cpu = 0;
+       char *p = NULL;
+       int i, nr_cpus = 0;
+
+       if (!cpu_list)
+               return read_all_cpu_map();
+
+       if (!isdigit(*cpu_list))
+               goto invalid;
+
+       while (isdigit(*cpu_list)) {
+               p = NULL;
+               start_cpu = strtoul(cpu_list, &p, 0);
+               if (start_cpu >= INT_MAX
+                   || (*p != '\0' && *p != ',' && *p != '-'))
+                       goto invalid;
+
+               if (*p == '-') {
+                       cpu_list = ++p;
+                       p = NULL;
+                       end_cpu = strtoul(cpu_list, &p, 0);
+
+                       if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
+                               goto invalid;
+
+                       if (end_cpu < start_cpu)
+                               goto invalid;
+               } else {
+                       end_cpu = start_cpu;
+               }
+
+               for (; start_cpu <= end_cpu; start_cpu++) {
+                       /* check for duplicates */
+                       for (i = 0; i < nr_cpus; i++)
+                               if (cpumap[i] == (int)start_cpu)
+                                       goto invalid;
+
+                       assert(nr_cpus < MAX_NR_CPUS);
+                       cpumap[nr_cpus++] = (int)start_cpu;
+               }
+               if (*p)
+                       ++p;
+
+               cpu_list = p;
+       }
+       if (nr_cpus > 0)
+               return nr_cpus;
+
+       return default_cpu_map();
+invalid:
+       return -1;
+}
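
[Editor's note] read_cpu_map() now understands the usual cpu-list syntax: comma-separated entries, each either a single CPU or an inclusive range, with reversed ranges, duplicates, and stray characters rejected. A self-contained toy counter for the same grammar; illustrative only, since the real parser also fills cpumap[] and enforces MAX_NR_CPUS:

	/* Toy validator/counter for the cpu-list grammar parsed above:
	 * "0-2,4" -> 4 cpus, "2-1" -> invalid. */
	#include <ctype.h>
	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	static int count_cpu_list(const char *s)
	{
		int n = 0;

		if (!s || !isdigit((unsigned char)*s))
			return -1;
		while (isdigit((unsigned char)*s)) {
			char *end;
			unsigned long lo = strtoul(s, &end, 0), hi = lo;

			if (*end == '-') {
				hi = strtoul(end + 1, &end, 0);
				if (hi < lo)
					return -1;
			}
			if (hi >= INT_MAX || (*end && *end != ','))
				return -1;
			n += hi - lo + 1;
			s = *end ? end + 1 : end;
		}
		return n;
	}

	int main(void)
	{
		printf("%d %d %d\n", count_cpu_list("0-2,4"),
		       count_cpu_list("1,3"), count_cpu_list("2-1"));
		/* prints "4 2 -1" */
		return 0;
	}
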
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 86c78bb330982619589a11329c5719c03a3bb725..3e60f56e490eb10f8cf08981e703bf5699d6b20c 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __PERF_CPUMAP_H
 #define __PERF_CPUMAP_H
 
-extern int read_cpu_map(void);
+extern int read_cpu_map(const char *cpu_list);
 extern int cpumap[];
 
 #endif /* __PERF_CPUMAP_H */
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 6cddff2bc970b1aeb49322aaf7c5b0028ee5fecd..318dab15d17732a709ab7b72b8db03fb6e08b24f 100644 (file)
@@ -86,12 +86,10 @@ void trace_event(event_t *event)
                        dump_printf_color("  ", color);
                        for (j = 0; j < 15-(i & 15); j++)
                                dump_printf_color("   ", color);
-                       for (j = 0; j < (i & 15); j++) {
-                               if (isprint(raw_event[i-15+j]))
-                                       dump_printf_color("%c", color,
-                                                         raw_event[i-15+j]);
-                               else
-                                       dump_printf_color(".", color);
+                       for (j = i & ~15; j <= i; j++) {
+                               dump_printf_color("%c", color,
+                                               isprint(raw_event[j]) ?
+                                               raw_event[j] : '.');
                        }
                        dump_printf_color("\n", color);
                }
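
[Editor's note] The rewritten ASCII column walks the current 16-byte row directly, from the row start (i & ~15) up to i, instead of the old backwards i-15+j arithmetic, and a ternary collapses the printable and non-printable cases into a single dump_printf_color() call. The same row walk as a tiny standalone helper:

	/* Print bytes from the start of the current 16-byte row up to
	 * offset i, with '.' standing in for non-printable bytes. */
	#include <ctype.h>
	#include <stdio.h>

	static void ascii_row(const unsigned char *buf, int i)
	{
		int j;

		for (j = i & ~15; j <= i; j++)
			putchar(isprint(buf[j]) ? buf[j] : '.');
		putchar('\n');
	}

	int main(void)
	{
		unsigned char raw[] = "perf\x01\x02 event";

		ascii_row(raw, (int)sizeof(raw) - 2);	/* "perf.. event" */
		return 0;
	}
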
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 1f08f008d289f841f7ae0089462129f97e313afc..a7460868124bd0d1e643d43866c7a04863f0999e 100644 (file)
@@ -655,11 +655,36 @@ static void dso__calc_col_width(struct dso *self)
 }
 
 int event__preprocess_sample(const event_t *self, struct perf_session *session,
-                            struct addr_location *al, symbol_filter_t filter)
+                            struct addr_location *al, struct sample_data *data,
+                            symbol_filter_t filter)
 {
        u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-       struct thread *thread = perf_session__findnew(session, self->ip.pid);
+       struct thread *thread;
+
+       event__parse_sample(self, session->sample_type, data);
+
+       dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld cpu:%d\n",
+                   self->header.misc, data->pid, data->tid, data->ip,
+                   data->period, data->cpu);
+
+       if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
+               unsigned int i;
+
+               dump_printf("... chain: nr:%Lu\n", data->callchain->nr);
+
+               if (!ip_callchain__valid(data->callchain, self)) {
+                       pr_debug("call-chain problem with event, "
+                                "skipping it.\n");
+                       goto out_filtered;
+               }
 
+               if (dump_trace) {
+                       for (i = 0; i < data->callchain->nr; i++)
+                               dump_printf("..... %2d: %016Lx\n",
+                                           i, data->callchain->ips[i]);
+               }
+       }
+       thread = perf_session__findnew(session, self->ip.pid);
        if (thread == NULL)
                return -1;
 
@@ -685,6 +710,7 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
                    al->map ? al->map->dso->long_name :
                        al->level == 'H' ? "[hypervisor]" : "<not found>");
        al->sym = NULL;
+       al->cpu = data->cpu;
 
        if (al->map) {
                if (symbol_conf.dso_list &&
@@ -724,9 +750,9 @@ out_filtered:
        return 0;
 }
 
-int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
+int event__parse_sample(const event_t *event, u64 type, struct sample_data *data)
 {
-       u64 *array = event->sample.array;
+       const u64 *array = event->sample.array;
 
        if (type & PERF_SAMPLE_IP) {
                data->ip = event->ip.ip;
@@ -765,7 +791,8 @@ int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
                u32 *p = (u32 *)array;
                data->cpu = *p;
                array++;
-       }
+       } else
+               data->cpu = -1;
 
        if (type & PERF_SAMPLE_PERIOD) {
                data->period = *array;
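
[Editor's note] For reference, event__parse_sample() consumes the trailing u64 array in the fixed order the kernel writes sample fields; a field is present only when its PERF_SAMPLE_* bit was set at record time, which is why the parser can step straight from TID to CPU when the intermediate bits are clear. A descriptive sketch of the 2.6.35-era order, recalled from the perf ABI rather than taken from this patch, and not a usable overlay since absent fields occupy no space on disk:

	/* Parse order of a PERF_RECORD_SAMPLE body, one entry per set
	 * sample_type bit. */
	struct sample_layout_sketch {
		unsigned long long ip;		/* PERF_SAMPLE_IP */
		unsigned int pid, tid;		/* PERF_SAMPLE_TID (one u64 slot) */
		unsigned long long time;	/* PERF_SAMPLE_TIME */
		unsigned long long addr;	/* PERF_SAMPLE_ADDR */
		unsigned long long id;		/* PERF_SAMPLE_ID */
		unsigned long long stream_id;	/* PERF_SAMPLE_STREAM_ID */
		unsigned int cpu, res;		/* PERF_SAMPLE_CPU (one u64 slot) */
		unsigned long long period;	/* PERF_SAMPLE_PERIOD */
	};
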
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 8577085db067bb3d7b6a7530fab877b3e28afe96..887ee63bbb625a8222d66e9741526f0f63d08fdf 100644 (file)
@@ -157,8 +157,9 @@ int event__process_task(event_t *self, struct perf_session *session);
 
 struct addr_location;
 int event__preprocess_sample(const event_t *self, struct perf_session *session,
-                            struct addr_location *al, symbol_filter_t filter);
-int event__parse_sample(event_t *event, u64 type, struct sample_data *data);
+                            struct addr_location *al, struct sample_data *data,
+                            symbol_filter_t filter);
+int event__parse_sample(const event_t *event, u64 type, struct sample_data *data);
 
 extern const char *event__name[];
 
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 1f62435f96c2fe6a3ef6a1a82f0f28ddca13652d..d7e67b167ea338a28fed519260f1069dd8e92cf9 100644 (file)
@@ -16,6 +16,8 @@
 #include "symbol.h"
 #include "debug.h"
 
+static bool no_buildid_cache = false;
+
 /*
  * Create new perf.data header attribute:
  */
@@ -385,8 +387,7 @@ static int perf_session__cache_build_ids(struct perf_session *self)
        int ret;
        char debugdir[PATH_MAX];
 
-       snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"),
-                DEBUG_CACHE_DIR);
+       snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
 
        if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
                return -1;
@@ -471,7 +472,8 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
                }
                buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
                                          buildid_sec->offset;
-               perf_session__cache_build_ids(session);
+               if (!no_buildid_cache)
+                       perf_session__cache_build_ids(session);
        }
 
        lseek(fd, sec_start, SEEK_SET);
@@ -1190,3 +1192,8 @@ int event__process_build_id(event_t *self,
                                 session);
        return 0;
 }
+
+void disable_buildid_cache(void)
+{
+       no_buildid_cache = true;
+}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 07f89b66b318e43748f666f495f87b75f6a34462..68d288c975de308b5161e2326fa0747a68726fc8 100644 (file)
@@ -70,6 +70,7 @@ struct hist_entry *__hists__add_entry(struct hists *self,
                        .map    = al->map,
                        .sym    = al->sym,
                },
+               .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .period = period,
@@ -1037,7 +1038,7 @@ fallback:
                 dso, dso->long_name, sym, sym->name);
 
        snprintf(command, sizeof(command),
-                "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s|expand",
+                "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand",
                 map__rip_2objdump(map, sym->start),
                 map__rip_2objdump(map, sym->end),
                 filename, filename);
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index d964cb199c672f96712c4125ba036b7bfe038687..baf665383498ec54042edee05ea7783c3398967f 100644 (file)
@@ -37,6 +37,7 @@
 #include "event.h"
 #include "debug.h"
 #include "util.h"
+#include "symbol.h"
 #include "probe-finder.h"
 
 /* Kprobe tracer basic type is up to u64 */
@@ -57,6 +58,55 @@ static int strtailcmp(const char *s1, const char *s2)
        return 0;
 }
 
+/*
+ * Find a src file from a DWARF tag path. Prepend optional source path prefix
+ * and chop off leading directories that do not exist. Result is passed back as
+ * a newly allocated path on success.
+ * Return 0 if file was found and readable, -errno otherwise.
+ */
+static int get_real_path(const char *raw_path, char **new_path)
+{
+       if (!symbol_conf.source_prefix) {
+               if (access(raw_path, R_OK) == 0) {
+                       *new_path = strdup(raw_path);
+                       return 0;
+               } else
+                       return -errno;
+       }
+
+       *new_path = malloc((strlen(symbol_conf.source_prefix) +
+                           strlen(raw_path) + 2));
+       if (!*new_path)
+               return -ENOMEM;
+
+       for (;;) {
+               sprintf(*new_path, "%s/%s", symbol_conf.source_prefix,
+                       raw_path);
+
+               if (access(*new_path, R_OK) == 0)
+                       return 0;
+
+               switch (errno) {
+               case ENAMETOOLONG:
+               case ENOENT:
+               case EROFS:
+               case EFAULT:
+                       raw_path = strchr(++raw_path, '/');
+                       if (!raw_path) {
+                               free(*new_path);
+                               *new_path = NULL;
+                               return -ENOENT;
+                       }
+                       continue;
+
+               default:
+                       free(*new_path);
+                       *new_path = NULL;
+                       return -errno;
+               }
+       }
+}
+
 /* Line number list operations */
 
 /* Add a line to line number list */
@@ -1096,11 +1146,13 @@ end:
 static int line_range_add_line(const char *src, unsigned int lineno,
                               struct line_range *lr)
 {
+       int ret;
+
        /* Copy real path */
        if (!lr->path) {
-               lr->path = strdup(src);
-               if (lr->path == NULL)
-                       return -ENOMEM;
+               ret = get_real_path(src, &lr->path);
+               if (ret != 0)
+                       return ret;
        }
        return line_list__add_line(&lr->line_list, lineno);
 }
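
[Editor's note] To make get_real_path()'s fallback concrete: with a hypothetical --source /usr/src/linux and a DWARF path of /build/obj/fs/exec.c, it probes the candidates below in order and returns the first one that access(2) reports readable. A toy loop printing the same candidate sequence:

	/* Candidate sequence tried by the prefix fallback above; the
	 * prefix and path are made up for illustration. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *prefix = "/usr/src/linux"; /* symbol_conf.source_prefix */
		const char *raw = "/build/obj/fs/exec.c";

		while (raw) {
			printf("try: %s/%s\n", prefix, raw);
			raw = strchr(raw + 1, '/');	/* drop one leading component */
		}
		return 0;
	}
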
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 8f83a1835766e5d1a6e509547d14559529bbba23..0564a5cfb12ebbd05dd56d239427db31ba4a5126 100644 (file)
@@ -27,8 +27,10 @@ static int perf_session__open(struct perf_session *self, bool force)
 
        self->fd = open(self->filename, O_RDONLY);
        if (self->fd < 0) {
-               pr_err("failed to open file: %s", self->filename);
-               if (!strcmp(self->filename, "perf.data"))
+               int err = errno;
+
+               pr_err("failed to open %s: %s", self->filename, strerror(err));
+               if (err == ENOENT && !strcmp(self->filename, "perf.data"))
                        pr_err("  (try 'perf record' first)");
                pr_err("\n");
                return -errno;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 2316cb5a41164d4d99fbb289118def977b74cf5e..c27b4b03fbc17a424b3cba97ad8c7566121e57d7 100644 (file)
@@ -13,6 +13,7 @@ enum sort_type        sort__first_dimension;
 unsigned int dsos__col_width;
 unsigned int comms__col_width;
 unsigned int threads__col_width;
+unsigned int cpus__col_width;
 static unsigned int parent_symbol__col_width;
 char * field_sep;
 
@@ -28,6 +29,8 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
                                    size_t size, unsigned int width);
 static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
                                       size_t size, unsigned int width);
+static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
+                                   size_t size, unsigned int width);
 
 struct sort_entry sort_thread = {
        .se_header      = "Command:  Pid",
@@ -63,6 +66,13 @@ struct sort_entry sort_parent = {
        .se_snprintf    = hist_entry__parent_snprintf,
        .se_width       = &parent_symbol__col_width,
 };
+struct sort_entry sort_cpu = {
+       .se_header      = "CPU",
+       .se_cmp         = sort__cpu_cmp,
+       .se_snprintf    = hist_entry__cpu_snprintf,
+       .se_width       = &cpus__col_width,
+};
 
 struct sort_dimension {
        const char              *name;
@@ -76,6 +86,7 @@ static struct sort_dimension sort_dimensions[] = {
        { .name = "dso",        .entry = &sort_dso,     },
        { .name = "symbol",     .entry = &sort_sym,     },
        { .name = "parent",     .entry = &sort_parent,  },
+       { .name = "cpu",        .entry = &sort_cpu,     },
 };
 
 int64_t cmp_null(void *l, void *r)
@@ -242,6 +253,20 @@ static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
                              self->parent ? self->parent->name : "[other]");
 }
 
+/* --sort cpu */
+
+int64_t
+sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       return right->cpu - left->cpu;
+}
+
+static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
+                                      size_t size, unsigned int width)
+{
+       return repsep_snprintf(bf, size, "%-*d", width, self->cpu);
+}
+
 int sort_dimension__add(const char *tok)
 {
        unsigned int i;
@@ -281,6 +306,8 @@ int sort_dimension__add(const char *tok)
                                sort__first_dimension = SORT_SYM;
                        else if (!strcmp(sd->name, "parent"))
                                sort__first_dimension = SORT_PARENT;
+                       else if (!strcmp(sd->name, "cpu"))
+                               sort__first_dimension = SORT_CPU;
                }
 
                list_add_tail(&sd->entry->list, &hist_entry__sort_list);
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 0d61c4082f43849ddfe4ac1f6cb1cf9841009dd8..560c855417e4ac2989dfe82ec8dffe30dc8933ed 100644 (file)
@@ -39,6 +39,7 @@ extern struct sort_entry sort_parent;
 extern unsigned int dsos__col_width;
 extern unsigned int comms__col_width;
 extern unsigned int threads__col_width;
+extern unsigned int cpus__col_width;
 extern enum sort_type sort__first_dimension;
 
 struct hist_entry {
@@ -51,6 +52,7 @@ struct hist_entry {
        struct map_symbol       ms;
        struct thread           *thread;
        u64                     ip;
+       s32                     cpu;
        u32                     nr_events;
        char                    level;
        u8                      filtered;
@@ -68,7 +70,8 @@ enum sort_type {
        SORT_COMM,
        SORT_DSO,
        SORT_SYM,
-       SORT_PARENT
+       SORT_PARENT,
+       SORT_CPU,
 };
 
 /*
@@ -104,6 +107,7 @@ extern int64_t sort__comm_collapse(struct hist_entry *, struct hist_entry *);
 extern int64_t sort__dso_cmp(struct hist_entry *, struct hist_entry *);
 extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *);
 extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *);
+int64_t sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right);
 extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int);
 extern int sort_dimension__add(const char *);
 void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index b63e5713849f3abfa25bf8160e8b458e7adf913e..971d0a05d6b44c85c7490f97bcd159acd4a87b25 100644 (file)
@@ -933,6 +933,25 @@ static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type
        }
 }
 
+static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
+{
+       Elf_Scn *sec = NULL;
+       GElf_Shdr shdr;
+       size_t cnt = 1;
+
+       while ((sec = elf_nextscn(elf, sec)) != NULL) {
+               gelf_getshdr(sec, &shdr);
+
+               if ((addr >= shdr.sh_addr) &&
+                   (addr < (shdr.sh_addr + shdr.sh_size)))
+                       return cnt;
+
+               ++cnt;
+       }
+
+       return -1;
+}
+
 static int dso__load_sym(struct dso *self, struct map *map, const char *name,
                         int fd, symbol_filter_t filter, int kmodule)
 {
@@ -944,12 +963,13 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
        int err = -1;
        uint32_t idx;
        GElf_Ehdr ehdr;
-       GElf_Shdr shdr;
-       Elf_Data *syms;
+       GElf_Shdr shdr, opdshdr;
+       Elf_Data *syms, *opddata = NULL;
        GElf_Sym sym;
-       Elf_Scn *sec, *sec_strndx;
+       Elf_Scn *sec, *sec_strndx, *opdsec;
        Elf *elf;
        int nr = 0;
+       size_t opdidx = 0;
 
        elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
        if (elf == NULL) {
@@ -969,6 +989,10 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
                        goto out_elf_end;
        }
 
+       opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
+       if (opdsec)
+               opddata = elf_rawdata(opdsec, NULL);
+
        syms = elf_getdata(sec, NULL);
        if (syms == NULL)
                goto out_elf_end;
@@ -1013,6 +1037,13 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
                if (!is_label && !elf_sym__is_a(&sym, map->type))
                        continue;
 
+               if (opdsec && sym.st_shndx == opdidx) {
+                       u32 offset = sym.st_value - opdshdr.sh_addr;
+                       u64 *opd = opddata->d_buf + offset;
+                       sym.st_value = *opd;
+                       sym.st_shndx = elf_addr_to_index(elf, sym.st_value);
+               }
+
                sec = elf_getscn(elf, sym.st_shndx);
                if (!sec)
                        goto out_elf_end;
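
[Editor's note] The .opd handling is for ppc64 ELFv1 function descriptors: a function symbol's st_value points into .opd at a descriptor whose first doubleword is the real entry address, so the code dereferences *opd and re-resolves the owning section with elf_addr_to_index(). A sketch of the descriptor layout assumed here; field names are illustrative, layout per the 64-bit PowerPC ELF ABI:

	/* ppc64 ELFv1 function descriptor as laid out in .opd; the
	 * fixup above effectively reads ->entry via '*opd'. */
	#include <stdint.h>

	struct ppc64_func_desc {
		uint64_t entry;	/* real function entry point (text address) */
		uint64_t toc;	/* TOC base to load into r2 */
		uint64_t env;	/* environment pointer, normally unused */
	};
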
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 5e02d2c1715425c800af943b808143c9ed302ba3..80e569bbdecc4bd0ea37a7898035b9c52d0c0793 100644 (file)
@@ -9,8 +9,6 @@
 #include <linux/rbtree.h>
 #include <stdio.h>
 
-#define DEBUG_CACHE_DIR ".debug"
-
 #ifdef HAVE_CPLUS_DEMANGLE
 extern char *cplus_demangle(const char *, int);
 
@@ -73,6 +71,7 @@ struct symbol_conf {
                        full_paths,
                        show_cpu_utilization;
        const char      *vmlinux_name,
+                       *source_prefix,
                        *field_sep;
        const char      *default_guest_vmlinux_name,
                        *default_guest_kallsyms,
@@ -112,7 +111,8 @@ struct addr_location {
        u64           addr;
        char          level;
        bool          filtered;
-       unsigned int  cpumode;
+       u8            cpumode;
+       s32           cpu;
 };
 
 enum dso_kernel_type {
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 4e8b6b0c551c87cd98f78857e4205ff3f5cd79d5..f380fed74359034a843756256d6e7a79b0ff22b5 100644 (file)
@@ -89,6 +89,7 @@
 
 extern const char *graph_line;
 extern const char *graph_dotted_line;
+extern char buildid_dir[];
 
 /* On most systems <limits.h> would have given us this, but
  * not on some systems (e.g. GNU/Hurd).
@@ -152,6 +153,8 @@ extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)))
 extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN);
 
 extern int prefixcmp(const char *str, const char *prefix);
+extern void set_buildid_dir(void);
+extern void disable_buildid_cache(void);
 
 static inline const char *skip_prefix(const char *str, const char *prefix)
 {