perf thread-stack: Add thread_stack__sample_late()
author Adrian Hunter <adrian.hunter@intel.com>
Wed, 1 Apr 2020 10:16:06 +0000 (13:16 +0300)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Thu, 16 Apr 2020 15:19:15 +0000 (12:19 -0300)
Add a thread stack function to create a call chain for hardware events
where the sample records get created some time after the event occurred.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Link: http://lore.kernel.org/lkml/20200401101613.6201-10-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/util/thread-stack.c
tools/perf/util/thread-stack.h

index 0885967d5bc38077acf6a69cd66718657ca3fed9..83f6c83f5617348bafe5c4447b479e93ed72973f 100644 (file)
@@ -497,6 +497,63 @@ void thread_stack__sample(struct thread *thread, int cpu,
        chain->nr = i;
 }
 
+/*
+ * Hardware sample records, created some time after the event occurred, need to
+ * have subsequent addresses removed from the call chain.
+ */
+void thread_stack__sample_late(struct thread *thread, int cpu,
+                              struct ip_callchain *chain, size_t sz,
+                              u64 sample_ip, u64 kernel_start)
+{
+       struct thread_stack *ts = thread__stack(thread, cpu);
+       u64 sample_context = callchain_context(sample_ip, kernel_start);
+       u64 last_context, context, ip;
+       size_t nr = 0, j;
+
+       /* Minimum output is one context entry plus the sample ip */
+       if (sz < 2) {
+               chain->nr = 0;
+               return;
+       }
+
+       /* No stack for this thread/cpu: fall back to the sample ip alone */
+       if (!ts)
+               goto out;
+
+       /*
+        * When tracing kernel space, kernel addresses occur at the top of the
+        * call chain after the event occurred but before tracing stopped.
+        * Skip them.
+        */
+       for (j = 1; j <= ts->cnt; j++) {
+               ip = ts->stack[ts->cnt - j].ret_addr;
+               context = callchain_context(ip, kernel_start);
+               if (context == PERF_CONTEXT_USER ||
+                   (context == sample_context && ip == sample_ip))
+                       break;
+       }
+
+       last_context = sample_ip; /* Use sample_ip as an invalid context */
+
+       /*
+        * Copy the remaining stack entries (innermost first), inserting a
+        * PERF_CONTEXT_* marker each time the context changes.
+        */
+       for (; nr < sz && j <= ts->cnt; nr++, j++) {
+               ip = ts->stack[ts->cnt - j].ret_addr;
+               context = callchain_context(ip, kernel_start);
+               if (context != last_context) {
+                       /* Need room for both the context marker and the ip */
+                       if (nr >= sz - 1)
+                               break;
+                       chain->ips[nr++] = context;
+                       last_context = context;
+               }
+               chain->ips[nr] = ip;
+       }
+out:
+       if (nr) {
+               chain->nr = nr;
+       } else {
+               /* Nothing usable found: emit just the sample's context and ip */
+               chain->ips[0] = sample_context;
+               chain->ips[1] = sample_ip;
+               chain->nr = 2;
+       }
+}
+
 struct call_return_processor *
 call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
                           void *data)
index e1ec5a58f1b2cda18fce0aa1b6457b16db8caf39..8962ddc4e1ab89c97052ea0c7e34efcd2aa48737 100644 (file)
@@ -85,6 +85,9 @@ int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
 void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr);
 void thread_stack__sample(struct thread *thread, int cpu, struct ip_callchain *chain,
                          size_t sz, u64 ip, u64 kernel_start);
+void thread_stack__sample_late(struct thread *thread, int cpu,
+                              struct ip_callchain *chain, size_t sz, u64 ip,
+                              u64 kernel_start);
 int thread_stack__flush(struct thread *thread);
 void thread_stack__free(struct thread *thread);
 size_t thread_stack__depth(struct thread *thread, int cpu);