Merge branches 'tracing/branch-tracer', 'tracing/ftrace', 'tracing/function-graph...
[sfrench/cifs-2.6.git] / kernel / trace / trace_functions_graph.c
1 /*
2  *
3  * Function graph tracer.
4  * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  */
9 #include <linux/debugfs.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/fs.h>
13
14 #include "trace.h"
15
/* Spaces of indentation added per level of call nesting */
#define TRACE_GRAPH_INDENT	2

/* Flag options (toggled at runtime via trace_options) */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1	/* append the overrun count after "}" */
#define TRACE_GRAPH_PRINT_CPU		0x2	/* prefix each line with the cpu number */
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4	/* mark slow functions with "+" / "!" */
/*
 * User-visible option names for this tracer; each maps a
 * "funcgraph-*" trace_options entry to one of the flag bits above.
 */
static struct tracer_opt trace_opts[] = {
	/* Display overruns ? */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	{ } /* Empty entry */
};
32
static struct tracer_flags tracer_flags = {
	/* Defaults: cpu and overhead markers on, overruns off */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
	.opts = trace_opts
};
38
/*
 * pid of the last trace entry processed, per cpu; -1 means no entry
 * has been seen yet on that cpu.  verif_pid() compares against this
 * to decide when to emit a context-switch line.
 * NOTE(review): a plain NR_CPUS array may share cache lines between
 * cpus; per-cpu data would avoid that - confirm before changing.
 */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
41
42 static int graph_trace_init(struct trace_array *tr)
43 {
44         int cpu, ret;
45
46         for_each_online_cpu(cpu)
47                 tracing_reset(tr, cpu);
48
49         ret = register_ftrace_graph(&trace_graph_return,
50                                         &trace_graph_entry);
51         if (ret)
52                 return ret;
53         tracing_start_cmdline_record();
54
55         return 0;
56 }
57
/*
 * Detach the tracer: stop cmdline recording, then unhook the graph
 * entry/return callbacks registered by graph_trace_init().
 */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
63
/*
 * Number of decimal digits needed to print @nb (the historical name is
 * a misnomer - this is a digit count, not a log).  Used to right-align
 * the cpu column.  The old version capped at 3, which misaligned the
 * column on systems with 1000+ cpus.
 */
static inline int log10_cpu(int nb)
{
	int digits = 1;

	while (nb >= 10) {
		nb /= 10;
		digits++;
	}
	return digits;
}
72
73 static enum print_line_t
74 print_graph_cpu(struct trace_seq *s, int cpu)
75 {
76         int i;
77         int ret;
78         int log10_this = log10_cpu(cpu);
79         int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
80
81
82         /*
83          * Start with a space character - to make it stand out
84          * to the right a bit when trace output is pasted into
85          * email:
86          */
87         ret = trace_seq_printf(s, " ");
88
89         /*
90          * Tricky - we space the CPU field according to the max
91          * number of online CPUs. On a 2-cpu system it would take
92          * a maximum of 1 digit - on a 128 cpu system it would
93          * take up to 3 digits:
94          */
95         for (i = 0; i < log10_all - log10_this; i++) {
96                 ret = trace_seq_printf(s, " ");
97                 if (!ret)
98                         return TRACE_TYPE_PARTIAL_LINE;
99         }
100         ret = trace_seq_printf(s, "%d) ", cpu);
101         if (!ret)
102                 return TRACE_TYPE_PARTIAL_LINE;
103
104         return TRACE_TYPE_HANDLED;
105 }
106
107
108 /* If the pid changed since the last trace, output this event */
109 static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
110 {
111         char *comm, *prev_comm;
112         pid_t prev_pid;
113         int ret;
114
115         if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
116                 return 1;
117
118         prev_pid = last_pid[cpu];
119         last_pid[cpu] = pid;
120
121         comm = trace_find_cmdline(pid);
122         prev_comm = trace_find_cmdline(prev_pid);
123
124 /*
125  * Context-switch trace line:
126
127  ------------------------------------------
128  | 1)  migration/0--1  =>  sshd-1755
129  ------------------------------------------
130
131  */
132         ret = trace_seq_printf(s,
133                 " ------------------------------------------\n");
134         ret += trace_seq_printf(s, " | %d)  %s-%d  =>  %s-%d\n",
135                                   cpu, prev_comm, prev_pid, comm, pid);
136         ret += trace_seq_printf(s,
137                 " ------------------------------------------\n\n");
138         return ret;
139 }
140
141 static bool
142 trace_branch_is_leaf(struct trace_iterator *iter,
143                 struct ftrace_graph_ent_entry *curr)
144 {
145         struct ring_buffer_iter *ring_iter;
146         struct ring_buffer_event *event;
147         struct ftrace_graph_ret_entry *next;
148
149         ring_iter = iter->buffer_iter[iter->cpu];
150
151         if (!ring_iter)
152                 return false;
153
154         event = ring_buffer_iter_peek(ring_iter, NULL);
155
156         if (!event)
157                 return false;
158
159         next = ring_buffer_event_data(event);
160
161         if (next->ent.type != TRACE_GRAPH_RET)
162                 return false;
163
164         if (curr->ent.pid != next->ent.pid ||
165                         curr->graph_ent.func != next->ret.func)
166                 return false;
167
168         return true;
169 }
170
171
/*
 * Print a nanosecond duration as microseconds: "9999.999 us |  ".
 * do_div() divides @duration by 1000 in place and returns the
 * remainder.
 */
static inline int
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);

	/*
	 * Zero-pad the fractional part: %3lu space-pads, so 12us+5ns
	 * would print "12.  5 us" and read as 12.5 us.
	 */
	return trace_seq_printf(s, "%4llu.%03lu us |  ", duration, nsecs_rem);
}
178
/*
 * Emit a two-character overhead marker in front of the duration.
 * @duration is in nanoseconds: "!" above 100 usecs, "+" above
 * 10 usecs, blanks otherwise.
 */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	const char *mark;

	if (duration > 100000ULL)
		mark = "! ";
	else if (duration > 10000ULL)
		mark = "+ ";
	else
		mark = "  ";

	return trace_seq_printf(s, "%s", mark);
}
193
194 /* Case of a leaf function on its call entry */
195 static enum print_line_t
196 print_graph_entry_leaf(struct trace_iterator *iter,
197                 struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
198 {
199         struct ftrace_graph_ret_entry *ret_entry;
200         struct ftrace_graph_ret *graph_ret;
201         struct ring_buffer_event *event;
202         struct ftrace_graph_ent *call;
203         unsigned long long duration;
204         int ret;
205         int i;
206
207         event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
208         ret_entry = ring_buffer_event_data(event);
209         graph_ret = &ret_entry->ret;
210         call = &entry->graph_ent;
211         duration = graph_ret->rettime - graph_ret->calltime;
212
213         /* Must not exceed 8 characters: 9999.999 us */
214         if (duration > 10000000ULL)
215                 duration = 9999999ULL;
216
217         /* Overhead */
218         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
219                 ret = print_graph_overhead(duration, s);
220                 if (!ret)
221                         return TRACE_TYPE_PARTIAL_LINE;
222         }
223
224         /* Duration */
225         ret = print_graph_duration(duration, s);
226         if (!ret)
227                 return TRACE_TYPE_PARTIAL_LINE;
228
229         /* Function */
230         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
231                 ret = trace_seq_printf(s, " ");
232                 if (!ret)
233                         return TRACE_TYPE_PARTIAL_LINE;
234         }
235
236         ret = seq_print_ip_sym(s, call->func, 0);
237         if (!ret)
238                 return TRACE_TYPE_PARTIAL_LINE;
239
240         ret = trace_seq_printf(s, "();\n");
241         if (!ret)
242                 return TRACE_TYPE_PARTIAL_LINE;
243
244         return TRACE_TYPE_HANDLED;
245 }
246
247 static enum print_line_t
248 print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
249                         struct trace_seq *s)
250 {
251         int i;
252         int ret;
253         struct ftrace_graph_ent *call = &entry->graph_ent;
254
255         /* No overhead */
256         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
257                 ret = trace_seq_printf(s, "  ");
258                 if (!ret)
259                         return TRACE_TYPE_PARTIAL_LINE;
260         }
261
262         /* No time */
263         ret = trace_seq_printf(s, "            |  ");
264
265         /* Function */
266         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
267                 ret = trace_seq_printf(s, " ");
268                 if (!ret)
269                         return TRACE_TYPE_PARTIAL_LINE;
270         }
271
272         ret = seq_print_ip_sym(s, call->func, 0);
273         if (!ret)
274                 return TRACE_TYPE_PARTIAL_LINE;
275
276         ret = trace_seq_printf(s, "() {\n");
277         if (!ret)
278                 return TRACE_TYPE_PARTIAL_LINE;
279
280         return TRACE_TYPE_HANDLED;
281 }
282
283 static enum print_line_t
284 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
285                         struct trace_iterator *iter, int cpu)
286 {
287         int ret;
288         struct trace_entry *ent = iter->ent;
289
290         /* Pid */
291         if (!verif_pid(s, ent->pid, cpu))
292                 return TRACE_TYPE_PARTIAL_LINE;
293
294         /* Cpu */
295         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
296                 ret = print_graph_cpu(s, cpu);
297                 if (!ret)
298                         return TRACE_TYPE_PARTIAL_LINE;
299         }
300
301         if (trace_branch_is_leaf(iter, field))
302                 return print_graph_entry_leaf(iter, field, s);
303         else
304                 return print_graph_entry_nested(field, s);
305
306 }
307
308 static enum print_line_t
309 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
310                    struct trace_entry *ent, int cpu)
311 {
312         int i;
313         int ret;
314         unsigned long long duration = trace->rettime - trace->calltime;
315
316         /* Must not exceed 8 characters: xxxx.yyy us */
317         if (duration > 10000000ULL)
318                 duration = 9999999ULL;
319
320         /* Pid */
321         if (!verif_pid(s, ent->pid, cpu))
322                 return TRACE_TYPE_PARTIAL_LINE;
323
324         /* Cpu */
325         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
326                 ret = print_graph_cpu(s, cpu);
327                 if (!ret)
328                         return TRACE_TYPE_PARTIAL_LINE;
329         }
330
331         /* Overhead */
332         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
333                 ret = print_graph_overhead(duration, s);
334                 if (!ret)
335                         return TRACE_TYPE_PARTIAL_LINE;
336         }
337
338         /* Duration */
339         ret = print_graph_duration(duration, s);
340         if (!ret)
341                 return TRACE_TYPE_PARTIAL_LINE;
342
343         /* Closing brace */
344         for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
345                 ret = trace_seq_printf(s, " ");
346                 if (!ret)
347                         return TRACE_TYPE_PARTIAL_LINE;
348         }
349
350         ret = trace_seq_printf(s, "}\n");
351         if (!ret)
352                 return TRACE_TYPE_PARTIAL_LINE;
353
354         /* Overrun */
355         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
356                 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
357                                         trace->overrun);
358                 if (!ret)
359                         return TRACE_TYPE_PARTIAL_LINE;
360         }
361         return TRACE_TYPE_HANDLED;
362 }
363
364 enum print_line_t
365 print_graph_function(struct trace_iterator *iter)
366 {
367         struct trace_seq *s = &iter->seq;
368         struct trace_entry *entry = iter->ent;
369
370         switch (entry->type) {
371         case TRACE_GRAPH_ENT: {
372                 struct ftrace_graph_ent_entry *field;
373                 trace_assign_type(field, entry);
374                 return print_graph_entry(field, s, iter,
375                                          iter->cpu);
376         }
377         case TRACE_GRAPH_RET: {
378                 struct ftrace_graph_ret_entry *field;
379                 trace_assign_type(field, entry);
380                 return print_graph_return(&field->ret, s, entry, iter->cpu);
381         }
382         default:
383                 return TRACE_TYPE_UNHANDLED;
384         }
385 }
386
387 static struct tracer graph_trace __read_mostly = {
388         .name        = "function_graph",
389         .init        = graph_trace_init,
390         .reset       = graph_trace_reset,
391         .print_line = print_graph_function,
392         .flags          = &tracer_flags,
393 };
394
/* Register the function-graph tracer at boot (device initcall level) */
static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);