kernel/trace/trace.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ring buffer based function tracer
4  *
5  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7  *
8  * Originally taken from the RT patch by:
9  *    Arnaldo Carvalho de Melo <acme@redhat.com>
10  *
11  * Based on code from the latency_tracer, that is:
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/seq_file.h>
21 #include <linux/notifier.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/poll.h>
42 #include <linux/nmi.h>
43 #include <linux/fs.h>
44 #include <linux/trace.h>
45 #include <linux/sched/clock.h>
46 #include <linux/sched/rt.h>
47
48 #include "trace.h"
49 #include "trace_output.h"
50
51 /*
52  * On boot up, the ring buffer is set to the minimum size, so that
53  * we do not waste memory on systems that are not using tracing.
54  */
55 bool ring_buffer_expanded;
56
57 /*
58  * We need to change this state when a selftest is running.
59  * A selftest will look into the ring-buffer to count the
60  * entries inserted during the selftest, although concurrent
61  * insertions into the ring-buffer, such as trace_printk(), could occur
62  * at the same time, giving false positive or negative results.
63  */
64 static bool __read_mostly tracing_selftest_running;
65
66 /*
67  * If a tracer is running, we do not want to run SELFTEST.
68  */
69 bool __read_mostly tracing_selftest_disabled;
70
71 /* Pipe tracepoints to printk */
72 struct trace_iterator *tracepoint_print_iter;
73 int tracepoint_printk;
74 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
75
76 /* For tracers that don't implement custom flags */
77 static struct tracer_opt dummy_tracer_opt[] = {
78         { }
79 };
80
81 static int
82 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
83 {
84         return 0;
85 }
86
87 /*
88  * To prevent the comm cache from being overwritten when no
89  * tracing is active, only save the comm when a trace event
90  * occurred.
91  */
92 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
93
94 /*
95  * Kill all tracing for good (never come back).
96  * It is initialized to 1 but will turn to zero if the initialization
97  * of the tracer is successful. But that is the only place that sets
98  * this back to zero.
99  */
100 static int tracing_disabled = 1;
101
102 cpumask_var_t __read_mostly     tracing_buffer_mask;
103
104 /*
105  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
106  *
107  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
108  * is set, then ftrace_dump is called. This will output the contents
109  * of the ftrace buffers to the console.  This is very useful for
110  * capturing traces that lead to crashes and outputting them to a
111  * serial console.
112  *
113  * It is off by default, but you can enable it either by specifying
114  * "ftrace_dump_on_oops" on the kernel command line, or by setting
115  * /proc/sys/kernel/ftrace_dump_on_oops.
116  * Set it to 1 to dump the buffers of all CPUs.
117  * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
118  */
119
120 enum ftrace_dump_mode ftrace_dump_on_oops;
121
122 /* When set, tracing will stop when a WARN*() is hit */
123 int __disable_trace_on_warning;
124
125 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
126 /* Map of enums to their values, for "eval_map" file */
127 struct trace_eval_map_head {
128         struct module                   *mod;
129         unsigned long                   length;
130 };
131
132 union trace_eval_map_item;
133
134 struct trace_eval_map_tail {
135         /*
136          * "end" is first and points to NULL as it must be different
137          * than "mod" or "eval_string"
138          */
139         union trace_eval_map_item       *next;
140         const char                      *end;   /* points to NULL */
141 };
142
143 static DEFINE_MUTEX(trace_eval_mutex);
144
145 /*
146  * The trace_eval_maps are saved in an array with two extra elements,
147  * one at the beginning, and one at the end. The beginning item contains
148  * the count of the saved maps (head.length), and the module they
149  * belong to if not built in (head.mod). The ending item contains a
150  * pointer to the next array of saved eval_map items.
151  */
152 union trace_eval_map_item {
153         struct trace_eval_map           map;
154         struct trace_eval_map_head      head;
155         struct trace_eval_map_tail      tail;
156 };
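/*
 * A rough sketch of the resulting array layout (illustrative only):
 *
 *   [0]      head  (length = N, mod = owning module, or NULL if built in)
 *   [1..N]   map   (the N saved trace_eval_map entries)
 *   [N+1]    tail  (next = the next saved array, or NULL if this is the last)
 */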
157
158 static union trace_eval_map_item *trace_eval_maps;
159 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
160
161 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
162
163 #define MAX_TRACER_SIZE         100
164 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
165 static char *default_bootup_tracer;
166
167 static bool allocate_snapshot;
168
169 static int __init set_cmdline_ftrace(char *str)
170 {
171         strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
172         default_bootup_tracer = bootup_tracer_buf;
173         /* We are using ftrace early, expand it */
174         ring_buffer_expanded = true;
175         return 1;
176 }
177 __setup("ftrace=", set_cmdline_ftrace);
178
179 static int __init set_ftrace_dump_on_oops(char *str)
180 {
181         if (*str++ != '=' || !*str) {
182                 ftrace_dump_on_oops = DUMP_ALL;
183                 return 1;
184         }
185
186         if (!strcmp("orig_cpu", str)) {
187                 ftrace_dump_on_oops = DUMP_ORIG;
188                 return 1;
189         }
190
191         return 0;
192 }
193 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
194
195 static int __init stop_trace_on_warning(char *str)
196 {
197         if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
198                 __disable_trace_on_warning = 1;
199         return 1;
200 }
201 __setup("traceoff_on_warning", stop_trace_on_warning);
202
203 static int __init boot_alloc_snapshot(char *str)
204 {
205         allocate_snapshot = true;
206         /* We also need the main ring buffer expanded */
207         ring_buffer_expanded = true;
208         return 1;
209 }
210 __setup("alloc_snapshot", boot_alloc_snapshot);
211
212
213 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
214
215 static int __init set_trace_boot_options(char *str)
216 {
217         strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
218         return 0;
219 }
220 __setup("trace_options=", set_trace_boot_options);
221
222 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
223 static char *trace_boot_clock __initdata;
224
225 static int __init set_trace_boot_clock(char *str)
226 {
227         strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
228         trace_boot_clock = trace_boot_clock_buf;
229         return 0;
230 }
231 __setup("trace_clock=", set_trace_boot_clock);
232
233 static int __init set_tracepoint_printk(char *str)
234 {
235         if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
236                 tracepoint_printk = 1;
237         return 1;
238 }
239 __setup("tp_printk", set_tracepoint_printk);
240
241 unsigned long long ns2usecs(u64 nsec)
242 {
243         nsec += 500;
244         do_div(nsec, 1000);
245         return nsec;
246 }
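/*
 * Note that the +500 rounds to the nearest microsecond, e.g. (illustrative):
 * ns2usecs(1499) == 1 and ns2usecs(1500) == 2.
 */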
247
248 /* trace_flags holds trace_options default values */
249 #define TRACE_DEFAULT_FLAGS                                             \
250         (FUNCTION_DEFAULT_FLAGS |                                       \
251          TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
252          TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
253          TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
254          TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
255
256 /* trace_options that are only supported by global_trace */
257 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |                      \
258                TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
259
260 /* trace_flags that are default zero for instances */
261 #define ZEROED_TRACE_FLAGS \
262         (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
263
264 /*
265  * The global_trace is the descriptor that holds the top-level tracing
266  * buffers for the live tracing.
267  */
268 static struct trace_array global_trace = {
269         .trace_flags = TRACE_DEFAULT_FLAGS,
270 };
271
272 LIST_HEAD(ftrace_trace_arrays);
273
274 int trace_array_get(struct trace_array *this_tr)
275 {
276         struct trace_array *tr;
277         int ret = -ENODEV;
278
279         mutex_lock(&trace_types_lock);
280         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
281                 if (tr == this_tr) {
282                         tr->ref++;
283                         ret = 0;
284                         break;
285                 }
286         }
287         mutex_unlock(&trace_types_lock);
288
289         return ret;
290 }
291
292 static void __trace_array_put(struct trace_array *this_tr)
293 {
294         WARN_ON(!this_tr->ref);
295         this_tr->ref--;
296 }
297
298 void trace_array_put(struct trace_array *this_tr)
299 {
300         mutex_lock(&trace_types_lock);
301         __trace_array_put(this_tr);
302         mutex_unlock(&trace_types_lock);
303 }
304
305 int call_filter_check_discard(struct trace_event_call *call, void *rec,
306                               struct ring_buffer *buffer,
307                               struct ring_buffer_event *event)
308 {
309         if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
310             !filter_match_preds(call->filter, rec)) {
311                 __trace_event_discard_commit(buffer, event);
312                 return 1;
313         }
314
315         return 0;
316 }
317
318 void trace_free_pid_list(struct trace_pid_list *pid_list)
319 {
320         vfree(pid_list->pids);
321         kfree(pid_list);
322 }
323
324 /**
325  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
326  * @filtered_pids: The list of pids to check
327  * @search_pid: The PID to find in @filtered_pids
328  *
329  * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
330  */
331 bool
332 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
333 {
334         /*
335          * If pid_max changed after filtered_pids was created, we
336          * by default ignore all pids greater than the previous pid_max.
337          */
338         if (search_pid >= filtered_pids->pid_max)
339                 return false;
340
341         return test_bit(search_pid, filtered_pids->pids);
342 }
343
344 /**
345  * trace_ignore_this_task - should a task be ignored for tracing
346  * @filtered_pids: The list of pids to check
347  * @task: The task that should be ignored if not filtered
348  *
349  * Checks if @task should be traced or not from @filtered_pids.
350  * Returns true if @task should *NOT* be traced.
351  * Returns false if @task should be traced.
352  */
353 bool
354 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
355 {
356         /*
357          * Return false, because if filtered_pids does not exist,
358          * all pids are good to trace.
359          */
360         if (!filtered_pids)
361                 return false;
362
363         return !trace_find_filtered_pid(filtered_pids, task->pid);
364 }
365
366 /**
367  * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
368  * @pid_list: The list to modify
369  * @self: The current task for fork or NULL for exit
370  * @task: The task to add or remove
371  *
372  * If adding a task, if @self is defined, the task is only added if @self
373  * is also included in @pid_list. This happens on fork and tasks should
374  * only be added when the parent is listed. If @self is NULL, then the
375  * @task pid will be removed from the list, which would happen on exit
376  * of a task.
377  */
378 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
379                                   struct task_struct *self,
380                                   struct task_struct *task)
381 {
382         if (!pid_list)
383                 return;
384
385         /* For forks, we only add if the forking task is listed */
386         if (self) {
387                 if (!trace_find_filtered_pid(pid_list, self->pid))
388                         return;
389         }
390
391         /* Sorry, but we don't support pid_max changing after setting */
392         if (task->pid >= pid_list->pid_max)
393                 return;
394
395         /* "self" is set for forks, and NULL for exits */
396         if (self)
397                 set_bit(task->pid, pid_list->pids);
398         else
399                 clear_bit(task->pid, pid_list->pids);
400 }
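/*
 * This is typically wired up to the sched_process_fork and
 * sched_process_exit tracepoints so that the pid filter follows forks and
 * drops exiting tasks. A sketch of such a fork handler (not the exact
 * callers used elsewhere in the tracing code):
 *
 *	void on_fork(void *data, struct task_struct *self,
 *		     struct task_struct *task)
 *	{
 *		struct trace_pid_list *pid_list = ...;	(rcu_dereference_sched)
 *
 *		trace_filter_add_remove_task(pid_list, self, task);
 *	}
 */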
401
402 /**
403  * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
404  * @pid_list: The pid list to show
405  * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
406  * @pos: The position of the file
407  *
408  * This is used by the seq_file "next" operation to iterate the pids
409  * listed in a trace_pid_list structure.
410  *
411  * Returns the pid+1 as we want to display pid of zero, but NULL would
412  * stop the iteration.
413  */
414 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
415 {
416         unsigned long pid = (unsigned long)v;
417
418         (*pos)++;
419
420         /* pid already is +1 of the actual previous bit */
421         pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
422
423         /* Return pid + 1 to allow zero to be represented */
424         if (pid < pid_list->pid_max)
425                 return (void *)(pid + 1);
426
427         return NULL;
428 }
429
430 /**
431  * trace_pid_start - Used for seq_file to start reading pid lists
432  * @pid_list: The pid list to show
433  * @pos: The position of the file
434  *
435  * This is used by seq_file "start" operation to start the iteration
436  * of listing pids.
437  *
438  * Returns the pid+1 as we want to display pid of zero, but NULL would
439  * stop the iteration.
440  */
441 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
442 {
443         unsigned long pid;
444         loff_t l = 0;
445
446         pid = find_first_bit(pid_list->pids, pid_list->pid_max);
447         if (pid >= pid_list->pid_max)
448                 return NULL;
449
450         /* Return pid + 1 so that zero can be the exit value */
451         for (pid++; pid && l < *pos;
452              pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
453                 ;
454         return (void *)pid;
455 }
456
457 /**
458  * trace_pid_show - show the current pid in seq_file processing
459  * @m: The seq_file structure to write into
460  * @v: A void pointer of the pid (+1) value to display
461  *
462  * Can be directly used by seq_file operations to display the current
463  * pid value.
464  */
465 int trace_pid_show(struct seq_file *m, void *v)
466 {
467         unsigned long pid = (unsigned long)v - 1;
468
469         seq_printf(m, "%lu\n", pid);
470         return 0;
471 }
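/*
 * These three helpers are meant to back a seq_file; a caller typically
 * wraps them in its own seq_operations, roughly like this (sketch only,
 * not the exact code used by the event or function pid files):
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(pid_list, pos);
 *	}
 *
 *	static const struct seq_operations pid_sops = {
 *		.start	= p_start,
 *		.next	= p_next,	(wraps trace_pid_next)
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */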
472
473 /* 128 should be much more than enough */
474 #define PID_BUF_SIZE            127
475
476 int trace_pid_write(struct trace_pid_list *filtered_pids,
477                     struct trace_pid_list **new_pid_list,
478                     const char __user *ubuf, size_t cnt)
479 {
480         struct trace_pid_list *pid_list;
481         struct trace_parser parser;
482         unsigned long val;
483         int nr_pids = 0;
484         ssize_t read = 0;
485         ssize_t ret = 0;
486         loff_t pos;
487         pid_t pid;
488
489         if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
490                 return -ENOMEM;
491
492         /*
493          * Always create a new array when the user adds new pids. The
494          * write is an all-or-nothing operation: if any part of it
495          * fails, then the current list is not modified and remains
496          * in use.
497          */
498         pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
499         if (!pid_list)
500                 return -ENOMEM;
501
502         pid_list->pid_max = READ_ONCE(pid_max);
503
504         /* Only truncating will shrink pid_max */
505         if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
506                 pid_list->pid_max = filtered_pids->pid_max;
507
508         pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
509         if (!pid_list->pids) {
510                 kfree(pid_list);
511                 return -ENOMEM;
512         }
513
514         if (filtered_pids) {
515                 /* copy the current bits to the new max */
516                 for_each_set_bit(pid, filtered_pids->pids,
517                                  filtered_pids->pid_max) {
518                         set_bit(pid, pid_list->pids);
519                         nr_pids++;
520                 }
521         }
522
523         while (cnt > 0) {
524
525                 pos = 0;
526
527                 ret = trace_get_user(&parser, ubuf, cnt, &pos);
528                 if (ret < 0 || !trace_parser_loaded(&parser))
529                         break;
530
531                 read += ret;
532                 ubuf += ret;
533                 cnt -= ret;
534
535                 ret = -EINVAL;
536                 if (kstrtoul(parser.buffer, 0, &val))
537                         break;
538                 if (val >= pid_list->pid_max)
539                         break;
540
541                 pid = (pid_t)val;
542
543                 set_bit(pid, pid_list->pids);
544                 nr_pids++;
545
546                 trace_parser_clear(&parser);
547                 ret = 0;
548         }
549         trace_parser_put(&parser);
550
551         if (ret < 0) {
552                 trace_free_pid_list(pid_list);
553                 return ret;
554         }
555
556         if (!nr_pids) {
557                 /* Cleared the list of pids */
558                 trace_free_pid_list(pid_list);
559                 read = ret;
560                 pid_list = NULL;
561         }
562
563         *new_pid_list = pid_list;
564
565         return read;
566 }
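/*
 * From userspace this backs pid filter files such as set_event_pid:
 * writing a space-separated list of pids installs a new filter, and
 * writing nothing clears it, e.g. (illustrative):
 *
 *	# echo 123 456 > /sys/kernel/debug/tracing/set_event_pid
 *	# echo > /sys/kernel/debug/tracing/set_event_pid
 */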
567
568 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
569 {
570         u64 ts;
571
572         /* Early boot up does not have a buffer yet */
573         if (!buf->buffer)
574                 return trace_clock_local();
575
576         ts = ring_buffer_time_stamp(buf->buffer, cpu);
577         ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
578
579         return ts;
580 }
581
582 u64 ftrace_now(int cpu)
583 {
584         return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
585 }
586
587 /**
588  * tracing_is_enabled - Show if global_trace has been disabled
589  *
590  * Shows if the global trace has been enabled or not. It uses the
591  * mirror flag "buffer_disabled" so that it can be checked in fast paths,
592  * such as by the irqsoff tracer. But it may be inaccurate due to races. If you
593  * need to know the accurate state, use tracing_is_on() which is a little
594  * slower, but accurate.
595  */
596 int tracing_is_enabled(void)
597 {
598         /*
599          * For quick access (irqsoff uses this in fast path), just
600          * return the mirror variable of the state of the ring buffer.
601          * It's a little racy, but we don't really care.
602          */
603         smp_rmb();
604         return !global_trace.buffer_disabled;
605 }
606
607 /*
608  * trace_buf_size is the size in bytes that is allocated
609  * for a buffer. Note, the number of bytes is always rounded
610  * to page size.
611  *
612  * This number is purposely set to a low value (16384) so that,
613  * if a dump on oops happens, we do not have to wait for all of
614  * that output. This can be configured at both boot time and
615  * run time.
616  */
617 #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
618
619 static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
620
621 /* trace_types holds a link list of available tracers. */
622 static struct tracer            *trace_types __read_mostly;
623
624 /*
625  * trace_types_lock is used to protect the trace_types list.
626  */
627 DEFINE_MUTEX(trace_types_lock);
628
629 /*
630  * serialize the access of the ring buffer
631  *
632  * The ring buffer serializes readers, but that is only low level protection.
633  * The validity of the events (returned by ring_buffer_peek() etc.)
634  * is not protected by the ring buffer.
635  *
636  * The content of events may become garbage if we allow another process to
637  * consume these events concurrently:
638  *   A) the page holding the consumed events may become a normal page
639  *      (not a reader page) in the ring buffer, and this page will be
640  *      rewritten by the event producer.
641  *   B) the page holding the consumed events may become a page for
642  *      splice_read, and this page will be returned to the system.
643  *
644  * These primitives allow multiple processes to access different cpu
645  * ring buffers concurrently.
646  *
647  * These primitives don't distinguish read-only and read-consume access.
648  * Multiple read-only accesses are also serialized.
649  */
650
651 #ifdef CONFIG_SMP
652 static DECLARE_RWSEM(all_cpu_access_lock);
653 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
654
655 static inline void trace_access_lock(int cpu)
656 {
657         if (cpu == RING_BUFFER_ALL_CPUS) {
658                 /* gain it for accessing the whole ring buffer. */
659                 down_write(&all_cpu_access_lock);
660         } else {
661                 /* gain it for accessing a cpu ring buffer. */
662
663                 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
664                 down_read(&all_cpu_access_lock);
665
666                 /* Secondly block other access to this @cpu ring buffer. */
667                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
668         }
669 }
670
671 static inline void trace_access_unlock(int cpu)
672 {
673         if (cpu == RING_BUFFER_ALL_CPUS) {
674                 up_write(&all_cpu_access_lock);
675         } else {
676                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677                 up_read(&all_cpu_access_lock);
678         }
679 }
680
681 static inline void trace_access_lock_init(void)
682 {
683         int cpu;
684
685         for_each_possible_cpu(cpu)
686                 mutex_init(&per_cpu(cpu_access_lock, cpu));
687 }
688
689 #else
690
691 static DEFINE_MUTEX(access_lock);
692
693 static inline void trace_access_lock(int cpu)
694 {
695         (void)cpu;
696         mutex_lock(&access_lock);
697 }
698
699 static inline void trace_access_unlock(int cpu)
700 {
701         (void)cpu;
702         mutex_unlock(&access_lock);
703 }
704
705 static inline void trace_access_lock_init(void)
706 {
707 }
708
709 #endif
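/*
 * A consumer of a single cpu buffer would typically bracket its access
 * like this (sketch):
 *
 *	trace_access_lock(cpu);
 *	... consume or splice events from that cpu's buffer ...
 *	trace_access_unlock(cpu);
 *
 * while passing RING_BUFFER_ALL_CPUS excludes all per-cpu readers (on SMP
 * by taking the write side of all_cpu_access_lock).
 */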
710
711 #ifdef CONFIG_STACKTRACE
712 static void __ftrace_trace_stack(struct ring_buffer *buffer,
713                                  unsigned long flags,
714                                  int skip, int pc, struct pt_regs *regs);
715 static inline void ftrace_trace_stack(struct trace_array *tr,
716                                       struct ring_buffer *buffer,
717                                       unsigned long flags,
718                                       int skip, int pc, struct pt_regs *regs);
719
720 #else
721 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
722                                         unsigned long flags,
723                                         int skip, int pc, struct pt_regs *regs)
724 {
725 }
726 static inline void ftrace_trace_stack(struct trace_array *tr,
727                                       struct ring_buffer *buffer,
728                                       unsigned long flags,
729                                       int skip, int pc, struct pt_regs *regs)
730 {
731 }
732
733 #endif
734
735 static __always_inline void
736 trace_event_setup(struct ring_buffer_event *event,
737                   int type, unsigned long flags, int pc)
738 {
739         struct trace_entry *ent = ring_buffer_event_data(event);
740
741         tracing_generic_entry_update(ent, flags, pc);
742         ent->type = type;
743 }
744
745 static __always_inline struct ring_buffer_event *
746 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
747                           int type,
748                           unsigned long len,
749                           unsigned long flags, int pc)
750 {
751         struct ring_buffer_event *event;
752
753         event = ring_buffer_lock_reserve(buffer, len);
754         if (event != NULL)
755                 trace_event_setup(event, type, flags, pc);
756
757         return event;
758 }
759
760 void tracer_tracing_on(struct trace_array *tr)
761 {
762         if (tr->trace_buffer.buffer)
763                 ring_buffer_record_on(tr->trace_buffer.buffer);
764         /*
765          * This flag is looked at when buffers haven't been allocated
766          * yet, or by some tracers (like irqsoff), that just want to
767          * know if the ring buffer has been disabled, but it can handle
768          * races of where it gets disabled but we still do a record.
769          * As the check is in the fast path of the tracers, it is more
770          * important to be fast than accurate.
771          */
772         tr->buffer_disabled = 0;
773         /* Make the flag seen by readers */
774         smp_wmb();
775 }
776
777 /**
778  * tracing_on - enable tracing buffers
779  *
780  * This function enables tracing buffers that may have been
781  * disabled with tracing_off.
782  */
783 void tracing_on(void)
784 {
785         tracer_tracing_on(&global_trace);
786 }
787 EXPORT_SYMBOL_GPL(tracing_on);
788
789
790 static __always_inline void
791 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
792 {
793         __this_cpu_write(trace_taskinfo_save, true);
794
795         /* If this is the temp buffer, we need to commit fully */
796         if (this_cpu_read(trace_buffered_event) == event) {
797                 /* Length is in event->array[0] */
798                 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799                 /* Release the temp buffer */
800                 this_cpu_dec(trace_buffered_event_cnt);
801         } else
802                 ring_buffer_unlock_commit(buffer, event);
803 }
804
805 /**
806  * __trace_puts - write a constant string into the trace buffer.
807  * @ip:    The address of the caller
808  * @str:   The constant string to write
809  * @size:  The size of the string.
810  */
811 int __trace_puts(unsigned long ip, const char *str, int size)
812 {
813         struct ring_buffer_event *event;
814         struct ring_buffer *buffer;
815         struct print_entry *entry;
816         unsigned long irq_flags;
817         int alloc;
818         int pc;
819
820         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
821                 return 0;
822
823         pc = preempt_count();
824
825         if (unlikely(tracing_selftest_running || tracing_disabled))
826                 return 0;
827
828         alloc = sizeof(*entry) + size + 2; /* possible \n added */
829
830         local_save_flags(irq_flags);
831         buffer = global_trace.trace_buffer.buffer;
832         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
833                                             irq_flags, pc);
834         if (!event)
835                 return 0;
836
837         entry = ring_buffer_event_data(event);
838         entry->ip = ip;
839
840         memcpy(&entry->buf, str, size);
841
842         /* Add a newline if necessary */
843         if (entry->buf[size - 1] != '\n') {
844                 entry->buf[size] = '\n';
845                 entry->buf[size + 1] = '\0';
846         } else
847                 entry->buf[size] = '\0';
848
849         __buffer_unlock_commit(buffer, event);
850         ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
851
852         return size;
853 }
854 EXPORT_SYMBOL_GPL(__trace_puts);
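/*
 * __trace_puts() is normally reached through the trace_puts() macro, which
 * supplies the caller's ip and the string length, e.g. (illustrative):
 *
 *	trace_puts("hit the interesting spot\n");
 */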
855
856 /**
857  * __trace_bputs - write the pointer to a constant string into trace buffer
858  * @ip:    The address of the caller
859  * @str:   The constant string to write to the buffer to
860  */
861 int __trace_bputs(unsigned long ip, const char *str)
862 {
863         struct ring_buffer_event *event;
864         struct ring_buffer *buffer;
865         struct bputs_entry *entry;
866         unsigned long irq_flags;
867         int size = sizeof(struct bputs_entry);
868         int pc;
869
870         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
871                 return 0;
872
873         pc = preempt_count();
874
875         if (unlikely(tracing_selftest_running || tracing_disabled))
876                 return 0;
877
878         local_save_flags(irq_flags);
879         buffer = global_trace.trace_buffer.buffer;
880         event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
881                                             irq_flags, pc);
882         if (!event)
883                 return 0;
884
885         entry = ring_buffer_event_data(event);
886         entry->ip                       = ip;
887         entry->str                      = str;
888
889         __buffer_unlock_commit(buffer, event);
890         ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
891
892         return 1;
893 }
894 EXPORT_SYMBOL_GPL(__trace_bputs);
895
896 #ifdef CONFIG_TRACER_SNAPSHOT
897 void tracing_snapshot_instance(struct trace_array *tr)
898 {
899         struct tracer *tracer = tr->current_trace;
900         unsigned long flags;
901
902         if (in_nmi()) {
903                 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904                 internal_trace_puts("*** snapshot is being ignored        ***\n");
905                 return;
906         }
907
908         if (!tr->allocated_snapshot) {
909                 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910                 internal_trace_puts("*** stopping trace here!   ***\n");
911                 tracing_off();
912                 return;
913         }
914
915         /* Note, snapshot can not be used when the tracer uses it */
916         if (tracer->use_max_tr) {
917                 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918                 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
919                 return;
920         }
921
922         local_irq_save(flags);
923         update_max_tr(tr, current, smp_processor_id());
924         local_irq_restore(flags);
925 }
926
927 /**
928  * tracing_snapshot - take a snapshot of the current buffer.
929  *
930  * This causes a swap between the snapshot buffer and the current live
931  * tracing buffer. You can use this to take snapshots of the live
932  * trace when some condition is triggered, but continue to trace.
933  *
934  * Note, make sure to allocate the snapshot with either
935  * a tracing_snapshot_alloc(), or by doing it manually
936  * with: echo 1 > /sys/kernel/debug/tracing/snapshot
937  *
938  * If the snapshot buffer is not allocated, it will stop tracing.
939  * Basically making a permanent snapshot.
940  */
941 void tracing_snapshot(void)
942 {
943         struct trace_array *tr = &global_trace;
944
945         tracing_snapshot_instance(tr);
946 }
947 EXPORT_SYMBOL_GPL(tracing_snapshot);
948
949 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
950                                         struct trace_buffer *size_buf, int cpu_id);
951 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
952
953 int tracing_alloc_snapshot_instance(struct trace_array *tr)
954 {
955         int ret;
956
957         if (!tr->allocated_snapshot) {
958
959                 /* allocate spare buffer */
960                 ret = resize_buffer_duplicate_size(&tr->max_buffer,
961                                    &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
962                 if (ret < 0)
963                         return ret;
964
965                 tr->allocated_snapshot = true;
966         }
967
968         return 0;
969 }
970
971 static void free_snapshot(struct trace_array *tr)
972 {
973         /*
974          * We don't free the ring buffer; instead, we resize it, because
975          * the max_tr ring buffer has some state (e.g. ring->clock) and
976          * we want to preserve it.
977          */
978         ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
979         set_buffer_entries(&tr->max_buffer, 1);
980         tracing_reset_online_cpus(&tr->max_buffer);
981         tr->allocated_snapshot = false;
982 }
983
984 /**
985  * tracing_alloc_snapshot - allocate snapshot buffer.
986  *
987  * This only allocates the snapshot buffer if it isn't already
988  * allocated - it doesn't also take a snapshot.
989  *
990  * This is meant to be used in cases where the snapshot buffer needs
991  * to be set up for events that can't sleep but need to be able to
992  * trigger a snapshot.
993  */
994 int tracing_alloc_snapshot(void)
995 {
996         struct trace_array *tr = &global_trace;
997         int ret;
998
999         ret = tracing_alloc_snapshot_instance(tr);
1000         WARN_ON(ret < 0);
1001
1002         return ret;
1003 }
1004 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1005
1006 /**
1007  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1008  *
1009  * This is similar to tracing_snapshot(), but it will allocate the
1010  * snapshot buffer if it isn't already allocated. Use this only
1011  * where it is safe to sleep, as the allocation may sleep.
1012  *
1013  * This causes a swap between the snapshot buffer and the current live
1014  * tracing buffer. You can use this to take snapshots of the live
1015  * trace when some condition is triggered, but continue to trace.
1016  */
1017 void tracing_snapshot_alloc(void)
1018 {
1019         int ret;
1020
1021         ret = tracing_alloc_snapshot();
1022         if (ret < 0)
1023                 return;
1024
1025         tracing_snapshot();
1026 }
1027 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1028 #else
1029 void tracing_snapshot(void)
1030 {
1031         WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1032 }
1033 EXPORT_SYMBOL_GPL(tracing_snapshot);
1034 int tracing_alloc_snapshot(void)
1035 {
1036         WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1037         return -ENODEV;
1038 }
1039 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1040 void tracing_snapshot_alloc(void)
1041 {
1042         /* Give warning */
1043         tracing_snapshot();
1044 }
1045 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1046 #endif /* CONFIG_TRACER_SNAPSHOT */
1047
1048 void tracer_tracing_off(struct trace_array *tr)
1049 {
1050         if (tr->trace_buffer.buffer)
1051                 ring_buffer_record_off(tr->trace_buffer.buffer);
1052         /*
1053          * This flag is looked at when buffers haven't been allocated
1054          * yet, or by some tracers (like irqsoff), that just want to
1055          * know if the ring buffer has been disabled, but it can handle
1056          * races where it gets disabled while we still do a record.
1057          * As the check is in the fast path of the tracers, it is more
1058          * important to be fast than accurate.
1059          */
1060         tr->buffer_disabled = 1;
1061         /* Make the flag seen by readers */
1062         smp_wmb();
1063 }
1064
1065 /**
1066  * tracing_off - turn off tracing buffers
1067  *
1068  * This function stops the tracing buffers from recording data.
1069  * It does not disable any overhead the tracers themselves may
1070  * be causing. This function simply causes all recording to
1071  * the ring buffers to fail.
1072  */
1073 void tracing_off(void)
1074 {
1075         tracer_tracing_off(&global_trace);
1076 }
1077 EXPORT_SYMBOL_GPL(tracing_off);
1078
1079 void disable_trace_on_warning(void)
1080 {
1081         if (__disable_trace_on_warning)
1082                 tracing_off();
1083 }
1084
1085 /**
1086  * tracer_tracing_is_on - show real state of ring buffer enabled
1087  * @tr: the trace array to know if ring buffer is enabled
1088  *
1089  * Shows real state of the ring buffer if it is enabled or not.
1090  */
1091 bool tracer_tracing_is_on(struct trace_array *tr)
1092 {
1093         if (tr->trace_buffer.buffer)
1094                 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1095         return !tr->buffer_disabled;
1096 }
1097
1098 /**
1099  * tracing_is_on - show state of ring buffers enabled
1100  */
1101 int tracing_is_on(void)
1102 {
1103         return tracer_tracing_is_on(&global_trace);
1104 }
1105 EXPORT_SYMBOL_GPL(tracing_is_on);
1106
1107 static int __init set_buf_size(char *str)
1108 {
1109         unsigned long buf_size;
1110
1111         if (!str)
1112                 return 0;
1113         buf_size = memparse(str, &str);
1114         /* nr_entries can not be zero */
1115         if (buf_size == 0)
1116                 return 0;
1117         trace_buf_size = buf_size;
1118         return 1;
1119 }
1120 __setup("trace_buf_size=", set_buf_size);
1121
1122 static int __init set_tracing_thresh(char *str)
1123 {
1124         unsigned long threshold;
1125         int ret;
1126
1127         if (!str)
1128                 return 0;
1129         ret = kstrtoul(str, 0, &threshold);
1130         if (ret < 0)
1131                 return 0;
1132         tracing_thresh = threshold * 1000;
1133         return 1;
1134 }
1135 __setup("tracing_thresh=", set_tracing_thresh);
1136
1137 unsigned long nsecs_to_usecs(unsigned long nsecs)
1138 {
1139         return nsecs / 1000;
1140 }
1141
1142 /*
1143  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1144  * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1145  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1146  * of strings in the order that the evals (enum) were defined.
1147  */
1148 #undef C
1149 #define C(a, b) b
1150
1151 /* These must match the bit positions in trace_iterator_flags */
1152 static const char *trace_options[] = {
1153         TRACE_FLAGS
1154         NULL
1155 };
1156
1157 static struct {
1158         u64 (*func)(void);
1159         const char *name;
1160         int in_ns;              /* is this clock in nanoseconds? */
1161 } trace_clocks[] = {
1162         { trace_clock_local,            "local",        1 },
1163         { trace_clock_global,           "global",       1 },
1164         { trace_clock_counter,          "counter",      0 },
1165         { trace_clock_jiffies,          "uptime",       0 },
1166         { trace_clock,                  "perf",         1 },
1167         { ktime_get_mono_fast_ns,       "mono",         1 },
1168         { ktime_get_raw_fast_ns,        "mono_raw",     1 },
1169         { ktime_get_boot_fast_ns,       "boot",         1 },
1170         ARCH_TRACE_CLOCKS
1171 };
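/*
 * The clock in use can be inspected and changed at run time through the
 * tracefs "trace_clock" file, e.g. (illustrative):
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 */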
1172
1173 bool trace_clock_in_ns(struct trace_array *tr)
1174 {
1175         if (trace_clocks[tr->clock_id].in_ns)
1176                 return true;
1177
1178         return false;
1179 }
1180
1181 /*
1182  * trace_parser_get_init - gets the buffer for trace parser
1183  */
1184 int trace_parser_get_init(struct trace_parser *parser, int size)
1185 {
1186         memset(parser, 0, sizeof(*parser));
1187
1188         parser->buffer = kmalloc(size, GFP_KERNEL);
1189         if (!parser->buffer)
1190                 return 1;
1191
1192         parser->size = size;
1193         return 0;
1194 }
1195
1196 /*
1197  * trace_parser_put - frees the buffer for trace parser
1198  */
1199 void trace_parser_put(struct trace_parser *parser)
1200 {
1201         kfree(parser->buffer);
1202         parser->buffer = NULL;
1203 }
1204
1205 /*
1206  * trace_get_user - reads the user input string separated by space
1207  * (matched by isspace(ch))
1208  *
1209  * For each string found the 'struct trace_parser' is updated,
1210  * and the function returns.
1211  *
1212  * Returns number of bytes read.
1213  *
1214  * See kernel/trace/trace.h for 'struct trace_parser' details.
1215  */
1216 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1217         size_t cnt, loff_t *ppos)
1218 {
1219         char ch;
1220         size_t read = 0;
1221         ssize_t ret;
1222
1223         if (!*ppos)
1224                 trace_parser_clear(parser);
1225
1226         ret = get_user(ch, ubuf++);
1227         if (ret)
1228                 goto out;
1229
1230         read++;
1231         cnt--;
1232
1233         /*
1234          * The parser is not finished with the last write,
1235          * continue reading the user input without skipping spaces.
1236          */
1237         if (!parser->cont) {
1238                 /* skip white space */
1239                 while (cnt && isspace(ch)) {
1240                         ret = get_user(ch, ubuf++);
1241                         if (ret)
1242                                 goto out;
1243                         read++;
1244                         cnt--;
1245                 }
1246
1247                 parser->idx = 0;
1248
1249                 /* only spaces were written */
1250                 if (isspace(ch) || !ch) {
1251                         *ppos += read;
1252                         ret = read;
1253                         goto out;
1254                 }
1255         }
1256
1257         /* read the non-space input */
1258         while (cnt && !isspace(ch) && ch) {
1259                 if (parser->idx < parser->size - 1)
1260                         parser->buffer[parser->idx++] = ch;
1261                 else {
1262                         ret = -EINVAL;
1263                         goto out;
1264                 }
1265                 ret = get_user(ch, ubuf++);
1266                 if (ret)
1267                         goto out;
1268                 read++;
1269                 cnt--;
1270         }
1271
1272         /* We either got finished input or we have to wait for another call. */
1273         if (isspace(ch) || !ch) {
1274                 parser->buffer[parser->idx] = 0;
1275                 parser->cont = false;
1276         } else if (parser->idx < parser->size - 1) {
1277                 parser->cont = true;
1278                 parser->buffer[parser->idx++] = ch;
1279                 /* Make sure the parsed string always terminates with '\0'. */
1280                 parser->buffer[parser->idx] = 0;
1281         } else {
1282                 ret = -EINVAL;
1283                 goto out;
1284         }
1285
1286         *ppos += read;
1287         ret = read;
1288
1289 out:
1290         return ret;
1291 }
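/*
 * Each successful call hands back at most one whitespace-delimited token in
 * parser->buffer, so a write such as "sched_switch sched_wakeup" is consumed
 * over two calls. A sketch of the usual calling loop (compare
 * trace_pid_write() above):
 *
 *	while (cnt > 0) {
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		... use parser.buffer ...
 *		ubuf += ret;
 *		cnt -= ret;
 *		trace_parser_clear(&parser);
 *	}
 */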
1292
1293 /* TODO add a seq_buf_to_buffer() */
1294 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1295 {
1296         int len;
1297
1298         if (trace_seq_used(s) <= s->seq.readpos)
1299                 return -EBUSY;
1300
1301         len = trace_seq_used(s) - s->seq.readpos;
1302         if (cnt > len)
1303                 cnt = len;
1304         memcpy(buf, s->buffer + s->seq.readpos, cnt);
1305
1306         s->seq.readpos += cnt;
1307         return cnt;
1308 }
1309
1310 unsigned long __read_mostly     tracing_thresh;
1311
1312 #ifdef CONFIG_TRACER_MAX_TRACE
1313 /*
1314  * Copy the new maximum trace into the separate maximum-trace
1315  * structure. (this way the maximum trace is permanently saved,
1316  * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1317  */
1318 static void
1319 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1320 {
1321         struct trace_buffer *trace_buf = &tr->trace_buffer;
1322         struct trace_buffer *max_buf = &tr->max_buffer;
1323         struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1324         struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1325
1326         max_buf->cpu = cpu;
1327         max_buf->time_start = data->preempt_timestamp;
1328
1329         max_data->saved_latency = tr->max_latency;
1330         max_data->critical_start = data->critical_start;
1331         max_data->critical_end = data->critical_end;
1332
1333         memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1334         max_data->pid = tsk->pid;
1335         /*
1336          * If tsk == current, then use current_uid(), as that does not use
1337          * RCU. The irq tracer can be called out of RCU scope.
1338          */
1339         if (tsk == current)
1340                 max_data->uid = current_uid();
1341         else
1342                 max_data->uid = task_uid(tsk);
1343
1344         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1345         max_data->policy = tsk->policy;
1346         max_data->rt_priority = tsk->rt_priority;
1347
1348         /* record this tasks comm */
1349         tracing_record_cmdline(tsk);
1350 }
1351
1352 /**
1353  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1354  * @tr: tracer
1355  * @tsk: the task with the latency
1356  * @cpu: The cpu that initiated the trace.
1357  *
1358  * Flip the buffers between the @tr and the max_tr and record information
1359  * about which task was the cause of this latency.
1360  */
1361 void
1362 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1363 {
1364         if (tr->stop_count)
1365                 return;
1366
1367         WARN_ON_ONCE(!irqs_disabled());
1368
1369         if (!tr->allocated_snapshot) {
1370                 /* Only the nop tracer should hit this when disabling */
1371                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1372                 return;
1373         }
1374
1375         arch_spin_lock(&tr->max_lock);
1376
1377         /* Inherit the recordable setting from trace_buffer */
1378         if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1379                 ring_buffer_record_on(tr->max_buffer.buffer);
1380         else
1381                 ring_buffer_record_off(tr->max_buffer.buffer);
1382
1383         swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1384
1385         __update_max_tr(tr, tsk, cpu);
1386         arch_spin_unlock(&tr->max_lock);
1387 }
1388
1389 /**
1390  * update_max_tr_single - only copy one trace over, and reset the rest
1391  * @tr: tracer
1392  * @tsk: task with the latency
1393  * @cpu: the cpu of the buffer to copy.
1394  *
1395  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1396  */
1397 void
1398 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1399 {
1400         int ret;
1401
1402         if (tr->stop_count)
1403                 return;
1404
1405         WARN_ON_ONCE(!irqs_disabled());
1406         if (!tr->allocated_snapshot) {
1407                 /* Only the nop tracer should hit this when disabling */
1408                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1409                 return;
1410         }
1411
1412         arch_spin_lock(&tr->max_lock);
1413
1414         ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1415
1416         if (ret == -EBUSY) {
1417                 /*
1418                  * We failed to swap the buffer due to a commit taking
1419                  * place on this CPU. We fail to record, but we reset
1420                  * the max trace buffer (no one writes directly to it)
1421                  * and flag that it failed.
1422                  */
1423                 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1424                         "Failed to swap buffers due to commit in progress\n");
1425         }
1426
1427         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1428
1429         __update_max_tr(tr, tsk, cpu);
1430         arch_spin_unlock(&tr->max_lock);
1431 }
1432 #endif /* CONFIG_TRACER_MAX_TRACE */
1433
1434 static int wait_on_pipe(struct trace_iterator *iter, int full)
1435 {
1436         /* Iterators are static, they should be filled or empty */
1437         if (trace_buffer_iter(iter, iter->cpu_file))
1438                 return 0;
1439
1440         return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1441                                 full);
1442 }
1443
1444 #ifdef CONFIG_FTRACE_STARTUP_TEST
1445 static bool selftests_can_run;
1446
1447 struct trace_selftests {
1448         struct list_head                list;
1449         struct tracer                   *type;
1450 };
1451
1452 static LIST_HEAD(postponed_selftests);
1453
1454 static int save_selftest(struct tracer *type)
1455 {
1456         struct trace_selftests *selftest;
1457
1458         selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1459         if (!selftest)
1460                 return -ENOMEM;
1461
1462         selftest->type = type;
1463         list_add(&selftest->list, &postponed_selftests);
1464         return 0;
1465 }
1466
1467 static int run_tracer_selftest(struct tracer *type)
1468 {
1469         struct trace_array *tr = &global_trace;
1470         struct tracer *saved_tracer = tr->current_trace;
1471         int ret;
1472
1473         if (!type->selftest || tracing_selftest_disabled)
1474                 return 0;
1475
1476         /*
1477          * If a tracer registers early in boot up (before scheduling is
1478          * initialized and such), then do not run its selftests yet.
1479          * Instead, run it a little later in the boot process.
1480          */
1481         if (!selftests_can_run)
1482                 return save_selftest(type);
1483
1484         /*
1485          * Run a selftest on this tracer.
1486          * Here we reset the trace buffer, and set the current
1487          * tracer to be this tracer. The tracer can then run some
1488          * internal tracing to verify that everything is in order.
1489          * If we fail, we do not register this tracer.
1490          */
1491         tracing_reset_online_cpus(&tr->trace_buffer);
1492
1493         tr->current_trace = type;
1494
1495 #ifdef CONFIG_TRACER_MAX_TRACE
1496         if (type->use_max_tr) {
1497                 /* If we expanded the buffers, make sure the max is expanded too */
1498                 if (ring_buffer_expanded)
1499                         ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1500                                            RING_BUFFER_ALL_CPUS);
1501                 tr->allocated_snapshot = true;
1502         }
1503 #endif
1504
1505         /* the test is responsible for initializing and enabling */
1506         pr_info("Testing tracer %s: ", type->name);
1507         ret = type->selftest(type, tr);
1508         /* the test is responsible for resetting too */
1509         tr->current_trace = saved_tracer;
1510         if (ret) {
1511                 printk(KERN_CONT "FAILED!\n");
1512                 /* Add the warning after printing 'FAILED' */
1513                 WARN_ON(1);
1514                 return -1;
1515         }
1516         /* Only reset on passing, to avoid touching corrupted buffers */
1517         tracing_reset_online_cpus(&tr->trace_buffer);
1518
1519 #ifdef CONFIG_TRACER_MAX_TRACE
1520         if (type->use_max_tr) {
1521                 tr->allocated_snapshot = false;
1522
1523                 /* Shrink the max buffer again */
1524                 if (ring_buffer_expanded)
1525                         ring_buffer_resize(tr->max_buffer.buffer, 1,
1526                                            RING_BUFFER_ALL_CPUS);
1527         }
1528 #endif
1529
1530         printk(KERN_CONT "PASSED\n");
1531         return 0;
1532 }
1533
1534 static __init int init_trace_selftests(void)
1535 {
1536         struct trace_selftests *p, *n;
1537         struct tracer *t, **last;
1538         int ret;
1539
1540         selftests_can_run = true;
1541
1542         mutex_lock(&trace_types_lock);
1543
1544         if (list_empty(&postponed_selftests))
1545                 goto out;
1546
1547         pr_info("Running postponed tracer tests:\n");
1548
1549         list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1550                 ret = run_tracer_selftest(p->type);
1551                 /* If the test fails, then warn and remove from available_tracers */
1552                 if (ret < 0) {
1553                         WARN(1, "tracer: %s failed selftest, disabling\n",
1554                              p->type->name);
1555                         last = &trace_types;
1556                         for (t = trace_types; t; t = t->next) {
1557                                 if (t == p->type) {
1558                                         *last = t->next;
1559                                         break;
1560                                 }
1561                                 last = &t->next;
1562                         }
1563                 }
1564                 list_del(&p->list);
1565                 kfree(p);
1566         }
1567
1568  out:
1569         mutex_unlock(&trace_types_lock);
1570
1571         return 0;
1572 }
1573 core_initcall(init_trace_selftests);
1574 #else
1575 static inline int run_tracer_selftest(struct tracer *type)
1576 {
1577         return 0;
1578 }
1579 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1580
1581 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1582
1583 static void __init apply_trace_boot_options(void);
1584
1585 /**
1586  * register_tracer - register a tracer with the ftrace system.
1587  * @type: the plugin for the tracer
1588  *
1589  * Register a new plugin tracer.
1590  */
1591 int __init register_tracer(struct tracer *type)
1592 {
1593         struct tracer *t;
1594         int ret = 0;
1595
1596         if (!type->name) {
1597                 pr_info("Tracer must have a name\n");
1598                 return -1;
1599         }
1600
1601         if (strlen(type->name) >= MAX_TRACER_SIZE) {
1602                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1603                 return -1;
1604         }
1605
1606         mutex_lock(&trace_types_lock);
1607
1608         tracing_selftest_running = true;
1609
1610         for (t = trace_types; t; t = t->next) {
1611                 if (strcmp(type->name, t->name) == 0) {
1612                         /* already found */
1613                         pr_info("Tracer %s already registered\n",
1614                                 type->name);
1615                         ret = -1;
1616                         goto out;
1617                 }
1618         }
1619
1620         if (!type->set_flag)
1621                 type->set_flag = &dummy_set_flag;
1622         if (!type->flags) {
1623                 /* allocate a dummy tracer_flags */
1624                 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1625                 if (!type->flags) {
1626                         ret = -ENOMEM;
1627                         goto out;
1628                 }
1629                 type->flags->val = 0;
1630                 type->flags->opts = dummy_tracer_opt;
1631         } else
1632                 if (!type->flags->opts)
1633                         type->flags->opts = dummy_tracer_opt;
1634
1635         /* store the tracer for __set_tracer_option */
1636         type->flags->trace = type;
1637
1638         ret = run_tracer_selftest(type);
1639         if (ret < 0)
1640                 goto out;
1641
1642         type->next = trace_types;
1643         trace_types = type;
1644         add_tracer_options(&global_trace, type);
1645
1646  out:
1647         tracing_selftest_running = false;
1648         mutex_unlock(&trace_types_lock);
1649
1650         if (ret || !default_bootup_tracer)
1651                 goto out_unlock;
1652
1653         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1654                 goto out_unlock;
1655
1656         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1657         /* Do we want this tracer to start on bootup? */
1658         tracing_set_tracer(&global_trace, type->name);
1659         default_bootup_tracer = NULL;
1660
1661         apply_trace_boot_options();
1662
1663         /* disable other selftests, since running this tracer will break them. */
1664         tracing_selftest_disabled = true;
1665 #ifdef CONFIG_FTRACE_STARTUP_TEST
1666         printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1667                type->name);
1668 #endif
1669
1670  out_unlock:
1671         return ret;
1672 }
1673
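/*
 * Example usage (illustrative sketch): a built-in tracer registers itself
 * from __init code, much like the selftest initcall above. The "example"
 * tracer and its callbacks are hypothetical; register_tracer() itself only
 * insists on a unique, sufficiently short name and fills in dummy flags and
 * set_flag callbacks when none are provided.
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
	/* set up any per-instance state here */
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
	/* undo what example_tracer_init() did */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static int __init example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(example_tracer_register);
#endif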
1674 void tracing_reset(struct trace_buffer *buf, int cpu)
1675 {
1676         struct ring_buffer *buffer = buf->buffer;
1677
1678         if (!buffer)
1679                 return;
1680
1681         ring_buffer_record_disable(buffer);
1682
1683         /* Make sure all commits have finished */
1684         synchronize_rcu();
1685         ring_buffer_reset_cpu(buffer, cpu);
1686
1687         ring_buffer_record_enable(buffer);
1688 }
1689
1690 void tracing_reset_online_cpus(struct trace_buffer *buf)
1691 {
1692         struct ring_buffer *buffer = buf->buffer;
1693         int cpu;
1694
1695         if (!buffer)
1696                 return;
1697
1698         ring_buffer_record_disable(buffer);
1699
1700         /* Make sure all commits have finished */
1701         synchronize_rcu();
1702
1703         buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1704
1705         for_each_online_cpu(cpu)
1706                 ring_buffer_reset_cpu(buffer, cpu);
1707
1708         ring_buffer_record_enable(buffer);
1709 }
1710
1711 /* Must have trace_types_lock held */
1712 void tracing_reset_all_online_cpus(void)
1713 {
1714         struct trace_array *tr;
1715
1716         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1717                 if (!tr->clear_trace)
1718                         continue;
1719                 tr->clear_trace = false;
1720                 tracing_reset_online_cpus(&tr->trace_buffer);
1721 #ifdef CONFIG_TRACER_MAX_TRACE
1722                 tracing_reset_online_cpus(&tr->max_buffer);
1723 #endif
1724         }
1725 }
1726
1727 static int *tgid_map;
1728
1729 #define SAVED_CMDLINES_DEFAULT 128
1730 #define NO_CMDLINE_MAP UINT_MAX
1731 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1732 struct saved_cmdlines_buffer {
1733         unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1734         unsigned *map_cmdline_to_pid;
1735         unsigned cmdline_num;
1736         int cmdline_idx;
1737         char *saved_cmdlines;
1738 };
1739 static struct saved_cmdlines_buffer *savedcmd;
1740
1741 /* temporarily disable recording */
1742 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1743
1744 static inline char *get_saved_cmdlines(int idx)
1745 {
1746         return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1747 }
1748
1749 static inline void set_cmdline(int idx, const char *cmdline)
1750 {
1751         memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1752 }
1753
1754 static int allocate_cmdlines_buffer(unsigned int val,
1755                                     struct saved_cmdlines_buffer *s)
1756 {
1757         s->map_cmdline_to_pid = kmalloc_array(val,
1758                                               sizeof(*s->map_cmdline_to_pid),
1759                                               GFP_KERNEL);
1760         if (!s->map_cmdline_to_pid)
1761                 return -ENOMEM;
1762
1763         s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1764         if (!s->saved_cmdlines) {
1765                 kfree(s->map_cmdline_to_pid);
1766                 return -ENOMEM;
1767         }
1768
1769         s->cmdline_idx = 0;
1770         s->cmdline_num = val;
1771         memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1772                sizeof(s->map_pid_to_cmdline));
1773         memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1774                val * sizeof(*s->map_cmdline_to_pid));
1775
1776         return 0;
1777 }
1778
1779 static int trace_create_savedcmd(void)
1780 {
1781         int ret;
1782
1783         savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1784         if (!savedcmd)
1785                 return -ENOMEM;
1786
1787         ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1788         if (ret < 0) {
1789                 kfree(savedcmd);
1790                 savedcmd = NULL;
1791                 return -ENOMEM;
1792         }
1793
1794         return 0;
1795 }
1796
1797 int is_tracing_stopped(void)
1798 {
1799         return global_trace.stop_count;
1800 }
1801
1802 /**
1803  * tracing_start - quick start of the tracer
1804  *
1805  * If tracing is enabled but was stopped by tracing_stop,
1806  * this will start the tracer back up.
1807  */
1808 void tracing_start(void)
1809 {
1810         struct ring_buffer *buffer;
1811         unsigned long flags;
1812
1813         if (tracing_disabled)
1814                 return;
1815
1816         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1817         if (--global_trace.stop_count) {
1818                 if (global_trace.stop_count < 0) {
1819                         /* Someone screwed up their debugging */
1820                         WARN_ON_ONCE(1);
1821                         global_trace.stop_count = 0;
1822                 }
1823                 goto out;
1824         }
1825
1826         /* Prevent the buffers from switching */
1827         arch_spin_lock(&global_trace.max_lock);
1828
1829         buffer = global_trace.trace_buffer.buffer;
1830         if (buffer)
1831                 ring_buffer_record_enable(buffer);
1832
1833 #ifdef CONFIG_TRACER_MAX_TRACE
1834         buffer = global_trace.max_buffer.buffer;
1835         if (buffer)
1836                 ring_buffer_record_enable(buffer);
1837 #endif
1838
1839         arch_spin_unlock(&global_trace.max_lock);
1840
1841  out:
1842         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1843 }
1844
1845 static void tracing_start_tr(struct trace_array *tr)
1846 {
1847         struct ring_buffer *buffer;
1848         unsigned long flags;
1849
1850         if (tracing_disabled)
1851                 return;
1852
1853         /* If global, we need to also start the max tracer */
1854         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1855                 return tracing_start();
1856
1857         raw_spin_lock_irqsave(&tr->start_lock, flags);
1858
1859         if (--tr->stop_count) {
1860                 if (tr->stop_count < 0) {
1861                         /* Someone screwed up their debugging */
1862                         WARN_ON_ONCE(1);
1863                         tr->stop_count = 0;
1864                 }
1865                 goto out;
1866         }
1867
1868         buffer = tr->trace_buffer.buffer;
1869         if (buffer)
1870                 ring_buffer_record_enable(buffer);
1871
1872  out:
1873         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1874 }
1875
1876 /**
1877  * tracing_stop - quick stop of the tracer
1878  *
1879  * Light weight way to stop tracing. Use in conjunction with
1880  * tracing_start.
1881  */
1882 void tracing_stop(void)
1883 {
1884         struct ring_buffer *buffer;
1885         unsigned long flags;
1886
1887         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1888         if (global_trace.stop_count++)
1889                 goto out;
1890
1891         /* Prevent the buffers from switching */
1892         arch_spin_lock(&global_trace.max_lock);
1893
1894         buffer = global_trace.trace_buffer.buffer;
1895         if (buffer)
1896                 ring_buffer_record_disable(buffer);
1897
1898 #ifdef CONFIG_TRACER_MAX_TRACE
1899         buffer = global_trace.max_buffer.buffer;
1900         if (buffer)
1901                 ring_buffer_record_disable(buffer);
1902 #endif
1903
1904         arch_spin_unlock(&global_trace.max_lock);
1905
1906  out:
1907         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1908 }
1909
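/*
 * Example usage (illustrative sketch): tracing_stop() and tracing_start()
 * nest through stop_count, so a caller can pause recording around a noisy
 * region and resume it afterwards. The helper below is hypothetical.
 */
#if 0
static void example_quiet_section(void)
{
	tracing_stop();
	/* work whose events should not land in the ring buffer */
	tracing_start();
}
#endif
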
1910 static void tracing_stop_tr(struct trace_array *tr)
1911 {
1912         struct ring_buffer *buffer;
1913         unsigned long flags;
1914
1915         /* If global, we need to also stop the max tracer */
1916         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1917                 return tracing_stop();
1918
1919         raw_spin_lock_irqsave(&tr->start_lock, flags);
1920         if (tr->stop_count++)
1921                 goto out;
1922
1923         buffer = tr->trace_buffer.buffer;
1924         if (buffer)
1925                 ring_buffer_record_disable(buffer);
1926
1927  out:
1928         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1929 }
1930
1931 static int trace_save_cmdline(struct task_struct *tsk)
1932 {
1933         unsigned pid, idx;
1934
1935         /* treat recording of idle task as a success */
1936         if (!tsk->pid)
1937                 return 1;
1938
1939         if (unlikely(tsk->pid > PID_MAX_DEFAULT))
1940                 return 0;
1941
1942         /*
1943          * It's not the end of the world if we don't get
1944          * the lock, but we also don't want to spin
1945          * nor do we want to disable interrupts,
1946          * so if we miss here, then better luck next time.
1947          */
1948         if (!arch_spin_trylock(&trace_cmdline_lock))
1949                 return 0;
1950
1951         idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1952         if (idx == NO_CMDLINE_MAP) {
1953                 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1954
1955                 /*
1956                  * Check whether the cmdline buffer at idx has a pid
1957                  * mapped. We are going to overwrite that entry so we
1958                  * need to clear the map_pid_to_cmdline. Otherwise we
1959                  * would read the new comm for the old pid.
1960                  */
1961                 pid = savedcmd->map_cmdline_to_pid[idx];
1962                 if (pid != NO_CMDLINE_MAP)
1963                         savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1964
1965                 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1966                 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1967
1968                 savedcmd->cmdline_idx = idx;
1969         }
1970
1971         set_cmdline(idx, tsk->comm);
1972
1973         arch_spin_unlock(&trace_cmdline_lock);
1974
1975         return 1;
1976 }
1977
1978 static void __trace_find_cmdline(int pid, char comm[])
1979 {
1980         unsigned map;
1981
1982         if (!pid) {
1983                 strcpy(comm, "<idle>");
1984                 return;
1985         }
1986
1987         if (WARN_ON_ONCE(pid < 0)) {
1988                 strcpy(comm, "<XXX>");
1989                 return;
1990         }
1991
1992         if (pid > PID_MAX_DEFAULT) {
1993                 strcpy(comm, "<...>");
1994                 return;
1995         }
1996
1997         map = savedcmd->map_pid_to_cmdline[pid];
1998         if (map != NO_CMDLINE_MAP)
1999                 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2000         else
2001                 strcpy(comm, "<...>");
2002 }
2003
2004 void trace_find_cmdline(int pid, char comm[])
2005 {
2006         preempt_disable();
2007         arch_spin_lock(&trace_cmdline_lock);
2008
2009         __trace_find_cmdline(pid, comm);
2010
2011         arch_spin_unlock(&trace_cmdline_lock);
2012         preempt_enable();
2013 }
2014
2015 int trace_find_tgid(int pid)
2016 {
2017         if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2018                 return 0;
2019
2020         return tgid_map[pid];
2021 }
2022
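/*
 * Example usage (illustrative sketch): a recorded pid can be resolved back
 * to the comm and tgid that were saved at trace time. The helper below is
 * hypothetical; the comm buffer must hold at least TASK_COMM_LEN bytes.
 */
#if 0
static void example_show_task(int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	pr_info("pid %d comm %s tgid %d\n", pid, comm, trace_find_tgid(pid));
}
#endif
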
2023 static int trace_save_tgid(struct task_struct *tsk)
2024 {
2025         /* treat recording of idle task as a success */
2026         if (!tsk->pid)
2027                 return 1;
2028
2029         if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2030                 return 0;
2031
2032         tgid_map[tsk->pid] = tsk->tgid;
2033         return 1;
2034 }
2035
2036 static bool tracing_record_taskinfo_skip(int flags)
2037 {
2038         if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2039                 return true;
2040         if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2041                 return true;
2042         if (!__this_cpu_read(trace_taskinfo_save))
2043                 return true;
2044         return false;
2045 }
2046
2047 /**
2048  * tracing_record_taskinfo - record the task info of a task
2049  *
2050  * @task:  task to record
2051  * @flags: TRACE_RECORD_CMDLINE for recording comm
2052  *         TRACE_RECORD_TGID for recording tgid
2053  */
2054 void tracing_record_taskinfo(struct task_struct *task, int flags)
2055 {
2056         bool done;
2057
2058         if (tracing_record_taskinfo_skip(flags))
2059                 return;
2060
2061         /*
2062          * Record as much task information as possible. If some fail, continue
2063          * to try to record the others.
2064          */
2065         done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2066         done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2067
2068         /* If recording any information failed, retry again soon. */
2069         if (!done)
2070                 return;
2071
2072         __this_cpu_write(trace_taskinfo_save, false);
2073 }
2074
2075 /**
2076  * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2077  *
2078  * @prev:  previous task during sched_switch
2079  * @next:  next task during sched_switch
2080  * @flags: TRACE_RECORD_CMDLINE for recording comm
2081  *         TRACE_RECORD_TGID for recording tgid
2082  */
2083 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2084                                           struct task_struct *next, int flags)
2085 {
2086         bool done;
2087
2088         if (tracing_record_taskinfo_skip(flags))
2089                 return;
2090
2091         /*
2092          * Record as much task information as possible. If some fail, continue
2093          * to try to record the others.
2094          */
2095         done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2096         done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2097         done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2098         done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2099
2100         /* If recording any information failed, retry again soon. */
2101         if (!done)
2102                 return;
2103
2104         __this_cpu_write(trace_taskinfo_save, false);
2105 }
2106
2107 /* Helpers to record a specific task information */
2108 void tracing_record_cmdline(struct task_struct *task)
2109 {
2110         tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2111 }
2112
2113 void tracing_record_tgid(struct task_struct *task)
2114 {
2115         tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2116 }
2117
2118 /*
2119  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2120  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2121  * simplifies those functions and keeps them in sync.
2122  */
2123 enum print_line_t trace_handle_return(struct trace_seq *s)
2124 {
2125         return trace_seq_has_overflowed(s) ?
2126                 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2127 }
2128 EXPORT_SYMBOL_GPL(trace_handle_return);
2129
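/*
 * Example usage (illustrative sketch): an output routine builds its line in
 * iter->seq and lets trace_handle_return() report whether the trace_seq
 * overflowed. The simplified helper below is hypothetical; real event
 * callbacks take additional flags and event arguments.
 */
#if 0
static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	trace_seq_printf(&iter->seq, "example event on cpu %d\n", iter->cpu);

	return trace_handle_return(&iter->seq);
}
#endif
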
2130 void
2131 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2132                              int pc)
2133 {
2134         struct task_struct *tsk = current;
2135
2136         entry->preempt_count            = pc & 0xff;
2137         entry->pid                      = (tsk) ? tsk->pid : 0;
2138         entry->flags =
2139 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2140                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2141 #else
2142                 TRACE_FLAG_IRQS_NOSUPPORT |
2143 #endif
2144                 ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
2145                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2146                 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2147                 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2148                 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2149 }
2150 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2151
2152 struct ring_buffer_event *
2153 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2154                           int type,
2155                           unsigned long len,
2156                           unsigned long flags, int pc)
2157 {
2158         return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2159 }
2160
2161 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2162 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2163 static int trace_buffered_event_ref;
2164
2165 /**
2166  * trace_buffered_event_enable - enable buffering events
2167  *
2168  * When events are being filtered, it is quicker to use a temporary
2169  * buffer to write the event data into if there's a likely chance
2170  * that it will not be committed. Discarding an event from the ring
2171  * buffer is not as fast as committing it, and is much slower than
2172  * copying the data and then committing.
2173  *
2174  * When an event is to be filtered, per-cpu buffers are allocated to
2175  * write the event data into. If the event is then filtered and discarded,
2176  * it is simply dropped; otherwise, the entire data is committed
2177  * in one shot.
2178  */
2179 void trace_buffered_event_enable(void)
2180 {
2181         struct ring_buffer_event *event;
2182         struct page *page;
2183         int cpu;
2184
2185         WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2186
2187         if (trace_buffered_event_ref++)
2188                 return;
2189
2190         for_each_tracing_cpu(cpu) {
2191                 page = alloc_pages_node(cpu_to_node(cpu),
2192                                         GFP_KERNEL | __GFP_NORETRY, 0);
2193                 if (!page)
2194                         goto failed;
2195
2196                 event = page_address(page);
2197                 memset(event, 0, sizeof(*event));
2198
2199                 per_cpu(trace_buffered_event, cpu) = event;
2200
2201                 preempt_disable();
2202                 if (cpu == smp_processor_id() &&
2203                     this_cpu_read(trace_buffered_event) !=
2204                     per_cpu(trace_buffered_event, cpu))
2205                         WARN_ON_ONCE(1);
2206                 preempt_enable();
2207         }
2208
2209         return;
2210  failed:
2211         trace_buffered_event_disable();
2212 }
2213
2214 static void enable_trace_buffered_event(void *data)
2215 {
2216         /* Probably not needed, but do it anyway */
2217         smp_rmb();
2218         this_cpu_dec(trace_buffered_event_cnt);
2219 }
2220
2221 static void disable_trace_buffered_event(void *data)
2222 {
2223         this_cpu_inc(trace_buffered_event_cnt);
2224 }
2225
2226 /**
2227  * trace_buffered_event_disable - disable buffering events
2228  *
2229  * When a filter is removed, it is faster to not use the buffered
2230  * events, and to commit directly into the ring buffer. Free up
2231  * the temp buffers when there are no more users. This requires
2232  * special synchronization with current events.
2233  */
2234 void trace_buffered_event_disable(void)
2235 {
2236         int cpu;
2237
2238         WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2239
2240         if (WARN_ON_ONCE(!trace_buffered_event_ref))
2241                 return;
2242
2243         if (--trace_buffered_event_ref)
2244                 return;
2245
2246         preempt_disable();
2247         /* For each CPU, set the buffer as used. */
2248         smp_call_function_many(tracing_buffer_mask,
2249                                disable_trace_buffered_event, NULL, 1);
2250         preempt_enable();
2251
2252         /* Wait for all current users to finish */
2253         synchronize_rcu();
2254
2255         for_each_tracing_cpu(cpu) {
2256                 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2257                 per_cpu(trace_buffered_event, cpu) = NULL;
2258         }
2259         /*
2260          * Make sure trace_buffered_event is NULL before clearing
2261          * trace_buffered_event_cnt.
2262          */
2263         smp_wmb();
2264
2265         preempt_disable();
2266         /* Do the work on each cpu */
2267         smp_call_function_many(tracing_buffer_mask,
2268                                enable_trace_buffered_event, NULL, 1);
2269         preempt_enable();
2270 }
2271
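/*
 * Example usage (illustrative sketch): the buffered-event path is reference
 * counted and, per the WARN_ON_ONCE() checks above, must only be toggled
 * with event_mutex held. The helper below is hypothetical.
 */
#if 0
static void example_toggle_buffered_events(bool enable)
{
	mutex_lock(&event_mutex);
	if (enable)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
	mutex_unlock(&event_mutex);
}
#endif
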
2272 static struct ring_buffer *temp_buffer;
2273
2274 struct ring_buffer_event *
2275 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2276                           struct trace_event_file *trace_file,
2277                           int type, unsigned long len,
2278                           unsigned long flags, int pc)
2279 {
2280         struct ring_buffer_event *entry;
2281         int val;
2282
2283         *current_rb = trace_file->tr->trace_buffer.buffer;
2284
2285         if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2286              (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2287             (entry = this_cpu_read(trace_buffered_event))) {
2288                 /* Try to use the per cpu buffer first */
2289                 val = this_cpu_inc_return(trace_buffered_event_cnt);
2290                 if (val == 1) {
2291                         trace_event_setup(entry, type, flags, pc);
2292                         entry->array[0] = len;
2293                         return entry;
2294                 }
2295                 this_cpu_dec(trace_buffered_event_cnt);
2296         }
2297
2298         entry = __trace_buffer_lock_reserve(*current_rb,
2299                                             type, len, flags, pc);
2300         /*
2301          * If tracing is off, but we have triggers enabled
2302          * we still need to look at the event data. Use the temp_buffer
2303          * to store the trace event for the trigger to use. It's recursion
2304          * safe and will not be recorded anywhere.
2305          */
2306         if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2307                 *current_rb = temp_buffer;
2308                 entry = __trace_buffer_lock_reserve(*current_rb,
2309                                                     type, len, flags, pc);
2310         }
2311         return entry;
2312 }
2313 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2314
2315 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2316 static DEFINE_MUTEX(tracepoint_printk_mutex);
2317
2318 static void output_printk(struct trace_event_buffer *fbuffer)
2319 {
2320         struct trace_event_call *event_call;
2321         struct trace_event *event;
2322         unsigned long flags;
2323         struct trace_iterator *iter = tracepoint_print_iter;
2324
2325         /* We should never get here if iter is NULL */
2326         if (WARN_ON_ONCE(!iter))
2327                 return;
2328
2329         event_call = fbuffer->trace_file->event_call;
2330         if (!event_call || !event_call->event.funcs ||
2331             !event_call->event.funcs->trace)
2332                 return;
2333
2334         event = &fbuffer->trace_file->event_call->event;
2335
2336         spin_lock_irqsave(&tracepoint_iter_lock, flags);
2337         trace_seq_init(&iter->seq);
2338         iter->ent = fbuffer->entry;
2339         event_call->event.funcs->trace(iter, 0, event);
2340         trace_seq_putc(&iter->seq, 0);
2341         printk("%s", iter->seq.buffer);
2342
2343         spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2344 }
2345
2346 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2347                              void __user *buffer, size_t *lenp,
2348                              loff_t *ppos)
2349 {
2350         int save_tracepoint_printk;
2351         int ret;
2352
2353         mutex_lock(&tracepoint_printk_mutex);
2354         save_tracepoint_printk = tracepoint_printk;
2355
2356         ret = proc_dointvec(table, write, buffer, lenp, ppos);
2357
2358         /*
2359          * This will force exiting early, as tracepoint_printk
2360          * is always zero when tracepoint_print_iter is not allocated
2361          */
2362         if (!tracepoint_print_iter)
2363                 tracepoint_printk = 0;
2364
2365         if (save_tracepoint_printk == tracepoint_printk)
2366                 goto out;
2367
2368         if (tracepoint_printk)
2369                 static_key_enable(&tracepoint_printk_key.key);
2370         else
2371                 static_key_disable(&tracepoint_printk_key.key);
2372
2373  out:
2374         mutex_unlock(&tracepoint_printk_mutex);
2375
2376         return ret;
2377 }
2378
2379 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2380 {
2381         if (static_key_false(&tracepoint_printk_key.key))
2382                 output_printk(fbuffer);
2383
2384         event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2385                                     fbuffer->event, fbuffer->entry,
2386                                     fbuffer->flags, fbuffer->pc);
2387 }
2388 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2389
2390 /*
2391  * Skip 3:
2392  *
2393  *   trace_buffer_unlock_commit_regs()
2394  *   trace_event_buffer_commit()
2395  *   trace_event_raw_event_xxx()
2396  */
2397 # define STACK_SKIP 3
2398
2399 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2400                                      struct ring_buffer *buffer,
2401                                      struct ring_buffer_event *event,
2402                                      unsigned long flags, int pc,
2403                                      struct pt_regs *regs)
2404 {
2405         __buffer_unlock_commit(buffer, event);
2406
2407         /*
2408          * If regs is not set, then skip the necessary functions.
2409          * Note, we can still get here via blktrace, wakeup tracer
2410          * and mmiotrace, but that's ok if they lose a function or
2411          * two. They are not that meaningful.
2412          */
2413         ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2414         ftrace_trace_userstack(buffer, flags, pc);
2415 }
2416
2417 /*
2418  * Similar to trace_buffer_unlock_commit_regs() but does not dump the stack.
2419  */
2420 void
2421 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2422                                    struct ring_buffer_event *event)
2423 {
2424         __buffer_unlock_commit(buffer, event);
2425 }
2426
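/*
 * Example usage (illustrative sketch): the reserve/commit pair around a
 * function-trace entry, using the no-stack commit defined above. The helper
 * below is hypothetical and mirrors what trace_function() does internally.
 */
#if 0
static void example_write_fn_event(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   unsigned long flags, int pc)
{
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip	 = ip;
	entry->parent_ip = parent_ip;

	/* commit without recording a kernel stack trace */
	trace_buffer_unlock_commit_nostack(buffer, event);
}
#endif
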
2427 static void
2428 trace_process_export(struct trace_export *export,
2429                struct ring_buffer_event *event)
2430 {
2431         struct trace_entry *entry;
2432         unsigned int size = 0;
2433
2434         entry = ring_buffer_event_data(event);
2435         size = ring_buffer_event_length(event);
2436         export->write(export, entry, size);
2437 }
2438
2439 static DEFINE_MUTEX(ftrace_export_lock);
2440
2441 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2442
2443 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2444
2445 static inline void ftrace_exports_enable(void)
2446 {
2447         static_branch_enable(&ftrace_exports_enabled);
2448 }
2449
2450 static inline void ftrace_exports_disable(void)
2451 {
2452         static_branch_disable(&ftrace_exports_enabled);
2453 }
2454
2455 static void ftrace_exports(struct ring_buffer_event *event)
2456 {
2457         struct trace_export *export;
2458
2459         preempt_disable_notrace();
2460
2461         export = rcu_dereference_raw_notrace(ftrace_exports_list);
2462         while (export) {
2463                 trace_process_export(export, event);
2464                 export = rcu_dereference_raw_notrace(export->next);
2465         }
2466
2467         preempt_enable_notrace();
2468 }
2469
2470 static inline void
2471 add_trace_export(struct trace_export **list, struct trace_export *export)
2472 {
2473         rcu_assign_pointer(export->next, *list);
2474         /*
2475          * We are entering export into the list but another
2476          * CPU might be walking that list. We need to make sure
2477          * the export->next pointer is valid before another CPU sees
2478          * the export pointer inserted into the list.
2479          */
2480         rcu_assign_pointer(*list, export);
2481 }
2482
2483 static inline int
2484 rm_trace_export(struct trace_export **list, struct trace_export *export)
2485 {
2486         struct trace_export **p;
2487
2488         for (p = list; *p != NULL; p = &(*p)->next)
2489                 if (*p == export)
2490                         break;
2491
2492         if (*p != export)
2493                 return -1;
2494
2495         rcu_assign_pointer(*p, (*p)->next);
2496
2497         return 0;
2498 }
2499
2500 static inline void
2501 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2502 {
2503         if (*list == NULL)
2504                 ftrace_exports_enable();
2505
2506         add_trace_export(list, export);
2507 }
2508
2509 static inline int
2510 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2511 {
2512         int ret;
2513
2514         ret = rm_trace_export(list, export);
2515         if (*list == NULL)
2516                 ftrace_exports_disable();
2517
2518         return ret;
2519 }
2520
2521 int register_ftrace_export(struct trace_export *export)
2522 {
2523         if (WARN_ON_ONCE(!export->write))
2524                 return -1;
2525
2526         mutex_lock(&ftrace_export_lock);
2527
2528         add_ftrace_export(&ftrace_exports_list, export);
2529
2530         mutex_unlock(&ftrace_export_lock);
2531
2532         return 0;
2533 }
2534 EXPORT_SYMBOL_GPL(register_ftrace_export);
2535
2536 int unregister_ftrace_export(struct trace_export *export)
2537 {
2538         int ret;
2539
2540         mutex_lock(&ftrace_export_lock);
2541
2542         ret = rm_ftrace_export(&ftrace_exports_list, export);
2543
2544         mutex_unlock(&ftrace_export_lock);
2545
2546         return ret;
2547 }
2548 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2549
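/*
 * Example usage (illustrative sketch): a trace export forwards each committed
 * function-trace entry to another sink through its ->write() callback. The
 * callback and the init/exit hooks below are hypothetical.
 */
#if 0
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* push @size bytes of the entry out to some other transport */
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}

static void __exit example_export_exit(void)
{
	unregister_ftrace_export(&example_export);
}
#endif
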
2550 void
2551 trace_function(struct trace_array *tr,
2552                unsigned long ip, unsigned long parent_ip, unsigned long flags,
2553                int pc)
2554 {
2555         struct trace_event_call *call = &event_function;
2556         struct ring_buffer *buffer = tr->trace_buffer.buffer;
2557         struct ring_buffer_event *event;
2558         struct ftrace_entry *entry;
2559
2560         event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2561                                             flags, pc);
2562         if (!event)
2563                 return;
2564         entry   = ring_buffer_event_data(event);
2565         entry->ip                       = ip;
2566         entry->parent_ip                = parent_ip;
2567
2568         if (!call_filter_check_discard(call, entry, buffer, event)) {
2569                 if (static_branch_unlikely(&ftrace_exports_enabled))
2570                         ftrace_exports(event);
2571                 __buffer_unlock_commit(buffer, event);
2572         }
2573 }
2574
2575 #ifdef CONFIG_STACKTRACE
2576
2577 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2578 struct ftrace_stack {
2579         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
2580 };
2581
2582 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2583 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2584
2585 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2586                                  unsigned long flags,
2587                                  int skip, int pc, struct pt_regs *regs)
2588 {
2589         struct trace_event_call *call = &event_kernel_stack;
2590         struct ring_buffer_event *event;
2591         struct stack_entry *entry;
2592         struct stack_trace trace;
2593         int use_stack;
2594         int size = FTRACE_STACK_ENTRIES;
2595
2596         trace.nr_entries        = 0;
2597         trace.skip              = skip;
2598
2599         /*
2600          * Add one, for this function and the call to save_stack_trace().
2601          * If regs is set, then these functions will not be in the way.
2602          */
2603 #ifndef CONFIG_UNWINDER_ORC
2604         if (!regs)
2605                 trace.skip++;
2606 #endif
2607
2608         /*
2609          * Since events can happen in NMIs there's no safe way to
2610          * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2611          * or NMI comes in, it will just have to use the default
2612          * FTRACE_STACK_SIZE.
2613          */
2614         preempt_disable_notrace();
2615
2616         use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2617         /*
2618          * We don't need any atomic variables, just a barrier.
2619          * If an interrupt comes in, we don't care, because it would
2620          * have exited and put the counter back to what we want.
2621          * We just need a barrier to keep gcc from moving things
2622          * around.
2623          */
2624         barrier();
2625         if (use_stack == 1) {
2626                 trace.entries           = this_cpu_ptr(ftrace_stack.calls);
2627                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
2628
2629                 if (regs)
2630                         save_stack_trace_regs(regs, &trace);
2631                 else
2632                         save_stack_trace(&trace);
2633
2634                 if (trace.nr_entries > size)
2635                         size = trace.nr_entries;
2636         } else
2637                 /* From now on, use_stack is a boolean */
2638                 use_stack = 0;
2639
2640         size *= sizeof(unsigned long);
2641
2642         event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2643                                             sizeof(*entry) + size, flags, pc);
2644         if (!event)
2645                 goto out;
2646         entry = ring_buffer_event_data(event);
2647
2648         memset(&entry->caller, 0, size);
2649
2650         if (use_stack)
2651                 memcpy(&entry->caller, trace.entries,
2652                        trace.nr_entries * sizeof(unsigned long));
2653         else {
2654                 trace.max_entries       = FTRACE_STACK_ENTRIES;
2655                 trace.entries           = entry->caller;
2656                 if (regs)
2657                         save_stack_trace_regs(regs, &trace);
2658                 else
2659                         save_stack_trace(&trace);
2660         }
2661
2662         entry->size = trace.nr_entries;
2663
2664         if (!call_filter_check_discard(call, entry, buffer, event))
2665                 __buffer_unlock_commit(buffer, event);
2666
2667  out:
2668         /* Again, don't let gcc optimize things here */
2669         barrier();
2670         __this_cpu_dec(ftrace_stack_reserve);
2671         preempt_enable_notrace();
2672
2673 }
2674
2675 static inline void ftrace_trace_stack(struct trace_array *tr,
2676                                       struct ring_buffer *buffer,
2677                                       unsigned long flags,
2678                                       int skip, int pc, struct pt_regs *regs)
2679 {
2680         if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2681                 return;
2682
2683         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2684 }
2685
2686 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2687                    int pc)
2688 {
2689         struct ring_buffer *buffer = tr->trace_buffer.buffer;
2690
2691         if (rcu_is_watching()) {
2692                 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2693                 return;
2694         }
2695
2696         /*
2697          * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2698          * but if the above rcu_is_watching() failed, then the NMI
2699          * triggered someplace critical, and rcu_irq_enter() should
2700          * not be called from NMI.
2701          */
2702         if (unlikely(in_nmi()))
2703                 return;
2704
2705         rcu_irq_enter_irqson();
2706         __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2707         rcu_irq_exit_irqson();
2708 }
2709
2710 /**
2711  * trace_dump_stack - record a stack back trace in the trace buffer
2712  * @skip: Number of functions to skip (helper handlers)
2713  */
2714 void trace_dump_stack(int skip)
2715 {
2716         unsigned long flags;
2717
2718         if (tracing_disabled || tracing_selftest_running)
2719                 return;
2720
2721         local_save_flags(flags);
2722
2723 #ifndef CONFIG_UNWINDER_ORC
2724         /* Skip 1 to skip this function. */
2725         skip++;
2726 #endif
2727         __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2728                              flags, skip, preempt_count(), NULL);
2729 }
2730 EXPORT_SYMBOL_GPL(trace_dump_stack);
2731
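/*
 * Example usage (illustrative sketch): trace_dump_stack() records the current
 * kernel stack in the global trace buffer; passing skip == 1 here drops the
 * helper itself so the trace starts at its caller. The helper is hypothetical.
 */
#if 0
static void example_report_problem(const char *msg)
{
	trace_printk("%s\n", msg);
	trace_dump_stack(1);
}
#endif
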
2732 static DEFINE_PER_CPU(int, user_stack_count);
2733
2734 void
2735 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2736 {
2737         struct trace_event_call *call = &event_user_stack;
2738         struct ring_buffer_event *event;
2739         struct userstack_entry *entry;
2740         struct stack_trace trace;
2741
2742         if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2743                 return;
2744
2745         /*
2746          * NMIs cannot handle page faults, even with fixups.
2747          * The save user stack can (and often does) fault.
2748          */
2749         if (unlikely(in_nmi()))
2750                 return;
2751
2752         /*
2753          * prevent recursion, since the user stack tracing may
2754          * trigger other kernel events.
2755          */
2756         preempt_disable();
2757         if (__this_cpu_read(user_stack_count))
2758                 goto out;
2759
2760         __this_cpu_inc(user_stack_count);
2761
2762         event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2763                                             sizeof(*entry), flags, pc);
2764         if (!event)
2765                 goto out_drop_count;
2766         entry   = ring_buffer_event_data(event);
2767
2768         entry->tgid             = current->tgid;
2769         memset(&entry->caller, 0, sizeof(entry->caller));
2770
2771         trace.nr_entries        = 0;
2772         trace.max_entries       = FTRACE_STACK_ENTRIES;
2773         trace.skip              = 0;
2774         trace.entries           = entry->caller;
2775
2776         save_stack_trace_user(&trace);
2777         if (!call_filter_check_discard(call, entry, buffer, event))
2778                 __buffer_unlock_commit(buffer, event);
2779
2780  out_drop_count:
2781         __this_cpu_dec(user_stack_count);
2782  out:
2783         preempt_enable();
2784 }
2785
2786 #ifdef UNUSED
2787 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2788 {
2789         ftrace_trace_userstack(tr, flags, preempt_count());
2790 }
2791 #endif /* UNUSED */
2792
2793 #endif /* CONFIG_STACKTRACE */
2794
2795 /* created for use with alloc_percpu */
2796 struct trace_buffer_struct {
2797         int nesting;
2798         char buffer[4][TRACE_BUF_SIZE];
2799 };
2800
2801 static struct trace_buffer_struct *trace_percpu_buffer;
2802
2803 /*
2804  * This allows for lockless recording. If we're nested too deeply, then
2805  * this returns NULL.
2806  */
2807 static char *get_trace_buf(void)
2808 {
2809         struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2810
2811         if (!buffer || buffer->nesting >= 4)
2812                 return NULL;
2813
2814         buffer->nesting++;
2815
2816         /* Interrupts must see nesting incremented before we use the buffer */
2817         barrier();
2818         return &buffer->buffer[buffer->nesting][0];
2819 }
2820
2821 static void put_trace_buf(void)
2822 {
2823         /* Don't let the decrement of nesting leak before this */
2824         barrier();
2825         this_cpu_dec(trace_percpu_buffer->nesting);
2826 }
2827
2828 static int alloc_percpu_trace_buffer(void)
2829 {
2830         struct trace_buffer_struct *buffers;
2831
2832         buffers = alloc_percpu(struct trace_buffer_struct);
2833         if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2834                 return -ENOMEM;
2835
2836         trace_percpu_buffer = buffers;
2837         return 0;
2838 }
2839
2840 static int buffers_allocated;
2841
2842 void trace_printk_init_buffers(void)
2843 {
2844         if (buffers_allocated)
2845                 return;
2846
2847         if (alloc_percpu_trace_buffer())
2848                 return;
2849
2850         /* trace_printk() is for debug use only. Don't use it in production. */
2851
2852         pr_warn("\n");
2853         pr_warn("**********************************************************\n");
2854         pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2855         pr_warn("**                                                      **\n");
2856         pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
2857         pr_warn("**                                                      **\n");
2858         pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
2859         pr_warn("** unsafe for production use.                           **\n");
2860         pr_warn("**                                                      **\n");
2861         pr_warn("** If you see this message and you are not debugging    **\n");
2862         pr_warn("** the kernel, report this immediately to your vendor!  **\n");
2863         pr_warn("**                                                      **\n");
2864         pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2865         pr_warn("**********************************************************\n");
2866
2867         /* Expand the buffers to set size */
2868         tracing_update_buffers();
2869
2870         buffers_allocated = 1;
2871
2872         /*
2873          * trace_printk_init_buffers() can be called by modules.
2874          * If that happens, then we need to start cmdline recording
2875          * directly here. If the global_trace.buffer is already
2876          * allocated here, then this was called by module code.
2877          */
2878         if (global_trace.trace_buffer.buffer)
2879                 tracing_start_cmdline_record();
2880 }
2881
2882 void trace_printk_start_comm(void)
2883 {
2884         /* Start tracing comms if trace printk is set */
2885         if (!buffers_allocated)
2886                 return;
2887         tracing_start_cmdline_record();
2888 }
2889
2890 static void trace_printk_start_stop_comm(int enabled)
2891 {
2892         if (!buffers_allocated)
2893                 return;
2894
2895         if (enabled)
2896                 tracing_start_cmdline_record();
2897         else
2898                 tracing_stop_cmdline_record();
2899 }
2900
2901 /**
2902  * trace_vbprintk - write binary msg to tracing buffer
2903  *
2904  */
2905 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2906 {
2907         struct trace_event_call *call = &event_bprint;
2908         struct ring_buffer_event *event;
2909         struct ring_buffer *buffer;
2910         struct trace_array *tr = &global_trace;
2911         struct bprint_entry *entry;
2912         unsigned long flags;
2913         char *tbuffer;
2914         int len = 0, size, pc;
2915
2916         if (unlikely(tracing_selftest_running || tracing_disabled))
2917                 return 0;
2918
2919         /* Don't pollute graph traces with trace_vprintk internals */
2920         pause_graph_tracing();
2921
2922         pc = preempt_count();
2923         preempt_disable_notrace();
2924
2925         tbuffer = get_trace_buf();
2926         if (!tbuffer) {
2927                 len = 0;
2928                 goto out_nobuffer;
2929         }
2930
2931         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2932
2933         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2934                 goto out;
2935
2936         local_save_flags(flags);
2937         size = sizeof(*entry) + sizeof(u32) * len;
2938         buffer = tr->trace_buffer.buffer;
2939         event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2940                                             flags, pc);
2941         if (!event)
2942                 goto out;
2943         entry = ring_buffer_event_data(event);
2944         entry->ip                       = ip;
2945         entry->fmt                      = fmt;
2946
2947         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2948         if (!call_filter_check_discard(call, entry, buffer, event)) {
2949                 __buffer_unlock_commit(buffer, event);
2950                 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2951         }
2952
2953 out:
2954         put_trace_buf();
2955
2956 out_nobuffer:
2957         preempt_enable_notrace();
2958         unpause_graph_tracing();
2959
2960         return len;
2961 }
2962 EXPORT_SYMBOL_GPL(trace_vbprintk);
2963
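/*
 * Example usage (illustrative sketch): trace_vbprintk() is normally reached
 * through the trace_printk() macro, which records the arguments in binary
 * form when the format string allows it. The debug helper below is
 * hypothetical and, as the banner above warns, for debugging only.
 */
#if 0
static void example_debug_event(int cpu, unsigned long long latency_ns)
{
	trace_printk("cpu %d latency %llu ns\n", cpu, latency_ns);
}
#endif
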
2964 __printf(3, 0)
2965 static int
2966 __trace_array_vprintk(struct ring_buffer *buffer,
2967                       unsigned long ip, const char *fmt, va_list args)
2968 {
2969         struct trace_event_call *call = &event_print;
2970         struct ring_buffer_event *event;
2971         int len = 0, size, pc;
2972         struct print_entry *entry;
2973         unsigned long flags;
2974         char *tbuffer;
2975
2976         if (tracing_disabled || tracing_selftest_running)
2977                 return 0;
2978
2979         /* Don't pollute graph traces with trace_vprintk internals */
2980         pause_graph_tracing();
2981
2982         pc = preempt_count();
2983         preempt_disable_notrace();
2984
2985
2986         tbuffer = get_trace_buf();
2987         if (!tbuffer) {
2988                 len = 0;
2989                 goto out_nobuffer;
2990         }
2991
2992         len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2993
2994         local_save_flags(flags);
2995         size = sizeof(*entry) + len + 1;
2996         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2997                                             flags, pc);
2998         if (!event)
2999                 goto out;
3000         entry = ring_buffer_event_data(event);
3001         entry->ip = ip;
3002
3003         memcpy(&entry->buf, tbuffer, len + 1);
3004         if (!call_filter_check_discard(call, entry, buffer, event)) {
3005                 __buffer_unlock_commit(buffer, event);
3006                 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3007         }
3008
3009 out:
3010         put_trace_buf();
3011
3012 out_nobuffer:
3013         preempt_enable_notrace();
3014         unpause_graph_tracing();
3015
3016         return len;
3017 }
3018
3019 __printf(3, 0)
3020 int trace_array_vprintk(struct trace_array *tr,
3021                         unsigned long ip, const char *fmt, va_list args)
3022 {
3023         return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3024 }
3025
3026 __printf(3, 0)
3027 int trace_array_printk(struct trace_array *tr,
3028                        unsigned long ip, const char *fmt, ...)
3029 {
3030         int ret;
3031         va_list ap;
3032
3033         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3034                 return 0;
3035
3036         va_start(ap, fmt);
3037         ret = trace_array_vprintk(tr, ip, fmt, ap);
3038         va_end(ap);
3039         return ret;
3040 }
3041
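/*
 * Example usage (illustrative sketch): trace_array_printk() writes a
 * printk-style message into a specific trace instance rather than the global
 * buffer; _THIS_IP_ supplies the caller's address. The helper is hypothetical.
 */
#if 0
static void example_instance_note(struct trace_array *tr, int value)
{
	trace_array_printk(tr, _THIS_IP_, "example value: %d\n", value);
}
#endif
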
3042 __printf(3, 4)
3043 int trace_array_printk_buf(struct ring_buffer *buffer,
3044                            unsigned long ip, const char *fmt, ...)
3045 {
3046         int ret;
3047         va_list ap;
3048
3049         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3050                 return 0;
3051
3052         va_start(ap, fmt);
3053         ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3054         va_end(ap);
3055         return ret;
3056 }
3057
3058 __printf(2, 0)
3059 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3060 {
3061         return trace_array_vprintk(&global_trace, ip, fmt, args);
3062 }
3063 EXPORT_SYMBOL_GPL(trace_vprintk);
3064
3065 static void trace_iterator_increment(struct trace_iterator *iter)
3066 {
3067         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3068
3069         iter->idx++;
3070         if (buf_iter)
3071                 ring_buffer_read(buf_iter, NULL);
3072 }
3073
3074 static struct trace_entry *
3075 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3076                 unsigned long *lost_events)
3077 {
3078         struct ring_buffer_event *event;
3079         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3080
3081         if (buf_iter)
3082                 event = ring_buffer_iter_peek(buf_iter, ts);
3083         else
3084                 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3085                                          lost_events);
3086
3087         if (event) {
3088                 iter->ent_size = ring_buffer_event_length(event);
3089                 return ring_buffer_event_data(event);
3090         }
3091         iter->ent_size = 0;
3092         return NULL;
3093 }
3094
3095 static struct trace_entry *
3096 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3097                   unsigned long *missing_events, u64 *ent_ts)
3098 {
3099         struct ring_buffer *buffer = iter->trace_buffer->buffer;
3100         struct trace_entry *ent, *next = NULL;
3101         unsigned long lost_events = 0, next_lost = 0;
3102         int cpu_file = iter->cpu_file;
3103         u64 next_ts = 0, ts;
3104         int next_cpu = -1;
3105         int next_size = 0;
3106         int cpu;
3107
3108         /*
3109          * If we are in a per_cpu trace file, don't bother iterating over
3110          * all cpus; just peek at that cpu directly.
3111          */
3112         if (cpu_file > RING_BUFFER_ALL_CPUS) {
3113                 if (ring_buffer_empty_cpu(buffer, cpu_file))
3114                         return NULL;
3115                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3116                 if (ent_cpu)
3117                         *ent_cpu = cpu_file;
3118
3119                 return ent;
3120         }
3121
3122         for_each_tracing_cpu(cpu) {
3123
3124                 if (ring_buffer_empty_cpu(buffer, cpu))
3125                         continue;
3126
3127                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3128
3129                 /*
3130                  * Pick the entry with the smallest timestamp:
3131                  */
3132                 if (ent && (!next || ts < next_ts)) {
3133                         next = ent;
3134                         next_cpu = cpu;
3135                         next_ts = ts;
3136                         next_lost = lost_events;
3137                         next_size = iter->ent_size;
3138                 }
3139         }
3140
3141         iter->ent_size = next_size;
3142
3143         if (ent_cpu)
3144                 *ent_cpu = next_cpu;
3145
3146         if (ent_ts)
3147                 *ent_ts = next_ts;
3148
3149         if (missing_events)
3150                 *missing_events = next_lost;
3151
3152         return next;
3153 }
3154
3155 /* Find the next real entry, without updating the iterator itself */
3156 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3157                                           int *ent_cpu, u64 *ent_ts)
3158 {
3159         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3160 }
3161
3162 /* Find the next real entry, and increment the iterator to the next entry */
3163 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3164 {
3165         iter->ent = __find_next_entry(iter, &iter->cpu,
3166                                       &iter->lost_events, &iter->ts);
3167
3168         if (iter->ent)
3169                 trace_iterator_increment(iter);
3170
3171         return iter->ent ? iter : NULL;
3172 }
3173
3174 static void trace_consume(struct trace_iterator *iter)
3175 {
3176         ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3177                             &iter->lost_events);
3178 }
3179
3180 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3181 {
3182         struct trace_iterator *iter = m->private;
3183         int i = (int)*pos;
3184         void *ent;
3185
3186         WARN_ON_ONCE(iter->leftover);
3187
3188         (*pos)++;
3189
3190         /* can't go backwards */
3191         if (iter->idx > i)
3192                 return NULL;
3193
3194         if (iter->idx < 0)
3195                 ent = trace_find_next_entry_inc(iter);
3196         else
3197                 ent = iter;
3198
3199         while (ent && iter->idx < i)
3200                 ent = trace_find_next_entry_inc(iter);
3201
3202         iter->pos = *pos;
3203
3204         return ent;
3205 }
3206
3207 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3208 {
3209         struct ring_buffer_event *event;
3210         struct ring_buffer_iter *buf_iter;
3211         unsigned long entries = 0;
3212         u64 ts;
3213
3214         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3215
3216         buf_iter = trace_buffer_iter(iter, cpu);
3217         if (!buf_iter)
3218                 return;
3219
3220         ring_buffer_iter_reset(buf_iter);
3221
3222         /*
3223          * We could have the case with the max latency tracers
3224          * that a reset never took place on a cpu. This is evidenced
3225          * by the timestamp being before the start of the buffer.
3226          */
3227         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3228                 if (ts >= iter->trace_buffer->time_start)
3229                         break;
3230                 entries++;
3231                 ring_buffer_read(buf_iter, NULL);
3232         }
3233
3234         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3235 }
3236
3237 /*
3238  * The current tracer is copied to avoid global locking
3239  * all around.
3240  */
3241 static void *s_start(struct seq_file *m, loff_t *pos)
3242 {
3243         struct trace_iterator *iter = m->private;
3244         struct trace_array *tr = iter->tr;
3245         int cpu_file = iter->cpu_file;
3246         void *p = NULL;
3247         loff_t l = 0;
3248         int cpu;
3249
3250         /*
3251          * copy the tracer to avoid using a global lock all around.
3252          * iter->trace is a copy of current_trace; the pointer to the
3253          * name may be used instead of a strcmp(), as iter->trace->name
3254          * will point to the same string as current_trace->name.
3255          */
3256         mutex_lock(&trace_types_lock);
3257         if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3258                 *iter->trace = *tr->current_trace;
3259         mutex_unlock(&trace_types_lock);
3260
3261 #ifdef CONFIG_TRACER_MAX_TRACE
3262         if (iter->snapshot && iter->trace->use_max_tr)
3263                 return ERR_PTR(-EBUSY);
3264 #endif
3265
3266         if (!iter->snapshot)
3267                 atomic_inc(&trace_record_taskinfo_disabled);
3268
3269         if (*pos != iter->pos) {
3270                 iter->ent = NULL;
3271                 iter->cpu = 0;
3272                 iter->idx = -1;
3273
3274                 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3275                         for_each_tracing_cpu(cpu)
3276                                 tracing_iter_reset(iter, cpu);
3277                 } else
3278                         tracing_iter_reset(iter, cpu_file);
3279
3280                 iter->leftover = 0;
3281                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3282                         ;
3283
3284         } else {
3285                 /*
3286                  * If we overflowed the seq_file before, then we want
3287                  * to just reuse the trace_seq buffer again.
3288                  */
3289                 if (iter->leftover)
3290                         p = iter;
3291                 else {
3292                         l = *pos - 1;
3293                         p = s_next(m, p, &l);
3294                 }
3295         }
3296
3297         trace_event_read_lock();
3298         trace_access_lock(cpu_file);
3299         return p;
3300 }
3301
3302 static void s_stop(struct seq_file *m, void *p)
3303 {
3304         struct trace_iterator *iter = m->private;
3305
3306 #ifdef CONFIG_TRACER_MAX_TRACE
3307         if (iter->snapshot && iter->trace->use_max_tr)
3308                 return;
3309 #endif
3310
3311         if (!iter->snapshot)
3312                 atomic_dec(&trace_record_taskinfo_disabled);
3313
3314         trace_access_unlock(iter->cpu_file);
3315         trace_event_read_unlock();
3316 }
3317
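     /*
      * Sum, over all tracing CPUs, the entries still present in the
      * buffer (@entries) and the total written including overruns
      * (@total), honoring any skipped_entries recorded by
      * tracing_iter_reset().
      */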
3318 static void
3319 get_total_entries(struct trace_buffer *buf,
3320                   unsigned long *total, unsigned long *entries)
3321 {
3322         unsigned long count;
3323         int cpu;
3324
3325         *total = 0;
3326         *entries = 0;
3327
3328         for_each_tracing_cpu(cpu) {
3329                 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3330                 /*
3331                  * If this buffer has skipped entries, then we hold all
3332                  * entries for the trace and we need to ignore the
3333                  * ones before the time stamp.
3334                  */
3335                 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3336                         count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3337                         /* total is the same as the entries */
3338                         *total += count;
3339                 } else
3340                         *total += count +
3341                                 ring_buffer_overrun_cpu(buf->buffer, cpu);
3342                 *entries += count;
3343         }
3344 }
3345
3346 static void print_lat_help_header(struct seq_file *m)
3347 {
3348         seq_puts(m, "#                  _------=> CPU#            \n"
3349                     "#                 / _-----=> irqs-off        \n"
3350                     "#                | / _----=> need-resched    \n"
3351                     "#                || / _---=> hardirq/softirq \n"
3352                     "#                ||| / _--=> preempt-depth   \n"
3353                     "#                |||| /     delay            \n"
3354                     "#  cmd     pid   ||||| time  |   caller      \n"
3355                     "#     \\   /      |||||  \\    |   /         \n");
3356 }
3357
3358 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3359 {
3360         unsigned long total;
3361         unsigned long entries;
3362
3363         get_total_entries(buf, &total, &entries);
3364         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
3365                    entries, total, num_online_cpus());
3366         seq_puts(m, "#\n");
3367 }
3368
3369 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3370                                    unsigned int flags)
3371 {
3372         bool tgid = flags & TRACE_ITER_RECORD_TGID;
3373
3374         print_event_info(buf, m);
3375
3376         seq_printf(m, "#           TASK-PID   %s  CPU#   TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
3377         seq_printf(m, "#              | |     %s    |       |         |\n",      tgid ? "  |      " : "");
3378 }
3379
3380 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3381                                        unsigned int flags)
3382 {
3383         bool tgid = flags & TRACE_ITER_RECORD_TGID;
3384         const char tgid_space[] = "          ";
3385         const char space[] = "  ";
3386
3387         seq_printf(m, "#                          %s  _-----=> irqs-off\n",
3388                    tgid ? tgid_space : space);
3389         seq_printf(m, "#                          %s / _----=> need-resched\n",
3390                    tgid ? tgid_space : space);
3391         seq_printf(m, "#                          %s| / _---=> hardirq/softirq\n",
3392                    tgid ? tgid_space : space);
3393         seq_printf(m, "#                          %s|| / _--=> preempt-depth\n",
3394                    tgid ? tgid_space : space);
3395         seq_printf(m, "#                          %s||| /     delay\n",
3396                    tgid ? tgid_space : space);
3397         seq_printf(m, "#           TASK-PID %sCPU#  ||||    TIMESTAMP  FUNCTION\n",
3398                    tgid ? "   TGID   " : space);
3399         seq_printf(m, "#              | |   %s  |   ||||       |         |\n",
3400                    tgid ? "     |    " : space);
3401 }
3402
3403 void
3404 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3405 {
3406         unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3407         struct trace_buffer *buf = iter->trace_buffer;
3408         struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3409         struct tracer *type = iter->trace;
3410         unsigned long entries;
3411         unsigned long total;
3412         const char *name;
3413
3414         name = type->name;
3415
3416         get_total_entries(buf, &total, &entries);
3417
3418         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3419                    name, UTS_RELEASE);
3420         seq_puts(m, "# -----------------------------------"
3421                  "---------------------------------\n");
3422         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3423                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3424                    nsecs_to_usecs(data->saved_latency),
3425                    entries,
3426                    total,
3427                    buf->cpu,
3428 #if defined(CONFIG_PREEMPT_NONE)
3429                    "server",
3430 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3431                    "desktop",
3432 #elif defined(CONFIG_PREEMPT)
3433                    "preempt",
3434 #else
3435                    "unknown",
3436 #endif
3437                    /* These are reserved for later use */
3438                    0, 0, 0, 0);
3439 #ifdef CONFIG_SMP
3440         seq_printf(m, " #P:%d)\n", num_online_cpus());
3441 #else
3442         seq_puts(m, ")\n");
3443 #endif
3444         seq_puts(m, "#    -----------------\n");
3445         seq_printf(m, "#    | task: %.16s-%d "
3446                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3447                    data->comm, data->pid,
3448                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3449                    data->policy, data->rt_priority);
3450         seq_puts(m, "#    -----------------\n");
3451
3452         if (data->critical_start) {
3453                 seq_puts(m, "#  => started at: ");
3454                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3455                 trace_print_seq(m, &iter->seq);
3456                 seq_puts(m, "\n#  => ended at:   ");
3457                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3458                 trace_print_seq(m, &iter->seq);
3459                 seq_puts(m, "\n#\n");
3460         }
3461
3462         seq_puts(m, "#\n");
3463 }
3464
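     /*
      * Print a one-time "CPU buffer started" annotation the first time
      * output switches to a given CPU.  Only done when the "annotate"
      * trace option is set and this iterator was flagged for annotation
      * (the buffer overran), and skipped for CPUs whose early entries
      * were dropped by tracing_iter_reset().
      */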
3465 static void test_cpu_buff_start(struct trace_iterator *iter)
3466 {
3467         struct trace_seq *s = &iter->seq;
3468         struct trace_array *tr = iter->tr;
3469
3470         if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3471                 return;
3472
3473         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3474                 return;
3475
3476         if (cpumask_available(iter->started) &&
3477             cpumask_test_cpu(iter->cpu, iter->started))
3478                 return;
3479
3480         if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3481                 return;
3482
3483         if (cpumask_available(iter->started))
3484                 cpumask_set_cpu(iter->cpu, iter->started);
3485
3486         /* Don't print the "buffer started" annotation for the first entry of the trace */
3487         if (iter->idx > 1)
3488                 trace_seq_printf(s, "##### CPU %u buffer started #####\n",
3489                                 iter->cpu);
3490 }
3491
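     /*
      * Default (human readable) formatting of a single entry: print the
      * context columns, then hand the entry to the event's trace()
      * callback, or report an unknown event type.
      */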
3492 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3493 {
3494         struct trace_array *tr = iter->tr;
3495         struct trace_seq *s = &iter->seq;
3496         unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3497         struct trace_entry *entry;
3498         struct trace_event *event;
3499
3500         entry = iter->ent;
3501
3502         test_cpu_buff_start(iter);
3503
3504         event = ftrace_find_event(entry->type);
3505
3506         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3507                 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3508                         trace_print_lat_context(iter);
3509                 else
3510                         trace_print_context(iter);
3511         }
3512
3513         if (trace_seq_has_overflowed(s))
3514                 return TRACE_TYPE_PARTIAL_LINE;
3515
3516         if (event)
3517                 return event->funcs->trace(iter, sym_flags, event);
3518
3519         trace_seq_printf(s, "Unknown type %d\n", entry->type);
3520
3521         return trace_handle_return(s);
3522 }
3523
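     /* "raw" output: numeric pid/CPU/timestamp plus the event's raw() callback. */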
3524 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3525 {
3526         struct trace_array *tr = iter->tr;
3527         struct trace_seq *s = &iter->seq;
3528         struct trace_entry *entry;
3529         struct trace_event *event;
3530
3531         entry = iter->ent;
3532
3533         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3534                 trace_seq_printf(s, "%d %d %llu ",
3535                                  entry->pid, iter->cpu, iter->ts);
3536
3537         if (trace_seq_has_overflowed(s))
3538                 return TRACE_TYPE_PARTIAL_LINE;
3539
3540         event = ftrace_find_event(entry->type);
3541         if (event)
3542                 return event->funcs->raw(iter, 0, event);
3543
3544         trace_seq_printf(s, "%d ?\n", entry->type);
3545
3546         return trace_handle_return(s);
3547 }
3548
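     /* "hex" output: the same fields emitted as hex via the event's hex() callback. */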
3549 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3550 {
3551         struct trace_array *tr = iter->tr;
3552         struct trace_seq *s = &iter->seq;
3553         unsigned char newline = '\n';
3554         struct trace_entry *entry;
3555         struct trace_event *event;
3556
3557         entry = iter->ent;
3558
3559         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3560                 SEQ_PUT_HEX_FIELD(s, entry->pid);
3561                 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3562                 SEQ_PUT_HEX_FIELD(s, iter->ts);
3563                 if (trace_seq_has_overflowed(s))
3564                         return TRACE_TYPE_PARTIAL_LINE;
3565         }
3566
3567         event = ftrace_find_event(entry->type);
3568         if (event) {
3569                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3570                 if (ret != TRACE_TYPE_HANDLED)
3571                         return ret;
3572         }
3573
3574         SEQ_PUT_FIELD(s, newline);
3575
3576         return trace_handle_return(s);
3577 }
3578
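     /* "bin" output: fields emitted in binary via the event's binary() callback. */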
3579 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3580 {
3581         struct trace_array *tr = iter->tr;
3582         struct trace_seq *s = &iter->seq;
3583         struct trace_entry *entry;
3584         struct trace_event *event;
3585
3586         entry = iter->ent;
3587
3588         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3589                 SEQ_PUT_FIELD(s, entry->pid);
3590                 SEQ_PUT_FIELD(s, iter->cpu);
3591                 SEQ_PUT_FIELD(s, iter->ts);
3592                 if (trace_seq_has_overflowed(s))
3593                         return TRACE_TYPE_PARTIAL_LINE;
3594         }
3595
3596         event = ftrace_find_event(entry->type);
3597         return event ? event->funcs->binary(iter, 0, event) :
3598                 TRACE_TYPE_HANDLED;
3599 }
3600
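     /* Return 1 if there is nothing left to read for this iterator, 0 otherwise. */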
3601 int trace_empty(struct trace_iterator *iter)
3602 {
3603         struct ring_buffer_iter *buf_iter;
3604         int cpu;
3605
3606         /* If we are looking at one CPU buffer, only check that one */
3607         if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3608                 cpu = iter->cpu_file;
3609                 buf_iter = trace_buffer_iter(iter, cpu);
3610                 if (buf_iter) {
3611                         if (!ring_buffer_iter_empty(buf_iter))
3612                                 return 0;
3613                 } else {
3614                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3615                                 return 0;
3616                 }
3617                 return 1;
3618         }
3619
3620         for_each_tracing_cpu(cpu) {
3621                 buf_iter = trace_buffer_iter(iter, cpu);
3622                 if (buf_iter) {
3623                         if (!ring_buffer_iter_empty(buf_iter))
3624                                 return 0;
3625                 } else {
3626                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3627                                 return 0;
3628                 }
3629         }
3630
3631         return 1;
3632 }
3633
3634 /*  Called with trace_event_read_lock() held. */
3635 enum print_line_t print_trace_line(struct trace_iterator *iter)
3636 {
3637         struct trace_array *tr = iter->tr;
3638         unsigned long trace_flags = tr->trace_flags;
3639         enum print_line_t ret;
3640
3641         if (iter->lost_events) {
3642                 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3643                                  iter->cpu, iter->lost_events);
3644                 if (trace_seq_has_overflowed(&iter->seq))
3645                         return TRACE_TYPE_PARTIAL_LINE;
3646         }
3647
3648         if (iter->trace && iter->trace->print_line) {
3649                 ret = iter->trace->print_line(iter);
3650                 if (ret != TRACE_TYPE_UNHANDLED)
3651                         return ret;
3652         }
3653
3654         if (iter->ent->type == TRACE_BPUTS &&
3655                         trace_flags & TRACE_ITER_PRINTK &&
3656                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3657                 return trace_print_bputs_msg_only(iter);
3658
3659         if (iter->ent->type == TRACE_BPRINT &&
3660                         trace_flags & TRACE_ITER_PRINTK &&
3661                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3662                 return trace_print_bprintk_msg_only(iter);
3663
3664         if (iter->ent->type == TRACE_PRINT &&
3665                         trace_flags & TRACE_ITER_PRINTK &&
3666                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3667                 return trace_print_printk_msg_only(iter);
3668
3669         if (trace_flags & TRACE_ITER_BIN)
3670                 return print_bin_fmt(iter);
3671
3672         if (trace_flags & TRACE_ITER_HEX)
3673                 return print_hex_fmt(iter);
3674
3675         if (trace_flags & TRACE_ITER_RAW)
3676                 return print_raw_fmt(iter);
3677
3678         return print_trace_fmt(iter);
3679 }
3680
3681 void trace_latency_header(struct seq_file *m)
3682 {
3683         struct trace_iterator *iter = m->private;
3684         struct trace_array *tr = iter->tr;
3685
3686         /* print nothing if the buffers are empty */
3687         if (trace_empty(iter))
3688                 return;
3689
3690         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3691                 print_trace_header(m, iter);
3692
3693         if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3694                 print_lat_help_header(m);
3695 }
3696
3697 void trace_default_header(struct seq_file *m)
3698 {
3699         struct trace_iterator *iter = m->private;
3700         struct trace_array *tr = iter->tr;
3701         unsigned long trace_flags = tr->trace_flags;
3702
3703         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3704                 return;
3705
3706         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3707                 /* print nothing if the buffers are empty */
3708                 if (trace_empty(iter))
3709                         return;
3710                 print_trace_header(m, iter);
3711                 if (!(trace_flags & TRACE_ITER_VERBOSE))
3712                         print_lat_help_header(m);
3713         } else {
3714                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3715                         if (trace_flags & TRACE_ITER_IRQ_INFO)
3716                                 print_func_help_header_irq(iter->trace_buffer,
3717                                                            m, trace_flags);
3718                         else
3719                                 print_func_help_header(iter->trace_buffer, m,
3720                                                        trace_flags);
3721                 }
3722         }
3723 }
3724
3725 static void test_ftrace_alive(struct seq_file *m)
3726 {
3727         if (!ftrace_is_dead())
3728                 return;
3729         seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3730                     "#          MAY BE MISSING FUNCTION EVENTS\n");
3731 }
3732
3733 #ifdef CONFIG_TRACER_MAX_TRACE
3734 static void show_snapshot_main_help(struct seq_file *m)
3735 {
3736         seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3737                     "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3738                     "#                      Takes a snapshot of the main buffer.\n"
3739                     "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3740                     "#                      (Doesn't have to be '2' works with any number that\n"
3741                     "#                       is not a '0' or '1')\n");
3742 }
3743
3744 static void show_snapshot_percpu_help(struct seq_file *m)
3745 {
3746         seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3747 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3748         seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3749                     "#                      Takes a snapshot of the main buffer for this cpu.\n");
3750 #else
3751         seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3752                     "#                     Must use main snapshot file to allocate.\n");
3753 #endif
3754         seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3755                     "#                      (Doesn't have to be '2' works with any number that\n"
3756                     "#                       is not a '0' or '1')\n");
3757 }
3758
3759 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3760 {
3761         if (iter->tr->allocated_snapshot)
3762                 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3763         else
3764                 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3765
3766         seq_puts(m, "# Snapshot commands:\n");
3767         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3768                 show_snapshot_main_help(m);
3769         else
3770                 show_snapshot_percpu_help(m);
3771 }
3772 #else
3773 /* Should never be called */
3774 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3775 #endif
3776
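     /*
      * seq_file ->show(): print the header block when there is no
      * current entry, re-emit a line that overflowed the seq_file on
      * the previous pass (iter->leftover), or format the current
      * trace line.
      */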
3777 static int s_show(struct seq_file *m, void *v)
3778 {
3779         struct trace_iterator *iter = v;
3780         int ret;
3781
3782         if (iter->ent == NULL) {
3783                 if (iter->tr) {
3784                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
3785                         seq_puts(m, "#\n");
3786                         test_ftrace_alive(m);
3787                 }
3788                 if (iter->snapshot && trace_empty(iter))
3789                         print_snapshot_help(m, iter);
3790                 else if (iter->trace && iter->trace->print_header)
3791                         iter->trace->print_header(m);
3792                 else
3793                         trace_default_header(m);
3794
3795         } else if (iter->leftover) {
3796                 /*
3797                  * If we filled the seq_file buffer earlier, we
3798                  * want to just show it now.
3799                  */
3800                 ret = trace_print_seq(m, &iter->seq);
3801
3802                 /* ret should this time be zero, but you never know */
3803                 iter->leftover = ret;
3804
3805         } else {
3806                 print_trace_line(iter);
3807                 ret = trace_print_seq(m, &iter->seq);
3808                 /*
3809                  * If we overflow the seq_file buffer, then it will
3810                  * ask us for this data again at start up.
3811                  * Use that instead.
3812                  *  ret is 0 if seq_file write succeeded.
3813                  *        -1 otherwise.
3814                  */
3815                 iter->leftover = ret;
3816         }
3817
3818         return 0;
3819 }
3820
3821 /*
3822  * Should be used after trace_array_get(), trace_types_lock
3823  * ensures that i_cdev was already initialized.
3824  */
3825 static inline int tracing_get_cpu(struct inode *inode)
3826 {
3827         if (inode->i_cdev) /* See trace_create_cpu_file() */
3828                 return (long)inode->i_cdev - 1;
3829         return RING_BUFFER_ALL_CPUS;
3830 }
3831
3832 static const struct seq_operations tracer_seq_ops = {
3833         .start          = s_start,
3834         .next           = s_next,
3835         .stop           = s_stop,
3836         .show           = s_show,
3837 };
3838
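     /*
      * Set up the iterator used to read the "trace" file: copy the
      * current tracer, select the main or max buffer, stop tracing
      * unless a snapshot is being opened, and prepare per-cpu ring
      * buffer read iterators.
      */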
3839 static struct trace_iterator *
3840 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3841 {
3842         struct trace_array *tr = inode->i_private;
3843         struct trace_iterator *iter;
3844         int cpu;
3845
3846         if (tracing_disabled)
3847                 return ERR_PTR(-ENODEV);
3848
3849         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3850         if (!iter)
3851                 return ERR_PTR(-ENOMEM);
3852
3853         iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3854                                     GFP_KERNEL);
3855         if (!iter->buffer_iter)
3856                 goto release;
3857
3858         /*
3859          * We make a copy of the current tracer to avoid concurrent
3860          * changes on it while we are reading.
3861          */
3862         mutex_lock(&trace_types_lock);
3863         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3864         if (!iter->trace)
3865                 goto fail;
3866
3867         *iter->trace = *tr->current_trace;
3868
3869         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3870                 goto fail;
3871
3872         iter->tr = tr;
3873
3874 #ifdef CONFIG_TRACER_MAX_TRACE
3875         /* Currently only the top directory has a snapshot */
3876         if (tr->current_trace->print_max || snapshot)
3877                 iter->trace_buffer = &tr->max_buffer;
3878         else
3879 #endif
3880                 iter->trace_buffer = &tr->trace_buffer;
3881         iter->snapshot = snapshot;
3882         iter->pos = -1;
3883         iter->cpu_file = tracing_get_cpu(inode);
3884         mutex_init(&iter->mutex);
3885
3886         /* Notify the tracer early; before we stop tracing. */
3887         if (iter->trace && iter->trace->open)
3888                 iter->trace->open(iter);
3889
3890         /* Annotate start of buffers if we had overruns */
3891         if (ring_buffer_overruns(iter->trace_buffer->buffer))
3892                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3893
3894         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3895         if (trace_clocks[tr->clock_id].in_ns)
3896                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3897
3898         /* stop the trace while dumping if we are not opening "snapshot" */
3899         if (!iter->snapshot)
3900                 tracing_stop_tr(tr);
3901
3902         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3903                 for_each_tracing_cpu(cpu) {
3904                         iter->buffer_iter[cpu] =
3905                                 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3906                 }
3907                 ring_buffer_read_prepare_sync();
3908                 for_each_tracing_cpu(cpu) {
3909                         ring_buffer_read_start(iter->buffer_iter[cpu]);
3910                         tracing_iter_reset(iter, cpu);
3911                 }
3912         } else {
3913                 cpu = iter->cpu_file;
3914                 iter->buffer_iter[cpu] =
3915                         ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3916                 ring_buffer_read_prepare_sync();
3917                 ring_buffer_read_start(iter->buffer_iter[cpu]);
3918                 tracing_iter_reset(iter, cpu);
3919         }
3920
3921         mutex_unlock(&trace_types_lock);
3922
3923         return iter;
3924
3925  fail:
3926         mutex_unlock(&trace_types_lock);
3927         kfree(iter->trace);
3928         kfree(iter->buffer_iter);
3929 release:
3930         seq_release_private(inode, file);
3931         return ERR_PTR(-ENOMEM);
3932 }
3933
3934 int tracing_open_generic(struct inode *inode, struct file *filp)
3935 {
3936         if (tracing_disabled)
3937                 return -ENODEV;
3938
3939         filp->private_data = inode->i_private;
3940         return 0;
3941 }
3942
3943 bool tracing_is_disabled(void)
3944 {
3945         return tracing_disabled ? true : false;
3946 }
3947
3948 /*
3949  * Open and update trace_array ref count.
3950  * Must have the current trace_array passed to it.
3951  */
3952 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3953 {
3954         struct trace_array *tr = inode->i_private;
3955
3956         if (tracing_disabled)
3957                 return -ENODEV;
3958
3959         if (trace_array_get(tr) < 0)
3960                 return -ENODEV;
3961
3962         filp->private_data = inode->i_private;
3963
3964         return 0;
3965 }
3966
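     /*
      * Release the "trace" file: tear down the per-cpu ring buffer
      * iterators, give the tracer a chance to close, restart tracing
      * if __tracing_open() stopped it, and free the iterator.
      * Write-only opens just drop the trace_array reference.
      */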
3967 static int tracing_release(struct inode *inode, struct file *file)
3968 {
3969         struct trace_array *tr = inode->i_private;
3970         struct seq_file *m = file->private_data;
3971         struct trace_iterator *iter;
3972         int cpu;
3973
3974         if (!(file->f_mode & FMODE_READ)) {
3975                 trace_array_put(tr);
3976                 return 0;
3977         }
3978
3979         /* Writes do not use seq_file */
3980         iter = m->private;
3981         mutex_lock(&trace_types_lock);
3982
3983         for_each_tracing_cpu(cpu) {
3984                 if (iter->buffer_iter[cpu])
3985                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
3986         }
3987
3988         if (iter->trace && iter->trace->close)
3989                 iter->trace->close(iter);
3990
3991         if (!iter->snapshot)
3992                 /* reenable tracing if it was previously enabled */
3993                 tracing_start_tr(tr);
3994
3995         __trace_array_put(tr);
3996
3997         mutex_unlock(&trace_types_lock);
3998
3999         mutex_destroy(&iter->mutex);
4000         free_cpumask_var(iter->started);
4001         kfree(iter->trace);
4002         kfree(iter->buffer_iter);
4003         seq_release_private(inode, file);
4004
4005         return 0;
4006 }
4007
4008 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4009 {
4010         struct trace_array *tr = inode->i_private;
4011
4012         trace_array_put(tr);
4013         return 0;
4014 }
4015
4016 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4017 {
4018         struct trace_array *tr = inode->i_private;
4019
4020         trace_array_put(tr);
4021
4022         return single_release(inode, file);
4023 }
4024
4025 static int tracing_open(struct inode *inode, struct file *file)
4026 {
4027         struct trace_array *tr = inode->i_private;
4028         struct trace_iterator *iter;
4029         int ret = 0;
4030
4031         if (trace_array_get(tr) < 0)
4032                 return -ENODEV;
4033
4034         /* If this file was open for write, then erase contents */
4035         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4036                 int cpu = tracing_get_cpu(inode);
4037                 struct trace_buffer *trace_buf = &tr->trace_buffer;
4038
4039 #ifdef CONFIG_TRACER_MAX_TRACE
4040                 if (tr->current_trace->print_max)
4041                         trace_buf = &tr->max_buffer;
4042 #endif
4043
4044                 if (cpu == RING_BUFFER_ALL_CPUS)
4045                         tracing_reset_online_cpus(trace_buf);
4046                 else
4047                         tracing_reset(trace_buf, cpu);
4048         }
4049
4050         if (file->f_mode & FMODE_READ) {
4051                 iter = __tracing_open(inode, file, false);
4052                 if (IS_ERR(iter))
4053                         ret = PTR_ERR(iter);
4054                 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4055                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
4056         }
4057
4058         if (ret < 0)
4059                 trace_array_put(tr);
4060
4061         return ret;
4062 }
4063
4064 /*
4065  * Some tracers are not suitable for instance buffers.
4066  * A tracer is always available for the global array (toplevel)
4067  * or if it explicitly states that it is.
4068  */
4069 static bool
4070 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4071 {
4072         return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4073 }
4074
4075 /* Find the next tracer that this trace array may use */
4076 static struct tracer *
4077 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4078 {
4079         while (t && !trace_ok_for_array(t, tr))
4080                 t = t->next;
4081
4082         return t;
4083 }
4084
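     /*
      * seq_file iteration over the registered tracers (backing the
      * "available_tracers" file), skipping tracers that cannot be
      * used with this trace array.
      */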
4085 static void *
4086 t_next(struct seq_file *m, void *v, loff_t *pos)
4087 {
4088         struct trace_array *tr = m->private;
4089         struct tracer *t = v;
4090
4091         (*pos)++;
4092
4093         if (t)
4094                 t = get_tracer_for_array(tr, t->next);
4095
4096         return t;
4097 }
4098
4099 static void *t_start(struct seq_file *m, loff_t *pos)
4100 {
4101         struct trace_array *tr = m->private;
4102         struct tracer *t;
4103         loff_t l = 0;
4104
4105         mutex_lock(&trace_types_lock);
4106
4107         t = get_tracer_for_array(tr, trace_types);
4108         for (; t && l < *pos; t = t_next(m, t, &l))
4109                 ;
4110
4111         return t;
4112 }
4113
4114 static void t_stop(struct seq_file *m, void *p)
4115 {
4116         mutex_unlock(&trace_types_lock);
4117 }
4118
4119 static int t_show(struct seq_file *m, void *v)
4120 {
4121         struct tracer *t = v;
4122
4123         if (!t)
4124                 return 0;
4125
4126         seq_puts(m, t->name);
4127         if (t->next)
4128                 seq_putc(m, ' ');
4129         else
4130                 seq_putc(m, '\n');
4131
4132         return 0;
4133 }
4134
4135 static const struct seq_operations show_traces_seq_ops = {
4136         .start          = t_start,
4137         .next           = t_next,
4138         .stop           = t_stop,
4139         .show           = t_show,
4140 };
4141
4142 static int show_traces_open(struct inode *inode, struct file *file)
4143 {
4144         struct trace_array *tr = inode->i_private;
4145         struct seq_file *m;
4146         int ret;
4147
4148         if (tracing_disabled)
4149                 return -ENODEV;
4150
4151         ret = seq_open(file, &show_traces_seq_ops);
4152         if (ret)
4153                 return ret;
4154
4155         m = file->private_data;
4156         m->private = tr;
4157
4158         return 0;
4159 }
4160
4161 static ssize_t
4162 tracing_write_stub(struct file *filp, const char __user *ubuf,
4163                    size_t count, loff_t *ppos)
4164 {
4165         return count;
4166 }
4167
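     /*
      * llseek for the trace files: readers go through seq_lseek(),
      * while write-only opens (which do not use seq_file) simply
      * reset f_pos.
      */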
4168 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4169 {
4170         int ret;
4171
4172         if (file->f_mode & FMODE_READ)
4173                 ret = seq_lseek(file, offset, whence);
4174         else
4175                 file->f_pos = ret = 0;
4176
4177         return ret;
4178 }
4179
4180 static const struct file_operations tracing_fops = {
4181         .open           = tracing_open,
4182         .read           = seq_read,
4183         .write          = tracing_write_stub,
4184         .llseek         = tracing_lseek,
4185         .release        = tracing_release,
4186 };
4187
4188 static const struct file_operations show_traces_fops = {
4189         .open           = show_traces_open,
4190         .read           = seq_read,
4191         .release        = seq_release,
4192         .llseek         = seq_lseek,
4193 };
4194
4195 static ssize_t
4196 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4197                      size_t count, loff_t *ppos)
4198 {
4199         struct trace_array *tr = file_inode(filp)->i_private;
4200         char *mask_str;
4201         int len;
4202
4203         len = snprintf(NULL, 0, "%*pb\n",
4204                        cpumask_pr_args(tr->tracing_cpumask)) + 1;
4205         mask_str = kmalloc(len, GFP_KERNEL);
4206         if (!mask_str)
4207                 return -ENOMEM;
4208
4209         len = snprintf(mask_str, len, "%*pb\n",
4210                        cpumask_pr_args(tr->tracing_cpumask));
4211         if (len >= count) {
4212                 count = -EINVAL;
4213                 goto out_err;
4214         }
4215         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4216
4217 out_err:
4218         kfree(mask_str);
4219
4220         return count;
4221 }
4222
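     /*
      * Write handler for "tracing_cpumask": parse the user-supplied
      * mask and, for every CPU whose bit changes, enable or disable
      * recording on that CPU's ring buffer while holding max_lock
      * with interrupts disabled.
      */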
4223 static ssize_t
4224 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4225                       size_t count, loff_t *ppos)
4226 {
4227         struct trace_array *tr = file_inode(filp)->i_private;
4228         cpumask_var_t tracing_cpumask_new;
4229         int err, cpu;
4230
4231         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4232                 return -ENOMEM;
4233
4234         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4235         if (err)
4236                 goto err_unlock;
4237
4238         local_irq_disable();
4239         arch_spin_lock(&tr->max_lock);
4240         for_each_tracing_cpu(cpu) {
4241                 /*
4242                  * Increase/decrease the disabled counter if we are
4243                  * about to flip a bit in the cpumask:
4244                  */
4245                 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4246                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4247                         atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4248                         ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4249                 }
4250                 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4251                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4252                         atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4253                         ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4254                 }
4255         }
4256         arch_spin_unlock(&tr->max_lock);
4257         local_irq_enable();
4258
4259         cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4260         free_cpumask_var(tracing_cpumask_new);
4261
4262         return count;
4263
4264 err_unlock:
4265         free_cpumask_var(tracing_cpumask_new);
4266
4267         return err;
4268 }
4269
4270 static const struct file_operations tracing_cpumask_fops = {
4271         .open           = tracing_open_generic_tr,
4272         .read           = tracing_cpumask_read,
4273         .write          = tracing_cpumask_write,
4274         .release        = tracing_release_generic_tr,
4275         .llseek         = generic_file_llseek,
4276 };
4277
4278 static int tracing_trace_options_show(struct seq_file *m, void *v)
4279 {
4280         struct tracer_opt *trace_opts;
4281         struct trace_array *tr = m->private;
4282         u32 tracer_flags;
4283         int i;
4284
4285         mutex_lock(&trace_types_lock);
4286         tracer_flags = tr->current_trace->flags->val;
4287         trace_opts = tr->current_trace->flags->opts;
4288
4289         for (i = 0; trace_options[i]; i++) {
4290                 if (tr->trace_flags & (1 << i))
4291                         seq_printf(m, "%s\n", trace_options[i]);
4292                 else
4293                         seq_printf(m, "no%s\n", trace_options[i]);
4294         }
4295
4296         for (i = 0; trace_opts[i].name; i++) {
4297                 if (tracer_flags & trace_opts[i].bit)
4298                         seq_printf(m, "%s\n", trace_opts[i].name);
4299                 else
4300                         seq_printf(m, "no%s\n", trace_opts[i].name);
4301         }
4302         mutex_unlock(&trace_types_lock);
4303
4304         return 0;
4305 }
4306
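     /*
      * Set or clear a single tracer-specific option bit, letting the
      * tracer's set_flag() callback reject the change first.
      */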
4307 static int __set_tracer_option(struct trace_array *tr,
4308                                struct tracer_flags *tracer_flags,
4309                                struct tracer_opt *opts, int neg)
4310 {
4311         struct tracer *trace = tracer_flags->trace;
4312         int ret;
4313
4314         ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4315         if (ret)
4316                 return ret;
4317
4318         if (neg)
4319                 tracer_flags->val &= ~opts->bit;
4320         else
4321                 tracer_flags->val |= opts->bit;
4322         return 0;
4323 }
4324
4325 /* Try to assign a tracer specific option */
4326 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4327 {
4328         struct tracer *trace = tr->current_trace;
4329         struct tracer_flags *tracer_flags = trace->flags;
4330         struct tracer_opt *opts = NULL;
4331         int i;
4332
4333         for (i = 0; tracer_flags->opts[i].name; i++) {
4334                 opts = &tracer_flags->opts[i];
4335
4336                 if (strcmp(cmp, opts->name) == 0)
4337                         return __set_tracer_option(tr, trace->flags, opts, neg);
4338         }
4339
4340         return -EINVAL;
4341 }
4342
4343 /* Some tracers require overwrite to stay enabled */
4344 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4345 {
4346         if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4347                 return -1;
4348
4349         return 0;
4350 }
4351
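     /*
      * Set or clear one of the core trace_flags bits for @tr and apply
      * its side effects (cmdline/tgid recording, fork following, ring
      * buffer overwrite mode, trace_printk enablement).
      */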
4352 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4353 {
4354         /* do nothing if flag is already set */
4355         if (!!(tr->trace_flags & mask) == !!enabled)
4356                 return 0;
4357
4358         /* Give the tracer a chance to approve the change */
4359         if (tr->current_trace->flag_changed)
4360                 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4361                         return -EINVAL;
4362
4363         if (enabled)
4364                 tr->trace_flags |= mask;
4365         else
4366                 tr->trace_flags &= ~mask;
4367
4368         if (mask == TRACE_ITER_RECORD_CMD)
4369                 trace_event_enable_cmd_record(enabled);
4370
4371         if (mask == TRACE_ITER_RECORD_TGID) {
4372                 if (!tgid_map)
4373                         tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4374                                            sizeof(*tgid_map),
4375                                            GFP_KERNEL);
4376                 if (!tgid_map) {
4377                         tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4378                         return -ENOMEM;
4379                 }
4380
4381                 trace_event_enable_tgid_record(enabled);
4382         }
4383
4384         if (mask == TRACE_ITER_EVENT_FORK)
4385                 trace_event_follow_fork(tr, enabled);
4386
4387         if (mask == TRACE_ITER_FUNC_FORK)
4388                 ftrace_pid_follow_fork(tr, enabled);
4389
4390         if (mask == TRACE_ITER_OVERWRITE) {
4391                 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4392 #ifdef CONFIG_TRACER_MAX_TRACE
4393                 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4394 #endif
4395         }
4396
4397         if (mask == TRACE_ITER_PRINTK) {
4398                 trace_printk_start_stop_comm(enabled);
4399                 trace_printk_control(enabled);
4400         }
4401
4402         return 0;
4403 }
4404
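     /*
      * Apply a single option token, optionally prefixed with "no" to
      * negate it.  The core trace options are tried first, then the
      * current tracer's own options.
      */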
4405 static int trace_set_options(struct trace_array *tr, char *option)
4406 {
4407         char *cmp;
4408         int neg = 0;
4409         int ret;
4410         size_t orig_len = strlen(option);
4411         int len;
4412
4413         cmp = strstrip(option);
4414
4415         len = str_has_prefix(cmp, "no");
4416         if (len)
4417                 neg = 1;
4418
4419         cmp += len;
4420
4421         mutex_lock(&trace_types_lock);
4422
4423         ret = match_string(trace_options, -1, cmp);
4424         /* If no option could be set, test the specific tracer options */
4425         if (ret < 0)
4426                 ret = set_tracer_option(tr, cmp, neg);
4427         else
4428                 ret = set_tracer_flag(tr, 1 << ret, !neg);
4429
4430         mutex_unlock(&trace_types_lock);
4431
4432         /*
4433          * If the first trailing whitespace is replaced with '\0' by strstrip,
4434          * turn it back into a space.
4435          */
4436         if (orig_len > strlen(option))
4437                 option[strlen(option)] = ' ';
4438
4439         return ret;
4440 }
4441
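     /*
      * Apply the comma-separated trace options saved from the kernel
      * command line (trace_boot_options_buf) to the global trace array.
      */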
4442 static void __init apply_trace_boot_options(void)
4443 {
4444         char *buf = trace_boot_options_buf;
4445         char *option;
4446
4447         while (true) {
4448                 option = strsep(&buf, ",");
4449
4450                 if (!option)
4451                         break;
4452
4453                 if (*option)
4454                         trace_set_options(&global_trace, option);
4455
4456                 /* Put back the comma to allow this to be called again */
4457                 if (buf)
4458                         *(buf - 1) = ',';
4459         }
4460 }
4461
4462 static ssize_t
4463 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4464                         size_t cnt, loff_t *ppos)
4465 {
4466         struct seq_file *m = filp->private_data;
4467         struct trace_array *tr = m->private;
4468         char buf[64];
4469         int ret;
4470
4471         if (cnt >= sizeof(buf))
4472                 return -EINVAL;
4473
4474         if (copy_from_user(buf, ubuf, cnt))
4475                 return -EFAULT;
4476
4477         buf[cnt] = 0;
4478
4479         ret = trace_set_options(tr, buf);
4480         if (ret < 0)
4481                 return ret;
4482
4483         *ppos += cnt;
4484
4485         return cnt;
4486 }
4487
4488 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4489 {
4490         struct trace_array *tr = inode->i_private;
4491         int ret;
4492
4493         if (tracing_disabled)
4494                 return -ENODEV;
4495
4496         if (trace_array_get(tr) < 0)
4497                 return -ENODEV;
4498
4499         ret = single_open(file, tracing_trace_options_show, inode->i_private);
4500         if (ret < 0)
4501                 trace_array_put(tr);
4502
4503         return ret;
4504 }
4505
4506 static const struct file_operations tracing_iter_fops = {
4507         .open           = tracing_trace_options_open,
4508         .read           = seq_read,
4509         .llseek         = seq_lseek,
4510         .release        = tracing_single_release_tr,
4511         .write          = tracing_trace_options_write,
4512 };
4513
4514 static const char readme_msg[] =
4515         "tracing mini-HOWTO:\n\n"
4516         "# echo 0 > tracing_on : quick way to disable tracing\n"
4517         "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4518         " Important files:\n"
4519         "  trace\t\t\t- The static contents of the buffer\n"
4520         "\t\t\t  To clear the buffer write into this file: echo > trace\n"
4521         "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4522         "  current_tracer\t- function and latency tracers\n"
4523         "  available_tracers\t- list of configured tracers for current_tracer\n"
4524         "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
4525         "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
4526         "  trace_clock\t\t-change the clock used to order events\n"
4527         "       local:   Per cpu clock but may not be synced across CPUs\n"
4528         "      global:   Synced across CPUs but slows tracing down.\n"
4529         "     counter:   Not a clock, but just an increment\n"
4530         "      uptime:   Jiffy counter from time of boot\n"
4531         "        perf:   Same clock that perf events use\n"
4532 #ifdef CONFIG_X86_64
4533         "     x86-tsc:   TSC cycle counter\n"
4534 #endif
4535         "\n  timestamp_mode\t-view the mode used to timestamp events\n"
4536         "       delta:   Delta difference against a buffer-wide timestamp\n"
4537         "    absolute:   Absolute (standalone) timestamp\n"
4538         "\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4539         "\n  trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4540         "  tracing_cpumask\t- Limit which CPUs to trace\n"
4541         "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4542         "\t\t\t  Remove sub-buffer with rmdir\n"
4543         "  trace_options\t\t- Set format or modify how tracing happens\n"
4544         "\t\t\t  Disable an option by adding a suffix 'no' to the\n"
4545         "\t\t\t  option name\n"
4546         "  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4547 #ifdef CONFIG_DYNAMIC_FTRACE
4548         "\n  available_filter_functions - list of functions that can be filtered on\n"
4549         "  set_ftrace_filter\t- echo function name in here to only trace these\n"
4550         "\t\t\t  functions\n"
4551         "\t     accepts: func_full_name or glob-matching-pattern\n"
4552         "\t     modules: Can select a group via module\n"
4553         "\t      Format: :mod:<module-name>\n"
4554         "\t     example: echo :mod:ext3 > set_ftrace_filter\n"
4555         "\t    triggers: a command to perform when function is hit\n"
4556         "\t      Format: <function>:<trigger>[:count]\n"
4557         "\t     trigger: traceon, traceoff\n"
4558         "\t\t      enable_event:<system>:<event>\n"
4559         "\t\t      disable_event:<system>:<event>\n"
4560 #ifdef CONFIG_STACKTRACE
4561         "\t\t      stacktrace\n"
4562 #endif
4563 #ifdef CONFIG_TRACER_SNAPSHOT
4564         "\t\t      snapshot\n"
4565 #endif
4566         "\t\t      dump\n"
4567         "\t\t      cpudump\n"
4568         "\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
4569         "\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
4570         "\t     The first one will disable tracing every time do_fault is hit\n"
4571         "\t     The second will disable tracing at most 3 times when do_trap is hit\n"
4572         "\t       The first time do trap is hit and it disables tracing, the\n"
4573         "\t       counter will decrement to 2. If tracing is already disabled,\n"
4574         "\t       the counter will not decrement. It only decrements when the\n"
4575         "\t       trigger did work\n"
4576         "\t     To remove trigger without count:\n"
4577         "\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
4578         "\t     To remove trigger with a count:\n"
4579         "\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4580         "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
4581         "\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4582         "\t    modules: Can select a group via module command :mod:\n"
4583         "\t    Does not accept triggers\n"
4584 #endif /* CONFIG_DYNAMIC_FTRACE */
4585 #ifdef CONFIG_FUNCTION_TRACER
4586         "  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4587         "\t\t    (function)\n"
4588 #endif
4589 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4590         "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4591         "  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4592         "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4593 #endif
4594 #ifdef CONFIG_TRACER_SNAPSHOT
4595         "\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
4596         "\t\t\t  snapshot buffer. Read the contents for more\n"
4597         "\t\t\t  information\n"
4598 #endif
4599 #ifdef CONFIG_STACK_TRACER
4600         "  stack_trace\t\t- Shows the max stack trace when active\n"
4601         "  stack_max_size\t- Shows current max stack size that was traced\n"
4602         "\t\t\t  Write into this file to reset the max size (trigger a\n"
4603         "\t\t\t  new trace)\n"
4604 #ifdef CONFIG_DYNAMIC_FTRACE
4605         "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4606         "\t\t\t  traces\n"
4607 #endif
4608 #endif /* CONFIG_STACK_TRACER */
4609 #ifdef CONFIG_DYNAMIC_EVENTS
4610         "  dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4611         "\t\t\t  Write into this file to define/undefine new trace events.\n"
4612 #endif
4613 #ifdef CONFIG_KPROBE_EVENTS
4614         "  kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4615         "\t\t\t  Write into this file to define/undefine new trace events.\n"
4616 #endif
4617 #ifdef CONFIG_UPROBE_EVENTS
4618         "  uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4619         "\t\t\t  Write into this file to define/undefine new trace events.\n"
4620 #endif
4621 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4622         "\t  accepts: event-definitions (one definition per line)\n"
4623         "\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
4624         "\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4625 #ifdef CONFIG_HIST_TRIGGERS
4626         "\t           s:[synthetic/]<event> <field> [<field>]\n"
4627 #endif
4628         "\t           -:[<group>/]<event>\n"
4629 #ifdef CONFIG_KPROBE_EVENTS
4630         "\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4631   "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4632 #endif
4633 #ifdef CONFIG_UPROBE_EVENTS
4634   "   place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
4635 #endif
4636         "\t     args: <name>=fetcharg[:type]\n"
4637         "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4638 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4639         "\t           $stack<index>, $stack, $retval, $comm, $arg<N>\n"
4640 #else
4641         "\t           $stack<index>, $stack, $retval, $comm\n"
4642 #endif
4643         "\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
4644         "\t           b<bit-width>@<bit-offset>/<container-size>,\n"
4645         "\t           <type>\\[<array-size>\\]\n"
4646 #ifdef CONFIG_HIST_TRIGGERS
4647         "\t    field: <stype> <name>;\n"
4648         "\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4649         "\t           [unsigned] char/int/long\n"
4650 #endif
4651 #endif
4652         "  events/\t\t- Directory containing all trace event subsystems:\n"
4653         "      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4654         "  events/<system>/\t- Directory containing all trace events for <system>:\n"
4655         "      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4656         "\t\t\t  events\n"
4657         "      filter\t\t- If set, only events passing filter are traced\n"
4658         "  events/<system>/<event>/\t- Directory containing control files for\n"
4659         "\t\t\t  <event>:\n"
4660         "      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4661         "      filter\t\t- If set, only events passing filter are traced\n"
4662         "      trigger\t\t- If set, a command to perform when event is hit\n"
4663         "\t    Format: <trigger>[:count][if <filter>]\n"
4664         "\t   trigger: traceon, traceoff\n"
4665         "\t            enable_event:<system>:<event>\n"
4666         "\t            disable_event:<system>:<event>\n"
4667 #ifdef CONFIG_HIST_TRIGGERS
4668         "\t            enable_hist:<system>:<event>\n"
4669         "\t            disable_hist:<system>:<event>\n"
4670 #endif
4671 #ifdef CONFIG_STACKTRACE
4672         "\t\t    stacktrace\n"
4673 #endif
4674 #ifdef CONFIG_TRACER_SNAPSHOT
4675         "\t\t    snapshot\n"
4676 #endif
4677 #ifdef CONFIG_HIST_TRIGGERS
4678         "\t\t    hist (see below)\n"
4679 #endif
4680         "\t   example: echo traceoff > events/block/block_unplug/trigger\n"
4681         "\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
4682         "\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4683         "\t                  events/block/block_unplug/trigger\n"
4684         "\t   The first disables tracing every time block_unplug is hit.\n"
4685         "\t   The second disables tracing the first 3 times block_unplug is hit.\n"
4686         "\t   The third enables the kmalloc event the first 3 times block_unplug\n"
4687         "\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4688         "\t   Like function triggers, the counter is only decremented if it\n"
4689         "\t    enabled or disabled tracing.\n"
4690         "\t   To remove a trigger without a count:\n"
4691         "\t     echo '!<trigger> > <system>/<event>/trigger\n"
4692         "\t   To remove a trigger with a count:\n"
4693         "\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
4694         "\t   Filters can be ignored when removing a trigger.\n"
4695 #ifdef CONFIG_HIST_TRIGGERS
4696         "      hist trigger\t- If set, event hits are aggregated into a hash table\n"
4697         "\t    Format: hist:keys=<field1[,field2,...]>\n"
4698         "\t            [:values=<field1[,field2,...]>]\n"
4699         "\t            [:sort=<field1[,field2,...]>]\n"
4700         "\t            [:size=#entries]\n"
4701         "\t            [:pause][:continue][:clear]\n"
4702         "\t            [:name=histname1]\n"
4703         "\t            [if <filter>]\n\n"
4704         "\t    When a matching event is hit, an entry is added to a hash\n"
4705         "\t    table using the key(s) and value(s) named, and the value of a\n"
4706         "\t    sum called 'hitcount' is incremented.  Keys and values\n"
4707         "\t    correspond to fields in the event's format description.  Keys\n"
4708         "\t    can be any field, or the special string 'stacktrace'.\n"
4709         "\t    Compound keys consisting of up to two fields can be specified\n"
4710         "\t    by the 'keys' keyword.  Values must correspond to numeric\n"
4711         "\t    fields.  Sort keys consisting of up to two fields can be\n"
4712         "\t    specified using the 'sort' keyword.  The sort direction can\n"
4713         "\t    be modified by appending '.descending' or '.ascending' to a\n"
4714         "\t    sort field.  The 'size' parameter can be used to specify more\n"
4715         "\t    or fewer than the default 2048 entries for the hashtable size.\n"
4716         "\t    If a hist trigger is given a name using the 'name' parameter,\n"
4717         "\t    its histogram data will be shared with other triggers of the\n"
4718         "\t    same name, and trigger hits will update this common data.\n\n"
4719         "\t    Reading the 'hist' file for the event will dump the hash\n"
4720         "\t    table in its entirety to stdout.  If there are multiple hist\n"
4721         "\t    triggers attached to an event, there will be a table for each\n"
4722         "\t    trigger in the output.  The table displayed for a named\n"
4723         "\t    trigger will be the same as any other instance having the\n"
4724         "\t    same name.  The default format used to display a given field\n"
4725         "\t    can be modified by appending any of the following modifiers\n"
4726         "\t    to the field name, as applicable:\n\n"
4727         "\t            .hex        display a number as a hex value\n"
4728         "\t            .sym        display an address as a symbol\n"
4729         "\t            .sym-offset display an address as a symbol and offset\n"
4730         "\t            .execname   display a common_pid as a program name\n"
4731         "\t            .syscall    display a syscall id as a syscall name\n"
4732         "\t            .log2       display log2 value rather than raw number\n"
4733         "\t            .usecs      display a common_timestamp in microseconds\n\n"
4734         "\t    The 'pause' parameter can be used to pause an existing hist\n"
4735         "\t    trigger or to start a hist trigger but not log any events\n"
4736         "\t    until told to do so.  'continue' can be used to start or\n"
4737         "\t    restart a paused hist trigger.\n\n"
4738         "\t    The 'clear' parameter will clear the contents of a running\n"
4739         "\t    hist trigger and leave its current paused/active state\n"
4740         "\t    unchanged.\n\n"
4741         "\t    The enable_hist and disable_hist triggers can be used to\n"
4742         "\t    have one event conditionally start and stop another event's\n"
4743         "\t    already-attached hist trigger.  The syntax is analogous to\n"
4744         "\t    the enable_event and disable_event triggers.\n"
4745 #endif
4746 ;
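
/*
 * Illustration only (not part of the help text above): assuming tracefs is
 * mounted at /sys/kernel/tracing, a hist trigger following the format
 * described in readme_msg could be attached and read back like this:
 *
 *   echo 'hist:keys=common_pid.execname:values=bytes_req:sort=bytes_req.descending' > \
 *       events/kmem/kmalloc/trigger
 *   cat events/kmem/kmalloc/hist
 *
 * The 'bytes_req' field is specific to the kmem:kmalloc event; any numeric
 * field of the chosen event works as a value or sort key.
 */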
4747
4748 static ssize_t
4749 tracing_readme_read(struct file *filp, char __user *ubuf,
4750                        size_t cnt, loff_t *ppos)
4751 {
4752         return simple_read_from_buffer(ubuf, cnt, ppos,
4753                                         readme_msg, strlen(readme_msg));
4754 }
4755
4756 static const struct file_operations tracing_readme_fops = {
4757         .open           = tracing_open_generic,
4758         .read           = tracing_readme_read,
4759         .llseek         = generic_file_llseek,
4760 };
4761
4762 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4763 {
4764         int *ptr = v;
4765
4766         if (*pos || m->count)
4767                 ptr++;
4768
4769         (*pos)++;
4770
4771         for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4772                 if (trace_find_tgid(*ptr))
4773                         return ptr;
4774         }
4775
4776         return NULL;
4777 }
4778
4779 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4780 {
4781         void *v;
4782         loff_t l = 0;
4783
4784         if (!tgid_map)
4785                 return NULL;
4786
4787         v = &tgid_map[0];
4788         while (l <= *pos) {
4789                 v = saved_tgids_next(m, v, &l);
4790                 if (!v)
4791                         return NULL;
4792         }
4793
4794         return v;
4795 }
4796
4797 static void saved_tgids_stop(struct seq_file *m, void *v)
4798 {
4799 }
4800
4801 static int saved_tgids_show(struct seq_file *m, void *v)
4802 {
4803         int pid = (int *)v - tgid_map;
4804
4805         seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4806         return 0;
4807 }
4808
4809 static const struct seq_operations tracing_saved_tgids_seq_ops = {
4810         .start          = saved_tgids_start,
4811         .stop           = saved_tgids_stop,
4812         .next           = saved_tgids_next,
4813         .show           = saved_tgids_show,
4814 };
4815
4816 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4817 {
4818         if (tracing_disabled)
4819                 return -ENODEV;
4820
4821         return seq_open(filp, &tracing_saved_tgids_seq_ops);
4822 }
4823
4824
4825 static const struct file_operations tracing_saved_tgids_fops = {
4826         .open           = tracing_saved_tgids_open,
4827         .read           = seq_read,
4828         .llseek         = seq_lseek,
4829         .release        = seq_release,
4830 };
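
/*
 * Usage sketch (illustrative): the saved_tgids file backed by
 * tracing_saved_tgids_fops emits one "<pid> <tgid>" pair per line, as
 * produced by saved_tgids_show() above.  The tgid_map is only populated
 * while the record-tgid option is set, e.g.:
 *
 *   echo 1 > /sys/kernel/tracing/options/record-tgid
 *   cat /sys/kernel/tracing/saved_tgids
 */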
4831
4832 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4833 {
4834         unsigned int *ptr = v;
4835
4836         if (*pos || m->count)
4837                 ptr++;
4838
4839         (*pos)++;
4840
4841         for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4842              ptr++) {
4843                 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4844                         continue;
4845
4846                 return ptr;
4847         }
4848
4849         return NULL;
4850 }
4851
4852 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4853 {
4854         void *v;
4855         loff_t l = 0;
4856
4857         preempt_disable();
4858         arch_spin_lock(&trace_cmdline_lock);
4859
4860         v = &savedcmd->map_cmdline_to_pid[0];
4861         while (l <= *pos) {
4862                 v = saved_cmdlines_next(m, v, &l);
4863                 if (!v)
4864                         return NULL;
4865         }
4866
4867         return v;
4868 }
4869
4870 static void saved_cmdlines_stop(struct seq_file *m, void *v)
4871 {
4872         arch_spin_unlock(&trace_cmdline_lock);
4873         preempt_enable();
4874 }
4875
4876 static int saved_cmdlines_show(struct seq_file *m, void *v)
4877 {
4878         char buf[TASK_COMM_LEN];
4879         unsigned int *pid = v;
4880
4881         __trace_find_cmdline(*pid, buf);
4882         seq_printf(m, "%d %s\n", *pid, buf);
4883         return 0;
4884 }
4885
4886 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4887         .start          = saved_cmdlines_start,
4888         .next           = saved_cmdlines_next,
4889         .stop           = saved_cmdlines_stop,
4890         .show           = saved_cmdlines_show,
4891 };
4892
4893 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4894 {
4895         if (tracing_disabled)
4896                 return -ENODEV;
4897
4898         return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
4899 }
4900
4901 static const struct file_operations tracing_saved_cmdlines_fops = {
4902         .open           = tracing_saved_cmdlines_open,
4903         .read           = seq_read,
4904         .llseek         = seq_lseek,
4905         .release        = seq_release,
4906 };
4907
4908 static ssize_t
4909 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4910                                  size_t cnt, loff_t *ppos)
4911 {
4912         char buf[64];
4913         int r;
4914
4915         arch_spin_lock(&trace_cmdline_lock);
4916         r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
4917         arch_spin_unlock(&trace_cmdline_lock);
4918
4919         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4920 }
4921
4922 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4923 {
4924         kfree(s->saved_cmdlines);
4925         kfree(s->map_cmdline_to_pid);
4926         kfree(s);
4927 }
4928
4929 static int tracing_resize_saved_cmdlines(unsigned int val)
4930 {
4931         struct saved_cmdlines_buffer *s, *savedcmd_temp;
4932
4933         s = kmalloc(sizeof(*s), GFP_KERNEL);
4934         if (!s)
4935                 return -ENOMEM;
4936
4937         if (allocate_cmdlines_buffer(val, s) < 0) {
4938                 kfree(s);
4939                 return -ENOMEM;
4940         }
4941
4942         arch_spin_lock(&trace_cmdline_lock);
4943         savedcmd_temp = savedcmd;
4944         savedcmd = s;
4945         arch_spin_unlock(&trace_cmdline_lock);
4946         free_saved_cmdlines_buffer(savedcmd_temp);
4947
4948         return 0;
4949 }
4950
4951 static ssize_t
4952 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4953                                   size_t cnt, loff_t *ppos)
4954 {
4955         unsigned long val;
4956         int ret;
4957
4958         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4959         if (ret)
4960                 return ret;
4961
4962         /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4963         if (!val || val > PID_MAX_DEFAULT)
4964                 return -EINVAL;
4965
4966         ret = tracing_resize_saved_cmdlines((unsigned int)val);
4967         if (ret < 0)
4968                 return ret;
4969
4970         *ppos += cnt;
4971
4972         return cnt;
4973 }
4974
4975 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4976         .open           = tracing_open_generic,
4977         .read           = tracing_saved_cmdlines_size_read,
4978         .write          = tracing_saved_cmdlines_size_write,
4979 };
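
/*
 * Usage sketch (illustrative): saved_cmdlines maps recorded PIDs back to
 * their comm, and saved_cmdlines_size controls how many entries the map can
 * hold (resizing replaces the whole buffer via tracing_resize_saved_cmdlines()):
 *
 *   cat /sys/kernel/tracing/saved_cmdlines           # "<pid> <comm>" per line
 *   echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 *   cat /sys/kernel/tracing/saved_cmdlines_size      # -> 1024
 */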
4980
4981 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
4982 static union trace_eval_map_item *
4983 update_eval_map(union trace_eval_map_item *ptr)
4984 {
4985         if (!ptr->map.eval_string) {
4986                 if (ptr->tail.next) {
4987                         ptr = ptr->tail.next;
4988                         /* Set ptr to the next real item (skip head) */
4989                         ptr++;
4990                 } else
4991                         return NULL;
4992         }
4993         return ptr;
4994 }
4995
4996 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
4997 {
4998         union trace_eval_map_item *ptr = v;
4999
5000         /*
5001          * Paranoid! If ptr points to end, we don't want to increment past it.
5002          * This really should never happen.
5003          */
5004         ptr = update_eval_map(ptr);
5005         if (WARN_ON_ONCE(!ptr))
5006                 return NULL;
5007
5008         ptr++;
5009
5010         (*pos)++;
5011
5012         ptr = update_eval_map(ptr);
5013
5014         return ptr;
5015 }
5016
5017 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5018 {
5019         union trace_eval_map_item *v;
5020         loff_t l = 0;
5021
5022         mutex_lock(&trace_eval_mutex);
5023
5024         v = trace_eval_maps;
5025         if (v)
5026                 v++;
5027
5028         while (v && l < *pos) {
5029                 v = eval_map_next(m, v, &l);
5030         }
5031
5032         return v;
5033 }
5034
5035 static void eval_map_stop(struct seq_file *m, void *v)
5036 {
5037         mutex_unlock(&trace_eval_mutex);
5038 }
5039
5040 static int eval_map_show(struct seq_file *m, void *v)
5041 {
5042         union trace_eval_map_item *ptr = v;
5043
5044         seq_printf(m, "%s %ld (%s)\n",
5045                    ptr->map.eval_string, ptr->map.eval_value,
5046                    ptr->map.system);
5047
5048         return 0;
5049 }
5050
5051 static const struct seq_operations tracing_eval_map_seq_ops = {
5052         .start          = eval_map_start,
5053         .next           = eval_map_next,
5054         .stop           = eval_map_stop,
5055         .show           = eval_map_show,
5056 };
5057
5058 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5059 {
5060         if (tracing_disabled)
5061                 return -ENODEV;
5062
5063         return seq_open(filp, &tracing_eval_map_seq_ops);
5064 }
5065
5066 static const struct file_operations tracing_eval_map_fops = {
5067         .open           = tracing_eval_map_open,
5068         .read           = seq_read,
5069         .llseek         = seq_lseek,
5070         .release        = seq_release,
5071 };
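
/*
 * Reading the eval_map file (illustrative): eval_map_show() prints one
 * "<eval string> <value> (<system>)" line per map entry, e.g. a line such
 * as "HI_SOFTIRQ 0 (irq)" for the irq events.  The exact entries depend on
 * which subsystems registered eval maps.
 */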
5072
5073 static inline union trace_eval_map_item *
5074 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5075 {
5076         /* Return tail of array given the head */
5077         return ptr + ptr->head.length + 1;
5078 }
5079
5080 static void
5081 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5082                            int len)
5083 {
5084         struct trace_eval_map **stop;
5085         struct trace_eval_map **map;
5086         union trace_eval_map_item *map_array;
5087         union trace_eval_map_item *ptr;
5088
5089         stop = start + len;
5090
5091         /*
5092          * The trace_eval_maps contains the map plus a head and tail item,
5093          * where the head holds the module and length of array, and the
5094          * tail holds a pointer to the next list.
5095          */
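        /*
         * Illustration only: for len == 2 the array allocated below ends up
         * laid out as
         *
         *   map_array[0]  head  (mod, length == 2)
         *   map_array[1]  map   == *start[0]
         *   map_array[2]  map   == *start[1]
         *   map_array[3]  tail  (zeroed by the final memset, so tail.next == NULL)
         */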
5096         map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5097         if (!map_array) {
5098                 pr_warn("Unable to allocate trace eval mapping\n");
5099                 return;
5100         }
5101
5102         mutex_lock(&trace_eval_mutex);
5103
5104         if (!trace_eval_maps)
5105                 trace_eval_maps = map_array;
5106         else {
5107                 ptr = trace_eval_maps;
5108                 for (;;) {
5109                         ptr = trace_eval_jmp_to_tail(ptr);
5110                         if (!ptr->tail.next)
5111                                 break;
5112                         ptr = ptr->tail.next;
5113
5114                 }
5115                 ptr->tail.next = map_array;
5116         }
5117         map_array->head.mod = mod;
5118         map_array->head.length = len;
5119         map_array++;
5120
5121         for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5122                 map_array->map = **map;
5123                 map_array++;
5124         }
5125         memset(map_array, 0, sizeof(*map_array));
5126
5127         mutex_unlock(&trace_eval_mutex);
5128 }
5129
5130 static void trace_create_eval_file(struct dentry *d_tracer)
5131 {
5132         trace_create_file("eval_map", 0444, d_tracer,
5133                           NULL, &tracing_eval_map_fops);
5134 }
5135
5136 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5137 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5138 static inline void trace_insert_eval_map_file(struct module *mod,
5139                               struct trace_eval_map **start, int len) { }
5140 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5141
5142 static void trace_insert_eval_map(struct module *mod,
5143                                   struct trace_eval_map **start, int len)
5144 {
5145         struct trace_eval_map **map;
5146
5147         if (len <= 0)
5148                 return;
5149
5150         map = start;
5151
5152         trace_event_eval_update(map, len);
5153
5154         trace_insert_eval_map_file(mod, start, len);
5155 }
5156
5157 static ssize_t
5158 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5159                        size_t cnt, loff_t *ppos)
5160 {
5161         struct trace_array *tr = filp->private_data;
5162         char buf[MAX_TRACER_SIZE+2];
5163         int r;
5164
5165         mutex_lock(&trace_types_lock);
5166         r = sprintf(buf, "%s\n", tr->current_trace->name);
5167         mutex_unlock(&trace_types_lock);
5168
5169         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5170 }
5171
5172 int tracer_init(struct tracer *t, struct trace_array *tr)
5173 {
5174         tracing_reset_online_cpus(&tr->trace_buffer);
5175         return t->init(tr);
5176 }
5177
5178 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5179 {
5180         int cpu;
5181
5182         for_each_tracing_cpu(cpu)
5183                 per_cpu_ptr(buf->data, cpu)->entries = val;
5184 }
5185
5186 #ifdef CONFIG_TRACER_MAX_TRACE
5187 /* resize @trace_buf to the size of @size_buf's entries */
5188 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5189                                         struct trace_buffer *size_buf, int cpu_id)
5190 {
5191         int cpu, ret = 0;
5192
5193         if (cpu_id == RING_BUFFER_ALL_CPUS) {
5194                 for_each_tracing_cpu(cpu) {
5195                         ret = ring_buffer_resize(trace_buf->buffer,
5196                                  per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5197                         if (ret < 0)
5198                                 break;
5199                         per_cpu_ptr(trace_buf->data, cpu)->entries =
5200                                 per_cpu_ptr(size_buf->data, cpu)->entries;
5201                 }
5202         } else {
5203                 ret = ring_buffer_resize(trace_buf->buffer,
5204                                  per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5205                 if (ret == 0)
5206                         per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5207                                 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5208         }
5209
5210         return ret;
5211 }
5212 #endif /* CONFIG_TRACER_MAX_TRACE */
5213
5214 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5215                                         unsigned long size, int cpu)
5216 {
5217         int ret;
5218
5219         /*
5220          * If kernel or user changes the size of the ring buffer
5221          * we use the size that was given, and we can forget about
5222          * expanding it later.
5223          */
5224         ring_buffer_expanded = true;
5225
5226         /* May be called before buffers are initialized */
5227         if (!tr->trace_buffer.buffer)
5228                 return 0;
5229
5230         ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5231         if (ret < 0)
5232                 return ret;
5233
5234 #ifdef CONFIG_TRACER_MAX_TRACE
5235         if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5236             !tr->current_trace->use_max_tr)
5237                 goto out;
5238
5239         ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5240         if (ret < 0) {
5241                 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5242                                                      &tr->trace_buffer, cpu);
5243                 if (r < 0) {
5244                         /*
5245                          * AARGH! We are left with different
5246                          * size max buffer!!!!
5247                          * The max buffer is our "snapshot" buffer.
5248                          * When a tracer needs a snapshot (one of the
5249                          * latency tracers), it swaps the max buffer
5250          * with the saved snapshot. We succeeded in updating
5251          * the size of the main buffer, but failed to
5252                          * update the size of the max buffer. But when we tried
5253                          * to reset the main buffer to the original size, we
5254                          * failed there too. This is very unlikely to
5255                          * happen, but if it does, warn and kill all
5256                          * tracing.
5257                          */
5258                         WARN_ON(1);
5259                         tracing_disabled = 1;
5260                 }
5261                 return ret;
5262         }
5263
5264         if (cpu == RING_BUFFER_ALL_CPUS)
5265                 set_buffer_entries(&tr->max_buffer, size);
5266         else
5267                 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5268
5269  out:
5270 #endif /* CONFIG_TRACER_MAX_TRACE */
5271
5272         if (cpu == RING_BUFFER_ALL_CPUS)
5273                 set_buffer_entries(&tr->trace_buffer, size);
5274         else
5275                 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5276
5277         return ret;
5278 }
5279
5280 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5281                                           unsigned long size, int cpu_id)
5282 {
5283         int ret = size;
5284
5285         mutex_lock(&trace_types_lock);
5286
5287         if (cpu_id != RING_BUFFER_ALL_CPUS) {
5288                 /* make sure this CPU is enabled in the mask */
5289                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5290                         ret = -EINVAL;
5291                         goto out;
5292                 }
5293         }
5294
5295         ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5296         if (ret < 0)
5297                 ret = -ENOMEM;
5298
5299 out:
5300         mutex_unlock(&trace_types_lock);
5301
5302         return ret;
5303 }
5304
5305
5306 /**
5307  * tracing_update_buffers - used by tracing facility to expand ring buffers
5308  *
5309  * To save memory when tracing is configured in but never used, the
5310  * ring buffers are set to a minimum size. But once a user starts to
5311  * use the tracing facility, they need to grow to their default
5312  * size.
5313  *
5314  * This function is to be called when a tracer is about to be used.
5315  */
5316 int tracing_update_buffers(void)
5317 {
5318         int ret = 0;
5319
5320         mutex_lock(&trace_types_lock);
5321         if (!ring_buffer_expanded)
5322                 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5323                                                 RING_BUFFER_ALL_CPUS);
5324         mutex_unlock(&trace_types_lock);
5325
5326         return ret;
5327 }
5328
5329 struct trace_option_dentry;
5330
5331 static void
5332 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5333
5334 /*
5335  * Used to clear out the tracer before deletion of an instance.
5336  * Must have trace_types_lock held.
5337  */
5338 static void tracing_set_nop(struct trace_array *tr)
5339 {
5340         if (tr->current_trace == &nop_trace)
5341                 return;
5342
5343         tr->current_trace->enabled--;
5344
5345         if (tr->current_trace->reset)
5346                 tr->current_trace->reset(tr);
5347
5348         tr->current_trace = &nop_trace;
5349 }
5350
5351 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5352 {
5353         /* Only enable if the directory has been created already. */
5354         if (!tr->dir)
5355                 return;
5356
5357         create_trace_option_files(tr, t);
5358 }
5359
5360 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5361 {
5362         struct tracer *t;
5363 #ifdef CONFIG_TRACER_MAX_TRACE
5364         bool had_max_tr;
5365 #endif
5366         int ret = 0;
5367
5368         mutex_lock(&trace_types_lock);
5369
5370         if (!ring_buffer_expanded) {
5371                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5372                                                 RING_BUFFER_ALL_CPUS);
5373                 if (ret < 0)
5374                         goto out;
5375                 ret = 0;
5376         }
5377
5378         for (t = trace_types; t; t = t->next) {
5379                 if (strcmp(t->name, buf) == 0)
5380                         break;
5381         }
5382         if (!t) {
5383                 ret = -EINVAL;
5384                 goto out;
5385         }
5386         if (t == tr->current_trace)
5387                 goto out;
5388
5389         /* Some tracers won't work on kernel command line */
5390         if (system_state < SYSTEM_RUNNING && t->noboot) {
5391                 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5392                         t->name);
5393                 goto out;
5394         }
5395
5396         /* Some tracers are only allowed for the top level buffer */
5397         if (!trace_ok_for_array(t, tr)) {
5398                 ret = -EINVAL;
5399                 goto out;
5400         }
5401
5402         /* If trace pipe files are being read, we can't change the tracer */
5403         if (tr->current_trace->ref) {
5404                 ret = -EBUSY;
5405                 goto out;
5406         }
5407
5408         trace_branch_disable();
5409
5410         tr->current_trace->enabled--;
5411
5412         if (tr->current_trace->reset)
5413                 tr->current_trace->reset(tr);
5414
5415         /* Current trace needs to be nop_trace before synchronize_rcu */
5416         tr->current_trace = &nop_trace;
5417
5418 #ifdef CONFIG_TRACER_MAX_TRACE
5419         had_max_tr = tr->allocated_snapshot;
5420
5421         if (had_max_tr && !t->use_max_tr) {
5422                 /*
5423                  * We need to make sure that the update_max_tr sees that
5424                  * current_trace changed to nop_trace to keep it from
5425                  * swapping the buffers after we resize it.
5426          * update_max_tr() is called with interrupts disabled,
5427          * so a synchronize_rcu() is sufficient.
5428                  */
5429                 synchronize_rcu();
5430                 free_snapshot(tr);
5431         }
5432 #endif
5433
5434 #ifdef CONFIG_TRACER_MAX_TRACE
5435         if (t->use_max_tr && !had_max_tr) {
5436                 ret = tracing_alloc_snapshot_instance(tr);
5437                 if (ret < 0)
5438                         goto out;
5439         }
5440 #endif
5441
5442         if (t->init) {
5443                 ret = tracer_init(t, tr);
5444                 if (ret)
5445                         goto out;
5446         }
5447
5448         tr->current_trace = t;
5449         tr->current_trace->enabled++;
5450         trace_branch_enable(tr);
5451  out:
5452         mutex_unlock(&trace_types_lock);
5453
5454         return ret;
5455 }
5456
5457 static ssize_t
5458 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5459                         size_t cnt, loff_t *ppos)
5460 {
5461         struct trace_array *tr = filp->private_data;
5462         char buf[MAX_TRACER_SIZE+1];
5463         int i;
5464         size_t ret;
5465         int err;
5466
5467         ret = cnt;
5468
5469         if (cnt > MAX_TRACER_SIZE)
5470                 cnt = MAX_TRACER_SIZE;
5471
5472         if (copy_from_user(buf, ubuf, cnt))
5473                 return -EFAULT;
5474
5475         buf[cnt] = 0;
5476
5477         /* strip ending whitespace. */
5478         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5479                 buf[i] = 0;
5480
5481         err = tracing_set_tracer(tr, buf);
5482         if (err)
5483                 return err;
5484
5485         *ppos += ret;
5486
5487         return ret;
5488 }
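
/*
 * Usage sketch (illustrative): the current_tracer file wired up to
 * tracing_set_trace_write() switches the active tracer by name.  Valid
 * names are whatever is listed in available_tracers, e.g.:
 *
 *   cat /sys/kernel/tracing/available_tracers
 *   echo function > /sys/kernel/tracing/current_tracer
 *   echo nop > /sys/kernel/tracing/current_tracer     # back to no tracer
 */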
5489
5490 static ssize_t
5491 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5492                    size_t cnt, loff_t *ppos)
5493 {
5494         char buf[64];
5495         int r;
5496
5497         r = snprintf(buf, sizeof(buf), "%ld\n",
5498                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5499         if (r > sizeof(buf))
5500                 r = sizeof(buf);
5501         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5502 }
5503
5504 static ssize_t
5505 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5506                     size_t cnt, loff_t *ppos)
5507 {
5508         unsigned long val;
5509         int ret;
5510
5511         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5512         if (ret)
5513                 return ret;
5514
5515         *ptr = val * 1000;
5516
5517         return cnt;
5518 }
5519
5520 static ssize_t
5521 tracing_thresh_read(struct file *filp, char __user *ubuf,
5522                     size_t cnt, loff_t *ppos)
5523 {
5524         return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5525 }
5526
5527 static ssize_t
5528 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5529                      size_t cnt, loff_t *ppos)
5530 {
5531         struct trace_array *tr = filp->private_data;
5532         int ret;
5533
5534         mutex_lock(&trace_types_lock);
5535         ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5536         if (ret < 0)
5537                 goto out;
5538
5539         if (tr->current_trace->update_thresh) {
5540                 ret = tr->current_trace->update_thresh(tr);
5541                 if (ret < 0)
5542                         goto out;
5543         }
5544
5545         ret = cnt;
5546 out:
5547         mutex_unlock(&trace_types_lock);
5548
5549         return ret;
5550 }
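
/*
 * Usage note (illustrative): tracing_thresh is read and written in
 * microseconds but stored in nanoseconds by tracing_nsecs_write(), so
 *
 *   echo 100 > /sys/kernel/tracing/tracing_thresh
 *
 * stores 100000 in tracing_thresh and makes latency tracers that honour a
 * threshold only record traces longer than 100 usecs.
 */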
5551
5552 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5553
5554 static ssize_t
5555 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5556                      size_t cnt, loff_t *ppos)
5557 {
5558         return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5559 }
5560
5561 static ssize_t
5562 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5563                       size_t cnt, loff_t *ppos)
5564 {
5565         return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5566 }
5567
5568 #endif
5569
5570 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5571 {
5572         struct trace_array *tr = inode->i_private;
5573         struct trace_iterator *iter;
5574         int ret = 0;
5575
5576         if (tracing_disabled)
5577                 return -ENODEV;
5578
5579         if (trace_array_get(tr) < 0)
5580                 return -ENODEV;
5581
5582         mutex_lock(&trace_types_lock);
5583
5584         /* create a buffer to store the information to pass to userspace */
5585         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5586         if (!iter) {
5587                 ret = -ENOMEM;
5588                 __trace_array_put(tr);
5589                 goto out;
5590         }
5591
5592         trace_seq_init(&iter->seq);
5593         iter->trace = tr->current_trace;
5594
5595         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5596                 ret = -ENOMEM;
5597                 goto fail;
5598         }
5599
5600         /* trace pipe does not show start of buffer */
5601         cpumask_setall(iter->started);
5602
5603         if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5604                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5605
5606         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5607         if (trace_clocks[tr->clock_id].in_ns)
5608                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5609
5610         iter->tr = tr;
5611         iter->trace_buffer = &tr->trace_buffer;
5612         iter->cpu_file = tracing_get_cpu(inode);
5613         mutex_init(&iter->mutex);
5614         filp->private_data = iter;
5615
5616         if (iter->trace->pipe_open)
5617                 iter->trace->pipe_open(iter);
5618
5619         nonseekable_open(inode, filp);
5620
5621         tr->current_trace->ref++;
5622 out:
5623         mutex_unlock(&trace_types_lock);
5624         return ret;
5625
5626 fail:
5627         kfree(iter->trace);
5628         kfree(iter);
5629         __trace_array_put(tr);
5630         mutex_unlock(&trace_types_lock);
5631         return ret;
5632 }
5633
5634 static int tracing_release_pipe(struct inode *inode, struct file *file)
5635 {
5636         struct trace_iterator *iter = file->private_data;
5637         struct trace_array *tr = inode->i_private;
5638
5639         mutex_lock(&trace_types_lock);
5640
5641         tr->current_trace->ref--;
5642
5643         if (iter->trace->pipe_close)
5644                 iter->trace->pipe_close(iter);
5645
5646         mutex_unlock(&trace_types_lock);
5647
5648         free_cpumask_var(iter->started);
5649         mutex_destroy(&iter->mutex);
5650         kfree(iter);
5651
5652         trace_array_put(tr);
5653
5654         return 0;
5655 }
5656
5657 static __poll_t
5658 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5659 {
5660         struct trace_array *tr = iter->tr;
5661
5662         /* Iterators are static; they should be either filled or empty */
5663         if (trace_buffer_iter(iter, iter->cpu_file))
5664                 return EPOLLIN | EPOLLRDNORM;
5665
5666         if (tr->trace_flags & TRACE_ITER_BLOCK)
5667                 /*
5668                  * Always select as readable when in blocking mode
5669                  */
5670                 return EPOLLIN | EPOLLRDNORM;
5671         else
5672                 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5673                                              filp, poll_table);
5674 }
5675
5676 static __poll_t
5677 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5678 {
5679         struct trace_iterator *iter = filp->private_data;
5680
5681         return trace_poll(iter, filp, poll_table);
5682 }
5683
5684 /* Must be called with iter->mutex held. */
5685 static int tracing_wait_pipe(struct file *filp)
5686 {
5687         struct trace_iterator *iter = filp->private_data;
5688         int ret;
5689
5690         while (trace_empty(iter)) {
5691
5692                 if ((filp->f_flags & O_NONBLOCK)) {
5693                         return -EAGAIN;
5694                 }
5695
5696                 /*
5697                  * We block until we have read something and tracing is
5698                  * disabled. If tracing is disabled but nothing has been
5699                  * read yet, we keep blocking; this allows a user to cat
5700                  * this file and then enable tracing. But once something
5701                  * has been read, we return EOF when tracing is disabled again.
5702                  *
5703                  * iter->pos will be 0 if we haven't read anything.
5704                  */
5705                 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5706                         break;
5707
5708                 mutex_unlock(&iter->mutex);
5709
5710                 ret = wait_on_pipe(iter, 0);
5711
5712                 mutex_lock(&iter->mutex);
5713
5714                 if (ret)
5715                         return ret;
5716         }
5717
5718         return 1;
5719 }
5720
5721 /*
5722  * Consumer reader.
5723  */
5724 static ssize_t
5725 tracing_read_pipe(struct file *filp, char __user *ubuf,
5726                   size_t cnt, loff_t *ppos)
5727 {
5728         struct trace_iterator *iter = filp->private_data;
5729         ssize_t sret;
5730
5731         /*
5732          * Avoid more than one consumer on a single file descriptor.
5733          * This is just a matter of trace coherency; the ring buffer itself
5734          * is protected.
5735          */
5736         mutex_lock(&iter->mutex);
5737
5738         /* return any leftover data */
5739         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5740         if (sret != -EBUSY)
5741                 goto out;
5742
5743         trace_seq_init(&iter->seq);
5744
5745         if (iter->trace->read) {
5746                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5747                 if (sret)
5748                         goto out;
5749         }
5750
5751 waitagain:
5752         sret = tracing_wait_pipe(filp);
5753         if (sret <= 0)
5754                 goto out;
5755
5756         /* stop when tracing is finished */
5757         if (trace_empty(iter)) {
5758                 sret = 0;
5759                 goto out;
5760         }
5761
5762         if (cnt >= PAGE_SIZE)
5763                 cnt = PAGE_SIZE - 1;
5764
5765         /* reset all but tr, trace, and overruns */
5766         memset(&iter->seq, 0,
5767                sizeof(struct trace_iterator) -
5768                offsetof(struct trace_iterator, seq));
5769         cpumask_clear(iter->started);
5770         iter->pos = -1;
5771
5772         trace_event_read_lock();
5773         trace_access_lock(iter->cpu_file);
5774         while (trace_find_next_entry_inc(iter) != NULL) {
5775                 enum print_line_t ret;
5776                 int save_len = iter->seq.seq.len;
5777
5778                 ret = print_trace_line(iter);
5779                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5780                         /* don't print partial lines */
5781                         iter->seq.seq.len = save_len;
5782                         break;
5783                 }
5784                 if (ret != TRACE_TYPE_NO_CONSUME)
5785                         trace_consume(iter);
5786
5787                 if (trace_seq_used(&iter->seq) >= cnt)
5788                         break;
5789
5790                 /*
5791                  * Setting the full flag means we reached the trace_seq buffer
5792                  * size and we should leave by partial output condition above.
5793                  * One of the trace_seq_* functions is not used properly.
5794                  */
5795                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5796                           iter->ent->type);
5797         }
5798         trace_access_unlock(iter->cpu_file);
5799         trace_event_read_unlock();
5800
5801         /* Now copy what we have to the user */
5802         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5803         if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5804                 trace_seq_init(&iter->seq);
5805
5806         /*
5807          * If there was nothing to send to user, in spite of consuming trace
5808          * entries, go back to wait for more entries.
5809          */
5810         if (sret == -EBUSY)
5811                 goto waitagain;
5812
5813 out:
5814         mutex_unlock(&iter->mutex);
5815
5816         return sret;
5817 }
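
/*
 * Usage note (illustrative): trace_pipe is the consuming counterpart of the
 * trace file.  tracing_read_pipe() removes every entry it copies out, and
 * blocks when the buffer is empty (unless opened O_NONBLOCK), so
 *
 *   cat /sys/kernel/tracing/trace_pipe
 *
 * streams events as they arrive and leaves the ring buffer drained behind it.
 */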
5818
5819 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5820                                      unsigned int idx)
5821 {
5822         __free_page(spd->pages[idx]);
5823 }
5824
5825 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
5826         .can_merge              = 0,
5827         .confirm                = generic_pipe_buf_confirm,
5828         .release                = generic_pipe_buf_release,
5829         .steal                  = generic_pipe_buf_steal,
5830         .get                    = generic_pipe_buf_get,
5831 };
5832
5833 static size_t
5834 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
5835 {
5836         size_t count;
5837         int save_len;
5838         int ret;
5839
5840         /* Seq buffer is page-sized, exactly what we need. */
5841         for (;;) {
5842                 save_len = iter->seq.seq.len;
5843                 ret = print_trace_line(iter);
5844
5845                 if (trace_seq_has_overflowed(&iter->seq)) {
5846                         iter->seq.seq.len = save_len;
5847                         break;
5848                 }
5849
5850                 /*
5851                  * This should not be hit, because it should only
5852                  * be set if the iter->seq overflowed. But check it
5853                  * anyway to be safe.
5854                  */
5855                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5856                         iter->seq.seq.len = save_len;
5857                         break;
5858                 }
5859
5860                 count = trace_seq_used(&iter->seq) - save_len;
5861                 if (rem < count) {
5862                         rem = 0;
5863                         iter->seq.seq.len = save_len;
5864                         break;
5865                 }
5866
5867                 if (ret != TRACE_TYPE_NO_CONSUME)
5868                         trace_consume(iter);
5869                 rem -= count;
5870                 if (!trace_find_next_entry_inc(iter))   {
5871                         rem = 0;
5872                         iter->ent = NULL;
5873                         break;
5874                 }
5875         }
5876
5877         return rem;
5878 }
5879
5880 static ssize_t tracing_splice_read_pipe(struct file *filp,
5881                                         loff_t *ppos,
5882                                         struct pipe_inode_info *pipe,
5883                                         size_t len,
5884                                         unsigned int flags)
5885 {
5886         struct page *pages_def[PIPE_DEF_BUFFERS];
5887         struct partial_page partial_def[PIPE_DEF_BUFFERS];
5888         struct trace_iterator *iter = filp->private_data;
5889         struct splice_pipe_desc spd = {
5890                 .pages          = pages_def,
5891                 .partial        = partial_def,
5892                 .nr_pages       = 0, /* This gets updated below. */
5893                 .nr_pages_max   = PIPE_DEF_BUFFERS,
5894                 .ops            = &tracing_pipe_buf_ops,
5895                 .spd_release    = tracing_spd_release_pipe,
5896         };
5897         ssize_t ret;
5898         size_t rem;
5899         unsigned int i;
5900
5901         if (splice_grow_spd(pipe, &spd))
5902                 return -ENOMEM;
5903
5904         mutex_lock(&iter->mutex);
5905
5906         if (iter->trace->splice_read) {
5907                 ret = iter->trace->splice_read(iter, filp,
5908                                                ppos, pipe, len, flags);
5909                 if (ret)
5910                         goto out_err;
5911         }
5912
5913         ret = tracing_wait_pipe(filp);
5914         if (ret <= 0)
5915                 goto out_err;
5916
5917         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5918                 ret = -EFAULT;
5919                 goto out_err;
5920         }
5921
5922         trace_event_read_lock();
5923         trace_access_lock(iter->cpu_file);
5924
5925         /* Fill as many pages as possible. */
5926         for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5927                 spd.pages[i] = alloc_page(GFP_KERNEL);
5928                 if (!spd.pages[i])
5929                         break;
5930
5931                 rem = tracing_fill_pipe_page(rem, iter);
5932
5933                 /* Copy the data into the page, so we can start over. */
5934                 ret = trace_seq_to_buffer(&iter->seq,
5935                                           page_address(spd.pages[i]),
5936                                           trace_seq_used(&iter->seq));
5937                 if (ret < 0) {
5938                         __free_page(spd.pages[i]);
5939                         break;
5940                 }
5941                 spd.partial[i].offset = 0;
5942                 spd.partial[i].len = trace_seq_used(&iter->seq);
5943
5944                 trace_seq_init(&iter->seq);
5945         }
5946
5947         trace_access_unlock(iter->cpu_file);
5948         trace_event_read_unlock();
5949         mutex_unlock(&iter->mutex);
5950
5951         spd.nr_pages = i;
5952
5953         if (i)
5954                 ret = splice_to_pipe(pipe, &spd);
5955         else
5956                 ret = 0;
5957 out:
5958         splice_shrink_spd(&spd);
5959         return ret;
5960
5961 out_err:
5962         mutex_unlock(&iter->mutex);
5963         goto out;
5964 }
5965
5966 static ssize_t
5967 tracing_entries_read(struct file *filp, char __user *ubuf,
5968                      size_t cnt, loff_t *ppos)
5969 {
5970         struct inode *inode = file_inode(filp);
5971         struct trace_array *tr = inode->i_private;
5972         int cpu = tracing_get_cpu(inode);
5973         char buf[64];
5974         int r = 0;
5975         ssize_t ret;
5976
5977         mutex_lock(&trace_types_lock);
5978
5979         if (cpu == RING_BUFFER_ALL_CPUS) {
5980                 int cpu, buf_size_same;
5981                 unsigned long size;
5982
5983                 size = 0;
5984                 buf_size_same = 1;
5985                 /* check if all cpu sizes are same */
5986                 for_each_tracing_cpu(cpu) {
5987                         /* fill in the size from first enabled cpu */
5988                         if (size == 0)
5989                                 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5990                         if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5991                                 buf_size_same = 0;
5992                                 break;
5993                         }
5994                 }
5995
5996                 if (buf_size_same) {
5997                         if (!ring_buffer_expanded)
5998                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
5999                                             size >> 10,
6000                                             trace_buf_size >> 10);
6001                         else
6002                                 r = sprintf(buf, "%lu\n", size >> 10);
6003                 } else
6004                         r = sprintf(buf, "X\n");
6005         } else
6006                 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
6007
6008         mutex_unlock(&trace_types_lock);
6009
6010         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6011         return ret;
6012 }
6013
6014 static ssize_t
6015 tracing_entries_write(struct file *filp, const char __user *ubuf,
6016                       size_t cnt, loff_t *ppos)
6017 {
6018         struct inode *inode = file_inode(filp);
6019         struct trace_array *tr = inode->i_private;
6020         unsigned long val;
6021         int ret;
6022
6023         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6024         if (ret)
6025                 return ret;
6026
6027         /* must have at least 1 entry */
6028         if (!val)
6029                 return -EINVAL;
6030
6031         /* value is in KB */
6032         val <<= 10;
6033         ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6034         if (ret < 0)
6035                 return ret;
6036
6037         *ppos += cnt;
6038
6039         return cnt;
6040 }
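
/*
 * Usage sketch (illustrative): buffer_size_kb, backed by
 * tracing_entries_read()/tracing_entries_write(), takes the per-CPU ring
 * buffer size in kilobytes:
 *
 *   echo 4096 > /sys/kernel/tracing/buffer_size_kb   # 4 MB per CPU
 *   cat /sys/kernel/tracing/buffer_size_kb
 *
 * The per_cpu/cpuN/buffer_size_kb instance resizes only that CPU's buffer,
 * and buffer_total_size_kb reports the sum across CPUs.
 */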
6041
6042 static ssize_t
6043 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6044                                 size_t cnt, loff_t *ppos)
6045 {
6046         struct trace_array *tr = filp->private_data;
6047         char buf[64];
6048         int r, cpu;
6049         unsigned long size = 0, expanded_size = 0;
6050
6051         mutex_lock(&trace_types_lock);
6052         for_each_tracing_cpu(cpu) {
6053                 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6054                 if (!ring_buffer_expanded)
6055                         expanded_size += trace_buf_size >> 10;
6056         }
6057         if (ring_buffer_expanded)
6058                 r = sprintf(buf, "%lu\n", size);
6059         else
6060                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6061         mutex_unlock(&trace_types_lock);
6062
6063         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6064 }
6065
6066 static ssize_t
6067 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6068                           size_t cnt, loff_t *ppos)
6069 {
6070         /*
6071          * There is no need to read what the user has written; this function
6072          * just makes sure that there is no error when "echo" is used.
6073          */
6074
6075         *ppos += cnt;
6076
6077         return cnt;
6078 }
6079
6080 static int
6081 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6082 {
6083         struct trace_array *tr = inode->i_private;
6084
6085         /* disable tracing ? */
6086         if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6087                 tracer_tracing_off(tr);
6088         /* resize the ring buffer to 0 */
6089         tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6090
6091         trace_array_put(tr);
6092
6093         return 0;
6094 }
6095
6096 static ssize_t
6097 tracing_mark_write(struct file *filp, const char __user *ubuf,
6098                                         size_t cnt, loff_t *fpos)
6099 {
6100         struct trace_array *tr = filp->private_data;
6101         struct ring_buffer_event *event;
6102         enum event_trigger_type tt = ETT_NONE;
6103         struct ring_buffer *buffer;
6104         struct print_entry *entry;
6105         unsigned long irq_flags;
6106         const char faulted[] = "<faulted>";
6107         ssize_t written;
6108         int size;
6109         int len;
6110
6111 /* Used in tracing_mark_raw_write() as well */
6112 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6113
6114         if (tracing_disabled)
6115                 return -EINVAL;
6116
6117         if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6118                 return -EINVAL;
6119
6120         if (cnt > TRACE_BUF_SIZE)
6121                 cnt = TRACE_BUF_SIZE;
6122
6123         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6124
6125         local_save_flags(irq_flags);
6126         size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6127
6128         /* If less than "<faulted>", then make sure we can still add that */
6129         if (cnt < FAULTED_SIZE)
6130                 size += FAULTED_SIZE - cnt;
6131
6132         buffer = tr->trace_buffer.buffer;
6133         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6134                                             irq_flags, preempt_count());
6135         if (unlikely(!event))
6136                 /* Ring buffer disabled, return as if not open for write */
6137                 return -EBADF;
6138
6139         entry = ring_buffer_event_data(event);
6140         entry->ip = _THIS_IP_;
6141
6142         len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6143         if (len) {
6144                 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6145                 cnt = FAULTED_SIZE;
6146                 written = -EFAULT;
6147         } else
6148                 written = cnt;
6149         len = cnt;
6150
6151         if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6152                 /* do not add \n before testing triggers, but add \0 */
6153                 entry->buf[cnt] = '\0';
6154                 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6155         }
6156
6157         if (entry->buf[cnt - 1] != '\n') {
6158                 entry->buf[cnt] = '\n';
6159                 entry->buf[cnt + 1] = '\0';
6160         } else
6161                 entry->buf[cnt] = '\0';
6162
6163         __buffer_unlock_commit(buffer, event);
6164
6165         if (tt)
6166                 event_triggers_post_call(tr->trace_marker_file, tt);
6167
6168         if (written > 0)
6169                 *fpos += written;
6170
6171         return written;
6172 }
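
/*
 * Usage note (illustrative): trace_marker lets userspace inject a text
 * annotation into the ring buffer via tracing_mark_write():
 *
 *   echo "frame 42 rendered" > /sys/kernel/tracing/trace_marker
 *
 * The string shows up as a print entry in the trace output, which is handy
 * for correlating application phases with kernel events.
 */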
6173
6174 /* Limit it for now to 3K (including tag) */
6175 #define RAW_DATA_MAX_SIZE (1024*3)
6176
6177 static ssize_t
6178 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6179                                         size_t cnt, loff_t *fpos)
6180 {
6181         struct trace_array *tr = filp->private_data;
6182         struct ring_buffer_event *event;
6183         struct ring_buffer *buffer;
6184         struct raw_data_entry *entry;
6185         const char faulted[] = "<faulted>";
6186         unsigned long irq_flags;
6187         ssize_t written;
6188         int size;
6189         int len;
6190
6191 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6192
6193         if (tracing_disabled)
6194                 return -EINVAL;
6195
6196         if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6197                 return -EINVAL;
6198
6199         /* The marker must at least have a tag id */
6200         if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6201                 return -EINVAL;
6202
6203         if (cnt > TRACE_BUF_SIZE)
6204                 cnt = TRACE_BUF_SIZE;
6205
6206         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6207
6208         local_save_flags(irq_flags);
6209         size = sizeof(*entry) + cnt;
6210         if (cnt < FAULT_SIZE_ID)
6211                 size += FAULT_SIZE_ID - cnt;
6212
6213         buffer = tr->trace_buffer.buffer;
6214         event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6215                                             irq_flags, preempt_count());
6216         if (!event)
6217                 /* Ring buffer disabled, return as if not open for write */
6218                 return -EBADF;
6219
6220         entry = ring_buffer_event_data(event);
6221
6222         len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6223         if (len) {
6224                 entry->id = -1;
6225                 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6226                 written = -EFAULT;
6227         } else
6228                 written = cnt;
6229
6230         __buffer_unlock_commit(buffer, event);
6231
6232         if (written > 0)
6233                 *fpos += written;
6234
6235         return written;
6236 }
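
/*
 * Usage sketch (illustrative userspace pseudo-example): trace_marker_raw
 * expects a binary blob that starts with a 4-byte tag id, which
 * tracing_mark_raw_write() copies into a raw_data_entry:
 *
 *   struct { unsigned int id; char payload[8]; } rec = { 42, "rawdata" };
 *   int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *
 *   write(fd, &rec, sizeof(rec));
 *
 * The tag id (42 here is arbitrary) is whatever scheme the writing tool and
 * the consumer of the raw events agree on.
 */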
6237
6238 static int tracing_clock_show(struct seq_file *m, void *v)
6239 {
6240         struct trace_array *tr = m->private;
6241         int i;
6242
6243         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6244                 seq_printf(m,
6245                         "%s%s%s%s", i ? " " : "",
6246                         i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6247                         i == tr->clock_id ? "]" : "");
6248         seq_putc(m, '\n');
6249
6250         return 0;
6251 }
6252
6253 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6254 {
6255         int i;
6256
6257         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6258                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6259                         break;
6260         }
6261         if (i == ARRAY_SIZE(trace_clocks))
6262                 return -EINVAL;
6263
6264         mutex_lock(&trace_types_lock);
6265
6266         tr->clock_id = i;
6267
6268         ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6269
6270         /*
6271          * New clock may not be consistent with the previous clock.
6272          * Reset the buffer so that it doesn't have incomparable timestamps.
6273          */
6274         tracing_reset_online_cpus(&tr->trace_buffer);
6275
6276 #ifdef CONFIG_TRACER_MAX_TRACE
6277         if (tr->max_buffer.buffer)
6278                 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6279         tracing_reset_online_cpus(&tr->max_buffer);
6280 #endif
6281
6282         mutex_unlock(&trace_types_lock);
6283
6284         return 0;
6285 }
6286
6287 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6288                                    size_t cnt, loff_t *fpos)
6289 {
6290         struct seq_file *m = filp->private_data;
6291         struct trace_array *tr = m->private;
6292         char buf[64];
6293         const char *clockstr;
6294         int ret;
6295
6296         if (cnt >= sizeof(buf))
6297                 return -EINVAL;
6298
6299         if (copy_from_user(buf, ubuf, cnt))
6300                 return -EFAULT;
6301
6302         buf[cnt] = 0;
6303
6304         clockstr = strstrip(buf);
6305
6306         ret = tracing_set_clock(tr, clockstr);
6307         if (ret)
6308                 return ret;
6309
6310         *fpos += cnt;
6311
6312         return cnt;
6313 }
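
/*
 * Usage sketch (illustrative): the trace_clock file selects which entry of
 * trace_clocks[] timestamps new events; the current choice is shown in
 * brackets and changing it resets the buffer (see tracing_set_clock()):
 *
 *   cat /sys/kernel/tracing/trace_clock     # e.g. "[local] global counter ..."
 *   echo global > /sys/kernel/tracing/trace_clock
 */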
6314
6315 static int tracing_clock_open(struct inode *inode, struct file *file)
6316 {
6317         struct trace_array *tr = inode->i_private;
6318         int ret;
6319
6320         if (tracing_disabled)
6321                 return -ENODEV;
6322
6323         if (trace_array_get(tr))
6324                 return -ENODEV;
6325
6326         ret = single_open(file, tracing_clock_show, inode->i_private);
6327         if (ret < 0)
6328                 trace_array_put(tr);
6329
6330         return ret;
6331 }
6332
6333 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6334 {
6335         struct trace_array *tr = m->private;
6336
6337         mutex_lock(&trace_types_lock);
6338
6339         if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6340                 seq_puts(m, "delta [absolute]\n");
6341         else
6342                 seq_puts(m, "[delta] absolute\n");
6343
6344         mutex_unlock(&trace_types_lock);
6345
6346         return 0;
6347 }
6348
6349 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6350 {
6351         struct trace_array *tr = inode->i_private;
6352         int ret;
6353
6354         if (tracing_disabled)
6355                 return -ENODEV;
6356
6357         if (trace_array_get(tr))
6358                 return -ENODEV;
6359
6360         ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6361         if (ret < 0)
6362                 trace_array_put(tr);
6363
6364         return ret;
6365 }
6366
6367 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6368 {
6369         int ret = 0;
6370
6371         mutex_lock(&trace_types_lock);
6372
6373         if (abs && tr->time_stamp_abs_ref++)
6374                 goto out;
6375
6376         if (!abs) {
6377                 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6378                         ret = -EINVAL;
6379                         goto out;
6380                 }
6381
6382                 if (--tr->time_stamp_abs_ref)
6383                         goto out;
6384         }
6385
6386         ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6387
6388 #ifdef CONFIG_TRACER_MAX_TRACE
6389         if (tr->max_buffer.buffer)
6390                 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6391 #endif
6392  out:
6393         mutex_unlock(&trace_types_lock);
6394
6395         return ret;
6396 }
6397
6398 struct ftrace_buffer_info {
6399         struct trace_iterator   iter;
6400         void                    *spare;
6401         unsigned int            spare_cpu;
6402         unsigned int            read;
6403 };
6404
6405 #ifdef CONFIG_TRACER_SNAPSHOT
6406 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6407 {
6408         struct trace_array *tr = inode->i_private;
6409         struct trace_iterator *iter;
6410         struct seq_file *m;
6411         int ret = 0;
6412
6413         if (trace_array_get(tr) < 0)
6414                 return -ENODEV;
6415
6416         if (file->f_mode & FMODE_READ) {
6417                 iter = __tracing_open(inode, file, true);
6418                 if (IS_ERR(iter))
6419                         ret = PTR_ERR(iter);
6420         } else {
6421                 /* Writes still need the seq_file to hold the private data */
6422                 ret = -ENOMEM;
6423                 m = kzalloc(sizeof(*m), GFP_KERNEL);
6424                 if (!m)
6425                         goto out;
6426                 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6427                 if (!iter) {
6428                         kfree(m);
6429                         goto out;
6430                 }
6431                 ret = 0;
6432
6433                 iter->tr = tr;
6434                 iter->trace_buffer = &tr->max_buffer;
6435                 iter->cpu_file = tracing_get_cpu(inode);
6436                 m->private = iter;
6437                 file->private_data = m;
6438         }
6439 out:
6440         if (ret < 0)
6441                 trace_array_put(tr);
6442
6443         return ret;
6444 }
6445
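/*
 * Write handler for the "snapshot" file.  The written value selects the
 * action; for example (illustrative shell usage):
 *
 *   echo 0 > snapshot   - free the snapshot buffer (all-CPU file only)
 *   echo 1 > snapshot   - allocate the buffer if needed and take a snapshot
 *   echo 2 > snapshot   - clear the snapshot contents without freeing it
 *
 * Any value greater than 1 behaves like 2.
 */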
6446 static ssize_t
6447 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6448                        loff_t *ppos)
6449 {
6450         struct seq_file *m = filp->private_data;
6451         struct trace_iterator *iter = m->private;
6452         struct trace_array *tr = iter->tr;
6453         unsigned long val;
6454         int ret;
6455
6456         ret = tracing_update_buffers();
6457         if (ret < 0)
6458                 return ret;
6459
6460         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6461         if (ret)
6462                 return ret;
6463
6464         mutex_lock(&trace_types_lock);
6465
6466         if (tr->current_trace->use_max_tr) {
6467                 ret = -EBUSY;
6468                 goto out;
6469         }
6470
6471         switch (val) {
6472         case 0:
6473                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6474                         ret = -EINVAL;
6475                         break;
6476                 }
6477                 if (tr->allocated_snapshot)
6478                         free_snapshot(tr);
6479                 break;
6480         case 1:
6481 /* Only allow per-cpu swap if the ring buffer supports it */
6482 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6483                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6484                         ret = -EINVAL;
6485                         break;
6486                 }
6487 #endif
6488                 if (!tr->allocated_snapshot) {
6489                         ret = tracing_alloc_snapshot_instance(tr);
6490                         if (ret < 0)
6491                                 break;
6492                 }
6493                 local_irq_disable();
6494                 /* Now, we're going to swap */
6495                 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6496                         update_max_tr(tr, current, smp_processor_id());
6497                 else
6498                         update_max_tr_single(tr, current, iter->cpu_file);
6499                 local_irq_enable();
6500                 break;
6501         default:
6502                 if (tr->allocated_snapshot) {
6503                         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6504                                 tracing_reset_online_cpus(&tr->max_buffer);
6505                         else
6506                                 tracing_reset(&tr->max_buffer, iter->cpu_file);
6507                 }
6508                 break;
6509         }
6510
6511         if (ret >= 0) {
6512                 *ppos += cnt;
6513                 ret = cnt;
6514         }
6515 out:
6516         mutex_unlock(&trace_types_lock);
6517         return ret;
6518 }
6519
6520 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6521 {
6522         struct seq_file *m = file->private_data;
6523         int ret;
6524
6525         ret = tracing_release(inode, file);
6526
6527         if (file->f_mode & FMODE_READ)
6528                 return ret;
6529
6530         /* If write only, the seq_file is just a stub */
6531         if (m)
6532                 kfree(m->private);
6533         kfree(m);
6534
6535         return 0;
6536 }
6537
6538 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6539 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6540                                     size_t count, loff_t *ppos);
6541 static int tracing_buffers_release(struct inode *inode, struct file *file);
6542 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6543                    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6544
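/*
 * Open handler for the per-CPU "snapshot_raw" file: reuse the trace_pipe_raw
 * open path, then point the iterator at the snapshot (max) buffer instead of
 * the live trace buffer.
 */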
6545 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6546 {
6547         struct ftrace_buffer_info *info;
6548         int ret;
6549
6550         ret = tracing_buffers_open(inode, filp);
6551         if (ret < 0)
6552                 return ret;
6553
6554         info = filp->private_data;
6555
6556         if (info->iter.trace->use_max_tr) {
6557                 tracing_buffers_release(inode, filp);
6558                 return -EBUSY;
6559         }
6560
6561         info->iter.snapshot = true;
6562         info->iter.trace_buffer = &info->iter.tr->max_buffer;
6563
6564         return ret;
6565 }
6566
6567 #endif /* CONFIG_TRACER_SNAPSHOT */
6568
6569
6570 static const struct file_operations tracing_thresh_fops = {
6571         .open           = tracing_open_generic,
6572         .read           = tracing_thresh_read,
6573         .write          = tracing_thresh_write,
6574         .llseek         = generic_file_llseek,
6575 };
6576
6577 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6578 static const struct file_operations tracing_max_lat_fops = {
6579         .open           = tracing_open_generic,
6580         .read           = tracing_max_lat_read,
6581         .write          = tracing_max_lat_write,
6582         .llseek         = generic_file_llseek,
6583 };
6584 #endif
6585
6586 static const struct file_operations set_tracer_fops = {
6587         .open           = tracing_open_generic,
6588         .read           = tracing_set_trace_read,
6589         .write          = tracing_set_trace_write,
6590         .llseek         = generic_file_llseek,
6591 };
6592
6593 static const struct file_operations tracing_pipe_fops = {
6594         .open           = tracing_open_pipe,
6595         .poll           = tracing_poll_pipe,
6596         .read           = tracing_read_pipe,
6597         .splice_read    = tracing_splice_read_pipe,
6598         .release        = tracing_release_pipe,
6599         .llseek         = no_llseek,
6600 };
6601
6602 static const struct file_operations tracing_entries_fops = {
6603         .open           = tracing_open_generic_tr,
6604         .read           = tracing_entries_read,
6605         .write          = tracing_entries_write,
6606         .llseek         = generic_file_llseek,
6607         .release        = tracing_release_generic_tr,
6608 };
6609
6610 static const struct file_operations tracing_total_entries_fops = {
6611         .open           = tracing_open_generic_tr,
6612         .read           = tracing_total_entries_read,
6613         .llseek         = generic_file_llseek,
6614         .release        = tracing_release_generic_tr,
6615 };
6616
6617 static const struct file_operations tracing_free_buffer_fops = {
6618         .open           = tracing_open_generic_tr,
6619         .write          = tracing_free_buffer_write,
6620         .release        = tracing_free_buffer_release,
6621 };
6622
6623 static const struct file_operations tracing_mark_fops = {
6624         .open           = tracing_open_generic_tr,
6625         .write          = tracing_mark_write,
6626         .llseek         = generic_file_llseek,
6627         .release        = tracing_release_generic_tr,
6628 };
6629
6630 static const struct file_operations tracing_mark_raw_fops = {
6631         .open           = tracing_open_generic_tr,
6632         .write          = tracing_mark_raw_write,
6633         .llseek         = generic_file_llseek,
6634         .release        = tracing_release_generic_tr,
6635 };
6636
6637 static const struct file_operations trace_clock_fops = {
6638         .open           = tracing_clock_open,
6639         .read           = seq_read,
6640         .llseek         = seq_lseek,
6641         .release        = tracing_single_release_tr,
6642         .write          = tracing_clock_write,
6643 };
6644
6645 static const struct file_operations trace_time_stamp_mode_fops = {
6646         .open           = tracing_time_stamp_mode_open,
6647         .read           = seq_read,
6648         .llseek         = seq_lseek,
6649         .release        = tracing_single_release_tr,
6650 };
6651
6652 #ifdef CONFIG_TRACER_SNAPSHOT
6653 static const struct file_operations snapshot_fops = {
6654         .open           = tracing_snapshot_open,
6655         .read           = seq_read,
6656         .write          = tracing_snapshot_write,
6657         .llseek         = tracing_lseek,
6658         .release        = tracing_snapshot_release,
6659 };
6660
6661 static const struct file_operations snapshot_raw_fops = {
6662         .open           = snapshot_raw_open,
6663         .read           = tracing_buffers_read,
6664         .release        = tracing_buffers_release,
6665         .splice_read    = tracing_buffers_splice_read,
6666         .llseek         = no_llseek,
6667 };
6668
6669 #endif /* CONFIG_TRACER_SNAPSHOT */
6670
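/*
 * Open handler for the "trace_pipe_raw" files: allocate the buffer-info
 * descriptor, take a reference on the trace array and the current tracer,
 * and remember which CPU buffer this file instance reads from.
 */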
6671 static int tracing_buffers_open(struct inode *inode, struct file *filp)
6672 {
6673         struct trace_array *tr = inode->i_private;
6674         struct ftrace_buffer_info *info;
6675         int ret;
6676
6677         if (tracing_disabled)
6678                 return -ENODEV;
6679
6680         if (trace_array_get(tr) < 0)
6681                 return -ENODEV;
6682
6683         info = kzalloc(sizeof(*info), GFP_KERNEL);
6684         if (!info) {
6685                 trace_array_put(tr);
6686                 return -ENOMEM;
6687         }
6688
6689         mutex_lock(&trace_types_lock);
6690
6691         info->iter.tr           = tr;
6692         info->iter.cpu_file     = tracing_get_cpu(inode);
6693         info->iter.trace        = tr->current_trace;
6694         info->iter.trace_buffer = &tr->trace_buffer;
6695         info->spare             = NULL;
6696         /* Force reading ring buffer for first read */
6697         info->read              = (unsigned int)-1;
6698
6699         filp->private_data = info;
6700
6701         tr->current_trace->ref++;
6702
6703         mutex_unlock(&trace_types_lock);
6704
6705         ret = nonseekable_open(inode, filp);
6706         if (ret < 0)
6707                 trace_array_put(tr);
6708
6709         return ret;
6710 }
6711
6712 static __poll_t
6713 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6714 {
6715         struct ftrace_buffer_info *info = filp->private_data;
6716         struct trace_iterator *iter = &info->iter;
6717
6718         return trace_poll(iter, filp, poll_table);
6719 }
6720
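/*
 * Read handler for "trace_pipe_raw": pull whole pages out of the ring buffer
 * into a spare page and copy them to user space, blocking (unless O_NONBLOCK
 * is set) while the buffer is empty.
 */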
6721 static ssize_t
6722 tracing_buffers_read(struct file *filp, char __user *ubuf,
6723                      size_t count, loff_t *ppos)
6724 {
6725         struct ftrace_buffer_info *info = filp->private_data;
6726         struct trace_iterator *iter = &info->iter;
6727         ssize_t ret = 0;
6728         ssize_t size;
6729
6730         if (!count)
6731                 return 0;
6732
6733 #ifdef CONFIG_TRACER_MAX_TRACE
6734         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6735                 return -EBUSY;
6736 #endif
6737
6738         if (!info->spare) {
6739                 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6740                                                           iter->cpu_file);
6741                 if (IS_ERR(info->spare)) {
6742                         ret = PTR_ERR(info->spare);
6743                         info->spare = NULL;
6744                 } else {
6745                         info->spare_cpu = iter->cpu_file;
6746                 }
6747         }
6748         if (!info->spare)
6749                 return ret;
6750
6751         /* Do we have previous read data to read? */
6752         if (info->read < PAGE_SIZE)
6753                 goto read;
6754
6755  again:
6756         trace_access_lock(iter->cpu_file);
6757         ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6758                                     &info->spare,
6759                                     count,
6760                                     iter->cpu_file, 0);
6761         trace_access_unlock(iter->cpu_file);
6762
6763         if (ret < 0) {
6764                 if (trace_empty(iter)) {
6765                         if ((filp->f_flags & O_NONBLOCK))
6766                                 return -EAGAIN;
6767
6768                         ret = wait_on_pipe(iter, 0);
6769                         if (ret)
6770                                 return ret;
6771
6772                         goto again;
6773                 }
6774                 return 0;
6775         }
6776
6777         info->read = 0;
6778  read:
6779         size = PAGE_SIZE - info->read;
6780         if (size > count)
6781                 size = count;
6782
6783         ret = copy_to_user(ubuf, info->spare + info->read, size);
6784         if (ret == size)
6785                 return -EFAULT;
6786
6787         size -= ret;
6788
6789         *ppos += size;
6790         info->read += size;
6791
6792         return size;
6793 }
6794
6795 static int tracing_buffers_release(struct inode *inode, struct file *file)
6796 {
6797         struct ftrace_buffer_info *info = file->private_data;
6798         struct trace_iterator *iter = &info->iter;
6799
6800         mutex_lock(&trace_types_lock);
6801
6802         iter->tr->current_trace->ref--;
6803
6804         __trace_array_put(iter->tr);
6805
6806         if (info->spare)
6807                 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6808                                            info->spare_cpu, info->spare);
6809         kfree(info);
6810
6811         mutex_unlock(&trace_types_lock);
6812
6813         return 0;
6814 }
6815
6816 struct buffer_ref {
6817         struct ring_buffer      *buffer;
6818         void                    *page;
6819         int                     cpu;
6820         int                     ref;
6821 };
6822
6823 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6824                                     struct pipe_buffer *buf)
6825 {
6826         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6827
6828         if (--ref->ref)
6829                 return;
6830
6831         ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6832         kfree(ref);
6833         buf->private = 0;
6834 }
6835
6836 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6837                                 struct pipe_buffer *buf)
6838 {
6839         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6840
6841         ref->ref++;
6842 }
6843
6844 /* Pipe buffer operations for a buffer. */
6845 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
6846         .can_merge              = 0,
6847         .confirm                = generic_pipe_buf_confirm,
6848         .release                = buffer_pipe_buf_release,
6849         .steal                  = generic_pipe_buf_steal,
6850         .get                    = buffer_pipe_buf_get,
6851 };
6852
6853 /*
6854  * Callback from splice_to_pipe(): release any pages left at the end of
6855  * the spd if we errored out while filling the pipe.
6856  */
6857 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6858 {
6859         struct buffer_ref *ref =
6860                 (struct buffer_ref *)spd->partial[i].private;
6861
6862         if (--ref->ref)
6863                 return;
6864
6865         ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6866         kfree(ref);
6867         spd->partial[i].private = 0;
6868 }
6869
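/*
 * splice() handler for "trace_pipe_raw": hand complete ring-buffer pages to
 * the pipe without copying, using a buffer_ref to keep each page alive until
 * the pipe side releases it.
 */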
6870 static ssize_t
6871 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6872                             struct pipe_inode_info *pipe, size_t len,
6873                             unsigned int flags)
6874 {
6875         struct ftrace_buffer_info *info = file->private_data;
6876         struct trace_iterator *iter = &info->iter;
6877         struct partial_page partial_def[PIPE_DEF_BUFFERS];
6878         struct page *pages_def[PIPE_DEF_BUFFERS];
6879         struct splice_pipe_desc spd = {
6880                 .pages          = pages_def,
6881                 .partial        = partial_def,
6882                 .nr_pages_max   = PIPE_DEF_BUFFERS,
6883                 .ops            = &buffer_pipe_buf_ops,
6884                 .spd_release    = buffer_spd_release,
6885         };
6886         struct buffer_ref *ref;
6887         int entries, i;
6888         ssize_t ret = 0;
6889
6890 #ifdef CONFIG_TRACER_MAX_TRACE
6891         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6892                 return -EBUSY;
6893 #endif
6894
6895         if (*ppos & (PAGE_SIZE - 1))
6896                 return -EINVAL;
6897
6898         if (len & (PAGE_SIZE - 1)) {
6899                 if (len < PAGE_SIZE)
6900                         return -EINVAL;
6901                 len &= PAGE_MASK;
6902         }
6903
6904         if (splice_grow_spd(pipe, &spd))
6905                 return -ENOMEM;
6906
6907  again:
6908         trace_access_lock(iter->cpu_file);
6909         entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6910
6911         for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
6912                 struct page *page;
6913                 int r;
6914
6915                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
6916                 if (!ref) {
6917                         ret = -ENOMEM;
6918                         break;
6919                 }
6920
6921                 ref->ref = 1;
6922                 ref->buffer = iter->trace_buffer->buffer;
6923                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6924                 if (IS_ERR(ref->page)) {
6925                         ret = PTR_ERR(ref->page);
6926                         ref->page = NULL;
6927                         kfree(ref);
6928                         break;
6929                 }
6930                 ref->cpu = iter->cpu_file;
6931
6932                 r = ring_buffer_read_page(ref->buffer, &ref->page,
6933                                           len, iter->cpu_file, 1);
6934                 if (r < 0) {
6935                         ring_buffer_free_read_page(ref->buffer, ref->cpu,
6936                                                    ref->page);
6937                         kfree(ref);
6938                         break;
6939                 }
6940
6941                 page = virt_to_page(ref->page);
6942
6943                 spd.pages[i] = page;
6944                 spd.partial[i].len = PAGE_SIZE;
6945                 spd.partial[i].offset = 0;
6946                 spd.partial[i].private = (unsigned long)ref;
6947                 spd.nr_pages++;
6948                 *ppos += PAGE_SIZE;
6949
6950                 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6951         }
6952
6953         trace_access_unlock(iter->cpu_file);
6954         spd.nr_pages = i;
6955
6956         /* did we read anything? */
6957         if (!spd.nr_pages) {
6958                 if (ret)
6959                         goto out;
6960
6961                 ret = -EAGAIN;
6962                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6963                         goto out;
6964
6965                 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
6966                 if (ret)
6967                         goto out;
6968
6969                 goto again;
6970         }
6971
6972         ret = splice_to_pipe(pipe, &spd);
6973 out:
6974         splice_shrink_spd(&spd);
6975
6976         return ret;
6977 }
6978
6979 static const struct file_operations tracing_buffers_fops = {
6980         .open           = tracing_buffers_open,
6981         .read           = tracing_buffers_read,
6982         .poll           = tracing_buffers_poll,
6983         .release        = tracing_buffers_release,
6984         .splice_read    = tracing_buffers_splice_read,
6985         .llseek         = no_llseek,
6986 };
6987
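/*
 * Read handler for the per-CPU "stats" file: format the ring-buffer counters
 * (entries, overruns, bytes, timestamps, dropped and read events) into a
 * trace_seq and copy it to user space.
 */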
6988 static ssize_t
6989 tracing_stats_read(struct file *filp, char __user *ubuf,
6990                    size_t count, loff_t *ppos)
6991 {
6992         struct inode *inode = file_inode(filp);
6993         struct trace_array *tr = inode->i_private;
6994         struct trace_buffer *trace_buf = &tr->trace_buffer;
6995         int cpu = tracing_get_cpu(inode);
6996         struct trace_seq *s;
6997         unsigned long cnt;
6998         unsigned long long t;
6999         unsigned long usec_rem;
7000
7001         s = kmalloc(sizeof(*s), GFP_KERNEL);
7002         if (!s)
7003                 return -ENOMEM;
7004
7005         trace_seq_init(s);
7006
7007         cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7008         trace_seq_printf(s, "entries: %ld\n", cnt);
7009
7010         cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7011         trace_seq_printf(s, "overrun: %ld\n", cnt);
7012
7013         cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7014         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7015
7016         cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7017         trace_seq_printf(s, "bytes: %ld\n", cnt);
7018
7019         if (trace_clocks[tr->clock_id].in_ns) {
7020                 /* local or global for trace_clock */
7021                 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7022                 usec_rem = do_div(t, USEC_PER_SEC);
7023                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7024                                                                 t, usec_rem);
7025
7026                 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7027                 usec_rem = do_div(t, USEC_PER_SEC);
7028                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7029         } else {
7030                 /* counter or tsc mode for trace_clock */
7031                 trace_seq_printf(s, "oldest event ts: %llu\n",
7032                                 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7033
7034                 trace_seq_printf(s, "now ts: %llu\n",
7035                                 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7036         }
7037
7038         cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7039         trace_seq_printf(s, "dropped events: %ld\n", cnt);
7040
7041         cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7042         trace_seq_printf(s, "read events: %ld\n", cnt);
7043
7044         count = simple_read_from_buffer(ubuf, count, ppos,
7045                                         s->buffer, trace_seq_used(s));
7046
7047         kfree(s);
7048
7049         return count;
7050 }
7051
7052 static const struct file_operations tracing_stats_fops = {
7053         .open           = tracing_open_generic_tr,
7054         .read           = tracing_stats_read,
7055         .llseek         = generic_file_llseek,
7056         .release        = tracing_release_generic_tr,
7057 };
7058
7059 #ifdef CONFIG_DYNAMIC_FTRACE
7060
7061 static ssize_t
7062 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7063                   size_t cnt, loff_t *ppos)
7064 {
7065         unsigned long *p = filp->private_data;
7066         char buf[64]; /* Not too big for a shallow stack */
7067         int r;
7068
7069         r = scnprintf(buf, 63, "%ld", *p);
7070         buf[r++] = '\n';
7071
7072         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7073 }
7074
7075 static const struct file_operations tracing_dyn_info_fops = {
7076         .open           = tracing_open_generic,
7077         .read           = tracing_read_dyn_info,
7078         .llseek         = generic_file_llseek,
7079 };
7080 #endif /* CONFIG_DYNAMIC_FTRACE */
7081
7082 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7083 static void
7084 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7085                 struct trace_array *tr, struct ftrace_probe_ops *ops,
7086                 void *data)
7087 {
7088         tracing_snapshot_instance(tr);
7089 }
7090
7091 static void
7092 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7093                       struct trace_array *tr, struct ftrace_probe_ops *ops,
7094                       void *data)
7095 {
7096         struct ftrace_func_mapper *mapper = data;
7097         long *count = NULL;
7098
7099         if (mapper)
7100                 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7101
7102         if (count) {
7103
7104                 if (*count <= 0)
7105                         return;
7106
7107                 (*count)--;
7108         }
7109
7110         tracing_snapshot_instance(tr);
7111 }
7112
7113 static int
7114 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7115                       struct ftrace_probe_ops *ops, void *data)
7116 {
7117         struct ftrace_func_mapper *mapper = data;
7118         long *count = NULL;
7119
7120         seq_printf(m, "%ps:", (void *)ip);
7121
7122         seq_puts(m, "snapshot");
7123
7124         if (mapper)
7125                 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7126
7127         if (count)
7128                 seq_printf(m, ":count=%ld\n", *count);
7129         else
7130                 seq_puts(m, ":unlimited\n");
7131
7132         return 0;
7133 }
7134
7135 static int
7136 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7137                      unsigned long ip, void *init_data, void **data)
7138 {
7139         struct ftrace_func_mapper *mapper = *data;
7140
7141         if (!mapper) {
7142                 mapper = allocate_ftrace_func_mapper();
7143                 if (!mapper)
7144                         return -ENOMEM;
7145                 *data = mapper;
7146         }
7147
7148         return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7149 }
7150
7151 static void
7152 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7153                      unsigned long ip, void *data)
7154 {
7155         struct ftrace_func_mapper *mapper = data;
7156
7157         if (!ip) {
7158                 if (!mapper)
7159                         return;
7160                 free_ftrace_func_mapper(mapper, NULL);
7161                 return;
7162         }
7163
7164         ftrace_func_mapper_remove_ip(mapper, ip);
7165 }
7166
7167 static struct ftrace_probe_ops snapshot_probe_ops = {
7168         .func                   = ftrace_snapshot,
7169         .print                  = ftrace_snapshot_print,
7170 };
7171
7172 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7173         .func                   = ftrace_count_snapshot,
7174         .print                  = ftrace_snapshot_print,
7175         .init                   = ftrace_snapshot_init,
7176         .free                   = ftrace_snapshot_free,
7177 };
7178
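/*
 * Handler for the "snapshot" function command in set_ftrace_filter, e.g.
 * (illustrative, "some_function" stands for any traceable function):
 *
 *   echo 'some_function:snapshot' > set_ftrace_filter
 *   echo 'some_function:snapshot:3' > set_ftrace_filter
 *
 * The optional count limits how many times a snapshot is taken; a leading
 * '!' unregisters the probe again.
 */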
7179 static int
7180 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7181                                char *glob, char *cmd, char *param, int enable)
7182 {
7183         struct ftrace_probe_ops *ops;
7184         void *count = (void *)-1;
7185         char *number;
7186         int ret;
7187
7188         if (!tr)
7189                 return -ENODEV;
7190
7191         /* hash funcs only work with set_ftrace_filter */
7192         if (!enable)
7193                 return -EINVAL;
7194
7195         ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7196
7197         if (glob[0] == '!')
7198                 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7199
7200         if (!param)
7201                 goto out_reg;
7202
7203         number = strsep(&param, ":");
7204
7205         if (!strlen(number))
7206                 goto out_reg;
7207
7208         /*
7209          * We use the callback data field (which is a pointer)
7210          * as our counter.
7211          */
7212         ret = kstrtoul(number, 0, (unsigned long *)&count);
7213         if (ret)
7214                 return ret;
7215
7216  out_reg:
7217         ret = tracing_alloc_snapshot_instance(tr);
7218         if (ret < 0)
7219                 goto out;
7220
7221         ret = register_ftrace_function_probe(glob, tr, ops, count);
7222
7223  out:
7224         return ret < 0 ? ret : 0;
7225 }
7226
7227 static struct ftrace_func_command ftrace_snapshot_cmd = {
7228         .name                   = "snapshot",
7229         .func                   = ftrace_trace_snapshot_callback,
7230 };
7231
7232 static __init int register_snapshot_cmd(void)
7233 {
7234         return register_ftrace_command(&ftrace_snapshot_cmd);
7235 }
7236 #else
7237 static inline __init int register_snapshot_cmd(void) { return 0; }
7238 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7239
7240 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7241 {
7242         if (WARN_ON(!tr->dir))
7243                 return ERR_PTR(-ENODEV);
7244
7245         /* Top directory uses NULL as the parent */
7246         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7247                 return NULL;
7248
7249         /* All sub buffers have a descriptor */
7250         return tr->dir;
7251 }
7252
7253 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7254 {
7255         struct dentry *d_tracer;
7256
7257         if (tr->percpu_dir)
7258                 return tr->percpu_dir;
7259
7260         d_tracer = tracing_get_dentry(tr);
7261         if (IS_ERR(d_tracer))
7262                 return NULL;
7263
7264         tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7265
7266         WARN_ONCE(!tr->percpu_dir,
7267                   "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7268
7269         return tr->percpu_dir;
7270 }
7271
7272 static struct dentry *
7273 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7274                       void *data, long cpu, const struct file_operations *fops)
7275 {
7276         struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7277
7278         if (ret) /* See tracing_get_cpu() */
7279                 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7280         return ret;
7281 }
7282
7283 static void
7284 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7285 {
7286         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7287         struct dentry *d_cpu;
7288         char cpu_dir[30]; /* 30 characters should be more than enough */
7289
7290         if (!d_percpu)
7291                 return;
7292
7293         snprintf(cpu_dir, 30, "cpu%ld", cpu);
7294         d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7295         if (!d_cpu) {
7296                 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7297                 return;
7298         }
7299
7300         /* per cpu trace_pipe */
7301         trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7302                                 tr, cpu, &tracing_pipe_fops);
7303
7304         /* per cpu trace */
7305         trace_create_cpu_file("trace", 0644, d_cpu,
7306                                 tr, cpu, &tracing_fops);
7307
7308         trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7309                                 tr, cpu, &tracing_buffers_fops);
7310
7311         trace_create_cpu_file("stats", 0444, d_cpu,
7312                                 tr, cpu, &tracing_stats_fops);
7313
7314         trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7315                                 tr, cpu, &tracing_entries_fops);
7316
7317 #ifdef CONFIG_TRACER_SNAPSHOT
7318         trace_create_cpu_file("snapshot", 0644, d_cpu,
7319                                 tr, cpu, &snapshot_fops);
7320
7321         trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7322                                 tr, cpu, &snapshot_raw_fops);
7323 #endif
7324 }
7325
7326 #ifdef CONFIG_FTRACE_SELFTEST
7327 /* Let selftest have access to static functions in this file */
7328 #include "trace_selftest.c"
7329 #endif
7330
7331 static ssize_t
7332 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7333                         loff_t *ppos)
7334 {
7335         struct trace_option_dentry *topt = filp->private_data;
7336         char *buf;
7337
7338         if (topt->flags->val & topt->opt->bit)
7339                 buf = "1\n";
7340         else
7341                 buf = "0\n";
7342
7343         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7344 }
7345
7346 static ssize_t
7347 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7348                          loff_t *ppos)
7349 {
7350         struct trace_option_dentry *topt = filp->private_data;
7351         unsigned long val;
7352         int ret;
7353
7354         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7355         if (ret)
7356                 return ret;
7357
7358         if (val != 0 && val != 1)
7359                 return -EINVAL;
7360
7361         if (!!(topt->flags->val & topt->opt->bit) != val) {
7362                 mutex_lock(&trace_types_lock);
7363                 ret = __set_tracer_option(topt->tr, topt->flags,
7364                                           topt->opt, !val);
7365                 mutex_unlock(&trace_types_lock);
7366                 if (ret)
7367                         return ret;
7368         }
7369
7370         *ppos += cnt;
7371
7372         return cnt;
7373 }
7374
7375
7376 static const struct file_operations trace_options_fops = {
7377         .open = tracing_open_generic,
7378         .read = trace_options_read,
7379         .write = trace_options_write,
7380         .llseek = generic_file_llseek,
7381 };
7382
7383 /*
7384  * In order to pass in both the trace_array descriptor as well as the index
7385  * to the flag that the trace option file represents, the trace_array
7386  * has a character array of trace_flags_index[], which holds the index
7387  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7388  * The address of this character array is passed to the flag option file
7389  * read/write callbacks.
7390  *
7391  * In order to extract both the index and the trace_array descriptor,
7392  * get_tr_index() uses the following algorithm.
7393  *
7394  *   idx = *ptr;
7395  *
7396  * As the pointer itself contains the address of the index (remember
7397  * index[1] == 1).
7398  *
7399  * Then to get the trace_array descriptor, by subtracting that index
7400  * from the ptr, we get to the start of the index itself.
7401  *
7402  *   ptr - idx == &index[0]
7403  *
7404  * Then a simple container_of() from that pointer gets us to the
7405  * trace_array descriptor.
7406  */
7407 static void get_tr_index(void *data, struct trace_array **ptr,
7408                          unsigned int *pindex)
7409 {
7410         *pindex = *(unsigned char *)data;
7411
7412         *ptr = container_of(data - *pindex, struct trace_array,
7413                             trace_flags_index);
7414 }
7415
7416 static ssize_t
7417 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7418                         loff_t *ppos)
7419 {
7420         void *tr_index = filp->private_data;
7421         struct trace_array *tr;
7422         unsigned int index;
7423         char *buf;
7424
7425         get_tr_index(tr_index, &tr, &index);
7426
7427         if (tr->trace_flags & (1 << index))
7428                 buf = "1\n";
7429         else
7430                 buf = "0\n";
7431
7432         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7433 }
7434
7435 static ssize_t
7436 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7437                          loff_t *ppos)
7438 {
7439         void *tr_index = filp->private_data;
7440         struct trace_array *tr;
7441         unsigned int index;
7442         unsigned long val;
7443         int ret;
7444
7445         get_tr_index(tr_index, &tr, &index);
7446
7447         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7448         if (ret)
7449                 return ret;
7450
7451         if (val != 0 && val != 1)
7452                 return -EINVAL;
7453
7454         mutex_lock(&trace_types_lock);
7455         ret = set_tracer_flag(tr, 1 << index, val);
7456         mutex_unlock(&trace_types_lock);
7457
7458         if (ret < 0)
7459                 return ret;
7460
7461         *ppos += cnt;
7462
7463         return cnt;
7464 }
7465
7466 static const struct file_operations trace_options_core_fops = {
7467         .open = tracing_open_generic,
7468         .read = trace_options_core_read,
7469         .write = trace_options_core_write,
7470         .llseek = generic_file_llseek,
7471 };
7472
7473 struct dentry *trace_create_file(const char *name,
7474                                  umode_t mode,
7475                                  struct dentry *parent,
7476                                  void *data,
7477                                  const struct file_operations *fops)
7478 {
7479         struct dentry *ret;
7480
7481         ret = tracefs_create_file(name, mode, parent, data, fops);
7482         if (!ret)
7483                 pr_warn("Could not create tracefs '%s' entry\n", name);
7484
7485         return ret;
7486 }
7487
7488
7489 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7490 {
7491         struct dentry *d_tracer;
7492
7493         if (tr->options)
7494                 return tr->options;
7495
7496         d_tracer = tracing_get_dentry(tr);
7497         if (IS_ERR(d_tracer))
7498                 return NULL;
7499
7500         tr->options = tracefs_create_dir("options", d_tracer);
7501         if (!tr->options) {
7502                 pr_warn("Could not create tracefs directory 'options'\n");
7503                 return NULL;
7504         }
7505
7506         return tr->options;
7507 }
7508
7509 static void
7510 create_trace_option_file(struct trace_array *tr,
7511                          struct trace_option_dentry *topt,
7512                          struct tracer_flags *flags,
7513                          struct tracer_opt *opt)
7514 {
7515         struct dentry *t_options;
7516
7517         t_options = trace_options_init_dentry(tr);
7518         if (!t_options)
7519                 return;
7520
7521         topt->flags = flags;
7522         topt->opt = opt;
7523         topt->tr = tr;
7524
7525         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7526                                     &trace_options_fops);
7527
7528 }
7529
7530 static void
7531 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7532 {
7533         struct trace_option_dentry *topts;
7534         struct trace_options *tr_topts;
7535         struct tracer_flags *flags;
7536         struct tracer_opt *opts;
7537         int cnt;
7538         int i;
7539
7540         if (!tracer)
7541                 return;
7542
7543         flags = tracer->flags;
7544
7545         if (!flags || !flags->opts)
7546                 return;
7547
7548         /*
7549          * If this is an instance, only create flags for tracers
7550          * the instance may have.
7551          */
7552         if (!trace_ok_for_array(tracer, tr))
7553                 return;
7554
7555         for (i = 0; i < tr->nr_topts; i++) {
7556                 /* Make sure there are no duplicate flags. */
7557                 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7558                         return;
7559         }
7560
7561         opts = flags->opts;
7562
7563         for (cnt = 0; opts[cnt].name; cnt++)
7564                 ;
7565
7566         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7567         if (!topts)
7568                 return;
7569
7570         tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7571                             GFP_KERNEL);
7572         if (!tr_topts) {
7573                 kfree(topts);
7574                 return;
7575         }
7576
7577         tr->topts = tr_topts;
7578         tr->topts[tr->nr_topts].tracer = tracer;
7579         tr->topts[tr->nr_topts].topts = topts;
7580         tr->nr_topts++;
7581
7582         for (cnt = 0; opts[cnt].name; cnt++) {
7583                 create_trace_option_file(tr, &topts[cnt], flags,
7584                                          &opts[cnt]);
7585                 WARN_ONCE(topts[cnt].entry == NULL,
7586                           "Failed to create trace option: %s",
7587                           opts[cnt].name);
7588         }
7589 }
7590
7591 static struct dentry *
7592 create_trace_option_core_file(struct trace_array *tr,
7593                               const char *option, long index)
7594 {
7595         struct dentry *t_options;
7596
7597         t_options = trace_options_init_dentry(tr);
7598         if (!t_options)
7599                 return NULL;
7600
7601         return trace_create_file(option, 0644, t_options,
7602                                  (void *)&tr->trace_flags_index[index],
7603                                  &trace_options_core_fops);
7604 }
7605
7606 static void create_trace_options_dir(struct trace_array *tr)
7607 {
7608         struct dentry *t_options;
7609         bool top_level = tr == &global_trace;
7610         int i;
7611
7612         t_options = trace_options_init_dentry(tr);
7613         if (!t_options)
7614                 return;
7615
7616         for (i = 0; trace_options[i]; i++) {
7617                 if (top_level ||
7618                     !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7619                         create_trace_option_core_file(tr, trace_options[i], i);
7620         }
7621 }
7622
7623 static ssize_t
7624 rb_simple_read(struct file *filp, char __user *ubuf,
7625                size_t cnt, loff_t *ppos)
7626 {
7627         struct trace_array *tr = filp->private_data;
7628         char buf[64];
7629         int r;
7630
7631         r = tracer_tracing_is_on(tr);
7632         r = sprintf(buf, "%d\n", r);
7633
7634         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7635 }
7636
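/*
 * Write handler for the "tracing_on" file: writing 1 (re)enables recording
 * into the ring buffer and calls the tracer's start() hook, writing 0
 * disables it and calls stop(); writing the current state is a no-op.
 */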
7637 static ssize_t
7638 rb_simple_write(struct file *filp, const char __user *ubuf,
7639                 size_t cnt, loff_t *ppos)
7640 {
7641         struct trace_array *tr = filp->private_data;
7642         struct ring_buffer *buffer = tr->trace_buffer.buffer;
7643         unsigned long val;
7644         int ret;
7645
7646         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7647         if (ret)
7648                 return ret;
7649
7650         if (buffer) {
7651                 mutex_lock(&trace_types_lock);
7652                 if (!!val == tracer_tracing_is_on(tr)) {
7653                         val = 0; /* do nothing */
7654                 } else if (val) {
7655                         tracer_tracing_on(tr);
7656                         if (tr->current_trace->start)
7657                                 tr->current_trace->start(tr);
7658                 } else {
7659                         tracer_tracing_off(tr);
7660                         if (tr->current_trace->stop)
7661                                 tr->current_trace->stop(tr);
7662                 }
7663                 mutex_unlock(&trace_types_lock);
7664         }
7665
7666         (*ppos)++;
7667
7668         return cnt;
7669 }
7670
7671 static const struct file_operations rb_simple_fops = {
7672         .open           = tracing_open_generic_tr,
7673         .read           = rb_simple_read,
7674         .write          = rb_simple_write,
7675         .release        = tracing_release_generic_tr,
7676         .llseek         = default_llseek,
7677 };
7678
7679 static ssize_t
7680 buffer_percent_read(struct file *filp, char __user *ubuf,
7681                     size_t cnt, loff_t *ppos)
7682 {
7683         struct trace_array *tr = filp->private_data;
7684         char buf[64];
7685         int r;
7686
7687         r = tr->buffer_percent;
7688         r = sprintf(buf, "%d\n", r);
7689
7690         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7691 }
7692
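/*
 * Write handler for the "buffer_percent" file: sets how full the ring buffer
 * must be before a waiting reader is woken up.  Accepts 0-100; a value of 0
 * is treated as 1.
 */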
7693 static ssize_t
7694 buffer_percent_write(struct file *filp, const char __user *ubuf,
7695                      size_t cnt, loff_t *ppos)
7696 {
7697         struct trace_array *tr = filp->private_data;
7698         unsigned long val;
7699         int ret;
7700
7701         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7702         if (ret)
7703                 return ret;
7704
7705         if (val > 100)
7706                 return -EINVAL;
7707
7708         if (!val)
7709                 val = 1;
7710
7711         tr->buffer_percent = val;
7712
7713         (*ppos)++;
7714
7715         return cnt;
7716 }
7717
7718 static const struct file_operations buffer_percent_fops = {
7719         .open           = tracing_open_generic_tr,
7720         .read           = buffer_percent_read,
7721         .write          = buffer_percent_write,
7722         .release        = tracing_release_generic_tr,
7723         .llseek         = default_llseek,
7724 };
7725
7726 struct dentry *trace_instance_dir;
7727
7728 static void
7729 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7730
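/*
 * Allocate the ring buffer and the per-CPU data for one trace_buffer,
 * honouring the overwrite flag of the owning trace array.
 */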
7731 static int
7732 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7733 {
7734         enum ring_buffer_flags rb_flags;
7735
7736         rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7737
7738         buf->tr = tr;
7739
7740         buf->buffer = ring_buffer_alloc(size, rb_flags);
7741         if (!buf->buffer)
7742                 return -ENOMEM;
7743
7744         buf->data = alloc_percpu(struct trace_array_cpu);
7745         if (!buf->data) {
7746                 ring_buffer_free(buf->buffer);
7747                 buf->buffer = NULL;
7748                 return -ENOMEM;
7749         }
7750
7751         /* Allocate the first page for all buffers */
7752         set_buffer_entries(&tr->trace_buffer,
7753                            ring_buffer_size(tr->trace_buffer.buffer, 0));
7754
7755         return 0;
7756 }
7757
7758 static int allocate_trace_buffers(struct trace_array *tr, int size)
7759 {
7760         int ret;
7761
7762         ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7763         if (ret)
7764                 return ret;
7765
7766 #ifdef CONFIG_TRACER_MAX_TRACE
7767         ret = allocate_trace_buffer(tr, &tr->max_buffer,
7768                                     allocate_snapshot ? size : 1);
7769         if (WARN_ON(ret)) {
7770                 ring_buffer_free(tr->trace_buffer.buffer);
7771                 tr->trace_buffer.buffer = NULL;
7772                 free_percpu(tr->trace_buffer.data);
7773                 tr->trace_buffer.data = NULL;
7774                 return -ENOMEM;
7775         }
7776         tr->allocated_snapshot = allocate_snapshot;
7777
7778         /*
7779          * Only the top level trace array gets its snapshot allocated
7780          * from the kernel command line.
7781          */
7782         allocate_snapshot = false;
7783 #endif
7784         return 0;
7785 }
7786
7787 static void free_trace_buffer(struct trace_buffer *buf)
7788 {
7789         if (buf->buffer) {
7790                 ring_buffer_free(buf->buffer);
7791                 buf->buffer = NULL;
7792                 free_percpu(buf->data);
7793                 buf->data = NULL;
7794         }
7795 }
7796
7797 static void free_trace_buffers(struct trace_array *tr)
7798 {
7799         if (!tr)
7800                 return;
7801
7802         free_trace_buffer(&tr->trace_buffer);
7803
7804 #ifdef CONFIG_TRACER_MAX_TRACE
7805         free_trace_buffer(&tr->max_buffer);
7806 #endif
7807 }
7808
7809 static void init_trace_flags_index(struct trace_array *tr)
7810 {
7811         int i;
7812
7813         /* Used by the trace options files */
7814         for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7815                 tr->trace_flags_index[i] = i;
7816 }
7817
7818 static void __update_tracer_options(struct trace_array *tr)
7819 {
7820         struct tracer *t;
7821
7822         for (t = trace_types; t; t = t->next)
7823                 add_tracer_options(tr, t);
7824 }
7825
7826 static void update_tracer_options(struct trace_array *tr)
7827 {
7828         mutex_lock(&trace_types_lock);
7829         __update_tracer_options(tr);
7830         mutex_unlock(&trace_types_lock);
7831 }
7832
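/*
 * Called when a new directory is created under the tracefs "instances"
 * directory (e.g. "mkdir instances/foo", illustrative): allocate and wire up
 * a fresh trace_array with its own buffers, events and option files.
 */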
7833 static int instance_mkdir(const char *name)
7834 {
7835         struct trace_array *tr;
7836         int ret;
7837
7838         mutex_lock(&event_mutex);
7839         mutex_lock(&trace_types_lock);
7840
7841         ret = -EEXIST;
7842         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7843                 if (tr->name && strcmp(tr->name, name) == 0)
7844                         goto out_unlock;
7845         }
7846
7847         ret = -ENOMEM;
7848         tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7849         if (!tr)
7850                 goto out_unlock;
7851
7852         tr->name = kstrdup(name, GFP_KERNEL);
7853         if (!tr->name)
7854                 goto out_free_tr;
7855
7856         if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7857                 goto out_free_tr;
7858
7859         tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
7860
7861         cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7862
7863         raw_spin_lock_init(&tr->start_lock);
7864
7865         tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7866
7867         tr->current_trace = &nop_trace;
7868
7869         INIT_LIST_HEAD(&tr->systems);
7870         INIT_LIST_HEAD(&tr->events);
7871         INIT_LIST_HEAD(&tr->hist_vars);
7872
7873         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
7874                 goto out_free_tr;
7875
7876         tr->dir = tracefs_create_dir(name, trace_instance_dir);
7877         if (!tr->dir)
7878                 goto out_free_tr;
7879
7880         ret = event_trace_add_tracer(tr->dir, tr);
7881         if (ret) {
7882                 tracefs_remove_recursive(tr->dir);
7883                 goto out_free_tr;
7884         }
7885
7886         ftrace_init_trace_array(tr);
7887
7888         init_tracer_tracefs(tr, tr->dir);
7889         init_trace_flags_index(tr);
7890         __update_tracer_options(tr);
7891
7892         list_add(&tr->list, &ftrace_trace_arrays);
7893
7894         mutex_unlock(&trace_types_lock);
7895         mutex_unlock(&event_mutex);
7896
7897         return 0;
7898
7899  out_free_tr:
7900         free_trace_buffers(tr);
7901         free_cpumask_var(tr->tracing_cpumask);
7902         kfree(tr->name);
7903         kfree(tr);
7904
7905  out_unlock:
7906         mutex_unlock(&trace_types_lock);
7907         mutex_unlock(&event_mutex);
7908
7909         return ret;
7910
7911 }
7912
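/*
 * Called when an instance directory is removed: tear the trace_array down
 * and free its buffers, unless it is still referenced (in which case the
 * rmdir fails with -EBUSY).
 */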
7913 static int instance_rmdir(const char *name)
7914 {
7915         struct trace_array *tr;
7916         int found = 0;
7917         int ret;
7918         int i;
7919
7920         mutex_lock(&event_mutex);
7921         mutex_lock(&trace_types_lock);
7922
7923         ret = -ENODEV;
7924         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7925                 if (tr->name && strcmp(tr->name, name) == 0) {
7926                         found = 1;
7927                         break;
7928                 }
7929         }
7930         if (!found)
7931                 goto out_unlock;
7932
7933         ret = -EBUSY;
7934         if (tr->ref || (tr->current_trace && tr->current_trace->ref))
7935                 goto out_unlock;
7936
7937         list_del(&tr->list);
7938
7939         /* Disable all the flags that were enabled coming in */
7940         for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7941                 if ((1 << i) & ZEROED_TRACE_FLAGS)
7942                         set_tracer_flag(tr, 1 << i, 0);
7943         }
7944
7945         tracing_set_nop(tr);
7946         clear_ftrace_function_probes(tr);
7947         event_trace_del_tracer(tr);
7948         ftrace_clear_pids(tr);
7949         ftrace_destroy_function_files(tr);
7950         tracefs_remove_recursive(tr->dir);
7951         free_trace_buffers(tr);
7952
7953         for (i = 0; i < tr->nr_topts; i++) {
7954                 kfree(tr->topts[i].topts);
7955         }
7956         kfree(tr->topts);
7957
7958         free_cpumask_var(tr->tracing_cpumask);
7959         kfree(tr->name);
7960         kfree(tr);
7961
7962         ret = 0;
7963
7964  out_unlock:
7965         mutex_unlock(&trace_types_lock);
7966         mutex_unlock(&event_mutex);
7967
7968         return ret;
7969 }
7970
7971 static __init void create_trace_instances(struct dentry *d_tracer)
7972 {
7973         trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7974                                                          instance_mkdir,
7975                                                          instance_rmdir);
7976         if (WARN_ON(!trace_instance_dir))
7977                 return;
7978 }
7979
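/*
 * Create the standard set of tracefs control files ("trace", "trace_pipe",
 * "tracing_on", "buffer_size_kb", ...) for a trace array below d_tracer,
 * including the per-CPU directories.
 */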
7980 static void
7981 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7982 {
7983         struct trace_event_file *file;
7984         int cpu;
7985
7986         trace_create_file("available_tracers", 0444, d_tracer,
7987                         tr, &show_traces_fops);
7988
7989         trace_create_file("current_tracer", 0644, d_tracer,
7990                         tr, &set_tracer_fops);
7991
7992         trace_create_file("tracing_cpumask", 0644, d_tracer,
7993                           tr, &tracing_cpumask_fops);
7994
7995         trace_create_file("trace_options", 0644, d_tracer,
7996                           tr, &tracing_iter_fops);
7997
7998         trace_create_file("trace", 0644, d_tracer,
7999                           tr, &tracing_fops);
8000
8001         trace_create_file("trace_pipe", 0444, d_tracer,
8002                           tr, &tracing_pipe_fops);
8003
8004         trace_create_file("buffer_size_kb", 0644, d_tracer,
8005                           tr, &tracing_entries_fops);
8006
8007         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8008                           tr, &tracing_total_entries_fops);
8009
8010         trace_create_file("free_buffer", 0200, d_tracer,
8011                           tr, &tracing_free_buffer_fops);
8012
8013         trace_create_file("trace_marker", 0220, d_tracer,
8014                           tr, &tracing_mark_fops);
8015
8016         file = __find_event_file(tr, "ftrace", "print");
8017         if (file && file->dir)
8018                 trace_create_file("trigger", 0644, file->dir, file,
8019                                   &event_trigger_fops);
8020         tr->trace_marker_file = file;
8021
8022         trace_create_file("trace_marker_raw", 0220, d_tracer,
8023                           tr, &tracing_mark_raw_fops);
8024
8025         trace_create_file("trace_clock", 0644, d_tracer, tr,
8026                           &trace_clock_fops);
8027
8028         trace_create_file("tracing_on", 0644, d_tracer,
8029                           tr, &rb_simple_fops);
8030
8031         trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8032                           &trace_time_stamp_mode_fops);
8033
8034         tr->buffer_percent = 50;
8035
8036         trace_create_file("buffer_percent", 0444, d_tracer,
8037                         tr, &buffer_percent_fops);
8038
8039         create_trace_options_dir(tr);
8040
8041 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8042         trace_create_file("tracing_max_latency", 0644, d_tracer,
8043                         &tr->max_latency, &tracing_max_lat_fops);
8044 #endif
8045
8046         if (ftrace_create_function_files(tr, d_tracer))
8047                 WARN(1, "Could not allocate function filter files");
8048
8049 #ifdef CONFIG_TRACER_SNAPSHOT
8050         trace_create_file("snapshot", 0644, d_tracer,
8051                           tr, &snapshot_fops);
8052 #endif
8053
8054         for_each_tracing_cpu(cpu)
8055                 tracing_init_tracefs_percpu(tr, cpu);
8056
8057         ftrace_init_tracefs(tr, d_tracer);
8058 }
8059
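     /*
      * Automount callback for the "tracing" directory created in debugfs.
      * It mounts a tracefs submount on top of debugfs/tracing when that
      * directory is traversed.
      */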
8060 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
8061 {
8062         struct vfsmount *mnt;
8063         struct file_system_type *type;
8064
8065         /*
8066          * To maintain backward compatibility for tools that mount
8067          * debugfs to get to the tracing facility, tracefs is automatically
8068          * mounted to the debugfs/tracing directory.
8069          */
8070         type = get_fs_type("tracefs");
8071         if (!type)
8072                 return NULL;
8073         mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8074         put_filesystem(type);
8075         if (IS_ERR(mnt))
8076                 return NULL;
8077         mntget(mnt);
8078
8079         return mnt;
8080 }
8081
8082 /**
8083  * tracing_init_dentry - initialize top level trace array
8084  *
8085  * This is called when creating files or directories in the tracing
8086  * directory. It is called via fs_initcall() by any of the boot up code
8087  * and expects to return the dentry of the top level tracing directory.
8088  */
8089 struct dentry *tracing_init_dentry(void)
8090 {
8091         struct trace_array *tr = &global_trace;
8092
8093         /* The top level trace array uses NULL as parent */
8094         if (tr->dir)
8095                 return NULL;
8096
8097         if (WARN_ON(!tracefs_initialized()) ||
8098                 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8099                  WARN_ON(!debugfs_initialized())))
8100                 return ERR_PTR(-ENODEV);
8101
8102         /*
8103          * As there may still be users that expect the tracing
8104          * files to exist in debugfs/tracing, we must automount
8105          * the tracefs file system there, so older tools still
8106          * work with the newer kernel.
8107          */
8108         tr->dir = debugfs_create_automount("tracing", NULL,
8109                                            trace_automount, NULL);
8110         if (!tr->dir) {
8111                 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8112                 return ERR_PTR(-ENOMEM);
8113         }
8114
8115         return NULL;
8116 }
8117
8118 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8119 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8120
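     /*
      * Register the eval (enum/sizeof) maps that are built into the
      * kernel image. The section boundaries above are provided by the
      * linker script.
      */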
8121 static void __init trace_eval_init(void)
8122 {
8123         int len;
8124
8125         len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8126         trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8127 }
8128
8129 #ifdef CONFIG_MODULES
8130 static void trace_module_add_evals(struct module *mod)
8131 {
8132         if (!mod->num_trace_evals)
8133                 return;
8134
8135         /*
8136          * Modules with bad taint do not have events created, do
8137          * not bother with enums either.
8138          */
8139         if (trace_module_has_bad_taint(mod))
8140                 return;
8141
8142         trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8143 }
8144
8145 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
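     /*
      * Remove the eval maps that were added for @mod. The maps live in a
      * singly linked chain of blocks; each block starts with a "head"
      * item recording the owning module and ends with a "tail" item that
      * points to the next block.
      */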
8146 static void trace_module_remove_evals(struct module *mod)
8147 {
8148         union trace_eval_map_item *map;
8149         union trace_eval_map_item **last = &trace_eval_maps;
8150
8151         if (!mod->num_trace_evals)
8152                 return;
8153
8154         mutex_lock(&trace_eval_mutex);
8155
8156         map = trace_eval_maps;
8157
8158         while (map) {
8159                 if (map->head.mod == mod)
8160                         break;
8161                 map = trace_eval_jmp_to_tail(map);
8162                 last = &map->tail.next;
8163                 map = map->tail.next;
8164         }
8165         if (!map)
8166                 goto out;
8167
8168         *last = trace_eval_jmp_to_tail(map)->tail.next;
8169         kfree(map);
8170  out:
8171         mutex_unlock(&trace_eval_mutex);
8172 }
8173 #else
8174 static inline void trace_module_remove_evals(struct module *mod) { }
8175 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8176
8177 static int trace_module_notify(struct notifier_block *self,
8178                                unsigned long val, void *data)
8179 {
8180         struct module *mod = data;
8181
8182         switch (val) {
8183         case MODULE_STATE_COMING:
8184                 trace_module_add_evals(mod);
8185                 break;
8186         case MODULE_STATE_GOING:
8187                 trace_module_remove_evals(mod);
8188                 break;
8189         }
8190
8191         return 0;
8192 }
8193
8194 static struct notifier_block trace_module_nb = {
8195         .notifier_call = trace_module_notify,
8196         .priority = 0,
8197 };
8198 #endif /* CONFIG_MODULES */
8199
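     /*
      * fs_initcall hook that creates the top level tracefs files (README,
      * saved_cmdlines, tracing_thresh, ...) and the "instances" directory
      * for the global trace array.
      */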
8200 static __init int tracer_init_tracefs(void)
8201 {
8202         struct dentry *d_tracer;
8203
8204         trace_access_lock_init();
8205
8206         d_tracer = tracing_init_dentry();
8207         if (IS_ERR(d_tracer))
8208                 return 0;
8209
8210         event_trace_init();
8211
8212         init_tracer_tracefs(&global_trace, d_tracer);
8213         ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8214
8215         trace_create_file("tracing_thresh", 0644, d_tracer,
8216                         &global_trace, &tracing_thresh_fops);
8217
8218         trace_create_file("README", 0444, d_tracer,
8219                         NULL, &tracing_readme_fops);
8220
8221         trace_create_file("saved_cmdlines", 0444, d_tracer,
8222                         NULL, &tracing_saved_cmdlines_fops);
8223
8224         trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8225                           NULL, &tracing_saved_cmdlines_size_fops);
8226
8227         trace_create_file("saved_tgids", 0444, d_tracer,
8228                         NULL, &tracing_saved_tgids_fops);
8229
8230         trace_eval_init();
8231
8232         trace_create_eval_file(d_tracer);
8233
8234 #ifdef CONFIG_MODULES
8235         register_module_notifier(&trace_module_nb);
8236 #endif
8237
8238 #ifdef CONFIG_DYNAMIC_FTRACE
8239         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8240                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8241 #endif
8242
8243         create_trace_instances(d_tracer);
8244
8245         update_tracer_options(&global_trace);
8246
8247         return 0;
8248 }
8249
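     /*
      * On a kernel panic, dump the ftrace ring buffers to the console if
      * ftrace_dump_on_oops is set.
      */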
8250 static int trace_panic_handler(struct notifier_block *this,
8251                                unsigned long event, void *unused)
8252 {
8253         if (ftrace_dump_on_oops)
8254                 ftrace_dump(ftrace_dump_on_oops);
8255         return NOTIFY_OK;
8256 }
8257
8258 static struct notifier_block trace_panic_notifier = {
8259         .notifier_call  = trace_panic_handler,
8260         .next           = NULL,
8261         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
8262 };
8263
8264 static int trace_die_handler(struct notifier_block *self,
8265                              unsigned long val,
8266                              void *data)
8267 {
8268         switch (val) {
8269         case DIE_OOPS:
8270                 if (ftrace_dump_on_oops)
8271                         ftrace_dump(ftrace_dump_on_oops);
8272                 break;
8273         default:
8274                 break;
8275         }
8276         return NOTIFY_OK;
8277 }
8278
8279 static struct notifier_block trace_die_notifier = {
8280         .notifier_call = trace_die_handler,
8281         .priority = 200
8282 };
8283
8284 /*
8285  * printk is set to a max of 1024, and we really don't need it that big.
8286  * Nothing should be printing 1000 characters anyway.
8287  */
8288 #define TRACE_MAX_PRINT         1000
8289
8290 /*
8291  * Define here KERN_TRACE so that we have one place to modify
8292  * it if we decide to change what log level the ftrace dump
8293  * should be at.
8294  */
8295 #define KERN_TRACE              KERN_EMERG
8296
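     /*
      * Print the contents of a trace_seq to the console at KERN_TRACE
      * level and reset the sequence for reuse. Used by ftrace_dump()
      * below.
      */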
8297 void
8298 trace_printk_seq(struct trace_seq *s)
8299 {
8300         /* Probably should print a warning here. */
8301         if (s->seq.len >= TRACE_MAX_PRINT)
8302                 s->seq.len = TRACE_MAX_PRINT;
8303
8304         /*
8305          * More paranoid code. Although the buffer size is set to
8306          * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8307          * an extra layer of protection.
8308          */
8309         if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8310                 s->seq.len = s->seq.size - 1;
8311
8312         /* should already be zero terminated, but we are paranoid. */
8313         s->buffer[s->seq.len] = 0;
8314
8315         printk(KERN_TRACE "%s", s->buffer);
8316
8317         trace_seq_init(s);
8318 }
8319
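     /*
      * Set up an iterator over the global trace buffer covering all CPUs,
      * so that ftrace_dump() can walk the buffers without going through a
      * trace file.
      */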
8320 void trace_init_global_iter(struct trace_iterator *iter)
8321 {
8322         iter->tr = &global_trace;
8323         iter->trace = iter->tr->current_trace;
8324         iter->cpu_file = RING_BUFFER_ALL_CPUS;
8325         iter->trace_buffer = &global_trace.trace_buffer;
8326
8327         if (iter->trace && iter->trace->open)
8328                 iter->trace->open(iter);
8329
8330         /* Annotate start of buffers if we had overruns */
8331         if (ring_buffer_overruns(iter->trace_buffer->buffer))
8332                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8333
8334         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8335         if (trace_clocks[iter->tr->clock_id].in_ns)
8336                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8337 }
8338
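     /*
      * ftrace_dump - print the contents of the ftrace ring buffers to the console
      * @oops_dump_mode: DUMP_ALL dumps the buffers of every CPU, DUMP_ORIG only
      *                  the buffer of the CPU that triggered the dump.
      *
      * Tracing is turned off on entry; it can be re-enabled afterwards
      * with "echo 1 > tracing_on" if needed.
      */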
8339 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8340 {
8341         /* use static because iter can be a bit big for the stack */
8342         static struct trace_iterator iter;
8343         static atomic_t dump_running;
8344         struct trace_array *tr = &global_trace;
8345         unsigned int old_userobj;
8346         unsigned long flags;
8347         int cnt = 0, cpu;
8348
8349         /* Only allow one dump user at a time. */
8350         if (atomic_inc_return(&dump_running) != 1) {
8351                 atomic_dec(&dump_running);
8352                 return;
8353         }
8354
8355         /*
8356          * Always turn off tracing when we dump.
8357          * We don't need to show trace output of what happens
8358          * between multiple crashes.
8359          *
8360          * If the user does a sysrq-z, then they can re-enable
8361          * tracing with echo 1 > tracing_on.
8362          */
8363         tracing_off();
8364
8365         local_irq_save(flags);
8366         printk_nmi_direct_enter();
8367
8368         /* Simulate the iterator */
8369         trace_init_global_iter(&iter);
8370
8371         for_each_tracing_cpu(cpu) {
8372                 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8373         }
8374
8375         old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8376
8377         /* don't look at user memory in panic mode */
8378         tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8379
8380         switch (oops_dump_mode) {
8381         case DUMP_ALL:
8382                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8383                 break;
8384         case DUMP_ORIG:
8385                 iter.cpu_file = raw_smp_processor_id();
8386                 break;
8387         case DUMP_NONE:
8388                 goto out_enable;
8389         default:
8390                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8391                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8392         }
8393
8394         printk(KERN_TRACE "Dumping ftrace buffer:\n");
8395
8396         /* Did function tracer already get disabled? */
8397         if (ftrace_is_dead()) {
8398                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8399                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
8400         }
8401
8402         /*
8403          * We need to stop all tracing on all CPUs to read
8404          * the next buffer. This is a bit expensive, but it is
8405          * not done often. We fill all that we can read,
8406          * and then release the locks again.
8407          */
8408
8409         while (!trace_empty(&iter)) {
8410
8411                 if (!cnt)
8412                         printk(KERN_TRACE "---------------------------------\n");
8413
8414                 cnt++;
8415
8416                 /* reset all but tr, trace, and overruns */
8417                 memset(&iter.seq, 0,
8418                        sizeof(struct trace_iterator) -
8419                        offsetof(struct trace_iterator, seq));
8420                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8421                 iter.pos = -1;
8422
8423                 if (trace_find_next_entry_inc(&iter) != NULL) {
8424                         int ret;
8425
8426                         ret = print_trace_line(&iter);
8427                         if (ret != TRACE_TYPE_NO_CONSUME)
8428                                 trace_consume(&iter);
8429                 }
8430                 touch_nmi_watchdog();
8431
8432                 trace_printk_seq(&iter.seq);
8433         }
8434
8435         if (!cnt)
8436                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
8437         else
8438                 printk(KERN_TRACE "---------------------------------\n");
8439
8440  out_enable:
8441         tr->trace_flags |= old_userobj;
8442
8443         for_each_tracing_cpu(cpu) {
8444                 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8445         }
8446         atomic_dec(&dump_running);
8447         printk_nmi_direct_exit();
8448         local_irq_restore(flags);
8449 }
8450 EXPORT_SYMBOL_GPL(ftrace_dump);
8451
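     /*
      * Split @buf into an argv array and hand it to @createfn. Used by
      * the dynamic event interfaces (e.g. kprobe_events) to run a single
      * command line.
      */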
8452 int trace_run_command(const char *buf, int (*createfn)(int, char **))
8453 {
8454         char **argv;
8455         int argc, ret;
8456
8457         argc = 0;
8458         ret = 0;
8459         argv = argv_split(GFP_KERNEL, buf, &argc);
8460         if (!argv)
8461                 return -ENOMEM;
8462
8463         if (argc)
8464                 ret = createfn(argc, argv);
8465
8466         argv_free(argv);
8467
8468         return ret;
8469 }
8470
8471 #define WRITE_BUFSIZE  4096
8472
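     /*
      * Parse a buffer written from user space one line at a time: copy up
      * to WRITE_BUFSIZE - 1 bytes per chunk, split on newlines, strip '#'
      * comments and feed each line to trace_run_command() with @createfn.
      */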
8473 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8474                                 size_t count, loff_t *ppos,
8475                                 int (*createfn)(int, char **))
8476 {
8477         char *kbuf, *buf, *tmp;
8478         int ret = 0;
8479         size_t done = 0;
8480         size_t size;
8481
8482         kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8483         if (!kbuf)
8484                 return -ENOMEM;
8485
8486         while (done < count) {
8487                 size = count - done;
8488
8489                 if (size >= WRITE_BUFSIZE)
8490                         size = WRITE_BUFSIZE - 1;
8491
8492                 if (copy_from_user(kbuf, buffer + done, size)) {
8493                         ret = -EFAULT;
8494                         goto out;
8495                 }
8496                 kbuf[size] = '\0';
8497                 buf = kbuf;
8498                 do {
8499                         tmp = strchr(buf, '\n');
8500                         if (tmp) {
8501                                 *tmp = '\0';
8502                                 size = tmp - buf + 1;
8503                         } else {
8504                                 size = strlen(buf);
8505                                 if (done + size < count) {
8506                                         if (buf != kbuf)
8507                                                 break;
8508                                         /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8509                                         pr_warn("Line length is too long: Should be less than %d\n",
8510                                                 WRITE_BUFSIZE - 2);
8511                                         ret = -EINVAL;
8512                                         goto out;
8513                                 }
8514                         }
8515                         done += size;
8516
8517                         /* Remove comments */
8518                         tmp = strchr(buf, '#');
8519
8520                         if (tmp)
8521                                 *tmp = '\0';
8522
8523                         ret = trace_run_command(buf, createfn);
8524                         if (ret)
8525                                 goto out;
8526                         buf += size;
8527
8528                 } while (done < count);
8529         }
8530         ret = done;
8531
8532 out:
8533         kfree(kbuf);
8534
8535         return ret;
8536 }
8537
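     /*
      * Allocate the global trace buffers and cpumasks, register the nop
      * tracer as the bootstrap current tracer and hook up the panic and
      * die notifiers. Called from early_trace_init() during early boot.
      */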
8538 __init static int tracer_alloc_buffers(void)
8539 {
8540         int ring_buf_size;
8541         int ret = -ENOMEM;
8542
8543         /*
8544          * Make sure we don't accidentally add more trace options
8545          * than we have bits for.
8546          */
8547         BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
8548
8549         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8550                 goto out;
8551
8552         if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
8553                 goto out_free_buffer_mask;
8554
8555         /* Only allocate trace_printk buffers if a trace_printk exists */
8556         if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
8557                 /* Must be called before global_trace.buffer is allocated */
8558                 trace_printk_init_buffers();
8559
8560         /* To save memory, keep the ring buffer size to its minimum */
8561         if (ring_buffer_expanded)
8562                 ring_buf_size = trace_buf_size;
8563         else
8564                 ring_buf_size = 1;
8565
8566         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
8567         cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
8568
8569         raw_spin_lock_init(&global_trace.start_lock);
8570
8571         /*
8572          * The prepare callback allocates some memory for the ring buffer. We
8573          * don't free the buffer if the CPU goes down. If we were to free
8574          * the buffer, then the user would lose any trace that was in the
8575          * buffer. The memory will be removed once the "instance" is removed.
8576          */
8577         ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8578                                       "trace/RB:prepare", trace_rb_cpu_prepare,
8579                                       NULL);
8580         if (ret < 0)
8581                 goto out_free_cpumask;
8582         /* Used for event triggers */
8583         ret = -ENOMEM;
8584         temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8585         if (!temp_buffer)
8586                 goto out_rm_hp_state;
8587
8588         if (trace_create_savedcmd() < 0)
8589                 goto out_free_temp_buffer;
8590
8591         /* TODO: make the number of buffers hot pluggable with CPUS */
8592         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
8593                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8594                 WARN_ON(1);
8595                 goto out_free_savedcmd;
8596         }
8597
8598         if (global_trace.buffer_disabled)
8599                 tracing_off();
8600
8601         if (trace_boot_clock) {
8602                 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8603                 if (ret < 0)
8604                         pr_warn("Trace clock %s not defined, going back to default\n",
8605                                 trace_boot_clock);
8606         }
8607
8608         /*
8609          * register_tracer() might reference current_trace, so it
8610          * needs to be set before we register anything. This is
8611          * just a bootstrap of current_trace anyway.
8612          */
8613         global_trace.current_trace = &nop_trace;
8614
8615         global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8616
8617         ftrace_init_global_array_ops(&global_trace);
8618
8619         init_trace_flags_index(&global_trace);
8620
8621         register_tracer(&nop_trace);
8622
8623         /* Function tracing may start here (via kernel command line) */
8624         init_function_trace();
8625
8626         /* All seems OK, enable tracing */
8627         tracing_disabled = 0;
8628
8629         atomic_notifier_chain_register(&panic_notifier_list,
8630                                        &trace_panic_notifier);
8631
8632         register_die_notifier(&trace_die_notifier);
8633
8634         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8635
8636         INIT_LIST_HEAD(&global_trace.systems);
8637         INIT_LIST_HEAD(&global_trace.events);
8638         INIT_LIST_HEAD(&global_trace.hist_vars);
8639         list_add(&global_trace.list, &ftrace_trace_arrays);
8640
8641         apply_trace_boot_options();
8642
8643         register_snapshot_cmd();
8644
8645         return 0;
8646
8647 out_free_savedcmd:
8648         free_saved_cmdlines_buffer(savedcmd);
8649 out_free_temp_buffer:
8650         ring_buffer_free(temp_buffer);
8651 out_rm_hp_state:
8652         cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
8653 out_free_cpumask:
8654         free_cpumask_var(global_trace.tracing_cpumask);
8655 out_free_buffer_mask:
8656         free_cpumask_var(tracing_buffer_mask);
8657 out:
8658         return ret;
8659 }
8660
8661 void __init early_trace_init(void)
8662 {
8663         if (tracepoint_printk) {
8664                 tracepoint_print_iter =
8665                         kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8666                 if (WARN_ON(!tracepoint_print_iter))
8667                         tracepoint_printk = 0;
8668                 else
8669                         static_key_enable(&tracepoint_printk_key.key);
8670         }
8671         tracer_alloc_buffers();
8672 }
8673
8674 void __init trace_init(void)
8675 {
8676         trace_event_init();
8677 }
8678
8679 __init static int clear_boot_tracer(void)
8680 {
8681         /*
8682          * The default bootup tracer name points into an init section
8683          * that is freed after boot. This function runs as a late initcall:
8684          * if the boot tracer was never registered, clear the pointer to
8685          * prevent a later registration from accessing the buffer that is
8686          * about to be freed.
8687          */
8688         if (!default_bootup_tracer)
8689                 return 0;
8690
8691         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8692                default_bootup_tracer);
8693         default_bootup_tracer = NULL;
8694
8695         return 0;
8696 }
8697
8698 fs_initcall(tracer_init_tracefs);
8699 late_initcall_sync(clear_boot_tracer);
8700
8701 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
8702 __init static int tracing_set_default_clock(void)
8703 {
8704         /* sched_clock_stable() is determined in late_initcall */
8705         if (!trace_boot_clock && !sched_clock_stable()) {
8706                 printk(KERN_WARNING
8707                        "Unstable clock detected, switching default tracing clock to \"global\"\n"
8708                        "If you want to keep using the local clock, then add:\n"
8709                        "  \"trace_clock=local\"\n"
8710                        "on the kernel command line\n");
8711                 tracing_set_clock(&global_trace, "global");
8712         }
8713
8714         return 0;
8715 }
8716 late_initcall_sync(tracing_set_default_clock);
8717 #endif