// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;
/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer, such as trace_printk(), could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;
/*
 * If boot-time tracing (including tracers/events set up via the kernel
 * cmdline) is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#endif
/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);
/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;
cpumask_var_t __read_mostly	tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);
/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
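
/*
 * Illustrative layout (a sketch, not compiled): for a module that saved
 * three eval maps, the saved array described above looks like:
 *
 *	[ head: length = 3, mod                     ]
 *	[ map:  eval_string / eval_value            ]  x 3
 *	[ tail: next -> next saved array (or NULL)  ]
 */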
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);
#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static bool snapshot_at_boot;
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str || !strcmp("1", str)) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
static int __init boot_snapshot(char *str)
{
	snapshot_at_boot = true;
	boot_alloc_snapshot(str);
	return 1;
}
__setup("ftrace_boot_snapshot", boot_snapshot);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);
static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);
static int __init set_tracepoint_printk(char *str)
{
	/* Ignore the "tp_printk_stop_on_boot" param */
	if (*str == '_')
		return 0;

	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);
static int __init set_tracepoint_printk_stop(char *str)
{
	tracepoint_printk_stop_on_boot = true;
	return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
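
/*
 * For example (illustrative only): with the rounding above,
 * ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
 */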
static void
trace_process_export(struct trace_export *export,
	       struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}
static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}
static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}
static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}
static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}
static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}
static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}
static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}
int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);
int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
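
/*
 * Illustrative sketch (not part of this file): a module could mirror
 * trace data to its own sink with a trace_export. The names my_write
 * and my_export are hypothetical; only the fields shown are required.
 *
 *	static void my_write(struct trace_export *export, const void *entry,
 *			     unsigned int size)
 *	{
 *		(copy @size bytes at @entry to an out-of-band channel)
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *		.flags	= TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */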
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
	 TRACE_ITER_HASH_PTR)
/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};
LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}
static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}
/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);
int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	return trace_pid_list_is_set(filtered_pids, search_pid);
}
/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* "self" is set for forks, and NULL for exits */
	if (self)
		trace_pid_list_set(pid_list, task->pid);
	else
		trace_pid_list_clear(pid_list, task->pid);
}
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	long pid = (unsigned long)v;
	unsigned int next;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	if (trace_pid_list_next(pid_list, pid, &next) < 0)
		return NULL;

	pid = next;

	/* Return pid + 1 to allow zero to be represented */
	return (void *)(pid + 1);
}
/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	unsigned int first;
	loff_t l = 0;

	if (trace_pid_list_first(pid_list, &first) < 0)
		return NULL;

	pid = first;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}
/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
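
/*
 * Illustrative sketch (hypothetical names, not part of this file): the
 * three helpers above slot into a seq_operations for a pid-list file;
 * real users wrap start/next so they can look up the right pid_list
 * under the proper locking first:
 *
 *	static const struct seq_operations my_pid_sops = {
 *		.start	= my_pid_start,	(calls trace_pid_start())
 *		.next	= my_pid_next,	(calls trace_pid_next())
 *		.stop	= my_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 */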
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = trace_pid_list_alloc();
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		ret = trace_pid_list_first(filtered_pids, &pid);
		while (!ret) {
			trace_pid_list_set(pid_list, pid);
			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
			nr_pids++;
		}
	}

	ret = 0;
	while (cnt > 0) {
		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;

		pid = (pid_t)val;

		if (trace_pid_list_set(pid_list, pid) < 0) {
			ret = -1;
			break;
		}
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_pid_list_free(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_pid_list_free(pid_list);
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}
u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}
/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;
/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish between read-only and read-consume
 * access. Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}
static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}
static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}
#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
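
/*
 * Typical reader pattern (an illustrative sketch): hold the access lock
 * for the cpu being consumed, or RING_BUFFER_ALL_CPUS to exclude all
 * other readers at once:
 *
 *	trace_access_lock(cpu);
 *	(peek at or consume events of @cpu's ring buffer)
 *	trace_access_unlock(cpu);
 */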
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned int trace_ctx,
					int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long trace_ctx,
				      int skip, struct pt_regs *regs)
{
}

#endif
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned int trace_ctx)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, trace_ctx);
}
static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, trace_ctx);

	return event;
}
void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}
/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		/* ring_buffer_unlock_commit() enables preemption */
		preempt_enable_notrace();
	} else
		ring_buffer_unlock_commit(buffer, event);
}
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned int trace_ctx;
	int alloc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    trace_ctx);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned int trace_ctx;
	int size = sizeof(struct bputs_entry);
	int ret = 0;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    trace_ctx);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}
void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}
static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
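
/*
 * Illustrative usage (a hypothetical caller, not part of this file):
 * allocate the snapshot buffer once from a context that may sleep,
 * then take snapshots from the (possibly atomic) trigger point:
 *
 *	tracing_alloc_snapshot();	(sleepable setup path)
 *	...
 *	tracing_snapshot();		(trigger point, swaps the buffers)
 */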
/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
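
/*
 * Illustrative sketch (hypothetical names, not part of this file): the
 * caller supplies an update callback that decides, per invocation,
 * whether the snapshot should actually be taken:
 *
 *	static struct my_state state;
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *s = cond_data;
 *
 *		return s->hit_threshold;	(true means take it)
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &state, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &state);
 */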
/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}
/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
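
/*
 * Typical debugging pattern (an illustrative sketch): stop recording
 * right where a problem is detected, so the buffer keeps the events
 * leading up to it; the trace can then be read from tracefs and
 * recording re-enabled with tracing_on():
 *
 *	if (suspicious_condition)
 *		tracing_off();
 */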
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning) {
		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
				       "Disabling tracing due to warning\n");
		tracing_off();
	}
}
/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		return ring_buffer_record_is_on(tr->array_buffer.buffer);
	return !tr->buffer_disabled;
}
/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/*
	 * nr_entries can not be zero and the startup
	 * tests require some buffer space. Therefore
	 * ensure we have at least 4096 bytes of buffer.
	 */
	trace_buf_size = max(4096UL, buf_size);
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};
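
/*
 * For example (illustrative only): an entry such as
 * C(PRINT_PARENT, "print-parent") in TRACE_FLAGS contributes the string
 * "print-parent" here, at the index matching the
 * TRACE_ITER_PRINT_PARENT bit position.
 */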
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};
bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}
/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
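
/*
 * Behavior example (illustrative only): if userspace writes "foo bar",
 * the first call loads "foo" into parser->buffer and returns; the
 * caller consumes it and calls again for "bar". If a write ends in the
 * middle of a token, parser->cont is set and the next call appends to
 * the same token instead of starting a new one.
 */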
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}
unsigned long __read_mostly	tracing_thresh;
static const struct file_operations tracing_max_lat_fops;
#ifdef LATENCY_FS_NOTIFY

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{
	struct trace_array *tr = container_of(work, struct trace_array,
					      fsnotify_work);
	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
	struct trace_array *tr = container_of(iwork, struct trace_array,
					      fsnotify_irqwork);
	queue_work(fsnotify_wq, &tr->fsnotify_work);
}
static void trace_create_maxlat_file(struct trace_array *tr,
				     struct dentry *d_tracer)
{
	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
	tr->d_max_latency = trace_create_file("tracing_max_latency",
					      TRACE_MODE_WRITE,
					      d_tracer, &tr->max_latency,
					      &tracing_max_lat_fops);
}
__init static int latency_fsnotify_init(void)
{
	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
				      WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!fsnotify_wq) {
		pr_err("Unable to allocate tr_max_lat_wq\n");
		return -ENOMEM;
	}
	return 0;
}

late_initcall_sync(latency_fsnotify_init);
void latency_fsnotify(struct trace_array *tr)
{
	if (!fsnotify_wq)
		return;
	/*
	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
	 * possible that we are called from __schedule() or do_idle(), which
	 * could cause a deadlock.
	 */
	irq_work_queue(&tr->fsnotify_irqwork);
}
#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)	\
	|| defined(CONFIG_OSNOISE_TRACER)

#define trace_create_maxlat_file(tr, d_tracer)				\
	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
			  d_tracer, &tr->max_latency, &tracing_max_lat_fops)

#else
#define trace_create_maxlat_file(tr, d_tracer)	 do { } while (0)
#endif
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct array_buffer *trace_buf = &tr->array_buffer;
	struct array_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
	latency_fsnotify(tr);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from array_buffer */
	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
		goto out_unlock;
#endif
	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

 out_unlock:
	arch_spin_unlock(&tr->max_lock);
}
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
				full);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	if (!tracing_is_on()) {
		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
			type->name);
		return 0;
	}

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	tracing_selftest_running = true;
	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		/* This loop can take minutes when sanitizers are enabled, so
		 * let's make sure we allow RCU processing.
		 */
		cond_resched();
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}
	tracing_selftest_running = false;

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Can not register tracer %s due to lockdown\n",
			   type->name);
		return -EPERM;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	disable_tracing_selftest("running a tracer");

 out_unlock:
	return ret;
}
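
/*
 * Illustrative sketch (a hypothetical tracer, not part of this file):
 * a minimal tracer needs little more than a name and init/reset
 * callbacks, registered from an __init path:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	register_tracer(&my_tracer);
 */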
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
void tracing_reset_online_cpus(struct array_buffer *buf)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	ring_buffer_reset_online_cpus(buffer);

	ring_buffer_record_enable(buffer);
}
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->clear_trace)
			continue;
		tr->clear_trace = false;
		tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
/*
 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
 * is the tgid last observed corresponding to pid=i.
 */
static int *tgid_map;

/* The maximum valid index into tgid_map. */
static size_t tgid_map_max;
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}
static inline void set_cmdline(int idx, const char *cmdline)
{
	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc_array(val,
					      sizeof(*s->map_cmdline_to_pid),
					      GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}
static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.array_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_start_tr(struct trace_array *tr)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->array_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.array_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
2376 static void tracing_stop_tr(struct trace_array *tr)
2378 struct trace_buffer *buffer;
2379 unsigned long flags;
2381 /* If global, we need to also stop the max tracer */
2382 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2383 return tracing_stop();
2385 raw_spin_lock_irqsave(&tr->start_lock, flags);
2386 if (tr->stop_count++)
2389 buffer = tr->array_buffer.buffer;
2391 ring_buffer_record_disable(buffer);
2394 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
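/*
 * Editor's note (illustrative sketch, not in-tree code): stop/start nest
 * through stop_count, so paired calls are safe even if another path has
 * already stopped the trace:
 *
 *	tracing_stop();
 *	do_noisy_work();	// hypothetical helper
 *	tracing_start();	// recording resumes at the outermost start
 */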
2397 static int trace_save_cmdline(struct task_struct *tsk)
2401 /* treat recording of idle task as a success */
2405 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2408 * It's not the end of the world if we don't get
2409 * the lock, but we also don't want to spin
2410 * nor do we want to disable interrupts,
2411 * so if we miss here, then better luck next time.
2413 if (!arch_spin_trylock(&trace_cmdline_lock))
2416 idx = savedcmd->map_pid_to_cmdline[tpid];
2417 if (idx == NO_CMDLINE_MAP) {
2418 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2420 savedcmd->map_pid_to_cmdline[tpid] = idx;
2421 savedcmd->cmdline_idx = idx;
2424 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2425 set_cmdline(idx, tsk->comm);
2427 arch_spin_unlock(&trace_cmdline_lock);
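/*
 * Editor's note: the tpid = pid & (PID_MAX_DEFAULT - 1) masking above
 * folds PIDs into the fixed-size map, so PIDs that differ only in bits
 * 15 and up (e.g. 100 and 100 + PID_MAX_DEFAULT) share a slot and the
 * newer task overwrites the older mapping. map_cmdline_to_pid[] records
 * which real PID owns a slot, letting lookups detect stale entries.
 */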
2432 static void __trace_find_cmdline(int pid, char comm[])
2438 strcpy(comm, "<idle>");
2442 if (WARN_ON_ONCE(pid < 0)) {
2443 strcpy(comm, "<XXX>");
2447 tpid = pid & (PID_MAX_DEFAULT - 1);
2448 map = savedcmd->map_pid_to_cmdline[tpid];
2449 if (map != NO_CMDLINE_MAP) {
2450 tpid = savedcmd->map_cmdline_to_pid[map];
2452 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2456 strcpy(comm, "<...>");
2459 void trace_find_cmdline(int pid, char comm[])
2462 arch_spin_lock(&trace_cmdline_lock);
2464 __trace_find_cmdline(pid, comm);
2466 arch_spin_unlock(&trace_cmdline_lock);
2470 static int *trace_find_tgid_ptr(int pid)
2473 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
* if we observe a non-NULL tgid_map then we also observe the correct
* tgid_map_max.
2477 int *map = smp_load_acquire(&tgid_map);
2479 if (unlikely(!map || pid > tgid_map_max))
2485 int trace_find_tgid(int pid)
2487 int *ptr = trace_find_tgid_ptr(pid);
2489 return ptr ? *ptr : 0;
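/*
 * Editor's note (illustrative): trace_find_tgid() returns 0 when the
 * tgid map was never allocated or the pid was never recorded:
 *
 *	int tgid = trace_find_tgid(current->pid);
 *
 *	if (tgid)
 *		pr_debug("pid %d belongs to tgid %d\n", current->pid, tgid);
 */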
2492 static int trace_save_tgid(struct task_struct *tsk)
2496 /* treat recording of idle task as a success */
2500 ptr = trace_find_tgid_ptr(tsk->pid);
2508 static bool tracing_record_taskinfo_skip(int flags)
2510 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2512 if (!__this_cpu_read(trace_taskinfo_save))
2518 * tracing_record_taskinfo - record the task info of a task
2520 * @task: task to record
2521 * @flags: TRACE_RECORD_CMDLINE for recording comm
2522 * TRACE_RECORD_TGID for recording tgid
2524 void tracing_record_taskinfo(struct task_struct *task, int flags)
2528 if (tracing_record_taskinfo_skip(flags))
2532 * Record as much task information as possible. If some fail, continue
2533 * to try to record the others.
2535 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2536 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2538 /* If recording any information failed, retry again soon. */
2542 __this_cpu_write(trace_taskinfo_save, false);
2546 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2548 * @prev: previous task during sched_switch
2549 * @next: next task during sched_switch
2550 * @flags: TRACE_RECORD_CMDLINE for recording comm
2551 * TRACE_RECORD_TGID for recording tgid
2553 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2554 struct task_struct *next, int flags)
2558 if (tracing_record_taskinfo_skip(flags))
2562 * Record as much task information as possible. If some fail, continue
2563 * to try to record the others.
2565 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2566 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2567 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2568 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2570 /* If recording any information failed, retry again soon. */
2574 __this_cpu_write(trace_taskinfo_save, false);
2577 /* Helpers to record a specific task information */
2578 void tracing_record_cmdline(struct task_struct *task)
2580 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2583 void tracing_record_tgid(struct task_struct *task)
2585 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2589 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2590 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2591 * simplifies those functions and keeps them in sync.
2593 enum print_line_t trace_handle_return(struct trace_seq *s)
2595 return trace_seq_has_overflowed(s) ?
2596 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2598 EXPORT_SYMBOL_GPL(trace_handle_return);
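/*
 * Editor's note: typical use in an event's ->trace() callback
 * (illustrative sketch; the event name "foo" is hypothetical):
 *
 *	static enum print_line_t foo_trace(struct trace_iterator *iter,
 *					   int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "foo fired\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */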
2600 static unsigned short migration_disable_value(void)
2602 #if defined(CONFIG_SMP)
2603 return current->migration_disabled;
2609 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2611 unsigned int trace_flags = irqs_status;
2614 pc = preempt_count();
2617 trace_flags |= TRACE_FLAG_NMI;
2618 if (pc & HARDIRQ_MASK)
2619 trace_flags |= TRACE_FLAG_HARDIRQ;
2620 if (in_serving_softirq())
2621 trace_flags |= TRACE_FLAG_SOFTIRQ;
2622 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2623 trace_flags |= TRACE_FLAG_BH_OFF;
2625 if (tif_need_resched())
2626 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2627 if (test_preempt_need_resched())
2628 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2629 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2630 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
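/*
 * Editor's note: the word returned above is packed as follows (derived
 * directly from the expression):
 *
 *	bits  0..3	preempt count, clamped to 15
 *	bits  4..7	migrate-disable depth, clamped to 15
 *	bits  8..15	unused
 *	bits 16..31	TRACE_FLAG_* state (irq/NMI/softirq/resched)
 */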
2633 struct ring_buffer_event *
2634 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2637 unsigned int trace_ctx)
2639 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2642 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2643 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2644 static int trace_buffered_event_ref;
2647 * trace_buffered_event_enable - enable buffering events
* When events are being filtered, it is quicker to use a temporary
* buffer to write the event data into if there's a likely chance
* that it will not be committed. The discard of the ring buffer
* is not as fast as committing, and is much slower than copying
* the data into the temporary buffer.
*
* When an event is to be filtered, allocate per-CPU buffers to
* write the event data into; if the event is filtered and discarded
* it is simply dropped, otherwise the entire data is committed
* in one shot.
2660 void trace_buffered_event_enable(void)
2662 struct ring_buffer_event *event;
2666 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2668 if (trace_buffered_event_ref++)
2671 for_each_tracing_cpu(cpu) {
2672 page = alloc_pages_node(cpu_to_node(cpu),
2673 GFP_KERNEL | __GFP_NORETRY, 0);
2677 event = page_address(page);
2678 memset(event, 0, sizeof(*event));
2680 per_cpu(trace_buffered_event, cpu) = event;
2683 if (cpu == smp_processor_id() &&
2684 __this_cpu_read(trace_buffered_event) !=
2685 per_cpu(trace_buffered_event, cpu))
2692 trace_buffered_event_disable();
2695 static void enable_trace_buffered_event(void *data)
2697 /* Probably not needed, but do it anyway */
2699 this_cpu_dec(trace_buffered_event_cnt);
2702 static void disable_trace_buffered_event(void *data)
2704 this_cpu_inc(trace_buffered_event_cnt);
2708 * trace_buffered_event_disable - disable buffering events
2710 * When a filter is removed, it is faster to not use the buffered
2711 * events, and to commit directly into the ring buffer. Free up
2712 * the temp buffers when there are no more users. This requires
2713 * special synchronization with current events.
2715 void trace_buffered_event_disable(void)
2719 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2721 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2724 if (--trace_buffered_event_ref)
2728 /* For each CPU, set the buffer as used. */
2729 smp_call_function_many(tracing_buffer_mask,
2730 disable_trace_buffered_event, NULL, 1);
2733 /* Wait for all current users to finish */
2736 for_each_tracing_cpu(cpu) {
2737 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2738 per_cpu(trace_buffered_event, cpu) = NULL;
2741 * Make sure trace_buffered_event is NULL before clearing
2742 * trace_buffered_event_cnt.
2747 /* Do the work on each cpu */
2748 smp_call_function_many(tracing_buffer_mask,
2749 enable_trace_buffered_event, NULL, 1);
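/*
 * Editor's note (illustrative): the enable/disable pair is refcounted
 * and must run under event_mutex, so callers use it roughly like:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();		// when a filter is installed
 *	mutex_unlock(&event_mutex);
 *	...
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_disable();		// when the filter is removed
 *	mutex_unlock(&event_mutex);
 */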
2753 static struct trace_buffer *temp_buffer;
2755 struct ring_buffer_event *
2756 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2757 struct trace_event_file *trace_file,
2758 int type, unsigned long len,
2759 unsigned int trace_ctx)
2761 struct ring_buffer_event *entry;
2762 struct trace_array *tr = trace_file->tr;
2765 *current_rb = tr->array_buffer.buffer;
2767 if (!tr->no_filter_buffering_ref &&
2768 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2769 preempt_disable_notrace();
2771 * Filtering is on, so try to use the per cpu buffer first.
2772 * This buffer will simulate a ring_buffer_event,
2773 * where the type_len is zero and the array[0] will
2774 * hold the full length.
* (see include/linux/ring_buffer.h for details on
2776 * how the ring_buffer_event is structured).
2778 * Using a temp buffer during filtering and copying it
2779 * on a matched filter is quicker than writing directly
2780 * into the ring buffer and then discarding it when
2781 * it doesn't match. That is because the discard
2782 * requires several atomic operations to get right.
* Copying on match and doing nothing on a failed match
* is still quicker than writing without a copy on match,
* but having to discard out of the ring buffer on a
* failed match.
2787 if ((entry = __this_cpu_read(trace_buffered_event))) {
2788 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2790 val = this_cpu_inc_return(trace_buffered_event_cnt);
2793 * Preemption is disabled, but interrupts and NMIs
2794 * can still come in now. If that happens after
2795 * the above increment, then it will have to go
2796 * back to the old method of allocating the event
2797 * on the ring buffer, and if the filter fails, it
* will have to call ring_buffer_discard_commit() to remove it.
2801 * Need to also check the unlikely case that the
2802 * length is bigger than the temp buffer size.
2803 * If that happens, then the reserve is pretty much
2804 * guaranteed to fail, as the ring buffer currently
2805 * only allows events less than a page. But that may
2806 * change in the future, so let the ring buffer reserve
2807 * handle the failure in that case.
2809 if (val == 1 && likely(len <= max_len)) {
2810 trace_event_setup(entry, type, trace_ctx);
2811 entry->array[0] = len;
2812 /* Return with preemption disabled */
2815 this_cpu_dec(trace_buffered_event_cnt);
2817 /* __trace_buffer_lock_reserve() disables preemption */
2818 preempt_enable_notrace();
2821 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2824 * If tracing is off, but we have triggers enabled
2825 * we still need to look at the event data. Use the temp_buffer
* to store the trace event for the trigger to use. It's recursion
* safe and will not be recorded anywhere.
2829 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2830 *current_rb = temp_buffer;
2831 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2836 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2838 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2839 static DEFINE_MUTEX(tracepoint_printk_mutex);
2841 static void output_printk(struct trace_event_buffer *fbuffer)
2843 struct trace_event_call *event_call;
2844 struct trace_event_file *file;
2845 struct trace_event *event;
2846 unsigned long flags;
2847 struct trace_iterator *iter = tracepoint_print_iter;
2849 /* We should never get here if iter is NULL */
2850 if (WARN_ON_ONCE(!iter))
2853 event_call = fbuffer->trace_file->event_call;
2854 if (!event_call || !event_call->event.funcs ||
2855 !event_call->event.funcs->trace)
2858 file = fbuffer->trace_file;
2859 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2860 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2861 !filter_match_preds(file->filter, fbuffer->entry)))
2864 event = &fbuffer->trace_file->event_call->event;
2866 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2867 trace_seq_init(&iter->seq);
2868 iter->ent = fbuffer->entry;
2869 event_call->event.funcs->trace(iter, 0, event);
2870 trace_seq_putc(&iter->seq, 0);
2871 printk("%s", iter->seq.buffer);
2873 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2876 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2877 void *buffer, size_t *lenp,
2880 int save_tracepoint_printk;
2883 mutex_lock(&tracepoint_printk_mutex);
2884 save_tracepoint_printk = tracepoint_printk;
2886 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2889 * This will force exiting early, as tracepoint_printk
* is always zero when tracepoint_print_iter is not allocated.
2892 if (!tracepoint_print_iter)
2893 tracepoint_printk = 0;
2895 if (save_tracepoint_printk == tracepoint_printk)
2898 if (tracepoint_printk)
2899 static_key_enable(&tracepoint_printk_key.key);
2901 static_key_disable(&tracepoint_printk_key.key);
2904 mutex_unlock(&tracepoint_printk_mutex);
2909 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2911 enum event_trigger_type tt = ETT_NONE;
2912 struct trace_event_file *file = fbuffer->trace_file;
2914 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2915 fbuffer->entry, &tt))
2918 if (static_key_false(&tracepoint_printk_key.key))
2919 output_printk(fbuffer);
2921 if (static_branch_unlikely(&trace_event_exports_enabled))
2922 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2924 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2925 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2929 event_triggers_post_call(file, tt);
2932 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
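/*
 * Editor's note: generated TRACE_EVENT() code pairs a reserve with this
 * commit roughly as follows (sketch; the "foo" event is hypothetical):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct trace_event_raw_foo *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = value;
 *	trace_event_buffer_commit(&fbuffer);
 */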
* Skip 3:
*
*   trace_buffer_unlock_commit_regs()
2938 * trace_event_buffer_commit()
2939 * trace_event_raw_event_xxx()
2941 # define STACK_SKIP 3
2943 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2944 struct trace_buffer *buffer,
2945 struct ring_buffer_event *event,
2946 unsigned int trace_ctx,
2947 struct pt_regs *regs)
2949 __buffer_unlock_commit(buffer, event);
2952 * If regs is not set, then skip the necessary functions.
2953 * Note, we can still get here via blktrace, wakeup tracer
2954 * and mmiotrace, but that's ok if they lose a function or
2955 * two. They are not that meaningful.
2957 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2958 ftrace_trace_userstack(tr, buffer, trace_ctx);
2962 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2965 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2966 struct ring_buffer_event *event)
2968 __buffer_unlock_commit(buffer, event);
2972 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2973 parent_ip, unsigned int trace_ctx)
2975 struct trace_event_call *call = &event_function;
2976 struct trace_buffer *buffer = tr->array_buffer.buffer;
2977 struct ring_buffer_event *event;
2978 struct ftrace_entry *entry;
2980 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2984 entry = ring_buffer_event_data(event);
2986 entry->parent_ip = parent_ip;
2988 if (!call_filter_check_discard(call, entry, buffer, event)) {
2989 if (static_branch_unlikely(&trace_function_exports_enabled))
2990 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2991 __buffer_unlock_commit(buffer, event);
2995 #ifdef CONFIG_STACKTRACE
2997 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2998 #define FTRACE_KSTACK_NESTING 4
3000 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3002 struct ftrace_stack {
3003 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3007 struct ftrace_stacks {
3008 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3011 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3012 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
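/*
 * Editor's note (worked sizing, assuming 4K pages and 64-bit longs):
 * FTRACE_KSTACK_ENTRIES = 4096 / 4 = 1024 slots per context, so one
 * struct ftrace_stack is 1024 * 8 = 8KB, and the four nesting levels
 * in struct ftrace_stacks cost 32KB per CPU.
 */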
3014 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3015 unsigned int trace_ctx,
3016 int skip, struct pt_regs *regs)
3018 struct trace_event_call *call = &event_kernel_stack;
3019 struct ring_buffer_event *event;
3020 unsigned int size, nr_entries;
3021 struct ftrace_stack *fstack;
3022 struct stack_entry *entry;
* Add one, for this function and the call to save_stack_trace().
3027 * If regs is set, then these functions will not be in the way.
3029 #ifndef CONFIG_UNWINDER_ORC
3034 preempt_disable_notrace();
3036 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3038 /* This should never happen. If it does, yell once and skip */
3039 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3043 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3044 * interrupt will either see the value pre increment or post
3045 * increment. If the interrupt happens pre increment it will have
3046 * restored the counter when it returns. We just need a barrier to
3047 * keep gcc from moving things around.
3051 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3052 size = ARRAY_SIZE(fstack->calls);
3055 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3058 nr_entries = stack_trace_save(fstack->calls, size, skip);
3061 size = nr_entries * sizeof(unsigned long);
3062 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3063 (sizeof(*entry) - sizeof(entry->caller)) + size,
3067 entry = ring_buffer_event_data(event);
3069 memcpy(&entry->caller, fstack->calls, size);
3070 entry->size = nr_entries;
3072 if (!call_filter_check_discard(call, entry, buffer, event))
3073 __buffer_unlock_commit(buffer, event);
3076 /* Again, don't let gcc optimize things here */
3078 __this_cpu_dec(ftrace_stack_reserve);
3079 preempt_enable_notrace();
3083 static inline void ftrace_trace_stack(struct trace_array *tr,
3084 struct trace_buffer *buffer,
3085 unsigned int trace_ctx,
3086 int skip, struct pt_regs *regs)
3088 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3091 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3094 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3097 struct trace_buffer *buffer = tr->array_buffer.buffer;
3099 if (rcu_is_watching()) {
3100 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3105 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3106 * but if the above rcu_is_watching() failed, then the NMI
3107 * triggered someplace critical, and rcu_irq_enter() should
3108 * not be called from NMI.
3110 if (unlikely(in_nmi()))
3113 rcu_irq_enter_irqson();
3114 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3115 rcu_irq_exit_irqson();
3119 * trace_dump_stack - record a stack back trace in the trace buffer
3120 * @skip: Number of functions to skip (helper handlers)
3122 void trace_dump_stack(int skip)
3124 if (tracing_disabled || tracing_selftest_running)
3127 #ifndef CONFIG_UNWINDER_ORC
3128 /* Skip 1 to skip this function. */
3131 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3132 tracing_gen_ctx(), skip, NULL);
3134 EXPORT_SYMBOL_GPL(trace_dump_stack);
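/*
 * Editor's note (illustrative): record the current kernel stack into
 * the global trace buffer without skipping any extra frames:
 *
 *	trace_dump_stack(0);
 */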
3136 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3137 static DEFINE_PER_CPU(int, user_stack_count);
3140 ftrace_trace_userstack(struct trace_array *tr,
3141 struct trace_buffer *buffer, unsigned int trace_ctx)
3143 struct trace_event_call *call = &event_user_stack;
3144 struct ring_buffer_event *event;
3145 struct userstack_entry *entry;
3147 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
* NMIs cannot handle page faults, even with fixups.
* Saving the user stack can (and often does) fault.
3154 if (unlikely(in_nmi()))
3158 * prevent recursion, since the user stack tracing may
3159 * trigger other kernel events.
3162 if (__this_cpu_read(user_stack_count))
3165 __this_cpu_inc(user_stack_count);
3167 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3168 sizeof(*entry), trace_ctx);
3170 goto out_drop_count;
3171 entry = ring_buffer_event_data(event);
3173 entry->tgid = current->tgid;
3174 memset(&entry->caller, 0, sizeof(entry->caller));
3176 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3177 if (!call_filter_check_discard(call, entry, buffer, event))
3178 __buffer_unlock_commit(buffer, event);
3181 __this_cpu_dec(user_stack_count);
3185 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3186 static void ftrace_trace_userstack(struct trace_array *tr,
3187 struct trace_buffer *buffer,
3188 unsigned int trace_ctx)
3191 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3193 #endif /* CONFIG_STACKTRACE */
3196 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3197 unsigned long long delta)
3199 entry->bottom_delta_ts = delta & U32_MAX;
3200 entry->top_delta_ts = (delta >> 32);
3203 void trace_last_func_repeats(struct trace_array *tr,
3204 struct trace_func_repeats *last_info,
3205 unsigned int trace_ctx)
3207 struct trace_buffer *buffer = tr->array_buffer.buffer;
3208 struct func_repeats_entry *entry;
3209 struct ring_buffer_event *event;
3212 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3213 sizeof(*entry), trace_ctx);
3217 delta = ring_buffer_event_time_stamp(buffer, event) -
3218 last_info->ts_last_call;
3220 entry = ring_buffer_event_data(event);
3221 entry->ip = last_info->ip;
3222 entry->parent_ip = last_info->parent_ip;
3223 entry->count = last_info->count;
3224 func_repeats_set_delta_ts(entry, delta);
3226 __buffer_unlock_commit(buffer, event);
3229 /* created for use with alloc_percpu */
3230 struct trace_buffer_struct {
3232 char buffer[4][TRACE_BUF_SIZE];
3235 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3238 * This allows for lockless recording. If we're nested too deeply, then
3239 * this returns NULL.
3241 static char *get_trace_buf(void)
3243 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3245 if (!trace_percpu_buffer || buffer->nesting >= 4)
3250 /* Interrupts must see nesting incremented before we use the buffer */
3252 return &buffer->buffer[buffer->nesting - 1][0];
3255 static void put_trace_buf(void)
3257 /* Don't let the decrement of nesting leak before this */
3259 this_cpu_dec(trace_percpu_buffer->nesting);
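/*
 * Editor's note: callers bracket their use of the per-CPU buffer, e.g.
 * (sketch of the pattern trace_vbprintk() uses below):
 *
 *	char *tbuffer = get_trace_buf();
 *
 *	if (tbuffer) {
 *		...format into tbuffer...
 *		put_trace_buf();
 *	}
 */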
3262 static int alloc_percpu_trace_buffer(void)
3264 struct trace_buffer_struct __percpu *buffers;
3266 if (trace_percpu_buffer)
3269 buffers = alloc_percpu(struct trace_buffer_struct);
3270 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3273 trace_percpu_buffer = buffers;
3277 static int buffers_allocated;
3279 void trace_printk_init_buffers(void)
3281 if (buffers_allocated)
3284 if (alloc_percpu_trace_buffer())
3287 /* trace_printk() is for debug use only. Don't use it in production. */
3290 pr_warn("**********************************************************\n");
3291 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3293 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3295 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3296 pr_warn("** unsafe for production use. **\n");
3298 pr_warn("** If you see this message and you are not debugging **\n");
3299 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3301 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3302 pr_warn("**********************************************************\n");
3304 /* Expand the buffers to set size */
3305 tracing_update_buffers();
3307 buffers_allocated = 1;
3310 * trace_printk_init_buffers() can be called by modules.
3311 * If that happens, then we need to start cmdline recording
3312 * directly here. If the global_trace.buffer is already
3313 * allocated here, then this was called by module code.
3315 if (global_trace.array_buffer.buffer)
3316 tracing_start_cmdline_record();
3318 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3320 void trace_printk_start_comm(void)
3322 /* Start tracing comms if trace printk is set */
3323 if (!buffers_allocated)
3325 tracing_start_cmdline_record();
3328 static void trace_printk_start_stop_comm(int enabled)
3330 if (!buffers_allocated)
3334 tracing_start_cmdline_record();
3336 tracing_stop_cmdline_record();
3340 * trace_vbprintk - write binary msg to tracing buffer
3341 * @ip: The address of the caller
3342 * @fmt: The string format to write to the buffer
3343 * @args: Arguments for @fmt
3345 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3347 struct trace_event_call *call = &event_bprint;
3348 struct ring_buffer_event *event;
3349 struct trace_buffer *buffer;
3350 struct trace_array *tr = &global_trace;
3351 struct bprint_entry *entry;
3352 unsigned int trace_ctx;
3356 if (unlikely(tracing_selftest_running || tracing_disabled))
3359 /* Don't pollute graph traces with trace_vprintk internals */
3360 pause_graph_tracing();
3362 trace_ctx = tracing_gen_ctx();
3363 preempt_disable_notrace();
3365 tbuffer = get_trace_buf();
3371 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3373 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3376 size = sizeof(*entry) + sizeof(u32) * len;
3377 buffer = tr->array_buffer.buffer;
3378 ring_buffer_nest_start(buffer);
3379 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3383 entry = ring_buffer_event_data(event);
3387 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3388 if (!call_filter_check_discard(call, entry, buffer, event)) {
3389 __buffer_unlock_commit(buffer, event);
3390 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3394 ring_buffer_nest_end(buffer);
3399 preempt_enable_notrace();
3400 unpause_graph_tracing();
3404 EXPORT_SYMBOL_GPL(trace_vbprintk);
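/*
 * Editor's note: this is what backs trace_printk() when the format is a
 * constant string, e.g. (illustrative):
 *
 *	trace_printk("x=%d y=%d\n", x, y);
 *
 * Only the format pointer and the binary arguments are recorded; the
 * string is rendered when the buffer is read.
 */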
3408 __trace_array_vprintk(struct trace_buffer *buffer,
3409 unsigned long ip, const char *fmt, va_list args)
3411 struct trace_event_call *call = &event_print;
3412 struct ring_buffer_event *event;
3414 struct print_entry *entry;
3415 unsigned int trace_ctx;
3418 if (tracing_disabled || tracing_selftest_running)
3421 /* Don't pollute graph traces with trace_vprintk internals */
3422 pause_graph_tracing();
3424 trace_ctx = tracing_gen_ctx();
3425 preempt_disable_notrace();
3428 tbuffer = get_trace_buf();
3434 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3436 size = sizeof(*entry) + len + 1;
3437 ring_buffer_nest_start(buffer);
3438 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3442 entry = ring_buffer_event_data(event);
3445 memcpy(&entry->buf, tbuffer, len + 1);
3446 if (!call_filter_check_discard(call, entry, buffer, event)) {
3447 __buffer_unlock_commit(buffer, event);
3448 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3452 ring_buffer_nest_end(buffer);
3456 preempt_enable_notrace();
3457 unpause_graph_tracing();
3463 int trace_array_vprintk(struct trace_array *tr,
3464 unsigned long ip, const char *fmt, va_list args)
3466 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3470 * trace_array_printk - Print a message to a specific instance
3471 * @tr: The instance trace_array descriptor
3472 * @ip: The instruction pointer that this is called from.
3473 * @fmt: The format to print (printf format)
* If a subsystem sets up its own instance, it has the right to
* printk strings into its tracing instance buffer using this
* function. Note, this function will not write into the top level
* buffer (use trace_printk() for that), as the top level buffer
* should only contain events that can be individually disabled.
* trace_printk() is only for debugging a kernel, and should never
* be incorporated into normal use.
3483 * trace_array_printk() can be used, as it will not add noise to the
3484 * top level tracing buffer.
* Note, trace_array_init_printk() must be called on @tr before this
* can be used.
3490 int trace_array_printk(struct trace_array *tr,
3491 unsigned long ip, const char *fmt, ...)
3499 /* This is only allowed for created instances */
3500 if (tr == &global_trace)
3503 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3507 ret = trace_array_vprintk(tr, ip, fmt, ap);
3511 EXPORT_SYMBOL_GPL(trace_array_printk);
3514 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3515 * @tr: The trace array to initialize the buffers for
* As trace_array_printk() only writes into instances, such calls are OK
* to leave in the kernel (unlike trace_printk()). This needs to be called
3519 * before trace_array_printk() can be used on a trace_array.
3521 int trace_array_init_printk(struct trace_array *tr)
3526 /* This is only allowed for created instances */
3527 if (tr == &global_trace)
3530 return alloc_percpu_trace_buffer();
3532 EXPORT_SYMBOL_GPL(trace_array_init_printk);
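/*
 * Editor's note (illustrative; the instance name is hypothetical):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_subsys");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello %d\n", 42);
 */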
3535 int trace_array_printk_buf(struct trace_buffer *buffer,
3536 unsigned long ip, const char *fmt, ...)
3541 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3545 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3551 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3553 return trace_array_vprintk(&global_trace, ip, fmt, args);
3555 EXPORT_SYMBOL_GPL(trace_vprintk);
3557 static void trace_iterator_increment(struct trace_iterator *iter)
3559 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3563 ring_buffer_iter_advance(buf_iter);
3566 static struct trace_entry *
3567 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3568 unsigned long *lost_events)
3570 struct ring_buffer_event *event;
3571 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3574 event = ring_buffer_iter_peek(buf_iter, ts);
3576 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3577 (unsigned long)-1 : 0;
3579 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3584 iter->ent_size = ring_buffer_event_length(event);
3585 return ring_buffer_event_data(event);
3591 static struct trace_entry *
3592 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3593 unsigned long *missing_events, u64 *ent_ts)
3595 struct trace_buffer *buffer = iter->array_buffer->buffer;
3596 struct trace_entry *ent, *next = NULL;
3597 unsigned long lost_events = 0, next_lost = 0;
3598 int cpu_file = iter->cpu_file;
3599 u64 next_ts = 0, ts;
* If we are in a per_cpu trace file, don't bother iterating over
* all CPUs; peek directly.
3608 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3609 if (ring_buffer_empty_cpu(buffer, cpu_file))
3611 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3613 *ent_cpu = cpu_file;
3618 for_each_tracing_cpu(cpu) {
3620 if (ring_buffer_empty_cpu(buffer, cpu))
3623 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3626 * Pick the entry with the smallest timestamp:
3628 if (ent && (!next || ts < next_ts)) {
3632 next_lost = lost_events;
3633 next_size = iter->ent_size;
3637 iter->ent_size = next_size;
3640 *ent_cpu = next_cpu;
3646 *missing_events = next_lost;
3651 #define STATIC_FMT_BUF_SIZE 128
3652 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3654 static char *trace_iter_expand_format(struct trace_iterator *iter)
3659 * iter->tr is NULL when used with tp_printk, which makes
3660 * this get called where it is not safe to call krealloc().
3662 if (!iter->tr || iter->fmt == static_fmt_buf)
3665 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3668 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3675 /* Returns true if the string is safe to dereference from an event */
3676 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3678 unsigned long addr = (unsigned long)str;
3679 struct trace_event *trace_event;
3680 struct trace_event_call *event;
3682 /* OK if part of the event data */
3683 if ((addr >= (unsigned long)iter->ent) &&
3684 (addr < (unsigned long)iter->ent + iter->ent_size))
3687 /* OK if part of the temp seq buffer */
3688 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3689 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3692 /* Core rodata can not be freed */
3693 if (is_kernel_rodata(addr))
3696 if (trace_is_tracepoint_string(str))
3700 * Now this could be a module event, referencing core module
3701 * data, which is OK.
3706 trace_event = ftrace_find_event(iter->ent->type);
3710 event = container_of(trace_event, struct trace_event_call, event);
3711 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3714 /* Would rather have rodata, but this will suffice */
3715 if (within_module_core(addr, event->module))
3721 static const char *show_buffer(struct trace_seq *s)
3723 struct seq_buf *seq = &s->seq;
3725 seq_buf_terminate(seq);
3730 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3732 static int test_can_verify_check(const char *fmt, ...)
* The verifier depends on vsnprintf() modifying the va_list that is
* passed to it, i.e. on the va_list being passed by reference. Some
* architectures (like x86_32) pass it by value, which means that
* vsnprintf() does not modify the caller's va_list, and the verifier
* would then need to understand every value that vsnprintf can
* consume. If it is passed by value, the verifier is disabled.
3748 vsnprintf(buf, 16, "%d", ap);
3749 ret = va_arg(ap, int);
3755 static void test_can_verify(void)
3757 if (!test_can_verify_check("%d %d", 0, 1)) {
3758 pr_info("trace event string verifier disabled\n");
3759 static_branch_inc(&trace_no_verify);
3764 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3765 * @iter: The iterator that holds the seq buffer and the event being printed
3766 * @fmt: The format used to print the event
3767 * @ap: The va_list holding the data to print from @fmt.
3769 * This writes the data into the @iter->seq buffer using the data from
3770 * @fmt and @ap. If the format has a %s, then the source of the string
3771 * is examined to make sure it is safe to print, otherwise it will
* warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string.
3775 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3778 const char *p = fmt;
3782 if (WARN_ON_ONCE(!fmt))
3785 if (static_branch_unlikely(&trace_no_verify))
3788 /* Don't bother checking when doing a ftrace_dump() */
3789 if (iter->fmt == static_fmt_buf)
3798 /* We only care about %s and variants */
3799 for (i = 0; p[i]; i++) {
3800 if (i + 1 >= iter->fmt_size) {
* If we can't expand the copy buffer, just print it.
3805 if (!trace_iter_expand_format(iter))
3809 if (p[i] == '\\' && p[i+1]) {
3814 /* Need to test cases like %08.*s */
3815 for (j = 1; p[i+j]; j++) {
3816 if (isdigit(p[i+j]) ||
3819 if (p[i+j] == '*') {
3831 /* If no %s found then just print normally */
3835 /* Copy up to the %s, and print that */
3836 strncpy(iter->fmt, p, i);
3837 iter->fmt[i] = '\0';
3838 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3841 * If iter->seq is full, the above call no longer guarantees
3842 * that ap is in sync with fmt processing, and further calls
3843 * to va_arg() can return wrong positional arguments.
3845 * Ensure that ap is no longer used in this case.
3847 if (iter->seq.full) {
3853 len = va_arg(ap, int);
3855 /* The ap now points to the string data of the %s */
3856 str = va_arg(ap, const char *);
3859 * If you hit this warning, it is likely that the
3860 * trace event in question used %s on a string that
3861 * was saved at the time of the event, but may not be
3862 * around when the trace is read. Use __string(),
3863 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3864 * instead. See samples/trace_events/trace-events-sample.h
3867 if (WARN_ONCE(!trace_safe_str(iter, str),
3868 "fmt: '%s' current_buffer: '%s'",
3869 fmt, show_buffer(&iter->seq))) {
3872 /* Try to safely read the string */
3874 if (len + 1 > iter->fmt_size)
3875 len = iter->fmt_size - 1;
3878 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3882 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3886 trace_seq_printf(&iter->seq, "(0x%px)", str);
3888 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3890 str = "[UNSAFE-MEMORY]";
3891 strcpy(iter->fmt, "%s");
3893 strncpy(iter->fmt, p + i, j + 1);
3894 iter->fmt[j+1] = '\0';
3897 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3899 trace_seq_printf(&iter->seq, iter->fmt, str);
3905 trace_seq_vprintf(&iter->seq, p, ap);
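/*
 * Editor's note: the safe pattern the warning above points to copies the
 * string into the event itself (illustrative TRACE_EVENT() fragment):
 *
 *	TP_STRUCT__entry(__string(name, name)),
 *	TP_fast_assign(__assign_str(name, name);),
 *	TP_printk("%s", __get_str(name))
 */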
3908 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3910 const char *p, *new_fmt;
3913 if (WARN_ON_ONCE(!fmt))
3916 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3920 new_fmt = q = iter->fmt;
3922 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3923 if (!trace_iter_expand_format(iter))
3926 q += iter->fmt - new_fmt;
3927 new_fmt = iter->fmt;
3932 /* Replace %p with %px */
3936 } else if (p[0] == 'p' && !isalnum(p[1])) {
3947 #define STATIC_TEMP_BUF_SIZE 128
3948 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3950 /* Find the next real entry, without updating the iterator itself */
3951 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3952 int *ent_cpu, u64 *ent_ts)
3954 /* __find_next_entry will reset ent_size */
3955 int ent_size = iter->ent_size;
3956 struct trace_entry *entry;
3959 * If called from ftrace_dump(), then the iter->temp buffer
3960 * will be the static_temp_buf and not created from kmalloc.
3961 * If the entry size is greater than the buffer, we can
3962 * not save it. Just return NULL in that case. This is only
3963 * used to add markers when two consecutive events' time
3964 * stamps have a large delta. See trace_print_lat_context()
3966 if (iter->temp == static_temp_buf &&
3967 STATIC_TEMP_BUF_SIZE < ent_size)
3971 * The __find_next_entry() may call peek_next_entry(), which may
3972 * call ring_buffer_peek() that may make the contents of iter->ent
3973 * undefined. Need to copy iter->ent now.
3975 if (iter->ent && iter->ent != iter->temp) {
3976 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3977 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3979 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3984 iter->temp_size = iter->ent_size;
3986 memcpy(iter->temp, iter->ent, iter->ent_size);
3987 iter->ent = iter->temp;
3989 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3990 /* Put back the original ent_size */
3991 iter->ent_size = ent_size;
3996 /* Find the next real entry, and increment the iterator to the next entry */
3997 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3999 iter->ent = __find_next_entry(iter, &iter->cpu,
4000 &iter->lost_events, &iter->ts);
4003 trace_iterator_increment(iter);
4005 return iter->ent ? iter : NULL;
4008 static void trace_consume(struct trace_iterator *iter)
4010 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4011 &iter->lost_events);
4014 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4016 struct trace_iterator *iter = m->private;
4020 WARN_ON_ONCE(iter->leftover);
4024 /* can't go backwards */
4029 ent = trace_find_next_entry_inc(iter);
4033 while (ent && iter->idx < i)
4034 ent = trace_find_next_entry_inc(iter);
4041 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4043 struct ring_buffer_iter *buf_iter;
4044 unsigned long entries = 0;
4047 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4049 buf_iter = trace_buffer_iter(iter, cpu);
4053 ring_buffer_iter_reset(buf_iter);
* With the max latency tracers, a reset may never have taken
* place on a CPU. This is evident when a timestamp falls before
* the start of the buffer.
4060 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4061 if (ts >= iter->array_buffer->time_start)
4064 ring_buffer_iter_advance(buf_iter);
4067 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
* The current tracer is copied to avoid global locking all around.
4074 static void *s_start(struct seq_file *m, loff_t *pos)
4076 struct trace_iterator *iter = m->private;
4077 struct trace_array *tr = iter->tr;
4078 int cpu_file = iter->cpu_file;
4084 * copy the tracer to avoid using a global lock all around.
4085 * iter->trace is a copy of current_trace, the pointer to the
4086 * name may be used instead of a strcmp(), as iter->trace->name
4087 * will point to the same string as current_trace->name.
4089 mutex_lock(&trace_types_lock);
4090 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4091 *iter->trace = *tr->current_trace;
4092 mutex_unlock(&trace_types_lock);
4094 #ifdef CONFIG_TRACER_MAX_TRACE
4095 if (iter->snapshot && iter->trace->use_max_tr)
4096 return ERR_PTR(-EBUSY);
4099 if (*pos != iter->pos) {
4104 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4105 for_each_tracing_cpu(cpu)
4106 tracing_iter_reset(iter, cpu);
4108 tracing_iter_reset(iter, cpu_file);
4111 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4116 * If we overflowed the seq_file before, then we want
4117 * to just reuse the trace_seq buffer again.
4123 p = s_next(m, p, &l);
4127 trace_event_read_lock();
4128 trace_access_lock(cpu_file);
4132 static void s_stop(struct seq_file *m, void *p)
4134 struct trace_iterator *iter = m->private;
4136 #ifdef CONFIG_TRACER_MAX_TRACE
4137 if (iter->snapshot && iter->trace->use_max_tr)
4141 trace_access_unlock(iter->cpu_file);
4142 trace_event_read_unlock();
4146 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4147 unsigned long *entries, int cpu)
4149 unsigned long count;
4151 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4153 * If this buffer has skipped entries, then we hold all
4154 * entries for the trace and we need to ignore the
4155 * ones before the time stamp.
4157 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4158 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4159 /* total is the same as the entries */
4163 ring_buffer_overrun_cpu(buf->buffer, cpu);
4168 get_total_entries(struct array_buffer *buf,
4169 unsigned long *total, unsigned long *entries)
4177 for_each_tracing_cpu(cpu) {
4178 get_total_entries_cpu(buf, &t, &e, cpu);
4184 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4186 unsigned long total, entries;
4191 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4196 unsigned long trace_total_entries(struct trace_array *tr)
4198 unsigned long total, entries;
4203 get_total_entries(&tr->array_buffer, &total, &entries);
4208 static void print_lat_help_header(struct seq_file *m)
seq_puts(m, "#                    _------=> CPU#            \n"
	    "#                   / _-----=> irqs-off/BH-disabled\n"
	    "#                  | / _----=> need-resched    \n"
	    "#                 || / _---=> hardirq/softirq \n"
	    "#                ||| / _--=> preempt-depth    \n"
	    "#                |||| / _-=> migrate-disable \n"
	    "#                ||||| /     delay           \n"
	    "#  cmd     pid   |||||| time  |   caller     \n"
	    "#     \\   /      ||||||  \\    |    /        \n");
4221 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4223 unsigned long total;
4224 unsigned long entries;
4226 get_total_entries(buf, &total, &entries);
4227 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4228 entries, total, num_online_cpus());
4232 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4235 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4237 print_event_info(buf, m);
seq_printf(m, "#           TASK-PID   %s  CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
seq_printf(m, "#              | |     %s    |         |         |\n", tgid ? "     |    " : "");
4243 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4246 bool tgid = flags & TRACE_ITER_RECORD_TGID;
const char *space = "            ";
4248 int prec = tgid ? 12 : 2;
4250 print_event_info(buf, m);
seq_printf(m, "# %.*s  _-----=> irqs-off/BH-disabled\n", prec, space);
seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
seq_printf(m, "# %.*s|||| /     delay\n", prec, space);
seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
4263 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4265 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4266 struct array_buffer *buf = iter->array_buffer;
4267 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4268 struct tracer *type = iter->trace;
4269 unsigned long entries;
4270 unsigned long total;
4271 const char *name = "preemption";
4275 get_total_entries(buf, &total, &entries);
4277 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4279 seq_puts(m, "# -----------------------------------"
4280 "---------------------------------\n");
4281 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4282 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4283 nsecs_to_usecs(data->saved_latency),
4287 #if defined(CONFIG_PREEMPT_NONE)
4289 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
4291 #elif defined(CONFIG_PREEMPT)
4293 #elif defined(CONFIG_PREEMPT_RT)
4298 /* These are reserved for later use */
4301 seq_printf(m, " #P:%d)\n", num_online_cpus());
4305 seq_puts(m, "# -----------------\n");
4306 seq_printf(m, "# | task: %.16s-%d "
4307 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4308 data->comm, data->pid,
4309 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4310 data->policy, data->rt_priority);
4311 seq_puts(m, "# -----------------\n");
4313 if (data->critical_start) {
4314 seq_puts(m, "# => started at: ");
4315 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4316 trace_print_seq(m, &iter->seq);
4317 seq_puts(m, "\n# => ended at: ");
4318 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4319 trace_print_seq(m, &iter->seq);
4320 seq_puts(m, "\n#\n");
4326 static void test_cpu_buff_start(struct trace_iterator *iter)
4328 struct trace_seq *s = &iter->seq;
4329 struct trace_array *tr = iter->tr;
4331 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4334 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4337 if (cpumask_available(iter->started) &&
4338 cpumask_test_cpu(iter->cpu, iter->started))
4341 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4344 if (cpumask_available(iter->started))
4345 cpumask_set_cpu(iter->cpu, iter->started);
4347 /* Don't print started cpu buffer for the first entry of the trace */
4349 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4353 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4355 struct trace_array *tr = iter->tr;
4356 struct trace_seq *s = &iter->seq;
4357 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4358 struct trace_entry *entry;
4359 struct trace_event *event;
4363 test_cpu_buff_start(iter);
4365 event = ftrace_find_event(entry->type);
4367 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4368 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4369 trace_print_lat_context(iter);
4371 trace_print_context(iter);
4374 if (trace_seq_has_overflowed(s))
4375 return TRACE_TYPE_PARTIAL_LINE;
4378 return event->funcs->trace(iter, sym_flags, event);
4380 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4382 return trace_handle_return(s);
4385 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4387 struct trace_array *tr = iter->tr;
4388 struct trace_seq *s = &iter->seq;
4389 struct trace_entry *entry;
4390 struct trace_event *event;
4394 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4395 trace_seq_printf(s, "%d %d %llu ",
4396 entry->pid, iter->cpu, iter->ts);
4398 if (trace_seq_has_overflowed(s))
4399 return TRACE_TYPE_PARTIAL_LINE;
4401 event = ftrace_find_event(entry->type);
4403 return event->funcs->raw(iter, 0, event);
4405 trace_seq_printf(s, "%d ?\n", entry->type);
4407 return trace_handle_return(s);
4410 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4412 struct trace_array *tr = iter->tr;
4413 struct trace_seq *s = &iter->seq;
4414 unsigned char newline = '\n';
4415 struct trace_entry *entry;
4416 struct trace_event *event;
4420 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4421 SEQ_PUT_HEX_FIELD(s, entry->pid);
4422 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4423 SEQ_PUT_HEX_FIELD(s, iter->ts);
4424 if (trace_seq_has_overflowed(s))
4425 return TRACE_TYPE_PARTIAL_LINE;
4428 event = ftrace_find_event(entry->type);
4430 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4431 if (ret != TRACE_TYPE_HANDLED)
4435 SEQ_PUT_FIELD(s, newline);
4437 return trace_handle_return(s);
4440 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4442 struct trace_array *tr = iter->tr;
4443 struct trace_seq *s = &iter->seq;
4444 struct trace_entry *entry;
4445 struct trace_event *event;
4449 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4450 SEQ_PUT_FIELD(s, entry->pid);
4451 SEQ_PUT_FIELD(s, iter->cpu);
4452 SEQ_PUT_FIELD(s, iter->ts);
4453 if (trace_seq_has_overflowed(s))
4454 return TRACE_TYPE_PARTIAL_LINE;
4457 event = ftrace_find_event(entry->type);
4458 return event ? event->funcs->binary(iter, 0, event) :
4462 int trace_empty(struct trace_iterator *iter)
4464 struct ring_buffer_iter *buf_iter;
4467 /* If we are looking at one CPU buffer, only check that one */
4468 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4469 cpu = iter->cpu_file;
4470 buf_iter = trace_buffer_iter(iter, cpu);
4472 if (!ring_buffer_iter_empty(buf_iter))
4475 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4481 for_each_tracing_cpu(cpu) {
4482 buf_iter = trace_buffer_iter(iter, cpu);
4484 if (!ring_buffer_iter_empty(buf_iter))
4487 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4495 /* Called with trace_event_read_lock() held. */
4496 enum print_line_t print_trace_line(struct trace_iterator *iter)
4498 struct trace_array *tr = iter->tr;
4499 unsigned long trace_flags = tr->trace_flags;
4500 enum print_line_t ret;
4502 if (iter->lost_events) {
4503 if (iter->lost_events == (unsigned long)-1)
4504 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4507 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4508 iter->cpu, iter->lost_events);
4509 if (trace_seq_has_overflowed(&iter->seq))
4510 return TRACE_TYPE_PARTIAL_LINE;
4513 if (iter->trace && iter->trace->print_line) {
4514 ret = iter->trace->print_line(iter);
4515 if (ret != TRACE_TYPE_UNHANDLED)
4519 if (iter->ent->type == TRACE_BPUTS &&
4520 trace_flags & TRACE_ITER_PRINTK &&
4521 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4522 return trace_print_bputs_msg_only(iter);
4524 if (iter->ent->type == TRACE_BPRINT &&
4525 trace_flags & TRACE_ITER_PRINTK &&
4526 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4527 return trace_print_bprintk_msg_only(iter);
4529 if (iter->ent->type == TRACE_PRINT &&
4530 trace_flags & TRACE_ITER_PRINTK &&
4531 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4532 return trace_print_printk_msg_only(iter);
4534 if (trace_flags & TRACE_ITER_BIN)
4535 return print_bin_fmt(iter);
4537 if (trace_flags & TRACE_ITER_HEX)
4538 return print_hex_fmt(iter);
4540 if (trace_flags & TRACE_ITER_RAW)
4541 return print_raw_fmt(iter);
4543 return print_trace_fmt(iter);
4546 void trace_latency_header(struct seq_file *m)
4548 struct trace_iterator *iter = m->private;
4549 struct trace_array *tr = iter->tr;
4551 /* print nothing if the buffers are empty */
4552 if (trace_empty(iter))
4555 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4556 print_trace_header(m, iter);
4558 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4559 print_lat_help_header(m);
4562 void trace_default_header(struct seq_file *m)
4564 struct trace_iterator *iter = m->private;
4565 struct trace_array *tr = iter->tr;
4566 unsigned long trace_flags = tr->trace_flags;
4568 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4571 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4572 /* print nothing if the buffers are empty */
4573 if (trace_empty(iter))
4575 print_trace_header(m, iter);
4576 if (!(trace_flags & TRACE_ITER_VERBOSE))
4577 print_lat_help_header(m);
4579 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4580 if (trace_flags & TRACE_ITER_IRQ_INFO)
4581 print_func_help_header_irq(iter->array_buffer,
4584 print_func_help_header(iter->array_buffer, m,
4590 static void test_ftrace_alive(struct seq_file *m)
4592 if (!ftrace_is_dead())
seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
	    "#          MAY BE MISSING FUNCTION EVENTS\n");
4598 #ifdef CONFIG_TRACER_MAX_TRACE
4599 static void show_snapshot_main_help(struct seq_file *m)
4601 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4602 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4603 "# Takes a snapshot of the main buffer.\n"
4604 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4605 "# (Doesn't have to be '2' works with any number that\n"
4606 "# is not a '0' or '1')\n");
4609 static void show_snapshot_percpu_help(struct seq_file *m)
4611 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4612 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4613 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4614 "# Takes a snapshot of the main buffer for this cpu.\n");
4616 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4617 "# Must use main snapshot file to allocate.\n");
4619 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4620 "# (Doesn't have to be '2' works with any number that\n"
4621 "# is not a '0' or '1')\n");
4624 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4626 if (iter->tr->allocated_snapshot)
4627 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4629 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4631 seq_puts(m, "# Snapshot commands:\n");
4632 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4633 show_snapshot_main_help(m);
4635 show_snapshot_percpu_help(m);
4638 /* Should never be called */
4639 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4642 static int s_show(struct seq_file *m, void *v)
4644 struct trace_iterator *iter = v;
4647 if (iter->ent == NULL) {
4649 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4651 test_ftrace_alive(m);
4653 if (iter->snapshot && trace_empty(iter))
4654 print_snapshot_help(m, iter);
4655 else if (iter->trace && iter->trace->print_header)
4656 iter->trace->print_header(m);
4658 trace_default_header(m);
4660 } else if (iter->leftover) {
4662 * If we filled the seq_file buffer earlier, we
4663 * want to just show it now.
4665 ret = trace_print_seq(m, &iter->seq);
4667 /* ret should this time be zero, but you never know */
4668 iter->leftover = ret;
4671 print_trace_line(iter);
4672 ret = trace_print_seq(m, &iter->seq);
4674 * If we overflow the seq_file buffer, then it will
4675 * ask us for this data again at start up.
4677 * ret is 0 if seq_file write succeeded.
4680 iter->leftover = ret;
4687 * Should be used after trace_array_get(), trace_types_lock
4688 * ensures that i_cdev was already initialized.
4690 static inline int tracing_get_cpu(struct inode *inode)
4692 if (inode->i_cdev) /* See trace_create_cpu_file() */
4693 return (long)inode->i_cdev - 1;
4694 return RING_BUFFER_ALL_CPUS;
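/*
 * Editor's note: trace_create_cpu_file() stores cpu + 1 in i_cdev, so a
 * NULL i_cdev decodes to RING_BUFFER_ALL_CPUS here and, for example,
 * i_cdev == (void *)3 decodes to cpu 2.
 */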
4697 static const struct seq_operations tracer_seq_ops = {
4704 static struct trace_iterator *
4705 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4707 struct trace_array *tr = inode->i_private;
4708 struct trace_iterator *iter;
4711 if (tracing_disabled)
4712 return ERR_PTR(-ENODEV);
4714 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4716 return ERR_PTR(-ENOMEM);
4718 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4720 if (!iter->buffer_iter)
4724 * trace_find_next_entry() may need to save off iter->ent.
4725 * It will place it into the iter->temp buffer. As most
4726 * events are less than 128, allocate a buffer of that size.
4727 * If one is greater, then trace_find_next_entry() will
4728 * allocate a new buffer to adjust for the bigger iter->ent.
4729 * It's not critical if it fails to get allocated here.
4731 iter->temp = kmalloc(128, GFP_KERNEL);
4733 iter->temp_size = 128;
* trace_event_printf() may need to modify the given format
* string to replace %p with %px so that it shows the real address
* instead of a hashed value. However, that is only needed for
* event tracing; other tracers may not need it. Defer the
* allocation until it is needed.
4746 * We make a copy of the current tracer to avoid concurrent
4747 * changes on it while we are reading.
4749 mutex_lock(&trace_types_lock);
4750 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4754 *iter->trace = *tr->current_trace;
4756 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4761 #ifdef CONFIG_TRACER_MAX_TRACE
4762 /* Currently only the top directory has a snapshot */
4763 if (tr->current_trace->print_max || snapshot)
4764 iter->array_buffer = &tr->max_buffer;
4767 iter->array_buffer = &tr->array_buffer;
4768 iter->snapshot = snapshot;
4770 iter->cpu_file = tracing_get_cpu(inode);
4771 mutex_init(&iter->mutex);
4773 /* Notify the tracer early; before we stop tracing. */
4774 if (iter->trace->open)
4775 iter->trace->open(iter);
4777 /* Annotate start of buffers if we had overruns */
4778 if (ring_buffer_overruns(iter->array_buffer->buffer))
4779 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4781 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4782 if (trace_clocks[tr->clock_id].in_ns)
4783 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4786 * If pause-on-trace is enabled, then stop the trace while
4787 * dumping, unless this is the "snapshot" file
4789 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4790 tracing_stop_tr(tr);
4792 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4793 for_each_tracing_cpu(cpu) {
4794 iter->buffer_iter[cpu] =
4795 ring_buffer_read_prepare(iter->array_buffer->buffer,
4798 ring_buffer_read_prepare_sync();
4799 for_each_tracing_cpu(cpu) {
4800 ring_buffer_read_start(iter->buffer_iter[cpu]);
4801 tracing_iter_reset(iter, cpu);
4804 cpu = iter->cpu_file;
4805 iter->buffer_iter[cpu] =
4806 ring_buffer_read_prepare(iter->array_buffer->buffer,
4808 ring_buffer_read_prepare_sync();
4809 ring_buffer_read_start(iter->buffer_iter[cpu]);
4810 tracing_iter_reset(iter, cpu);
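/*
 * Note the split read API used above: every iterator is prepared
 * first, a single ring_buffer_read_prepare_sync() then runs once for
 * the whole set, and only afterwards is each iterator started. The
 * point of the split (an assumption here; ring_buffer.c is the
 * authoritative reference) is to pay the synchronization cost once
 * rather than once per CPU.
 */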
4813 mutex_unlock(&trace_types_lock);
4818 mutex_unlock(&trace_types_lock);
4821 kfree(iter->buffer_iter);
4823 seq_release_private(inode, file);
4824 return ERR_PTR(-ENOMEM);
4827 int tracing_open_generic(struct inode *inode, struct file *filp)
4831 ret = tracing_check_open_get_tr(NULL);
4835 filp->private_data = inode->i_private;
4839 bool tracing_is_disabled(void)
4841 return tracing_disabled;
4845 * Open and update trace_array ref count.
4846 * Must have the current trace_array passed to it.
4848 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4850 struct trace_array *tr = inode->i_private;
4853 ret = tracing_check_open_get_tr(tr);
4857 filp->private_data = inode->i_private;
4862 static int tracing_mark_open(struct inode *inode, struct file *filp)
4864 stream_open(inode, filp);
4865 return tracing_open_generic_tr(inode, filp);
4868 static int tracing_release(struct inode *inode, struct file *file)
4870 struct trace_array *tr = inode->i_private;
4871 struct seq_file *m = file->private_data;
4872 struct trace_iterator *iter;
4875 if (!(file->f_mode & FMODE_READ)) {
4876 trace_array_put(tr);
4880 /* Writes do not use seq_file */
4882 mutex_lock(&trace_types_lock);
4884 for_each_tracing_cpu(cpu) {
4885 if (iter->buffer_iter[cpu])
4886 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4889 if (iter->trace && iter->trace->close)
4890 iter->trace->close(iter);
4892 if (!iter->snapshot && tr->stop_count)
4893 /* reenable tracing if it was previously enabled */
4894 tracing_start_tr(tr);
4896 __trace_array_put(tr);
4898 mutex_unlock(&trace_types_lock);
4900 mutex_destroy(&iter->mutex);
4901 free_cpumask_var(iter->started);
4905 kfree(iter->buffer_iter);
4906 seq_release_private(inode, file);
4911 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4913 struct trace_array *tr = inode->i_private;
4915 trace_array_put(tr);
4919 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4921 struct trace_array *tr = inode->i_private;
4923 trace_array_put(tr);
4925 return single_release(inode, file);
4928 static int tracing_open(struct inode *inode, struct file *file)
4930 struct trace_array *tr = inode->i_private;
4931 struct trace_iterator *iter;
4934 ret = tracing_check_open_get_tr(tr);
4938 /* If this file was open for write, then erase contents */
4939 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4940 int cpu = tracing_get_cpu(inode);
4941 struct array_buffer *trace_buf = &tr->array_buffer;
4943 #ifdef CONFIG_TRACER_MAX_TRACE
4944 if (tr->current_trace->print_max)
4945 trace_buf = &tr->max_buffer;
4948 if (cpu == RING_BUFFER_ALL_CPUS)
4949 tracing_reset_online_cpus(trace_buf);
4951 tracing_reset_cpu(trace_buf, cpu);
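/*
 * This O_TRUNC handling is what makes the documented idiom work:
 *
 *	# echo > trace
 *
 * The shell opens "trace" write-only with O_TRUNC, writes nothing,
 * and the reset above clears the buffer (a per-CPU trace file only
 * clears its own CPU).
 */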
4954 if (file->f_mode & FMODE_READ) {
4955 iter = __tracing_open(inode, file, false);
4957 ret = PTR_ERR(iter);
4958 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4959 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4963 trace_array_put(tr);
4969 * Some tracers are not suitable for instance buffers.
4970 * A tracer is always available for the global array (toplevel)
4971 * or if it explicitly states that it is.
4974 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4976 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4979 /* Find the next tracer that this trace array may use */
4980 static struct tracer *
4981 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4983 while (t && !trace_ok_for_array(t, tr))
4990 t_next(struct seq_file *m, void *v, loff_t *pos)
4992 struct trace_array *tr = m->private;
4993 struct tracer *t = v;
4998 t = get_tracer_for_array(tr, t->next);
5003 static void *t_start(struct seq_file *m, loff_t *pos)
5005 struct trace_array *tr = m->private;
5009 mutex_lock(&trace_types_lock);
5011 t = get_tracer_for_array(tr, trace_types);
5012 for (; t && l < *pos; t = t_next(m, t, &l))
5018 static void t_stop(struct seq_file *m, void *p)
5020 mutex_unlock(&trace_types_lock);
5023 static int t_show(struct seq_file *m, void *v)
5025 struct tracer *t = v;
5030 seq_puts(m, t->name);
5039 static const struct seq_operations show_traces_seq_ops = {
5046 static int show_traces_open(struct inode *inode, struct file *file)
5048 struct trace_array *tr = inode->i_private;
5052 ret = tracing_check_open_get_tr(tr);
5056 ret = seq_open(file, &show_traces_seq_ops);
5058 trace_array_put(tr);
5062 m = file->private_data;
5068 static int show_traces_release(struct inode *inode, struct file *file)
5070 struct trace_array *tr = inode->i_private;
5072 trace_array_put(tr);
5073 return seq_release(inode, file);
5077 tracing_write_stub(struct file *filp, const char __user *ubuf,
5078 size_t count, loff_t *ppos)
5083 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5087 if (file->f_mode & FMODE_READ)
5088 ret = seq_lseek(file, offset, whence);
5090 file->f_pos = ret = 0;
5095 static const struct file_operations tracing_fops = {
5096 .open = tracing_open,
5098 .write = tracing_write_stub,
5099 .llseek = tracing_lseek,
5100 .release = tracing_release,
5103 static const struct file_operations show_traces_fops = {
5104 .open = show_traces_open,
5106 .llseek = seq_lseek,
5107 .release = show_traces_release,
5111 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5112 size_t count, loff_t *ppos)
5114 struct trace_array *tr = file_inode(filp)->i_private;
5118 len = snprintf(NULL, 0, "%*pb\n",
5119 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5120 mask_str = kmalloc(len, GFP_KERNEL);
5124 len = snprintf(mask_str, len, "%*pb\n",
5125 cpumask_pr_args(tr->tracing_cpumask));
5130 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
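/*
 * The snprintf(NULL, 0, ...) call above is the standard two-pass
 * sizing idiom: with a zero-sized buffer it writes nothing but still
 * returns the length the output would need, so the allocation is
 * exact. For example:
 *
 *	len = snprintf(NULL, 0, "%d", 1234) + 1;   (len == 5)
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (buf)
 *		snprintf(buf, len, "%d", 1234);    (buf holds "1234")
 */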
5138 int tracing_set_cpumask(struct trace_array *tr,
5139 cpumask_var_t tracing_cpumask_new)
5146 local_irq_disable();
5147 arch_spin_lock(&tr->max_lock);
5148 for_each_tracing_cpu(cpu) {
5150 * Increase/decrease the disabled counter if we are
5151 * about to flip a bit in the cpumask:
5153 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5154 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5155 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5156 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5158 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5159 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5160 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5161 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5164 arch_spin_unlock(&tr->max_lock);
5167 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5173 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5174 size_t count, loff_t *ppos)
5176 struct trace_array *tr = file_inode(filp)->i_private;
5177 cpumask_var_t tracing_cpumask_new;
5180 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5183 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5187 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5191 free_cpumask_var(tracing_cpumask_new);
5196 free_cpumask_var(tracing_cpumask_new);
5201 static const struct file_operations tracing_cpumask_fops = {
5202 .open = tracing_open_generic_tr,
5203 .read = tracing_cpumask_read,
5204 .write = tracing_cpumask_write,
5205 .release = tracing_release_generic_tr,
5206 .llseek = generic_file_llseek,
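/*
 * Example interaction with this file (a hypothetical 4-CPU box):
 *
 *	# cat tracing_cpumask
 *	f
 *	# echo 3 > tracing_cpumask	(trace only CPUs 0 and 1)
 *
 * The value is a hex CPU bitmap, as parsed by cpumask_parse_user()
 * and printed by the "%*pb" format above.
 */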
5209 static int tracing_trace_options_show(struct seq_file *m, void *v)
5211 struct tracer_opt *trace_opts;
5212 struct trace_array *tr = m->private;
5216 mutex_lock(&trace_types_lock);
5217 tracer_flags = tr->current_trace->flags->val;
5218 trace_opts = tr->current_trace->flags->opts;
5220 for (i = 0; trace_options[i]; i++) {
5221 if (tr->trace_flags & (1 << i))
5222 seq_printf(m, "%s\n", trace_options[i]);
5224 seq_printf(m, "no%s\n", trace_options[i]);
5227 for (i = 0; trace_opts[i].name; i++) {
5228 if (tracer_flags & trace_opts[i].bit)
5229 seq_printf(m, "%s\n", trace_opts[i].name);
5231 seq_printf(m, "no%s\n", trace_opts[i].name);
5233 mutex_unlock(&trace_types_lock);
5238 static int __set_tracer_option(struct trace_array *tr,
5239 struct tracer_flags *tracer_flags,
5240 struct tracer_opt *opts, int neg)
5242 struct tracer *trace = tracer_flags->trace;
5245 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5250 tracer_flags->val &= ~opts->bit;
5252 tracer_flags->val |= opts->bit;
5256 /* Try to assign a tracer specific option */
5257 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5259 struct tracer *trace = tr->current_trace;
5260 struct tracer_flags *tracer_flags = trace->flags;
5261 struct tracer_opt *opts = NULL;
5264 for (i = 0; tracer_flags->opts[i].name; i++) {
5265 opts = &tracer_flags->opts[i];
5267 if (strcmp(cmp, opts->name) == 0)
5268 return __set_tracer_option(tr, trace->flags, opts, neg);
5274 /* Some tracers require overwrite to stay enabled */
5275 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5277 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5283 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5287 if ((mask == TRACE_ITER_RECORD_TGID) ||
5288 (mask == TRACE_ITER_RECORD_CMD))
5289 lockdep_assert_held(&event_mutex);
5291 /* do nothing if flag is already set */
5292 if (!!(tr->trace_flags & mask) == !!enabled)
5295 /* Give the tracer a chance to approve the change */
5296 if (tr->current_trace->flag_changed)
5297 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5301 tr->trace_flags |= mask;
5303 tr->trace_flags &= ~mask;
5305 if (mask == TRACE_ITER_RECORD_CMD)
5306 trace_event_enable_cmd_record(enabled);
5308 if (mask == TRACE_ITER_RECORD_TGID) {
5310 tgid_map_max = pid_max;
5311 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5315 * Pairs with smp_load_acquire() in
5316 * trace_find_tgid_ptr() to ensure that if it observes
5317 * the tgid_map we just allocated then it also observes
5318 * the corresponding tgid_map_max value.
5320 smp_store_release(&tgid_map, map);
5323 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5327 trace_event_enable_tgid_record(enabled);
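/*
 * A sketch of the reader side referenced in the comment above (the
 * real trace_find_tgid_ptr() may differ in detail): the acquire pairs
 * with the smp_store_release() so that a reader observing the new
 * tgid_map pointer also observes the matching tgid_map_max.
 *
 *	int *map = smp_load_acquire(&tgid_map);
 *
 *	if (unlikely(!map || pid > tgid_map_max))
 *		return NULL;
 *	return &map[pid];
 */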
5330 if (mask == TRACE_ITER_EVENT_FORK)
5331 trace_event_follow_fork(tr, enabled);
5333 if (mask == TRACE_ITER_FUNC_FORK)
5334 ftrace_pid_follow_fork(tr, enabled);
5336 if (mask == TRACE_ITER_OVERWRITE) {
5337 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5338 #ifdef CONFIG_TRACER_MAX_TRACE
5339 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5343 if (mask == TRACE_ITER_PRINTK) {
5344 trace_printk_start_stop_comm(enabled);
5345 trace_printk_control(enabled);
5351 int trace_set_options(struct trace_array *tr, char *option)
5356 size_t orig_len = strlen(option);
5359 cmp = strstrip(option);
5361 len = str_has_prefix(cmp, "no");
5367 mutex_lock(&event_mutex);
5368 mutex_lock(&trace_types_lock);
5370 ret = match_string(trace_options, -1, cmp);
5371 /* If no option could be set, test the specific tracer options */
5373 ret = set_tracer_option(tr, cmp, neg);
5375 ret = set_tracer_flag(tr, 1 << ret, !neg);
5377 mutex_unlock(&trace_types_lock);
5378 mutex_unlock(&event_mutex);
5381 * If the first trailing whitespace is replaced with '\0' by strstrip,
5382 * turn it back into a space.
5384 if (orig_len > strlen(option))
5385 option[strlen(option)] = ' ';
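/*
 * Example input, as written to the trace_options file (the option
 * name here is illustrative; see trace_options[] for the real list):
 *
 *	# echo sym-offset > trace_options	(set a flag)
 *	# echo nosym-offset > trace_options	(the "no" prefix clears it)
 *
 * Anything not found in trace_options[] is tried as a tracer-specific
 * option via set_tracer_option().
 */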
5390 static void __init apply_trace_boot_options(void)
5392 char *buf = trace_boot_options_buf;
5396 option = strsep(&buf, ",");
5402 trace_set_options(&global_trace, option);
5404 /* Put back the comma to allow this to be called again */
5411 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5412 size_t cnt, loff_t *ppos)
5414 struct seq_file *m = filp->private_data;
5415 struct trace_array *tr = m->private;
5419 if (cnt >= sizeof(buf))
5422 if (copy_from_user(buf, ubuf, cnt))
5427 ret = trace_set_options(tr, buf);
5436 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5438 struct trace_array *tr = inode->i_private;
5441 ret = tracing_check_open_get_tr(tr);
5445 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5447 trace_array_put(tr);
5452 static const struct file_operations tracing_iter_fops = {
5453 .open = tracing_trace_options_open,
5455 .llseek = seq_lseek,
5456 .release = tracing_single_release_tr,
5457 .write = tracing_trace_options_write,
5460 static const char readme_msg[] =
5461 "tracing mini-HOWTO:\n\n"
5462 "# echo 0 > tracing_on : quick way to disable tracing\n"
5463 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5464 " Important files:\n"
5465 " trace\t\t\t- The static contents of the buffer\n"
5466 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5467 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5468 " current_tracer\t- function and latency tracers\n"
5469 " available_tracers\t- list of configured tracers for current_tracer\n"
5470 " error_log\t- error log for failed commands (that support it)\n"
5471 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5472 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5473 " trace_clock\t\t-change the clock used to order events\n"
5474 " local: Per cpu clock but may not be synced across CPUs\n"
5475 " global: Synced across CPUs but slows tracing down.\n"
5476 " counter: Not a clock, but just an increment\n"
5477 " uptime: Jiffy counter from time of boot\n"
5478 " perf: Same clock that perf events use\n"
5479 #ifdef CONFIG_X86_64
5480 " x86-tsc: TSC cycle counter\n"
5482 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5483 " delta: Delta difference against a buffer-wide timestamp\n"
5484 " absolute: Absolute (standalone) timestamp\n"
5485 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5486 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5487 " tracing_cpumask\t- Limit which CPUs to trace\n"
5488 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5489 "\t\t\t Remove sub-buffer with rmdir\n"
5490 " trace_options\t\t- Set format or modify how tracing happens\n"
5491 "\t\t\t Disable an option by prefixing 'no' to the\n"
5492 "\t\t\t option name\n"
5493 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5494 #ifdef CONFIG_DYNAMIC_FTRACE
5495 "\n available_filter_functions - list of functions that can be filtered on\n"
5496 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5497 "\t\t\t functions\n"
5498 "\t accepts: func_full_name or glob-matching-pattern\n"
5499 "\t modules: Can select a group via module\n"
5500 "\t Format: :mod:<module-name>\n"
5501 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5502 "\t triggers: a command to perform when function is hit\n"
5503 "\t Format: <function>:<trigger>[:count]\n"
5504 "\t trigger: traceon, traceoff\n"
5505 "\t\t enable_event:<system>:<event>\n"
5506 "\t\t disable_event:<system>:<event>\n"
5507 #ifdef CONFIG_STACKTRACE
5510 #ifdef CONFIG_TRACER_SNAPSHOT
5515 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5516 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5517 "\t The first one will disable tracing every time do_fault is hit\n"
5518 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5519 "\t The first time do trap is hit and it disables tracing, the\n"
5520 "\t counter will decrement to 2. If tracing is already disabled,\n"
5521 "\t the counter will not decrement. It only decrements when the\n"
5522 "\t trigger did work\n"
5523 "\t To remove trigger without count:\n"
5524 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5525 "\t To remove trigger with a count:\n"
5526 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5527 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5528 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5529 "\t modules: Can select a group via module command :mod:\n"
5530 "\t Does not accept triggers\n"
5531 #endif /* CONFIG_DYNAMIC_FTRACE */
5532 #ifdef CONFIG_FUNCTION_TRACER
5533 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5535 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5538 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5539 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5540 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5541 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5543 #ifdef CONFIG_TRACER_SNAPSHOT
5544 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5545 "\t\t\t snapshot buffer. Read the contents for more\n"
5546 "\t\t\t information\n"
5548 #ifdef CONFIG_STACK_TRACER
5549 " stack_trace\t\t- Shows the max stack trace when active\n"
5550 " stack_max_size\t- Shows current max stack size that was traced\n"
5551 "\t\t\t Write into this file to reset the max size (trigger a\n"
5552 "\t\t\t new trace)\n"
5553 #ifdef CONFIG_DYNAMIC_FTRACE
5554 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5557 #endif /* CONFIG_STACK_TRACER */
5558 #ifdef CONFIG_DYNAMIC_EVENTS
5559 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5560 "\t\t\t Write into this file to define/undefine new trace events.\n"
5562 #ifdef CONFIG_KPROBE_EVENTS
5563 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5564 "\t\t\t Write into this file to define/undefine new trace events.\n"
5566 #ifdef CONFIG_UPROBE_EVENTS
5567 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5568 "\t\t\t Write into this file to define/undefine new trace events.\n"
5570 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5571 "\t accepts: event-definitions (one definition per line)\n"
5572 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5573 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5574 #ifdef CONFIG_HIST_TRIGGERS
5575 "\t s:[synthetic/]<event> <field> [<field>]\n"
5577 "\t e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]\n"
5578 "\t -:[<group>/]<event>\n"
5579 #ifdef CONFIG_KPROBE_EVENTS
5580 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5581 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5583 #ifdef CONFIG_UPROBE_EVENTS
5584 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5586 "\t args: <name>=fetcharg[:type]\n"
5587 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5588 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5589 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5591 "\t $stack<index>, $stack, $retval, $comm,\n"
5593 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5594 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5595 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5596 "\t <type>\\[<array-size>\\]\n"
5597 #ifdef CONFIG_HIST_TRIGGERS
5598 "\t field: <stype> <name>;\n"
5599 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5600 "\t [unsigned] char/int/long\n"
5602 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5603 "\t of the <attached-group>/<attached-event>.\n"
5605 " events/\t\t- Directory containing all trace event subsystems:\n"
5606 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5607 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5608 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5610 " filter\t\t- If set, only events passing filter are traced\n"
5611 " events/<system>/<event>/\t- Directory containing control files for\n"
5613 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5614 " filter\t\t- If set, only events passing filter are traced\n"
5615 " trigger\t\t- If set, a command to perform when event is hit\n"
5616 "\t Format: <trigger>[:count][if <filter>]\n"
5617 "\t trigger: traceon, traceoff\n"
5618 "\t enable_event:<system>:<event>\n"
5619 "\t disable_event:<system>:<event>\n"
5620 #ifdef CONFIG_HIST_TRIGGERS
5621 "\t enable_hist:<system>:<event>\n"
5622 "\t disable_hist:<system>:<event>\n"
5624 #ifdef CONFIG_STACKTRACE
5627 #ifdef CONFIG_TRACER_SNAPSHOT
5630 #ifdef CONFIG_HIST_TRIGGERS
5631 "\t\t hist (see below)\n"
5633 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5634 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5635 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5636 "\t events/block/block_unplug/trigger\n"
5637 "\t The first disables tracing every time block_unplug is hit.\n"
5638 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5639 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5640 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5641 "\t Like function triggers, the counter is only decremented if it\n"
5642 "\t enabled or disabled tracing.\n"
5643 "\t To remove a trigger without a count:\n"
5644 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5645 "\t To remove a trigger with a count:\n"
5646 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5647 "\t Filters can be ignored when removing a trigger.\n"
5648 #ifdef CONFIG_HIST_TRIGGERS
5649 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5650 "\t Format: hist:keys=<field1[,field2,...]>\n"
5651 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5652 "\t [:values=<field1[,field2,...]>]\n"
5653 "\t [:sort=<field1[,field2,...]>]\n"
5654 "\t [:size=#entries]\n"
5655 "\t [:pause][:continue][:clear]\n"
5656 "\t [:name=histname1]\n"
5657 "\t [:<handler>.<action>]\n"
5658 "\t [if <filter>]\n\n"
5659 "\t Note, special fields can be used as well:\n"
5660 "\t common_timestamp - to record current timestamp\n"
5661 "\t common_cpu - to record the CPU the event happened on\n"
5663 "\t A hist trigger variable can be:\n"
5664 "\t - a reference to a field e.g. x=current_timestamp,\n"
5665 "\t - a reference to another variable e.g. y=$x,\n"
5666 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5667 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5669 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5670 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5671 "\t variable reference, field or numeric literal.\n"
5673 "\t When a matching event is hit, an entry is added to a hash\n"
5674 "\t table using the key(s) and value(s) named, and the value of a\n"
5675 "\t sum called 'hitcount' is incremented. Keys and values\n"
5676 "\t correspond to fields in the event's format description. Keys\n"
5677 "\t can be any field, or the special string 'stacktrace'.\n"
5678 "\t Compound keys consisting of up to two fields can be specified\n"
5679 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5680 "\t fields. Sort keys consisting of up to two fields can be\n"
5681 "\t specified using the 'sort' keyword. The sort direction can\n"
5682 "\t be modified by appending '.descending' or '.ascending' to a\n"
5683 "\t sort field. The 'size' parameter can be used to specify more\n"
5684 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5685 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5686 "\t its histogram data will be shared with other triggers of the\n"
5687 "\t same name, and trigger hits will update this common data.\n\n"
5688 "\t Reading the 'hist' file for the event will dump the hash\n"
5689 "\t table in its entirety to stdout. If there are multiple hist\n"
5690 "\t triggers attached to an event, there will be a table for each\n"
5691 "\t trigger in the output. The table displayed for a named\n"
5692 "\t trigger will be the same as any other instance having the\n"
5693 "\t same name. The default format used to display a given field\n"
5694 "\t can be modified by appending any of the following modifiers\n"
5695 "\t to the field name, as applicable:\n\n"
5696 "\t .hex display a number as a hex value\n"
5697 "\t .sym display an address as a symbol\n"
5698 "\t .sym-offset display an address as a symbol and offset\n"
5699 "\t .execname display a common_pid as a program name\n"
5700 "\t .syscall display a syscall id as a syscall name\n"
5701 "\t .log2 display log2 value rather than raw number\n"
5702 "\t .buckets=size display values in groups of size rather than raw number\n"
5703 "\t .usecs display a common_timestamp in microseconds\n\n"
5704 "\t The 'pause' parameter can be used to pause an existing hist\n"
5705 "\t trigger or to start a hist trigger but not log any events\n"
5706 "\t until told to do so. 'continue' can be used to start or\n"
5707 "\t restart a paused hist trigger.\n\n"
5708 "\t The 'clear' parameter will clear the contents of a running\n"
5709 "\t hist trigger and leave its current paused/active state\n"
5711 "\t The enable_hist and disable_hist triggers can be used to\n"
5712 "\t have one event conditionally start and stop another event's\n"
5713 "\t already-attached hist trigger. The syntax is analogous to\n"
5714 "\t the enable_event and disable_event triggers.\n\n"
5715 "\t Hist trigger handlers and actions are executed whenever a\n"
5716 "\t a histogram entry is added or updated. They take the form:\n\n"
5717 "\t <handler>.<action>\n\n"
5718 "\t The available handlers are:\n\n"
5719 "\t onmatch(matching.event) - invoke on addition or update\n"
5720 "\t onmax(var) - invoke if var exceeds current max\n"
5721 "\t onchange(var) - invoke action if var changes\n\n"
5722 "\t The available actions are:\n\n"
5723 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5724 "\t save(field,...) - save current event fields\n"
5725 #ifdef CONFIG_TRACER_SNAPSHOT
5726 "\t snapshot() - snapshot the trace buffer\n\n"
5728 #ifdef CONFIG_SYNTH_EVENTS
5729 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5730 "\t Write into this file to define/undefine new synthetic events.\n"
5731 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5737 tracing_readme_read(struct file *filp, char __user *ubuf,
5738 size_t cnt, loff_t *ppos)
5740 return simple_read_from_buffer(ubuf, cnt, ppos,
5741 readme_msg, strlen(readme_msg));
5744 static const struct file_operations tracing_readme_fops = {
5745 .open = tracing_open_generic,
5746 .read = tracing_readme_read,
5747 .llseek = generic_file_llseek,
5750 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5754 return trace_find_tgid_ptr(pid);
5757 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5761 return trace_find_tgid_ptr(pid);
5764 static void saved_tgids_stop(struct seq_file *m, void *v)
5768 static int saved_tgids_show(struct seq_file *m, void *v)
5770 int *entry = (int *)v;
5771 int pid = entry - tgid_map;
5777 seq_printf(m, "%d %d\n", pid, tgid);
5781 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5782 .start = saved_tgids_start,
5783 .stop = saved_tgids_stop,
5784 .next = saved_tgids_next,
5785 .show = saved_tgids_show,
5788 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5792 ret = tracing_check_open_get_tr(NULL);
5796 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5800 static const struct file_operations tracing_saved_tgids_fops = {
5801 .open = tracing_saved_tgids_open,
5803 .llseek = seq_lseek,
5804 .release = seq_release,
5807 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5809 unsigned int *ptr = v;
5811 if (*pos || m->count)
5816 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5818 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5827 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5833 arch_spin_lock(&trace_cmdline_lock);
5835 v = &savedcmd->map_cmdline_to_pid[0];
5837 v = saved_cmdlines_next(m, v, &l);
5845 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5847 arch_spin_unlock(&trace_cmdline_lock);
5851 static int saved_cmdlines_show(struct seq_file *m, void *v)
5853 char buf[TASK_COMM_LEN];
5854 unsigned int *pid = v;
5856 __trace_find_cmdline(*pid, buf);
5857 seq_printf(m, "%d %s\n", *pid, buf);
5861 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5862 .start = saved_cmdlines_start,
5863 .next = saved_cmdlines_next,
5864 .stop = saved_cmdlines_stop,
5865 .show = saved_cmdlines_show,
5868 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5872 ret = tracing_check_open_get_tr(NULL);
5876 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5879 static const struct file_operations tracing_saved_cmdlines_fops = {
5880 .open = tracing_saved_cmdlines_open,
5882 .llseek = seq_lseek,
5883 .release = seq_release,
5887 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5888 size_t cnt, loff_t *ppos)
5893 arch_spin_lock(&trace_cmdline_lock);
5894 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5895 arch_spin_unlock(&trace_cmdline_lock);
5897 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5900 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5902 kfree(s->saved_cmdlines);
5903 kfree(s->map_cmdline_to_pid);
5907 static int tracing_resize_saved_cmdlines(unsigned int val)
5909 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5911 s = kmalloc(sizeof(*s), GFP_KERNEL);
5915 if (allocate_cmdlines_buffer(val, s) < 0) {
5920 arch_spin_lock(&trace_cmdline_lock);
5921 savedcmd_temp = savedcmd;
5923 arch_spin_unlock(&trace_cmdline_lock);
5924 free_saved_cmdlines_buffer(savedcmd_temp);
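/*
 * Note the shape of the update above: the replacement buffer is fully
 * allocated first, the arch spinlock section is reduced to a bare
 * pointer swap, and the old buffer is freed only after the lock is
 * dropped. Readers taking trace_cmdline_lock therefore always see a
 * complete buffer, and no allocation or free happens under the
 * non-sleeping lock.
 */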
5930 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5931 size_t cnt, loff_t *ppos)
5936 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5940 /* must have at least 1 entry and no more than PID_MAX_DEFAULT entries */
5941 if (!val || val > PID_MAX_DEFAULT)
5944 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5953 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5954 .open = tracing_open_generic,
5955 .read = tracing_saved_cmdlines_size_read,
5956 .write = tracing_saved_cmdlines_size_write,
5959 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5960 static union trace_eval_map_item *
5961 update_eval_map(union trace_eval_map_item *ptr)
5963 if (!ptr->map.eval_string) {
5964 if (ptr->tail.next) {
5965 ptr = ptr->tail.next;
5966 /* Set ptr to the next real item (skip head) */
5974 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5976 union trace_eval_map_item *ptr = v;
5979 * Paranoid! If ptr points to end, we don't want to increment past it.
5980 * This really should never happen.
5983 ptr = update_eval_map(ptr);
5984 if (WARN_ON_ONCE(!ptr))
5988 ptr = update_eval_map(ptr);
5993 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5995 union trace_eval_map_item *v;
5998 mutex_lock(&trace_eval_mutex);
6000 v = trace_eval_maps;
6004 while (v && l < *pos) {
6005 v = eval_map_next(m, v, &l);
6011 static void eval_map_stop(struct seq_file *m, void *v)
6013 mutex_unlock(&trace_eval_mutex);
6016 static int eval_map_show(struct seq_file *m, void *v)
6018 union trace_eval_map_item *ptr = v;
6020 seq_printf(m, "%s %ld (%s)\n",
6021 ptr->map.eval_string, ptr->map.eval_value,
6027 static const struct seq_operations tracing_eval_map_seq_ops = {
6028 .start = eval_map_start,
6029 .next = eval_map_next,
6030 .stop = eval_map_stop,
6031 .show = eval_map_show,
6034 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6038 ret = tracing_check_open_get_tr(NULL);
6042 return seq_open(filp, &tracing_eval_map_seq_ops);
6045 static const struct file_operations tracing_eval_map_fops = {
6046 .open = tracing_eval_map_open,
6048 .llseek = seq_lseek,
6049 .release = seq_release,
6052 static inline union trace_eval_map_item *
6053 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6055 /* Return tail of array given the head */
6056 return ptr + ptr->head.length + 1;
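/*
 * The pointer arithmetic above assumes the layout built by
 * trace_insert_eval_map_file() below:
 *
 *	[ head (mod, length) ][ map[0] ... map[length-1] ][ tail (next) ]
 *
 * so head + length + 1 lands exactly on the tail item.
 */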
6060 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6063 struct trace_eval_map **stop;
6064 struct trace_eval_map **map;
6065 union trace_eval_map_item *map_array;
6066 union trace_eval_map_item *ptr;
6071 * The trace_eval_maps contains the map plus a head and tail item,
6072 * where the head holds the module and length of array, and the
6073 * tail holds a pointer to the next list.
6075 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6077 pr_warn("Unable to allocate trace eval mapping\n");
6081 mutex_lock(&trace_eval_mutex);
6083 if (!trace_eval_maps)
6084 trace_eval_maps = map_array;
6086 ptr = trace_eval_maps;
6088 ptr = trace_eval_jmp_to_tail(ptr);
6089 if (!ptr->tail.next)
6091 ptr = ptr->tail.next;
6094 ptr->tail.next = map_array;
6096 map_array->head.mod = mod;
6097 map_array->head.length = len;
6100 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6101 map_array->map = **map;
6104 memset(map_array, 0, sizeof(*map_array));
6106 mutex_unlock(&trace_eval_mutex);
6109 static void trace_create_eval_file(struct dentry *d_tracer)
6111 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6112 NULL, &tracing_eval_map_fops);
6115 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6116 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6117 static inline void trace_insert_eval_map_file(struct module *mod,
6118 struct trace_eval_map **start, int len) { }
6119 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6121 static void trace_insert_eval_map(struct module *mod,
6122 struct trace_eval_map **start, int len)
6124 struct trace_eval_map **map;
6131 trace_event_eval_update(map, len);
6133 trace_insert_eval_map_file(mod, start, len);
6137 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6138 size_t cnt, loff_t *ppos)
6140 struct trace_array *tr = filp->private_data;
6141 char buf[MAX_TRACER_SIZE+2];
6144 mutex_lock(&trace_types_lock);
6145 r = sprintf(buf, "%s\n", tr->current_trace->name);
6146 mutex_unlock(&trace_types_lock);
6148 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6151 int tracer_init(struct tracer *t, struct trace_array *tr)
6153 tracing_reset_online_cpus(&tr->array_buffer);
6157 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6161 for_each_tracing_cpu(cpu)
6162 per_cpu_ptr(buf->data, cpu)->entries = val;
6165 #ifdef CONFIG_TRACER_MAX_TRACE
6166 /* resize @trace_buf's buffer to the size of @size_buf's entries */
6167 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6168 struct array_buffer *size_buf, int cpu_id)
6172 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6173 for_each_tracing_cpu(cpu) {
6174 ret = ring_buffer_resize(trace_buf->buffer,
6175 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6178 per_cpu_ptr(trace_buf->data, cpu)->entries =
6179 per_cpu_ptr(size_buf->data, cpu)->entries;
6182 ret = ring_buffer_resize(trace_buf->buffer,
6183 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6185 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6186 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6191 #endif /* CONFIG_TRACER_MAX_TRACE */
6193 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6194 unsigned long size, int cpu)
6199 * If the kernel or user changes the size of the ring buffer,
6200 * we use the size that was given, and we can forget about
6201 * expanding it later.
6203 ring_buffer_expanded = true;
6205 /* May be called before buffers are initialized */
6206 if (!tr->array_buffer.buffer)
6209 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6213 #ifdef CONFIG_TRACER_MAX_TRACE
6214 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6215 !tr->current_trace->use_max_tr)
6218 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6220 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6221 &tr->array_buffer, cpu);
6224 * AARGH! We are left with different
6225 * size max buffer!!!!
6226 * The max buffer is our "snapshot" buffer.
6227 * When a tracer needs a snapshot (one of the
6228 * latency tracers), it swaps the max buffer
6229 * with the saved snapshot. We managed to
6230 * update the size of the main buffer, but failed to
6231 * update the size of the max buffer. But when we tried
6232 * to reset the main buffer to the original size, we
6233 * failed there too. This is very unlikely to
6234 * happen, but if it does, warn and kill all
6238 tracing_disabled = 1;
6243 if (cpu == RING_BUFFER_ALL_CPUS)
6244 set_buffer_entries(&tr->max_buffer, size);
6246 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6249 #endif /* CONFIG_TRACER_MAX_TRACE */
6251 if (cpu == RING_BUFFER_ALL_CPUS)
6252 set_buffer_entries(&tr->array_buffer, size);
6254 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6259 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6260 unsigned long size, int cpu_id)
6264 mutex_lock(&trace_types_lock);
6266 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6267 /* make sure this CPU is enabled in the mask */
6268 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6274 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6279 mutex_unlock(&trace_types_lock);
6286 * tracing_update_buffers - used by tracing facility to expand ring buffers
6288 * To save memory on systems where tracing is configured in but never
6289 * used, the ring buffers are initially set to a minimum size. Once a
6290 * user starts to use the tracing facility, the buffers need to grow
6291 * to their default size.
6293 * This function is to be called when a tracer is about to be used.
6295 int tracing_update_buffers(void)
6299 mutex_lock(&trace_types_lock);
6300 if (!ring_buffer_expanded)
6301 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6302 RING_BUFFER_ALL_CPUS);
6303 mutex_unlock(&trace_types_lock);
6308 struct trace_option_dentry;
6311 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6314 * Used to clear out the tracer before deletion of an instance.
6315 * Must have trace_types_lock held.
6317 static void tracing_set_nop(struct trace_array *tr)
6319 if (tr->current_trace == &nop_trace)
6322 tr->current_trace->enabled--;
6324 if (tr->current_trace->reset)
6325 tr->current_trace->reset(tr);
6327 tr->current_trace = &nop_trace;
6330 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6332 /* Only enable if the directory has been created already. */
6336 create_trace_option_files(tr, t);
6339 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6342 #ifdef CONFIG_TRACER_MAX_TRACE
6347 mutex_lock(&trace_types_lock);
6349 if (!ring_buffer_expanded) {
6350 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6351 RING_BUFFER_ALL_CPUS);
6357 for (t = trace_types; t; t = t->next) {
6358 if (strcmp(t->name, buf) == 0)
6365 if (t == tr->current_trace)
6368 #ifdef CONFIG_TRACER_SNAPSHOT
6369 if (t->use_max_tr) {
6370 arch_spin_lock(&tr->max_lock);
6371 if (tr->cond_snapshot)
6373 arch_spin_unlock(&tr->max_lock);
6378 /* Some tracers won't work on kernel command line */
6379 if (system_state < SYSTEM_RUNNING && t->noboot) {
6380 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6385 /* Some tracers are only allowed for the top level buffer */
6386 if (!trace_ok_for_array(t, tr)) {
6391 /* If trace pipe files are being read, we can't change the tracer */
6392 if (tr->trace_ref) {
6397 trace_branch_disable();
6399 tr->current_trace->enabled--;
6401 if (tr->current_trace->reset)
6402 tr->current_trace->reset(tr);
6404 /* Current trace needs to be nop_trace before synchronize_rcu */
6405 tr->current_trace = &nop_trace;
6407 #ifdef CONFIG_TRACER_MAX_TRACE
6408 had_max_tr = tr->allocated_snapshot;
6410 if (had_max_tr && !t->use_max_tr) {
6412 * We need to make sure that the update_max_tr sees that
6413 * current_trace changed to nop_trace to keep it from
6414 * swapping the buffers after we resize it.
6415 * The update_max_tr is called with interrupts disabled,
6416 * so a synchronize_rcu() is sufficient.
6423 #ifdef CONFIG_TRACER_MAX_TRACE
6424 if (t->use_max_tr && !had_max_tr) {
6425 ret = tracing_alloc_snapshot_instance(tr);
6432 ret = tracer_init(t, tr);
6437 tr->current_trace = t;
6438 tr->current_trace->enabled++;
6439 trace_branch_enable(tr);
6441 mutex_unlock(&trace_types_lock);
6447 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6448 size_t cnt, loff_t *ppos)
6450 struct trace_array *tr = filp->private_data;
6451 char buf[MAX_TRACER_SIZE+1];
6458 if (cnt > MAX_TRACER_SIZE)
6459 cnt = MAX_TRACER_SIZE;
6461 if (copy_from_user(buf, ubuf, cnt))
6466 /* strip trailing whitespace. */
6467 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6470 err = tracing_set_tracer(tr, buf);
6480 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6481 size_t cnt, loff_t *ppos)
6486 r = snprintf(buf, sizeof(buf), "%ld\n",
6487 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6488 if (r > sizeof(buf))
6490 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6494 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6495 size_t cnt, loff_t *ppos)
6500 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6510 tracing_thresh_read(struct file *filp, char __user *ubuf,
6511 size_t cnt, loff_t *ppos)
6513 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6517 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6518 size_t cnt, loff_t *ppos)
6520 struct trace_array *tr = filp->private_data;
6523 mutex_lock(&trace_types_lock);
6524 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6528 if (tr->current_trace->update_thresh) {
6529 ret = tr->current_trace->update_thresh(tr);
6536 mutex_unlock(&trace_types_lock);
6541 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6544 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6545 size_t cnt, loff_t *ppos)
6547 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6551 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6552 size_t cnt, loff_t *ppos)
6554 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6559 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6561 struct trace_array *tr = inode->i_private;
6562 struct trace_iterator *iter;
6565 ret = tracing_check_open_get_tr(tr);
6569 mutex_lock(&trace_types_lock);
6571 /* create a buffer to store the information to pass to userspace */
6572 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6575 __trace_array_put(tr);
6579 trace_seq_init(&iter->seq);
6580 iter->trace = tr->current_trace;
6582 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6587 /* trace pipe does not show start of buffer */
6588 cpumask_setall(iter->started);
6590 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6591 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6593 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6594 if (trace_clocks[tr->clock_id].in_ns)
6595 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6598 iter->array_buffer = &tr->array_buffer;
6599 iter->cpu_file = tracing_get_cpu(inode);
6600 mutex_init(&iter->mutex);
6601 filp->private_data = iter;
6603 if (iter->trace->pipe_open)
6604 iter->trace->pipe_open(iter);
6606 nonseekable_open(inode, filp);
6610 mutex_unlock(&trace_types_lock);
6615 __trace_array_put(tr);
6616 mutex_unlock(&trace_types_lock);
6620 static int tracing_release_pipe(struct inode *inode, struct file *file)
6622 struct trace_iterator *iter = file->private_data;
6623 struct trace_array *tr = inode->i_private;
6625 mutex_lock(&trace_types_lock);
6629 if (iter->trace->pipe_close)
6630 iter->trace->pipe_close(iter);
6632 mutex_unlock(&trace_types_lock);
6634 free_cpumask_var(iter->started);
6635 mutex_destroy(&iter->mutex);
6638 trace_array_put(tr);
6644 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6646 struct trace_array *tr = iter->tr;
6648 /* Iterators are static; they should be either filled or empty */
6649 if (trace_buffer_iter(iter, iter->cpu_file))
6650 return EPOLLIN | EPOLLRDNORM;
6652 if (tr->trace_flags & TRACE_ITER_BLOCK)
6654 * Always select as readable when in blocking mode
6656 return EPOLLIN | EPOLLRDNORM;
6658 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6663 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6665 struct trace_iterator *iter = filp->private_data;
6667 return trace_poll(iter, filp, poll_table);
6670 /* Must be called with iter->mutex held. */
6671 static int tracing_wait_pipe(struct file *filp)
6673 struct trace_iterator *iter = filp->private_data;
6676 while (trace_empty(iter)) {
6678 if ((filp->f_flags & O_NONBLOCK)) {
6683 * We block until we read something and tracing is disabled.
6684 * We still block if tracing is disabled, but we have never
6685 * read anything. This allows a user to cat this file, and
6686 * then enable tracing. But after we have read something,
6687 * we give an EOF when tracing is again disabled.
6689 * iter->pos will be 0 if we haven't read anything.
6691 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6694 mutex_unlock(&iter->mutex);
6696 ret = wait_on_pipe(iter, 0);
6698 mutex_lock(&iter->mutex);
6711 tracing_read_pipe(struct file *filp, char __user *ubuf,
6712 size_t cnt, loff_t *ppos)
6714 struct trace_iterator *iter = filp->private_data;
6718 * Avoid more than one consumer on a single file descriptor
6719 * This is just a matter of trace coherency; the ring buffer itself
6722 mutex_lock(&iter->mutex);
6724 /* return any leftover data */
6725 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6729 trace_seq_init(&iter->seq);
6731 if (iter->trace->read) {
6732 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6738 sret = tracing_wait_pipe(filp);
6742 /* stop when tracing is finished */
6743 if (trace_empty(iter)) {
6748 if (cnt >= PAGE_SIZE)
6749 cnt = PAGE_SIZE - 1;
6751 /* reset all but tr, trace, and overruns */
6752 trace_iterator_reset(iter);
6753 cpumask_clear(iter->started);
6754 trace_seq_init(&iter->seq);
6756 trace_event_read_lock();
6757 trace_access_lock(iter->cpu_file);
6758 while (trace_find_next_entry_inc(iter) != NULL) {
6759 enum print_line_t ret;
6760 int save_len = iter->seq.seq.len;
6762 ret = print_trace_line(iter);
6763 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6764 /* don't print partial lines */
6765 iter->seq.seq.len = save_len;
6768 if (ret != TRACE_TYPE_NO_CONSUME)
6769 trace_consume(iter);
6771 if (trace_seq_used(&iter->seq) >= cnt)
6775 * Setting the full flag means we reached the trace_seq buffer
6776 * size, and we should have left via the partial-line condition above.
6777 * One of the trace_seq_* functions is not being used properly.
6779 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6782 trace_access_unlock(iter->cpu_file);
6783 trace_event_read_unlock();
6785 /* Now copy what we have to the user */
6786 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6787 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6788 trace_seq_init(&iter->seq);
6791 * If there was nothing to send to user, in spite of consuming trace
6792 * entries, go back to wait for more entries.
6798 mutex_unlock(&iter->mutex);
6803 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6806 __free_page(spd->pages[idx]);
6810 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6816 /* Seq buffer is page-sized, exactly what we need. */
6818 save_len = iter->seq.seq.len;
6819 ret = print_trace_line(iter);
6821 if (trace_seq_has_overflowed(&iter->seq)) {
6822 iter->seq.seq.len = save_len;
6827 * This should not be hit, because it should only
6828 * be set if the iter->seq overflowed. But check it
6829 * anyway to be safe.
6831 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6832 iter->seq.seq.len = save_len;
6836 count = trace_seq_used(&iter->seq) - save_len;
6839 iter->seq.seq.len = save_len;
6843 if (ret != TRACE_TYPE_NO_CONSUME)
6844 trace_consume(iter);
6846 if (!trace_find_next_entry_inc(iter)) {
6856 static ssize_t tracing_splice_read_pipe(struct file *filp,
6858 struct pipe_inode_info *pipe,
6862 struct page *pages_def[PIPE_DEF_BUFFERS];
6863 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6864 struct trace_iterator *iter = filp->private_data;
6865 struct splice_pipe_desc spd = {
6867 .partial = partial_def,
6868 .nr_pages = 0, /* This gets updated below. */
6869 .nr_pages_max = PIPE_DEF_BUFFERS,
6870 .ops = &default_pipe_buf_ops,
6871 .spd_release = tracing_spd_release_pipe,
6877 if (splice_grow_spd(pipe, &spd))
6880 mutex_lock(&iter->mutex);
6882 if (iter->trace->splice_read) {
6883 ret = iter->trace->splice_read(iter, filp,
6884 ppos, pipe, len, flags);
6889 ret = tracing_wait_pipe(filp);
6893 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6898 trace_event_read_lock();
6899 trace_access_lock(iter->cpu_file);
6901 /* Fill as many pages as possible. */
6902 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6903 spd.pages[i] = alloc_page(GFP_KERNEL);
6907 rem = tracing_fill_pipe_page(rem, iter);
6909 /* Copy the data into the page, so we can start over. */
6910 ret = trace_seq_to_buffer(&iter->seq,
6911 page_address(spd.pages[i]),
6912 trace_seq_used(&iter->seq));
6914 __free_page(spd.pages[i]);
6917 spd.partial[i].offset = 0;
6918 spd.partial[i].len = trace_seq_used(&iter->seq);
6920 trace_seq_init(&iter->seq);
6923 trace_access_unlock(iter->cpu_file);
6924 trace_event_read_unlock();
6925 mutex_unlock(&iter->mutex);
6930 ret = splice_to_pipe(pipe, &spd);
6934 splice_shrink_spd(&spd);
6938 mutex_unlock(&iter->mutex);
6943 tracing_entries_read(struct file *filp, char __user *ubuf,
6944 size_t cnt, loff_t *ppos)
6946 struct inode *inode = file_inode(filp);
6947 struct trace_array *tr = inode->i_private;
6948 int cpu = tracing_get_cpu(inode);
6953 mutex_lock(&trace_types_lock);
6955 if (cpu == RING_BUFFER_ALL_CPUS) {
6956 int cpu, buf_size_same;
6961 /* check if all CPU buffer sizes are the same */
6962 for_each_tracing_cpu(cpu) {
6963 /* fill in the size from the first enabled CPU */
6965 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6966 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6972 if (buf_size_same) {
6973 if (!ring_buffer_expanded)
6974 r = sprintf(buf, "%lu (expanded: %lu)\n",
6976 trace_buf_size >> 10);
6978 r = sprintf(buf, "%lu\n", size >> 10);
6980 r = sprintf(buf, "X\n");
6982 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6984 mutex_unlock(&trace_types_lock);
6986 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6991 tracing_entries_write(struct file *filp, const char __user *ubuf,
6992 size_t cnt, loff_t *ppos)
6994 struct inode *inode = file_inode(filp);
6995 struct trace_array *tr = inode->i_private;
6999 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7003 /* must have at least 1 entry */
7007 /* value is in KB */
7009 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
7019 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7020 size_t cnt, loff_t *ppos)
7022 struct trace_array *tr = filp->private_data;
7025 unsigned long size = 0, expanded_size = 0;
7027 mutex_lock(&trace_types_lock);
7028 for_each_tracing_cpu(cpu) {
7029 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7030 if (!ring_buffer_expanded)
7031 expanded_size += trace_buf_size >> 10;
7033 if (ring_buffer_expanded)
7034 r = sprintf(buf, "%lu\n", size);
7036 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7037 mutex_unlock(&trace_types_lock);
7039 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7043 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7044 size_t cnt, loff_t *ppos)
7047 * There is no need to read what the user has written; this function
7048 * only exists so that an "echo" into the file does not report an error
7057 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7059 struct trace_array *tr = inode->i_private;
7061 /* disable tracing? */
7062 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7063 tracer_tracing_off(tr);
7064 /* resize the ring buffer to 0 */
7065 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7067 trace_array_put(tr);
7073 tracing_mark_write(struct file *filp, const char __user *ubuf,
7074 size_t cnt, loff_t *fpos)
7076 struct trace_array *tr = filp->private_data;
7077 struct ring_buffer_event *event;
7078 enum event_trigger_type tt = ETT_NONE;
7079 struct trace_buffer *buffer;
7080 struct print_entry *entry;
7085 /* Used in tracing_mark_raw_write() as well */
7086 #define FAULTED_STR "<faulted>"
7087 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7089 if (tracing_disabled)
7092 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7095 if (cnt > TRACE_BUF_SIZE)
7096 cnt = TRACE_BUF_SIZE;
7098 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7100 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7102 /* If less than "<faulted>", then make sure we can still add that */
7103 if (cnt < FAULTED_SIZE)
7104 size += FAULTED_SIZE - cnt;
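/*
 * Worked example of the size calculation: a 3-byte write gives
 * size = sizeof(*entry) + 3 + 2, and since 3 < FAULTED_SIZE (9),
 * another 9 - 3 = 6 bytes are added, guaranteeing room for the
 * "<faulted>" string if the user copy below faults.
 */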
7106 buffer = tr->array_buffer.buffer;
7107 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7109 if (unlikely(!event))
7110 /* Ring buffer disabled, return as if not open for write */
7113 entry = ring_buffer_event_data(event);
7114 entry->ip = _THIS_IP_;
7116 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7118 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7124 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7125 /* do not add \n before testing triggers, but add \0 */
7126 entry->buf[cnt] = '\0';
7127 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7130 if (entry->buf[cnt - 1] != '\n') {
7131 entry->buf[cnt] = '\n';
7132 entry->buf[cnt + 1] = '\0';
7134 entry->buf[cnt] = '\0';
7136 if (static_branch_unlikely(&trace_marker_exports_enabled))
7137 ftrace_exports(event, TRACE_EXPORT_MARKER);
7138 __buffer_unlock_commit(buffer, event);
7141 event_triggers_post_call(tr->trace_marker_file, tt);
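/*
 * Example (the path assumes the usual tracefs mount point):
 *
 *	# echo "frame start" > /sys/kernel/tracing/trace_marker
 *
 * The string is recorded as a print event and shows up inline in the
 * trace output, which makes it handy for correlating userspace
 * activity with kernel events.
 */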
7146 /* Limit it for now to 3K (including tag) */
7147 #define RAW_DATA_MAX_SIZE (1024*3)
7150 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7151 size_t cnt, loff_t *fpos)
7153 struct trace_array *tr = filp->private_data;
7154 struct ring_buffer_event *event;
7155 struct trace_buffer *buffer;
7156 struct raw_data_entry *entry;
7161 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7163 if (tracing_disabled)
7166 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7169 /* The marker must at least have a tag id */
7170 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7173 if (cnt > TRACE_BUF_SIZE)
7174 cnt = TRACE_BUF_SIZE;
7176 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7178 size = sizeof(*entry) + cnt;
7179 if (cnt < FAULT_SIZE_ID)
7180 size += FAULT_SIZE_ID - cnt;
7182 buffer = tr->array_buffer.buffer;
7183 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7186 /* Ring buffer disabled, return as if not open for write */
7189 entry = ring_buffer_event_data(event);
7191 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7194 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7199 __buffer_unlock_commit(buffer, event);
7204 static int tracing_clock_show(struct seq_file *m, void *v)
7206 struct trace_array *tr = m->private;
7209 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7211 "%s%s%s%s", i ? " " : "",
7212 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7213 i == tr->clock_id ? "]" : "");
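/*
 * The selected clock is bracketed, so reading trace_clock yields a
 * single line along the lines of (available names vary by config):
 *
 *	[local] global counter uptime perf x86-tsc
 */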
7219 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7223 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7224 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7227 if (i == ARRAY_SIZE(trace_clocks))
7230 mutex_lock(&trace_types_lock);
7234 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7237 * New clock may not be consistent with the previous clock.
7238 * Reset the buffer so that it doesn't have incomparable timestamps.
7240 tracing_reset_online_cpus(&tr->array_buffer);
7242 #ifdef CONFIG_TRACER_MAX_TRACE
7243 if (tr->max_buffer.buffer)
7244 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7245 tracing_reset_online_cpus(&tr->max_buffer);
7248 mutex_unlock(&trace_types_lock);
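/*
 * Example usage from user space (illustrative; the exact clock list
 * varies by architecture and configuration):
 *
 *	cat trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	echo global > trace_clock
 *
 * Note the reset above: switching clocks clears the buffer, so existing
 * events are dropped rather than left with incomparable timestamps.
 */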
7253 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7254 size_t cnt, loff_t *fpos)
7256 struct seq_file *m = filp->private_data;
7257 struct trace_array *tr = m->private;
7259 const char *clockstr;
7262 if (cnt >= sizeof(buf))
7265 if (copy_from_user(buf, ubuf, cnt))
7270 clockstr = strstrip(buf);
7272 ret = tracing_set_clock(tr, clockstr);
7281 static int tracing_clock_open(struct inode *inode, struct file *file)
7283 struct trace_array *tr = inode->i_private;
7286 ret = tracing_check_open_get_tr(tr);
7290 ret = single_open(file, tracing_clock_show, inode->i_private);
7292 trace_array_put(tr);
7297 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7299 struct trace_array *tr = m->private;
7301 mutex_lock(&trace_types_lock);
7303 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7304 seq_puts(m, "delta [absolute]\n");
7306 seq_puts(m, "[delta] absolute\n");
7308 mutex_unlock(&trace_types_lock);
7313 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7315 struct trace_array *tr = inode->i_private;
7318 ret = tracing_check_open_get_tr(tr);
7322 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7324 trace_array_put(tr);
7329 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7331 if (rbe == this_cpu_read(trace_buffered_event))
7332 return ring_buffer_time_stamp(buffer);
7334 return ring_buffer_event_time_stamp(buffer, rbe);
7338 * Set or disable using the per CPU trace_buffered_event when possible.
7340 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7344 mutex_lock(&trace_types_lock);
7346 if (set && tr->no_filter_buffering_ref++)
7350 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7355 --tr->no_filter_buffering_ref;
7358 mutex_unlock(&trace_types_lock);
7363 struct ftrace_buffer_info {
7364 struct trace_iterator iter;
7366 unsigned int spare_cpu;
7370 #ifdef CONFIG_TRACER_SNAPSHOT
7371 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7373 struct trace_array *tr = inode->i_private;
7374 struct trace_iterator *iter;
7378 ret = tracing_check_open_get_tr(tr);
7382 if (file->f_mode & FMODE_READ) {
7383 iter = __tracing_open(inode, file, true);
7385 ret = PTR_ERR(iter);
7387 /* Writes still need the seq_file to hold the private data */
7389 m = kzalloc(sizeof(*m), GFP_KERNEL);
7392 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7400 iter->array_buffer = &tr->max_buffer;
7401 iter->cpu_file = tracing_get_cpu(inode);
7403 file->private_data = m;
7407 trace_array_put(tr);
7413 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7416 struct seq_file *m = filp->private_data;
7417 struct trace_iterator *iter = m->private;
7418 struct trace_array *tr = iter->tr;
7422 ret = tracing_update_buffers();
7426 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7430 mutex_lock(&trace_types_lock);
7432 if (tr->current_trace->use_max_tr) {
7437 arch_spin_lock(&tr->max_lock);
7438 if (tr->cond_snapshot)
7440 arch_spin_unlock(&tr->max_lock);
7446 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7450 if (tr->allocated_snapshot)
7454 /* Only allow per-cpu swap if the ring buffer supports it */
7455 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7456 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7461 if (tr->allocated_snapshot)
7462 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7463 &tr->array_buffer, iter->cpu_file);
7465 ret = tracing_alloc_snapshot_instance(tr);
7468 local_irq_disable();
7469 /* Now, we're going to swap */
7470 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7471 update_max_tr(tr, current, smp_processor_id(), NULL);
7473 update_max_tr_single(tr, current, iter->cpu_file);
7477 if (tr->allocated_snapshot) {
7478 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7479 tracing_reset_online_cpus(&tr->max_buffer);
7481 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7491 mutex_unlock(&trace_types_lock);
7495 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7497 struct seq_file *m = file->private_data;
7500 ret = tracing_release(inode, file);
7502 if (file->f_mode & FMODE_READ)
7505 /* If write only, the seq_file is just a stub */
7513 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7514 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7515 size_t count, loff_t *ppos);
7516 static int tracing_buffers_release(struct inode *inode, struct file *file);
7517 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7518 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7520 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7522 struct ftrace_buffer_info *info;
7525 /* The following checks for tracefs lockdown */
7526 ret = tracing_buffers_open(inode, filp);
7530 info = filp->private_data;
7532 if (info->iter.trace->use_max_tr) {
7533 tracing_buffers_release(inode, filp);
7537 info->iter.snapshot = true;
7538 info->iter.array_buffer = &info->iter.tr->max_buffer;
7543 #endif /* CONFIG_TRACER_SNAPSHOT */
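/*
 * Example usage from user space (illustrative; see
 * Documentation/trace/ftrace.rst for the authoritative description):
 *
 *	echo 1 > snapshot		# take a snapshot (swap buffers)
 *	cat snapshot			# read the frozen buffer
 *	echo 2 > snapshot		# clear the snapshot contents
 *	echo 0 > snapshot		# free the snapshot buffer
 */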
7546 static const struct file_operations tracing_thresh_fops = {
7547 .open = tracing_open_generic,
7548 .read = tracing_thresh_read,
7549 .write = tracing_thresh_write,
7550 .llseek = generic_file_llseek,
7553 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7554 static const struct file_operations tracing_max_lat_fops = {
7555 .open = tracing_open_generic,
7556 .read = tracing_max_lat_read,
7557 .write = tracing_max_lat_write,
7558 .llseek = generic_file_llseek,
7562 static const struct file_operations set_tracer_fops = {
7563 .open = tracing_open_generic,
7564 .read = tracing_set_trace_read,
7565 .write = tracing_set_trace_write,
7566 .llseek = generic_file_llseek,
7569 static const struct file_operations tracing_pipe_fops = {
7570 .open = tracing_open_pipe,
7571 .poll = tracing_poll_pipe,
7572 .read = tracing_read_pipe,
7573 .splice_read = tracing_splice_read_pipe,
7574 .release = tracing_release_pipe,
7575 .llseek = no_llseek,
7578 static const struct file_operations tracing_entries_fops = {
7579 .open = tracing_open_generic_tr,
7580 .read = tracing_entries_read,
7581 .write = tracing_entries_write,
7582 .llseek = generic_file_llseek,
7583 .release = tracing_release_generic_tr,
7586 static const struct file_operations tracing_total_entries_fops = {
7587 .open = tracing_open_generic_tr,
7588 .read = tracing_total_entries_read,
7589 .llseek = generic_file_llseek,
7590 .release = tracing_release_generic_tr,
7593 static const struct file_operations tracing_free_buffer_fops = {
7594 .open = tracing_open_generic_tr,
7595 .write = tracing_free_buffer_write,
7596 .release = tracing_free_buffer_release,
7599 static const struct file_operations tracing_mark_fops = {
7600 .open = tracing_mark_open,
7601 .write = tracing_mark_write,
7602 .release = tracing_release_generic_tr,
7605 static const struct file_operations tracing_mark_raw_fops = {
7606 .open = tracing_mark_open,
7607 .write = tracing_mark_raw_write,
7608 .release = tracing_release_generic_tr,
7611 static const struct file_operations trace_clock_fops = {
7612 .open = tracing_clock_open,
7614 .llseek = seq_lseek,
7615 .release = tracing_single_release_tr,
7616 .write = tracing_clock_write,
7619 static const struct file_operations trace_time_stamp_mode_fops = {
7620 .open = tracing_time_stamp_mode_open,
7622 .llseek = seq_lseek,
7623 .release = tracing_single_release_tr,
7626 #ifdef CONFIG_TRACER_SNAPSHOT
7627 static const struct file_operations snapshot_fops = {
7628 .open = tracing_snapshot_open,
7630 .write = tracing_snapshot_write,
7631 .llseek = tracing_lseek,
7632 .release = tracing_snapshot_release,
7635 static const struct file_operations snapshot_raw_fops = {
7636 .open = snapshot_raw_open,
7637 .read = tracing_buffers_read,
7638 .release = tracing_buffers_release,
7639 .splice_read = tracing_buffers_splice_read,
7640 .llseek = no_llseek,
7643 #endif /* CONFIG_TRACER_SNAPSHOT */
7646 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7647 * @filp: The active open file structure
7648 * @ubuf: The userspace provided buffer containing the value to write
7649 * @cnt: The maximum number of bytes to read
7650 * @ppos: The current "file" position
7652 * This function implements the write interface for a struct trace_min_max_param.
7653 * The filp->private_data must point to a trace_min_max_param structure that
7654 * defines where to write the value, the min and the max acceptable values,
7655 * and a lock to protect the write.
7658 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7660 struct trace_min_max_param *param = filp->private_data;
7667 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7672 mutex_lock(param->lock);
7674 if (param->min && val < *param->min)
7677 if (param->max && val > *param->max)
7684 mutex_unlock(param->lock);
7693 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7694 * @filp: The active open file structure
7695 * @ubuf: The userspace provided buffer to read value into
7696 * @cnt: The maximum number of bytes to read
7697 * @ppos: The current "file" position
7699 * This function implements the read interface for a struct trace_min_max_param.
7700 * The filp->private_data must point to a trace_min_max_param struct with valid
7701 * data.
7704 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7706 struct trace_min_max_param *param = filp->private_data;
7707 char buf[U64_STR_SIZE];
7716 if (cnt > sizeof(buf))
7719 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7721 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7724 const struct file_operations trace_min_max_fops = {
7725 .open = tracing_open_generic,
7726 .read = trace_min_max_read,
7727 .write = trace_min_max_write,
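/*
 * Sketch of a typical setup (illustrative; all names below are
 * hypothetical, and the lock/val/min/max members are assumed from the
 * trace_min_max_param declaration in trace.h). A tracer exposing a
 * clamped u64 knob would do something like:
 *
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */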
7730 #define TRACING_LOG_ERRS_MAX 8
7731 #define TRACING_LOG_LOC_MAX 128
7733 #define CMD_PREFIX " Command: "
7736 const char **errs; /* ptr to loc-specific array of err strings */
7737 u8 type; /* index into errs -> specific err string */
7738 u16 pos; /* caret position */
7742 struct tracing_log_err {
7743 struct list_head list;
7744 struct err_info info;
7745 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7746 char *cmd; /* what caused err */
7749 static DEFINE_MUTEX(tracing_err_log_lock);
7751 static struct tracing_log_err *alloc_tracing_log_err(int len)
7753 struct tracing_log_err *err;
7755 err = kzalloc(sizeof(*err), GFP_KERNEL);
7757 return ERR_PTR(-ENOMEM);
7759 err->cmd = kzalloc(len, GFP_KERNEL);
7762 return ERR_PTR(-ENOMEM);
7768 static void free_tracing_log_err(struct tracing_log_err *err)
7774 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7777 struct tracing_log_err *err;
7779 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7780 err = alloc_tracing_log_err(len);
7781 if (PTR_ERR(err) != -ENOMEM)
7782 tr->n_err_log_entries++;
7787 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7789 err->cmd = kzalloc(len, GFP_KERNEL);
7791 return ERR_PTR(-ENOMEM);
7792 list_del(&err->list);
7798 * err_pos - find the position of a string within a command for error careting
7799 * @cmd: The tracing command that caused the error
7800 * @str: The string to position the caret at within @cmd
7802 * Finds the position of the first occurrence of @str within @cmd. The
7803 * return value can be passed to tracing_log_err() for caret placement
7806 * Returns the index within @cmd of the first occurrence of @str or 0
7807 * if @str was not found.
7809 unsigned int err_pos(char *cmd, const char *str)
7813 if (WARN_ON(!strlen(cmd)))
7816 found = strstr(cmd, str);
7824 * tracing_log_err - write an error to the tracing error log
7825 * @tr: The associated trace array for the error (NULL for top level array)
7826 * @loc: A string describing where the error occurred
7827 * @cmd: The tracing command that caused the error
7828 * @errs: The array of loc-specific static error strings
7829 * @type: The index into errs[], which produces the specific static err string
7830 * @pos: The position the caret should be placed in the cmd
7832 * Writes an error into tracing/error_log of the form:
7834 * <loc>: error: <text>
7835 *   Command: <cmd>
7836 *              ^
7838 * tracing/error_log is a small log file containing the last
7839 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7840 * unless there has been a tracing error, and the error log can be
7841 * cleared and have its memory freed by writing the empty string in
7842 * truncation mode to it i.e. echo > tracing/error_log.
7844 * NOTE: the @errs array along with the @type param are used to
7845 * produce a static error string - this string is not copied and saved
7846 * when the error is logged - only a pointer to it is saved. See
7847 * existing callers for examples of how static strings are typically
7848 * defined for use with tracing_log_err().
7850 void tracing_log_err(struct trace_array *tr,
7851 const char *loc, const char *cmd,
7852 const char **errs, u8 type, u16 pos)
7854 struct tracing_log_err *err;
7860 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7862 mutex_lock(&tracing_err_log_lock);
7863 err = get_tracing_log_err(tr, len);
7864 if (PTR_ERR(err) == -ENOMEM) {
7865 mutex_unlock(&tracing_err_log_lock);
7869 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7870 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7872 err->info.errs = errs;
7873 err->info.type = type;
7874 err->info.pos = pos;
7875 err->info.ts = local_clock();
7877 list_add_tail(&err->list, &tr->err_log);
7878 mutex_unlock(&tracing_err_log_lock);
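/*
 * Sketch of a typical caller (illustrative; the names below are
 * hypothetical):
 *
 *	static const char *my_errs[] = { "Bad argument", "Value out of range" };
 *
 *	tracing_log_err(tr, "my_subsys", cmd, my_errs, 0,
 *			err_pos(cmd, bad_token));
 *
 * which logs "my_subsys: error: Bad argument" with the caret placed
 * under the first occurrence of bad_token within cmd.
 */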
7881 static void clear_tracing_err_log(struct trace_array *tr)
7883 struct tracing_log_err *err, *next;
7885 mutex_lock(&tracing_err_log_lock);
7886 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7887 list_del(&err->list);
7888 free_tracing_log_err(err);
7891 tr->n_err_log_entries = 0;
7892 mutex_unlock(&tracing_err_log_lock);
7895 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7897 struct trace_array *tr = m->private;
7899 mutex_lock(&tracing_err_log_lock);
7901 return seq_list_start(&tr->err_log, *pos);
7904 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7906 struct trace_array *tr = m->private;
7908 return seq_list_next(v, &tr->err_log, pos);
7911 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7913 mutex_unlock(&tracing_err_log_lock);
7916 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7920 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7922 for (i = 0; i < pos; i++)
7927 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7929 struct tracing_log_err *err = v;
7932 const char *err_text = err->info.errs[err->info.type];
7933 u64 sec = err->info.ts;
7936 nsec = do_div(sec, NSEC_PER_SEC);
7937 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7938 err->loc, err_text);
7939 seq_printf(m, "%s", err->cmd);
7940 tracing_err_log_show_pos(m, err->info.pos);
7946 static const struct seq_operations tracing_err_log_seq_ops = {
7947 .start = tracing_err_log_seq_start,
7948 .next = tracing_err_log_seq_next,
7949 .stop = tracing_err_log_seq_stop,
7950 .show = tracing_err_log_seq_show
7953 static int tracing_err_log_open(struct inode *inode, struct file *file)
7955 struct trace_array *tr = inode->i_private;
7958 ret = tracing_check_open_get_tr(tr);
7962 /* If this file was opened for write, then erase contents */
7963 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7964 clear_tracing_err_log(tr);
7966 if (file->f_mode & FMODE_READ) {
7967 ret = seq_open(file, &tracing_err_log_seq_ops);
7969 struct seq_file *m = file->private_data;
7972 trace_array_put(tr);
7978 static ssize_t tracing_err_log_write(struct file *file,
7979 const char __user *buffer,
7980 size_t count, loff_t *ppos)
7985 static int tracing_err_log_release(struct inode *inode, struct file *file)
7987 struct trace_array *tr = inode->i_private;
7989 trace_array_put(tr);
7991 if (file->f_mode & FMODE_READ)
7992 seq_release(inode, file);
7997 static const struct file_operations tracing_err_log_fops = {
7998 .open = tracing_err_log_open,
7999 .write = tracing_err_log_write,
8001 .llseek = seq_lseek,
8002 .release = tracing_err_log_release,
8005 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8007 struct trace_array *tr = inode->i_private;
8008 struct ftrace_buffer_info *info;
8011 ret = tracing_check_open_get_tr(tr);
8015 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8017 trace_array_put(tr);
8021 mutex_lock(&trace_types_lock);
8024 info->iter.cpu_file = tracing_get_cpu(inode);
8025 info->iter.trace = tr->current_trace;
8026 info->iter.array_buffer = &tr->array_buffer;
8028 /* Force reading ring buffer for first read */
8029 info->read = (unsigned int)-1;
8031 filp->private_data = info;
8035 mutex_unlock(&trace_types_lock);
8037 ret = nonseekable_open(inode, filp);
8039 trace_array_put(tr);
8045 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8047 struct ftrace_buffer_info *info = filp->private_data;
8048 struct trace_iterator *iter = &info->iter;
8050 return trace_poll(iter, filp, poll_table);
8054 tracing_buffers_read(struct file *filp, char __user *ubuf,
8055 size_t count, loff_t *ppos)
8057 struct ftrace_buffer_info *info = filp->private_data;
8058 struct trace_iterator *iter = &info->iter;
8065 #ifdef CONFIG_TRACER_MAX_TRACE
8066 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8071 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8073 if (IS_ERR(info->spare)) {
8074 ret = PTR_ERR(info->spare);
8077 info->spare_cpu = iter->cpu_file;
8083 /* Do we have previous read data to read? */
8084 if (info->read < PAGE_SIZE)
8088 trace_access_lock(iter->cpu_file);
8089 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8093 trace_access_unlock(iter->cpu_file);
8096 if (trace_empty(iter)) {
8097 if ((filp->f_flags & O_NONBLOCK))
8100 ret = wait_on_pipe(iter, 0);
8111 size = PAGE_SIZE - info->read;
8115 ret = copy_to_user(ubuf, info->spare + info->read, size);
8127 static int tracing_buffers_release(struct inode *inode, struct file *file)
8129 struct ftrace_buffer_info *info = file->private_data;
8130 struct trace_iterator *iter = &info->iter;
8132 mutex_lock(&trace_types_lock);
8134 iter->tr->trace_ref--;
8136 __trace_array_put(iter->tr);
8139 ring_buffer_free_read_page(iter->array_buffer->buffer,
8140 info->spare_cpu, info->spare);
8143 mutex_unlock(&trace_types_lock);
8149 struct trace_buffer *buffer;
8152 refcount_t refcount;
8155 static void buffer_ref_release(struct buffer_ref *ref)
8157 if (!refcount_dec_and_test(&ref->refcount))
8159 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8163 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8164 struct pipe_buffer *buf)
8166 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8168 buffer_ref_release(ref);
8172 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8173 struct pipe_buffer *buf)
8175 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8177 if (refcount_read(&ref->refcount) > INT_MAX/2)
8180 refcount_inc(&ref->refcount);
8184 /* Pipe buffer operations for a buffer. */
8185 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8186 .release = buffer_pipe_buf_release,
8187 .get = buffer_pipe_buf_get,
8191 * Callback from splice_to_pipe(), if we need to release some pages
8192 * at the end of the spd in case we errored out in filling the pipe.
8194 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8196 struct buffer_ref *ref =
8197 (struct buffer_ref *)spd->partial[i].private;
8199 buffer_ref_release(ref);
8200 spd->partial[i].private = 0;
8204 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8205 struct pipe_inode_info *pipe, size_t len,
8208 struct ftrace_buffer_info *info = file->private_data;
8209 struct trace_iterator *iter = &info->iter;
8210 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8211 struct page *pages_def[PIPE_DEF_BUFFERS];
8212 struct splice_pipe_desc spd = {
8214 .partial = partial_def,
8215 .nr_pages_max = PIPE_DEF_BUFFERS,
8216 .ops = &buffer_pipe_buf_ops,
8217 .spd_release = buffer_spd_release,
8219 struct buffer_ref *ref;
8223 #ifdef CONFIG_TRACER_MAX_TRACE
8224 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8228 if (*ppos & (PAGE_SIZE - 1))
8231 if (len & (PAGE_SIZE - 1)) {
8232 if (len < PAGE_SIZE)
8237 if (splice_grow_spd(pipe, &spd))
8241 trace_access_lock(iter->cpu_file);
8242 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8244 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8248 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8254 refcount_set(&ref->refcount, 1);
8255 ref->buffer = iter->array_buffer->buffer;
8256 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8257 if (IS_ERR(ref->page)) {
8258 ret = PTR_ERR(ref->page);
8263 ref->cpu = iter->cpu_file;
8265 r = ring_buffer_read_page(ref->buffer, &ref->page,
8266 len, iter->cpu_file, 1);
8268 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8274 page = virt_to_page(ref->page);
8276 spd.pages[i] = page;
8277 spd.partial[i].len = PAGE_SIZE;
8278 spd.partial[i].offset = 0;
8279 spd.partial[i].private = (unsigned long)ref;
8283 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8286 trace_access_unlock(iter->cpu_file);
8289 /* did we read anything? */
8290 if (!spd.nr_pages) {
8295 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8298 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8305 ret = splice_to_pipe(pipe, &spd);
8307 splice_shrink_spd(&spd);
8312 static const struct file_operations tracing_buffers_fops = {
8313 .open = tracing_buffers_open,
8314 .read = tracing_buffers_read,
8315 .poll = tracing_buffers_poll,
8316 .release = tracing_buffers_release,
8317 .splice_read = tracing_buffers_splice_read,
8318 .llseek = no_llseek,
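/*
 * Note on usage (illustrative): trace_pipe_raw hands out raw ring-buffer
 * pages, so splice() consumers must use page-aligned offsets and lengths,
 * as enforced in tracing_buffers_splice_read() above, e.g.:
 *
 *	splice(raw_fd, NULL, pipefd[1], NULL, 4096, SPLICE_F_NONBLOCK);
 */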
8322 tracing_stats_read(struct file *filp, char __user *ubuf,
8323 size_t count, loff_t *ppos)
8325 struct inode *inode = file_inode(filp);
8326 struct trace_array *tr = inode->i_private;
8327 struct array_buffer *trace_buf = &tr->array_buffer;
8328 int cpu = tracing_get_cpu(inode);
8329 struct trace_seq *s;
8331 unsigned long long t;
8332 unsigned long usec_rem;
8334 s = kmalloc(sizeof(*s), GFP_KERNEL);
8340 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8341 trace_seq_printf(s, "entries: %ld\n", cnt);
8343 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8344 trace_seq_printf(s, "overrun: %ld\n", cnt);
8346 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8347 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8349 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8350 trace_seq_printf(s, "bytes: %ld\n", cnt);
8352 if (trace_clocks[tr->clock_id].in_ns) {
8353 /* local or global for trace_clock */
8354 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8355 usec_rem = do_div(t, USEC_PER_SEC);
8356 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8359 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8360 usec_rem = do_div(t, USEC_PER_SEC);
8361 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8363 /* counter or tsc mode for trace_clock */
8364 trace_seq_printf(s, "oldest event ts: %llu\n",
8365 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8367 trace_seq_printf(s, "now ts: %llu\n",
8368 ring_buffer_time_stamp(trace_buf->buffer));
8371 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8372 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8374 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8375 trace_seq_printf(s, "read events: %ld\n", cnt);
8377 count = simple_read_from_buffer(ubuf, count, ppos,
8378 s->buffer, trace_seq_used(s));
8385 static const struct file_operations tracing_stats_fops = {
8386 .open = tracing_open_generic_tr,
8387 .read = tracing_stats_read,
8388 .llseek = generic_file_llseek,
8389 .release = tracing_release_generic_tr,
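/*
 * Sample per_cpu/cpuN/stats output (values illustrative), matching the
 * fields emitted above:
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5632
 *	oldest event ts:  2296.747819
 *	now ts:  2321.754906
 *	dropped events: 0
 *	read events: 107
 */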
8392 #ifdef CONFIG_DYNAMIC_FTRACE
8395 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8396 size_t cnt, loff_t *ppos)
8402 /* 256 should be plenty to hold the amount needed */
8403 buf = kmalloc(256, GFP_KERNEL);
8407 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8408 ftrace_update_tot_cnt,
8409 ftrace_number_of_pages,
8410 ftrace_number_of_groups);
8412 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8417 static const struct file_operations tracing_dyn_info_fops = {
8418 .open = tracing_open_generic,
8419 .read = tracing_read_dyn_info,
8420 .llseek = generic_file_llseek,
8422 #endif /* CONFIG_DYNAMIC_FTRACE */
8424 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8426 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8427 struct trace_array *tr, struct ftrace_probe_ops *ops,
8430 tracing_snapshot_instance(tr);
8434 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8435 struct trace_array *tr, struct ftrace_probe_ops *ops,
8438 struct ftrace_func_mapper *mapper = data;
8442 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8452 tracing_snapshot_instance(tr);
8456 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8457 struct ftrace_probe_ops *ops, void *data)
8459 struct ftrace_func_mapper *mapper = data;
8462 seq_printf(m, "%ps:", (void *)ip);
8464 seq_puts(m, "snapshot");
8467 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8470 seq_printf(m, ":count=%ld\n", *count);
8472 seq_puts(m, ":unlimited\n");
8478 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8479 unsigned long ip, void *init_data, void **data)
8481 struct ftrace_func_mapper *mapper = *data;
8484 mapper = allocate_ftrace_func_mapper();
8490 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8494 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8495 unsigned long ip, void *data)
8497 struct ftrace_func_mapper *mapper = data;
8502 free_ftrace_func_mapper(mapper, NULL);
8506 ftrace_func_mapper_remove_ip(mapper, ip);
8509 static struct ftrace_probe_ops snapshot_probe_ops = {
8510 .func = ftrace_snapshot,
8511 .print = ftrace_snapshot_print,
8514 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8515 .func = ftrace_count_snapshot,
8516 .print = ftrace_snapshot_print,
8517 .init = ftrace_snapshot_init,
8518 .free = ftrace_snapshot_free,
8522 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8523 char *glob, char *cmd, char *param, int enable)
8525 struct ftrace_probe_ops *ops;
8526 void *count = (void *)-1;
8533 /* hash funcs only work with set_ftrace_filter */
8537 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8540 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8545 number = strsep(&param, ":");
8547 if (!strlen(number))
8551 * We use the callback data field (which is a pointer)
8552 * as our counter.
8554 ret = kstrtoul(number, 0, (unsigned long *)&count);
8559 ret = tracing_alloc_snapshot_instance(tr);
8563 ret = register_ftrace_function_probe(glob, tr, ops, count);
8566 return ret < 0 ? ret : 0;
8569 static struct ftrace_func_command ftrace_snapshot_cmd = {
8571 .func = ftrace_trace_snapshot_callback,
8574 static __init int register_snapshot_cmd(void)
8576 return register_ftrace_command(&ftrace_snapshot_cmd);
8579 static inline __init int register_snapshot_cmd(void) { return 0; }
8580 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
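/*
 * Example usage from user space (illustrative), via set_ftrace_filter:
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter	# snapshot on every hit
 *	echo 'schedule:snapshot:3' >> set_ftrace_filter	# only the first 3 hits
 *	echo '!schedule:snapshot' >> set_ftrace_filter	# remove the probe
 */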
8582 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8584 if (WARN_ON(!tr->dir))
8585 return ERR_PTR(-ENODEV);
8587 /* Top directory uses NULL as the parent */
8588 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8591 /* All sub buffers have a descriptor */
8595 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8597 struct dentry *d_tracer;
8600 return tr->percpu_dir;
8602 d_tracer = tracing_get_dentry(tr);
8603 if (IS_ERR(d_tracer))
8606 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8608 MEM_FAIL(!tr->percpu_dir,
8609 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8611 return tr->percpu_dir;
8614 static struct dentry *
8615 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8616 void *data, long cpu, const struct file_operations *fops)
8618 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8620 if (ret) /* See tracing_get_cpu() */
8621 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8626 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8628 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8629 struct dentry *d_cpu;
8630 char cpu_dir[30]; /* 30 characters should be more than enough */
8635 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8636 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8638 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8642 /* per cpu trace_pipe */
8643 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8644 tr, cpu, &tracing_pipe_fops);
8647 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8648 tr, cpu, &tracing_fops);
8650 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8651 tr, cpu, &tracing_buffers_fops);
8653 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8654 tr, cpu, &tracing_stats_fops);
8656 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8657 tr, cpu, &tracing_entries_fops);
8659 #ifdef CONFIG_TRACER_SNAPSHOT
8660 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8661 tr, cpu, &snapshot_fops);
8663 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8664 tr, cpu, &snapshot_raw_fops);
8668 #ifdef CONFIG_FTRACE_SELFTEST
8669 /* Let selftest have access to static functions in this file */
8670 #include "trace_selftest.c"
8674 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8677 struct trace_option_dentry *topt = filp->private_data;
8680 if (topt->flags->val & topt->opt->bit)
8685 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8689 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8692 struct trace_option_dentry *topt = filp->private_data;
8696 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8700 if (val != 0 && val != 1)
8703 if (!!(topt->flags->val & topt->opt->bit) != val) {
8704 mutex_lock(&trace_types_lock);
8705 ret = __set_tracer_option(topt->tr, topt->flags,
8707 mutex_unlock(&trace_types_lock);
8718 static const struct file_operations trace_options_fops = {
8719 .open = tracing_open_generic,
8720 .read = trace_options_read,
8721 .write = trace_options_write,
8722 .llseek = generic_file_llseek,
8726 * In order to pass in both the trace_array descriptor as well as the index
8727 * to the flag that the trace option file represents, the trace_array
8728 * has a character array of trace_flags_index[], which holds the index
8729 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8730 * The address of this character array is passed to the flag option file
8731 * read/write callbacks.
8733 * In order to extract both the index and the trace_array descriptor,
8734 * get_tr_index() uses the following algorithm.
8736 *   idx = *ptr;
8738 * As the pointer itself contains the address of the index (remember
8739 * index[1] == 1).
8741 * Then to get the trace_array descriptor, by subtracting that index
8742 * from the ptr, we get to the start of the index itself.
8744 * ptr - idx == &index[0]
8746 * Then a simple container_of() from that pointer gets us to the
8747 * trace_array descriptor.
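/*
 * Worked example (illustrative): if data points at tr->trace_flags_index[5],
 * then *data == 5, so (data - 5) == &tr->trace_flags_index[0], and
 * container_of() on that address recovers tr itself.
 */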
8749 static void get_tr_index(void *data, struct trace_array **ptr,
8750 unsigned int *pindex)
8752 *pindex = *(unsigned char *)data;
8754 *ptr = container_of(data - *pindex, struct trace_array,
8759 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8762 void *tr_index = filp->private_data;
8763 struct trace_array *tr;
8767 get_tr_index(tr_index, &tr, &index);
8769 if (tr->trace_flags & (1 << index))
8774 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8778 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8781 void *tr_index = filp->private_data;
8782 struct trace_array *tr;
8787 get_tr_index(tr_index, &tr, &index);
8789 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8793 if (val != 0 && val != 1)
8796 mutex_lock(&event_mutex);
8797 mutex_lock(&trace_types_lock);
8798 ret = set_tracer_flag(tr, 1 << index, val);
8799 mutex_unlock(&trace_types_lock);
8800 mutex_unlock(&event_mutex);
8810 static const struct file_operations trace_options_core_fops = {
8811 .open = tracing_open_generic,
8812 .read = trace_options_core_read,
8813 .write = trace_options_core_write,
8814 .llseek = generic_file_llseek,
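/*
 * Example usage from user space (illustrative): each file under options/
 * accepts only "0" or "1", e.g.:
 *
 *	echo 1 > options/sym-addr
 *	echo 0 > options/sym-addr
 */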
8817 struct dentry *trace_create_file(const char *name,
8819 struct dentry *parent,
8821 const struct file_operations *fops)
8825 ret = tracefs_create_file(name, mode, parent, data, fops);
8827 pr_warn("Could not create tracefs '%s' entry\n", name);
8833 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8835 struct dentry *d_tracer;
8840 d_tracer = tracing_get_dentry(tr);
8841 if (IS_ERR(d_tracer))
8844 tr->options = tracefs_create_dir("options", d_tracer);
8846 pr_warn("Could not create tracefs directory 'options'\n");
8854 create_trace_option_file(struct trace_array *tr,
8855 struct trace_option_dentry *topt,
8856 struct tracer_flags *flags,
8857 struct tracer_opt *opt)
8859 struct dentry *t_options;
8861 t_options = trace_options_init_dentry(tr);
8865 topt->flags = flags;
8869 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8870 t_options, topt, &trace_options_fops);
8875 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8877 struct trace_option_dentry *topts;
8878 struct trace_options *tr_topts;
8879 struct tracer_flags *flags;
8880 struct tracer_opt *opts;
8887 flags = tracer->flags;
8889 if (!flags || !flags->opts)
8893 * If this is an instance, only create flags for tracers
8894 * the instance may have.
8896 if (!trace_ok_for_array(tracer, tr))
8899 for (i = 0; i < tr->nr_topts; i++) {
8900 /* Make sure there are no duplicate flags. */
8901 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8907 for (cnt = 0; opts[cnt].name; cnt++)
8910 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8914 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8921 tr->topts = tr_topts;
8922 tr->topts[tr->nr_topts].tracer = tracer;
8923 tr->topts[tr->nr_topts].topts = topts;
8926 for (cnt = 0; opts[cnt].name; cnt++) {
8927 create_trace_option_file(tr, &topts[cnt], flags,
8929 MEM_FAIL(topts[cnt].entry == NULL,
8930 "Failed to create trace option: %s",
8935 static struct dentry *
8936 create_trace_option_core_file(struct trace_array *tr,
8937 const char *option, long index)
8939 struct dentry *t_options;
8941 t_options = trace_options_init_dentry(tr);
8945 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
8946 (void *)&tr->trace_flags_index[index],
8947 &trace_options_core_fops);
8950 static void create_trace_options_dir(struct trace_array *tr)
8952 struct dentry *t_options;
8953 bool top_level = tr == &global_trace;
8956 t_options = trace_options_init_dentry(tr);
8960 for (i = 0; trace_options[i]; i++) {
8962 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8963 create_trace_option_core_file(tr, trace_options[i], i);
8968 rb_simple_read(struct file *filp, char __user *ubuf,
8969 size_t cnt, loff_t *ppos)
8971 struct trace_array *tr = filp->private_data;
8975 r = tracer_tracing_is_on(tr);
8976 r = sprintf(buf, "%d\n", r);
8978 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8982 rb_simple_write(struct file *filp, const char __user *ubuf,
8983 size_t cnt, loff_t *ppos)
8985 struct trace_array *tr = filp->private_data;
8986 struct trace_buffer *buffer = tr->array_buffer.buffer;
8990 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8995 mutex_lock(&trace_types_lock);
8996 if (!!val == tracer_tracing_is_on(tr)) {
8997 val = 0; /* do nothing */
8999 tracer_tracing_on(tr);
9000 if (tr->current_trace->start)
9001 tr->current_trace->start(tr);
9003 tracer_tracing_off(tr);
9004 if (tr->current_trace->stop)
9005 tr->current_trace->stop(tr);
9007 mutex_unlock(&trace_types_lock);
9015 static const struct file_operations rb_simple_fops = {
9016 .open = tracing_open_generic_tr,
9017 .read = rb_simple_read,
9018 .write = rb_simple_write,
9019 .release = tracing_release_generic_tr,
9020 .llseek = default_llseek,
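/*
 * Example usage from user space (illustrative):
 *
 *	echo 0 > tracing_on	# stop recording, instrumentation stays armed
 *	echo 1 > tracing_on	# resume recording
 *
 * As the write handler above shows, this gates writes to the ring buffer
 * and invokes the tracer's start/stop callbacks, without tearing the
 * current tracer down.
 */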
9024 buffer_percent_read(struct file *filp, char __user *ubuf,
9025 size_t cnt, loff_t *ppos)
9027 struct trace_array *tr = filp->private_data;
9031 r = tr->buffer_percent;
9032 r = sprintf(buf, "%d\n", r);
9034 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9038 buffer_percent_write(struct file *filp, const char __user *ubuf,
9039 size_t cnt, loff_t *ppos)
9041 struct trace_array *tr = filp->private_data;
9045 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9055 tr->buffer_percent = val;
9062 static const struct file_operations buffer_percent_fops = {
9063 .open = tracing_open_generic_tr,
9064 .read = buffer_percent_read,
9065 .write = buffer_percent_write,
9066 .release = tracing_release_generic_tr,
9067 .llseek = default_llseek,
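/*
 * Example usage from user space (illustrative): buffer_percent sets how
 * full the ring buffer must be before a blocked reader of trace_pipe_raw
 * is woken (0 wakes on any data, 100 waits for a full buffer; the
 * default set below is 50):
 *
 *	echo 50 > buffer_percent
 */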
9070 static struct dentry *trace_instance_dir;
9073 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9076 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9078 enum ring_buffer_flags rb_flags;
9080 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9084 buf->buffer = ring_buffer_alloc(size, rb_flags);
9088 buf->data = alloc_percpu(struct trace_array_cpu);
9090 ring_buffer_free(buf->buffer);
9095 /* Allocate the first page for all buffers */
9096 set_buffer_entries(&tr->array_buffer,
9097 ring_buffer_size(tr->array_buffer.buffer, 0));
9102 static int allocate_trace_buffers(struct trace_array *tr, int size)
9106 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9110 #ifdef CONFIG_TRACER_MAX_TRACE
9111 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9112 allocate_snapshot ? size : 1);
9113 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9114 ring_buffer_free(tr->array_buffer.buffer);
9115 tr->array_buffer.buffer = NULL;
9116 free_percpu(tr->array_buffer.data);
9117 tr->array_buffer.data = NULL;
9120 tr->allocated_snapshot = allocate_snapshot;
9123 * Only the top level trace array gets its snapshot allocated
9124 * from the kernel command line.
9126 allocate_snapshot = false;
9132 static void free_trace_buffer(struct array_buffer *buf)
9135 ring_buffer_free(buf->buffer);
9137 free_percpu(buf->data);
9142 static void free_trace_buffers(struct trace_array *tr)
9147 free_trace_buffer(&tr->array_buffer);
9149 #ifdef CONFIG_TRACER_MAX_TRACE
9150 free_trace_buffer(&tr->max_buffer);
9154 static void init_trace_flags_index(struct trace_array *tr)
9158 /* Used by the trace options files */
9159 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9160 tr->trace_flags_index[i] = i;
9163 static void __update_tracer_options(struct trace_array *tr)
9167 for (t = trace_types; t; t = t->next)
9168 add_tracer_options(tr, t);
9171 static void update_tracer_options(struct trace_array *tr)
9173 mutex_lock(&trace_types_lock);
9174 __update_tracer_options(tr);
9175 mutex_unlock(&trace_types_lock);
9178 /* Must have trace_types_lock held */
9179 struct trace_array *trace_array_find(const char *instance)
9181 struct trace_array *tr, *found = NULL;
9183 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9184 if (tr->name && strcmp(tr->name, instance) == 0) {
9193 struct trace_array *trace_array_find_get(const char *instance)
9195 struct trace_array *tr;
9197 mutex_lock(&trace_types_lock);
9198 tr = trace_array_find(instance);
9201 mutex_unlock(&trace_types_lock);
9206 static int trace_array_create_dir(struct trace_array *tr)
9210 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9214 ret = event_trace_add_tracer(tr->dir, tr);
9216 tracefs_remove(tr->dir);
9220 init_tracer_tracefs(tr, tr->dir);
9221 __update_tracer_options(tr);
9226 static struct trace_array *trace_array_create(const char *name)
9228 struct trace_array *tr;
9232 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9234 return ERR_PTR(ret);
9236 tr->name = kstrdup(name, GFP_KERNEL);
9240 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9243 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9245 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9247 raw_spin_lock_init(&tr->start_lock);
9249 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9251 tr->current_trace = &nop_trace;
9253 INIT_LIST_HEAD(&tr->systems);
9254 INIT_LIST_HEAD(&tr->events);
9255 INIT_LIST_HEAD(&tr->hist_vars);
9256 INIT_LIST_HEAD(&tr->err_log);
9258 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9261 if (ftrace_allocate_ftrace_ops(tr) < 0)
9264 ftrace_init_trace_array(tr);
9266 init_trace_flags_index(tr);
9268 if (trace_instance_dir) {
9269 ret = trace_array_create_dir(tr);
9273 __trace_early_add_events(tr);
9275 list_add(&tr->list, &ftrace_trace_arrays);
9282 ftrace_free_ftrace_ops(tr);
9283 free_trace_buffers(tr);
9284 free_cpumask_var(tr->tracing_cpumask);
9288 return ERR_PTR(ret);
9291 static int instance_mkdir(const char *name)
9293 struct trace_array *tr;
9296 mutex_lock(&event_mutex);
9297 mutex_lock(&trace_types_lock);
9300 if (trace_array_find(name))
9303 tr = trace_array_create(name);
9305 ret = PTR_ERR_OR_ZERO(tr);
9308 mutex_unlock(&trace_types_lock);
9309 mutex_unlock(&event_mutex);
9314 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9315 * @name: The name of the trace array to be looked up/created.
9317 * Returns a pointer to the trace array with the given name, or NULL
9318 * if it cannot be created.
9320 * NOTE: This function increments the reference counter associated with the
9321 * trace array returned. This makes sure it cannot be freed while in use.
9322 * Use trace_array_put() once the trace array is no longer needed.
9323 * If the trace_array is to be freed, trace_array_destroy() needs to
9324 * be called after the trace_array_put(), or simply let user space delete
9325 * it from the tracefs instances directory. But until the
9326 * trace_array_put() is called, user space can not delete it.
9329 struct trace_array *trace_array_get_by_name(const char *name)
9331 struct trace_array *tr;
9333 mutex_lock(&event_mutex);
9334 mutex_lock(&trace_types_lock);
9336 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9337 if (tr->name && strcmp(tr->name, name) == 0)
9341 tr = trace_array_create(name);
9349 mutex_unlock(&trace_types_lock);
9350 mutex_unlock(&event_mutex);
9353 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
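/*
 * Sketch of in-kernel usage (illustrative; the instance name is
 * hypothetical):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 */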
9355 static int __remove_instance(struct trace_array *tr)
9359 /* Reference counter for a newly created trace array = 1. */
9360 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9363 list_del(&tr->list);
9365 /* Disable all the flags that were enabled coming in */
9366 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9367 if ((1 << i) & ZEROED_TRACE_FLAGS)
9368 set_tracer_flag(tr, 1 << i, 0);
9371 tracing_set_nop(tr);
9372 clear_ftrace_function_probes(tr);
9373 event_trace_del_tracer(tr);
9374 ftrace_clear_pids(tr);
9375 ftrace_destroy_function_files(tr);
9376 tracefs_remove(tr->dir);
9377 free_percpu(tr->last_func_repeats);
9378 free_trace_buffers(tr);
9380 for (i = 0; i < tr->nr_topts; i++) {
9381 kfree(tr->topts[i].topts);
9385 free_cpumask_var(tr->tracing_cpumask);
9392 int trace_array_destroy(struct trace_array *this_tr)
9394 struct trace_array *tr;
9400 mutex_lock(&event_mutex);
9401 mutex_lock(&trace_types_lock);
9405 /* Make sure the trace array exists before destroying it. */
9406 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9407 if (tr == this_tr) {
9408 ret = __remove_instance(tr);
9413 mutex_unlock(&trace_types_lock);
9414 mutex_unlock(&event_mutex);
9418 EXPORT_SYMBOL_GPL(trace_array_destroy);
9420 static int instance_rmdir(const char *name)
9422 struct trace_array *tr;
9425 mutex_lock(&event_mutex);
9426 mutex_lock(&trace_types_lock);
9429 tr = trace_array_find(name);
9431 ret = __remove_instance(tr);
9433 mutex_unlock(&trace_types_lock);
9434 mutex_unlock(&event_mutex);
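/*
 * Example usage from user space (illustrative): instances are managed
 * with ordinary directory operations under tracefs:
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	rmdir /sys/kernel/tracing/instances/foo
 *
 * rmdir typically fails with -EBUSY while the instance still has
 * references.
 */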
9439 static __init void create_trace_instances(struct dentry *d_tracer)
9441 struct trace_array *tr;
9443 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9446 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9449 mutex_lock(&event_mutex);
9450 mutex_lock(&trace_types_lock);
9452 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9455 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9456 "Failed to create instance directory\n"))
9460 mutex_unlock(&trace_types_lock);
9461 mutex_unlock(&event_mutex);
9465 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9467 struct trace_event_file *file;
9470 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9471 tr, &show_traces_fops);
9473 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9474 tr, &set_tracer_fops);
9476 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9477 tr, &tracing_cpumask_fops);
9479 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9480 tr, &tracing_iter_fops);
9482 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9485 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9486 tr, &tracing_pipe_fops);
9488 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9489 tr, &tracing_entries_fops);
9491 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9492 tr, &tracing_total_entries_fops);
9494 trace_create_file("free_buffer", 0200, d_tracer,
9495 tr, &tracing_free_buffer_fops);
9497 trace_create_file("trace_marker", 0220, d_tracer,
9498 tr, &tracing_mark_fops);
9500 file = __find_event_file(tr, "ftrace", "print");
9501 if (file && file->dir)
9502 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9503 file, &event_trigger_fops);
9504 tr->trace_marker_file = file;
9506 trace_create_file("trace_marker_raw", 0220, d_tracer,
9507 tr, &tracing_mark_raw_fops);
9509 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9512 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9513 tr, &rb_simple_fops);
9515 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9516 &trace_time_stamp_mode_fops);
9518 tr->buffer_percent = 50;
9520 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
9521 tr, &buffer_percent_fops);
9523 create_trace_options_dir(tr);
9525 trace_create_maxlat_file(tr, d_tracer);
9527 if (ftrace_create_function_files(tr, d_tracer))
9528 MEM_FAIL(1, "Could not allocate function filter files");
9530 #ifdef CONFIG_TRACER_SNAPSHOT
9531 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9532 tr, &snapshot_fops);
9535 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9536 tr, &tracing_err_log_fops);
9538 for_each_tracing_cpu(cpu)
9539 tracing_init_tracefs_percpu(tr, cpu);
9541 ftrace_init_tracefs(tr, d_tracer);
9544 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9546 struct vfsmount *mnt;
9547 struct file_system_type *type;
9550 * To maintain backward compatibility for tools that mount
9551 * debugfs to get to the tracing facility, tracefs is automatically
9552 * mounted to the debugfs/tracing directory.
9554 type = get_fs_type("tracefs");
9557 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9558 put_filesystem(type);
9567 * tracing_init_dentry - initialize top level trace array
9569 * This is called when creating files or directories in the tracing
9570 * directory. It is called via fs_initcall() by any of the boot up code
9571 * and returns 0 on success, or a negative error otherwise.
9573 int tracing_init_dentry(void)
9575 struct trace_array *tr = &global_trace;
9577 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9578 pr_warn("Tracing disabled due to lockdown\n");
9582 /* The top level trace array uses NULL as parent */
9586 if (WARN_ON(!tracefs_initialized()))
9590 * As there may still be users that expect the tracing
9591 * files to exist in debugfs/tracing, we must automount
9592 * the tracefs file system there, so older tools still
9593 * work with the newer kernel.
9595 tr->dir = debugfs_create_automount("tracing", NULL,
9596 trace_automount, NULL);
9601 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9602 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9604 static struct workqueue_struct *eval_map_wq __initdata;
9605 static struct work_struct eval_map_work __initdata;
9607 static void __init eval_map_work_func(struct work_struct *work)
9611 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9612 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9615 static int __init trace_eval_init(void)
9617 INIT_WORK(&eval_map_work, eval_map_work_func);
9619 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9621 pr_err("Unable to allocate eval_map_wq\n");
9623 eval_map_work_func(&eval_map_work);
9627 queue_work(eval_map_wq, &eval_map_work);
9631 static int __init trace_eval_sync(void)
9633 /* Make sure the eval map updates are finished */
9635 destroy_workqueue(eval_map_wq);
9639 late_initcall_sync(trace_eval_sync);
9642 #ifdef CONFIG_MODULES
9643 static void trace_module_add_evals(struct module *mod)
9645 if (!mod->num_trace_evals)
9649 * Modules with bad taint do not have events created, do
9650 * not bother with enums either.
9652 if (trace_module_has_bad_taint(mod))
9655 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9658 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9659 static void trace_module_remove_evals(struct module *mod)
9661 union trace_eval_map_item *map;
9662 union trace_eval_map_item **last = &trace_eval_maps;
9664 if (!mod->num_trace_evals)
9667 mutex_lock(&trace_eval_mutex);
9669 map = trace_eval_maps;
9672 if (map->head.mod == mod)
9674 map = trace_eval_jmp_to_tail(map);
9675 last = &map->tail.next;
9676 map = map->tail.next;
9681 *last = trace_eval_jmp_to_tail(map)->tail.next;
9684 mutex_unlock(&trace_eval_mutex);
9687 static inline void trace_module_remove_evals(struct module *mod) { }
9688 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9690 static int trace_module_notify(struct notifier_block *self,
9691 unsigned long val, void *data)
9693 struct module *mod = data;
9696 case MODULE_STATE_COMING:
9697 trace_module_add_evals(mod);
9699 case MODULE_STATE_GOING:
9700 trace_module_remove_evals(mod);
9707 static struct notifier_block trace_module_nb = {
9708 .notifier_call = trace_module_notify,
9711 #endif /* CONFIG_MODULES */
9713 static __init int tracer_init_tracefs(void)
9717 trace_access_lock_init();
9719 ret = tracing_init_dentry();
9725 init_tracer_tracefs(&global_trace, NULL);
9726 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9728 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9729 &global_trace, &tracing_thresh_fops);
9731 trace_create_file("README", TRACE_MODE_READ, NULL,
9732 NULL, &tracing_readme_fops);
9734 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9735 NULL, &tracing_saved_cmdlines_fops);
9737 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9738 NULL, &tracing_saved_cmdlines_size_fops);
9740 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9741 NULL, &tracing_saved_tgids_fops);
9745 trace_create_eval_file(NULL);
9747 #ifdef CONFIG_MODULES
9748 register_module_notifier(&trace_module_nb);
9751 #ifdef CONFIG_DYNAMIC_FTRACE
9752 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9753 NULL, &tracing_dyn_info_fops);
9756 create_trace_instances(NULL);
9758 update_tracer_options(&global_trace);
9763 fs_initcall(tracer_init_tracefs);
9765 static int trace_panic_handler(struct notifier_block *this,
9766 unsigned long event, void *unused)
9768 if (ftrace_dump_on_oops)
9769 ftrace_dump(ftrace_dump_on_oops);
9773 static struct notifier_block trace_panic_notifier = {
9774 .notifier_call = trace_panic_handler,
9776 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9779 static int trace_die_handler(struct notifier_block *self,
9785 if (ftrace_dump_on_oops)
9786 ftrace_dump(ftrace_dump_on_oops);
9794 static struct notifier_block trace_die_notifier = {
9795 .notifier_call = trace_die_handler,
9800 * printk is set to a max of 1024; we really don't need it that big.
9801 * Nothing should be printing 1000 characters anyway.
9803 #define TRACE_MAX_PRINT 1000
9806 * Define here KERN_TRACE so that we have one place to modify
9807 * it if we decide to change what log level the ftrace dump
9808 * should be at.
9810 #define KERN_TRACE KERN_EMERG
9813 trace_printk_seq(struct trace_seq *s)
9815 /* Probably should print a warning here. */
9816 if (s->seq.len >= TRACE_MAX_PRINT)
9817 s->seq.len = TRACE_MAX_PRINT;
9820 * More paranoid code. Although the buffer size is set to
9821 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9822 * an extra layer of protection.
9824 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9825 s->seq.len = s->seq.size - 1;
9827 /* should be zero terminated, but we are paranoid. */
9828 s->buffer[s->seq.len] = 0;
9830 printk(KERN_TRACE "%s", s->buffer);
9835 void trace_init_global_iter(struct trace_iterator *iter)
9837 iter->tr = &global_trace;
9838 iter->trace = iter->tr->current_trace;
9839 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9840 iter->array_buffer = &global_trace.array_buffer;
9842 if (iter->trace && iter->trace->open)
9843 iter->trace->open(iter);
9845 /* Annotate start of buffers if we had overruns */
9846 if (ring_buffer_overruns(iter->array_buffer->buffer))
9847 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9849 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9850 if (trace_clocks[iter->tr->clock_id].in_ns)
9851 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9854 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9856 /* use static because iter can be a bit big for the stack */
9857 static struct trace_iterator iter;
9858 static atomic_t dump_running;
9859 struct trace_array *tr = &global_trace;
9860 unsigned int old_userobj;
9861 unsigned long flags;
9864 /* Only allow one dump user at a time. */
9865 if (atomic_inc_return(&dump_running) != 1) {
9866 atomic_dec(&dump_running);
9871 * Always turn off tracing when we dump.
9872 * We don't need to show trace output of what happens
9873 * between multiple crashes.
9875 * If the user does a sysrq-z, then they can re-enable
9876 * tracing with echo 1 > tracing_on.
9880 local_irq_save(flags);
9882 /* Simulate the iterator */
9883 trace_init_global_iter(&iter);
9884 /* Can not use kmalloc for iter.temp and iter.fmt */
9885 iter.temp = static_temp_buf;
9886 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9887 iter.fmt = static_fmt_buf;
9888 iter.fmt_size = STATIC_FMT_BUF_SIZE;
9890 for_each_tracing_cpu(cpu) {
9891 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9894 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9896 /* don't look at user memory in panic mode */
9897 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9899 switch (oops_dump_mode) {
9901 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9904 iter.cpu_file = raw_smp_processor_id();
9909 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9910 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9913 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9915 /* Did function tracer already get disabled? */
9916 if (ftrace_is_dead()) {
9917 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9918 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9922 * We need to stop all tracing on all CPUS to read
9923 * the next buffer. This is a bit expensive, but is
9924 * not done often. We read everything we can,
9925 * and then release the locks again.
9928 while (!trace_empty(&iter)) {
9931 printk(KERN_TRACE "---------------------------------\n");
9935 trace_iterator_reset(&iter);
9936 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9938 if (trace_find_next_entry_inc(&iter) != NULL) {
9941 ret = print_trace_line(&iter);
9942 if (ret != TRACE_TYPE_NO_CONSUME)
9943 trace_consume(&iter);
9945 touch_nmi_watchdog();
9947 trace_printk_seq(&iter.seq);
9951 printk(KERN_TRACE " (ftrace buffer empty)\n");
9953 printk(KERN_TRACE "---------------------------------\n");
9956 tr->trace_flags |= old_userobj;
9958 for_each_tracing_cpu(cpu) {
9959 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9961 atomic_dec(&dump_running);
9962 local_irq_restore(flags);
9964 EXPORT_SYMBOL_GPL(ftrace_dump);
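
/*
 * Usage sketch (an illustration, not part of the original file): a fatal
 * error path can dump the ftrace ring buffer before the system state is
 * lost. The modes come from enum ftrace_dump_mode: DUMP_ALL prints every
 * CPU's buffer, DUMP_ORIG only the buffer of the calling CPU.
 *
 *	static void sample_fatal_error(void)	// hypothetical helper
 *	{
 *		pr_emerg("sample: unrecoverable state, dumping trace\n");
 *		ftrace_dump(DUMP_ALL);
 *	}
 *
 * Tracing stays off afterwards; per the comment in ftrace_dump(), it can
 * be re-enabled with "echo 1 > tracing_on".
 */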

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line is too long: should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
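
/*
 * Usage sketch (an illustration): trace_parse_run_command() splits a user
 * write into newline-terminated commands, strips '#' comments, and hands
 * each line to the createfn callback; a non-zero return from the callback
 * aborts the loop. A minimal write handler built on it could look like
 * this (sample_create/sample_write are hypothetical names):
 *
 *	static int sample_create(const char *raw_command)
 *	{
 *		pr_info("parsed command: %s\n", raw_command);
 *		return 0;	// non-zero would stop the parse loop
 *	}
 *
 *	static ssize_t sample_write(struct file *file,
 *				    const char __user *buffer,
 *				    size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buffer, count, ppos,
 *					       sample_create);
 *	}
 *
 * This is the pattern the kprobe and uprobe event interfaces use for
 * their tracefs write handlers.
 */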

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
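
/*
 * The error path of tracer_alloc_buffers() above follows the usual kernel
 * goto-unwind idiom: each out_* label releases exactly the resources
 * acquired before the failing step, in reverse order of acquisition. The
 * shape of the idiom, with illustrative names only:
 *
 *	a = alloc_a();
 *	if (!a)
 *		goto out;
 *	b = alloc_b();
 *	if (!b)
 *		goto out_free_a;
 *	return 0;
 *
 * out_free_a:
 *	free_a(a);
 * out:
 *	return ret;
 */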

void __init ftrace_boot_snapshot(void)
{
	if (snapshot_at_boot) {
		tracing_snapshot();
		internal_trace_puts("** Boot snapshot taken **\n");
	}
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
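
/*
 * Boot-time ordering, for reference: early_trace_init() is called from
 * start_kernel() well before the initcall machinery runs, trace_init()
 * follows from start_kernel() once the event infrastructure can be set
 * up, and late_trace_init() below runs as a late (sync) initcall for the
 * pieces that depend on late boot state such as sched_clock stability.
 */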

void __init trace_init(void)
{
	trace_event_init();
}

__init static void clear_boot_tracer(void)
{
	/*
	 * The default boot-up tracer name points into an init section
	 * that will soon be freed. This function is called from a late
	 * initcall; if the boot tracer was never found, clear it out to
	 * prevent a later registration from accessing the buffer that
	 * is about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Cannot set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);
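
/*
 * late_initcall_sync() rather than plain late_initcall() presumably
 * guarantees this runs after all other late initcalls, so every
 * boot-registered tracer has had its chance to register before
 * clear_boot_tracer() discards the boot tracer name.
 */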