/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
struct bpf_prog;

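/*
 * Helpers backing the __print_flags(), __print_symbolic(), __print_hex(),
 * __print_bitmask() and __print_array() constructs used in the
 * TP_printk() part of TRACE_EVENT().
 */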
const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
				      unsigned long long flags,
				      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64 *symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len,
				bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
				  const void *buf, int count,
				  size_t el_size);

struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

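/* The largest event type id that fits in trace_entry::type. */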
#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)

/*
 * Trace iterator - used by printout routines that present trace
 * results to users and that might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct trace_buffer	*trace_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* true when the file currently open is a snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	/* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};


typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

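/* Output callbacks for an event, one per format: normal, raw, hex, binary. */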
struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

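/*
 * A registered output event type. 'type' matches trace_entry::type and
 * is assigned by register_trace_event() when not preset.
 */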
struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc);

#define TRACE_RECORD_CMDLINE	BIT(0)
#define TRACE_RECORD_TGID	BIT(1)

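/* Flag bits for tracing_record_taskinfo(): which task info to save. */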
void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

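/* Operations passed to a trace_event_class's reg() callback. */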
enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	/*
	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
	 * custom action was taken and the default action is not to be
	 * performed.
	 */
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

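/*
 * State shared by all events cut from the same template, e.g. one
 * DECLARE_EVENT_CLASS() and each of its DEFINE_EVENT()s.
 */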
struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct trace_event_call *);
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
			   enum trace_reg type, void *data);

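/*
 * Per-record context threaded through trace_event_buffer_reserve() and
 * trace_event_buffer_commit() while an event is being recorded.
 */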
struct trace_event_buffer {
	struct ring_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned long			flags;
	int				pc;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);

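/*
 * A typical reserve/commit sequence, as emitted by TRACE_EVENT()
 * generated probes (a sketch; 'struct my_entry' and 'value' are
 * hypothetical):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->field = value;
 *	trace_event_buffer_commit(&fbuffer);
 */
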
enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
};

/*
 * Event flags:
 *  FILTERED      - The event has a filter attached
 *  CAP_ANY       - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT    - Event is a tracepoint
 *  KPROBE        - Event is a kprobe
 *  UPROBE        - Event is a uprobe
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	char			*print_fmt;
	struct event_filter	*filter;
	void			*mod;
	void			*data;
	/*
	 * Static flags of different events; see the TRACE_EVENT_FL_*_BIT
	 * enum above:
	 *   bit 0:		filter_active
	 *   bit 1:		allow trace by non root (cap any)
	 *   bit 2:		failed to apply filter
	 *   bit 3:		trace internal event (do not enable)
	 *   bit 4:		Event is a tracepoint
	 *   bit 5:		Event is a kprobe
	 *   bit 6:		Event is a uprobe
	 */
	int			flags;

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog_array __rcu	*prog_array;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes NULL inside the rcu_read_lock/unlock region,
	 * we bail out then. If this function returns false,
	 * there is a risk that we might miss a few events if the check
	 * were delayed until inside the rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
#endif

static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}

struct trace_array;
struct trace_subsystem_dir;

enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_RECORDED_TGID_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
	EVENT_FILE_FL_WAS_ENABLED_BIT,
};

/*
 * Event file flags:
 *  ENABLED       - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED      - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                   tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER    - When set, the event is filtered based on pid
 *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};

struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter __rcu	*filter;
	struct dentry			*dir;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags; the bits are defined by the EVENT_FILE_FL_*_BIT
	 * enum above.
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};

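/* Set extra TRACE_EVENT_FL_* flags on an event via an early initcall. */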
#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event, \
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);
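
/*
 * The expression sees 'tp_event' and 'p_event' and should evaluate to
 * 0 to allow the perf_event to attach, or a negative errno to refuse
 * it. A sketch ('foo' names a hypothetical event):
 *
 *	__TRACE_EVENT_PERF_PERM(foo,
 *		p_event->attr.exclude_kernel ? 0 : -EPERM);
 */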

#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

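/*
 * Bitmask identifying event trigger types; returned by
 * event_triggers_call() and passed back to event_triggers_post_call().
 */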
enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
	ETT_EVENT_HIST		= (1 << 4),
	ETT_HIST_ENABLE		= (1 << 5),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
						   void *rec);
extern void event_triggers_post_call(struct trace_event_file *file,
				     enum event_trigger_type tt,
				     void *rec);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
			event_triggers_call(file, NULL);
		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
			return true;
		if (eflags & EVENT_FILE_FL_PID_FILTER)
			return trace_event_ignore_this_pid(file);
	}
	return false;
}

#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }
#endif

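/* Field filter types, passed as 'filter_type' to trace_define_field(). */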
enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
	FILTER_COMM,
	FILTER_CPU,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < (type)1)

int trace_set_clr_event(const char *system, const char *event, int set);

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to assign fmt to the static variable when it is not a
 * constant, even with the outer if statement optimized out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
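
/*
 * A sketch of a call site; the caller passes the instruction pointer,
 * typically _THIS_IP_ ('x' is a hypothetical variable):
 *
 *	event_trace_printk(_THIS_IP_, "x=%d\n", x);
 */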

#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}

#endif

#endif /* _LINUX_TRACE_EVENT_H */