kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
        return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
        return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
        return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)                        \
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
                list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)                   \
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
                struct trace_event_file *___n;                          \
                list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()             \
        }

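/*
 * Return the list of fields for an event. Most events keep their
 * fields directly in class->fields; a class can instead provide a
 * get_fields() callback (e.g. when several events share one list).
 */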
static struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
        return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
        struct ftrace_event_field *field;

        list_for_each_entry(field, head, link) {
                if (!strcmp(field->name, name))
                        return field;
        }

        return NULL;
}

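/*
 * Look up a field by name, checking the event's own fields first,
 * then the generic fields (CPU, COMM), and finally the fields common
 * to all events.
 */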
struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
        struct ftrace_event_field *field;
        struct list_head *head;

        head = trace_get_fields(call);
        field = __find_event_field(head, name);
        if (field)
                return field;

        field = __find_event_field(&ftrace_generic_fields, name);
        if (field)
                return field;

        return __find_event_field(&ftrace_common_fields, name);
}

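/*
 * Allocate a field descriptor and add it to the given list. Note that
 * list_add() inserts at the head, so the first entry of the list is
 * always the most recently defined field.
 */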
static int __trace_define_field(struct list_head *head, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct ftrace_event_field *field;

        field = kmem_cache_alloc(field_cachep, GFP_TRACE);
        if (!field)
                return -ENOMEM;

        field->name = name;
        field->type = type;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);
        else
                field->filter_type = filter_type;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;

        list_add(&field->link, head);

        return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
{
        struct list_head *head;

        if (WARN_ON(!call->class))
                return 0;

        head = trace_get_fields(call);
        return __trace_define_field(head, type, name, offset, size,
                                    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __generic_field(type, item, filter_type)                        \
        ret = __trace_define_field(&ftrace_generic_fields, #type,       \
                                   #item, 0, 0, is_signed_type(type),   \
                                   filter_type);                        \
        if (ret)                                                        \
                return ret;

#define __common_field(type, item)                                      \
        ret = __trace_define_field(&ftrace_common_fields, #type,        \
                                   "common_" #item,                     \
                                   offsetof(typeof(ent), item),         \
                                   sizeof(ent.item),                    \
                                   is_signed_type(type), FILTER_OTHER); \
        if (ret)                                                        \
                return ret;

static int trace_define_generic_fields(void)
{
        int ret;

        __generic_field(int, CPU, FILTER_CPU);
        __generic_field(int, cpu, FILTER_CPU);
        __generic_field(char *, COMM, FILTER_COMM);
        __generic_field(char *, comm, FILTER_COMM);

        return ret;
}

static int trace_define_common_fields(void)
{
        int ret;
        struct trace_entry ent;

        __common_field(unsigned short, type);
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);

        return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head;

        head = trace_get_fields(call);
        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kmem_cache_free(field_cachep, field);
        }
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
        struct ftrace_event_field *tail;
        struct list_head *head;

        head = trace_get_fields(call);
        /*
         * head->next points to the last field with the largest offset,
         * since it was added last by trace_define_field()
         */
        tail = list_first_entry(head, struct ftrace_event_field, link);
        return tail->offset + tail->size;
}

int trace_event_raw_init(struct trace_event_call *call)
{
        int id;

        id = register_trace_event(&call->event);
        if (!id)
                return -ENODEV;

        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
        struct trace_array *tr = trace_file->tr;
        struct trace_array_cpu *data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);
        if (!pid_list)
                return false;

        data = this_cpu_ptr(tr->trace_buffer.data);

        return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

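/*
 * Reserve space for an event in the ring buffer. Returns NULL if the
 * event is being filtered out by the PID filter or if the reservation
 * fails; otherwise the caller fills the returned entry and commits it
 * with trace_event_buffer_commit().
 */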
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
                                 struct trace_event_file *trace_file,
                                 unsigned long len)
{
        struct trace_event_call *event_call = trace_file->event_call;

        if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
            trace_event_ignore_this_pid(trace_file))
                return NULL;

        local_save_flags(fbuffer->flags);
        fbuffer->pc = preempt_count();
        fbuffer->trace_file = trace_file;

        fbuffer->event =
                trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
                                                event_call->event.type, len,
                                                fbuffer->flags, fbuffer->pc);
        if (!fbuffer->event)
                return NULL;

        fbuffer->entry = ring_buffer_event_data(fbuffer->event);
        return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

static DEFINE_SPINLOCK(tracepoint_iter_lock);

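/*
 * When tracepoint_printk is set, mirror each event to the console:
 * run the event's trace() output callback into the shared iterator
 * under tracepoint_iter_lock and printk() the result.
 */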
static void output_printk(struct trace_event_buffer *fbuffer)
{
        struct trace_event_call *event_call;
        struct trace_event *event;
        unsigned long flags;
        struct trace_iterator *iter = tracepoint_print_iter;

        if (!iter)
                return;

        event_call = fbuffer->trace_file->event_call;
        if (!event_call || !event_call->event.funcs ||
            !event_call->event.funcs->trace)
                return;

        event = &fbuffer->trace_file->event_call->event;

        spin_lock_irqsave(&tracepoint_iter_lock, flags);
        trace_seq_init(&iter->seq);
        iter->ent = fbuffer->entry;
        event_call->event.funcs->trace(iter, 0, event);
        trace_seq_putc(&iter->seq, 0);
        printk("%s", iter->seq.buffer);

        spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
        if (tracepoint_printk)
                output_printk(fbuffer);

        event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
                                    fbuffer->event, fbuffer->entry,
                                    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

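/*
 * Default registration callback for tracepoint-based events: map the
 * TRACE_REG_* requests onto tracepoint_probe_register()/unregister()
 * for both ftrace and perf probes.
 */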
int trace_event_reg(struct trace_event_call *call,
                    enum trace_reg type, void *data)
{
        struct trace_event_file *file = data;

        WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
        switch (type) {
        case TRACE_REG_REGISTER:
                return tracepoint_probe_register(call->tp,
                                                 call->class->probe,
                                                 file);
        case TRACE_REG_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->probe,
                                            file);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return tracepoint_probe_register(call->tp,
                                                 call->class->perf_probe,
                                                 call);
        case TRACE_REG_PERF_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->perf_probe,
                                            call);
                return 0;
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

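/*
 * Start or stop recording of task comms (cmdlines) for every event
 * that is currently enabled, and mark each file accordingly so the
 * record can be stopped again when the event is disabled.
 */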
void trace_event_enable_cmd_record(bool enable)
{
        struct trace_event_file *file;
        struct trace_array *tr;

        mutex_lock(&event_mutex);
        do_for_each_event_file(tr, file) {

                if (!(file->flags & EVENT_FILE_FL_ENABLED))
                        continue;

                if (enable) {
                        tracing_start_cmdline_record();
                        set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                } else {
                        tracing_stop_cmdline_record();
                        clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                }
        } while_for_each_event_file();
        mutex_unlock(&event_mutex);
}

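/*
 * Common helper for enabling and disabling an event file. When
 * soft_disable is set, the caller (e.g. an event trigger) wants the
 * tracepoint registered but the output suppressed via SOFT_DISABLED;
 * the sm_ref counter tracks how many such users exist.
 */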
static int __ftrace_event_enable_disable(struct trace_event_file *file,
                                         int enable, int soft_disable)
{
        struct trace_event_call *call = file->event_call;
        struct trace_array *tr = file->tr;
        int ret = 0;
        int disable;

        switch (enable) {
        case 0:
                /*
                 * When soft_disable is set and enable is cleared, the sm_ref
                 * reference counter is decremented. If it reaches 0, we want
                 * to clear the SOFT_DISABLED flag but leave the event in the
                 * state that it was. That is, if the event was enabled and
                 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
                 * is set we do not want the event to be enabled before we
                 * clear the bit.
                 *
                 * When soft_disable is not set but the SOFT_MODE flag is,
                 * we do nothing. Do not disable the tracepoint, otherwise
                 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
                 */
                if (soft_disable) {
                        if (atomic_dec_return(&file->sm_ref) > 0)
                                break;
                        disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
                        clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
                } else
                        disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

                if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
                        clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
                        if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
                                tracing_stop_cmdline_record();
                                clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                        }
                        call->class->reg(call, TRACE_REG_UNREGISTER, file);
                }
                /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
                if (file->flags & EVENT_FILE_FL_SOFT_MODE)
                        set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                else
                        clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                break;
        case 1:
                /*
                 * When soft_disable is set and enable is set, we want to
                 * register the tracepoint for the event, but leave the event
                 * as is. That means, if the event was already enabled, we do
                 * nothing (but set SOFT_MODE). If the event is disabled, we
                 * set SOFT_DISABLED before enabling the event tracepoint, so
                 * it still seems to be disabled.
                 */
                if (!soft_disable)
                        clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                else {
                        if (atomic_inc_return(&file->sm_ref) > 1)
                                break;
                        set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
                }

                if (!(file->flags & EVENT_FILE_FL_ENABLED)) {

                        /* Keep the event disabled, when going to SOFT_MODE. */
                        if (soft_disable)
                                set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

                        if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
                                tracing_start_cmdline_record();
                                set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                        }
                        ret = call->class->reg(call, TRACE_REG_REGISTER, file);
                        if (ret) {
                                tracing_stop_cmdline_record();
                                pr_info("event trace: Could not enable event %s\n",
                                        trace_event_name(call));
                                break;
                        }
                        set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

                        /* WAS_ENABLED gets set but never cleared. */
                        call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
                }
                break;
        }

        return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
                               int enable, int soft_disable)
{
        return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
                                       int enable)
{
        return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
        struct trace_event_file *file;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                ftrace_event_enable_disable(file, 0);
        }
        mutex_unlock(&event_mutex);
}

static int cmp_pid(const void *key, const void *elt)
{
        const pid_t *search_pid = key;
        const pid_t *pid = elt;

        if (*search_pid == *pid)
                return 0;
        if (*search_pid < *pid)
                return -1;
        return 1;
}

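/*
 * Decide whether a task should be ignored by the PID filter. The pids
 * array is kept sorted, so a bsearch() with cmp_pid() is enough; a
 * task whose pid is not in the list is the one that gets ignored.
 */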
static bool
check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
        pid_t search_pid;
        pid_t *pid;

        /*
         * Return false, because if filtered_pids does not exist,
         * all pids are good to trace.
         */
        if (!filtered_pids)
                return false;

        search_pid = task->pid;

        pid = bsearch(&search_pid, filtered_pids->pids,
                      filtered_pids->nr_pids, sizeof(pid_t),
                      cmp_pid);
        if (!pid)
                return true;

        return false;
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
                    struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, prev) &&
                       check_ignore_pid(pid_list, next));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
                    struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        /* Nothing to do if we are already tracing */
        if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
                return;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        /* Nothing to do if we are not tracing */
        if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
                return;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        /* Set tracing if current is enabled */
        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, current));
}

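/*
 * Tear down PID filtering for a trace array: unregister the sched
 * probes, clear the per-file PID_FILTER bits and per-cpu ignore_pid
 * flags, then free the pid list once no reader can still see it.
 */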
static void __ftrace_clear_event_pids(struct trace_array *tr)
{
        struct trace_pid_list *pid_list;
        struct trace_event_file *file;
        int cpu;

        pid_list = rcu_dereference_protected(tr->filtered_pids,
                                             lockdep_is_held(&event_mutex));
        if (!pid_list)
                return;

        unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
        unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

        unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

        unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

        unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);

        list_for_each_entry(file, &tr->events, list) {
                clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
        }

        for_each_possible_cpu(cpu)
                per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;

        rcu_assign_pointer(tr->filtered_pids, NULL);

        /* Wait till all users are no longer using pid filtering */
        synchronize_sched();

        free_pages((unsigned long)pid_list->pids, pid_list->order);
        kfree(pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr)
{
        mutex_lock(&event_mutex);
        __ftrace_clear_event_pids(tr);
        mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
        struct event_filter *filter = system->filter;

        WARN_ON_ONCE(system_refcount(system) == 0);
        if (system_refcount_dec(system))
                return;

        list_del(&system->list);

        if (filter) {
                kfree(filter->filter_string);
                kfree(filter);
        }
        kfree_const(system->name);
        kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
        WARN_ON_ONCE(system_refcount(system) == 0);
        system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        dir->ref_count++;
        __get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        /* If the subsystem is about to be freed, the dir must be too */
        WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

        __put_system(dir->subsystem);
        if (!--dir->ref_count)
                kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
        mutex_lock(&event_mutex);
        __put_system_dir(dir);
        mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
        if (!dir)
                return;

        if (!--dir->nr_events) {
                tracefs_remove_recursive(dir->entry);
                list_del(&dir->list);
                __put_system_dir(dir);
        }
}

static void remove_event_file_dir(struct trace_event_file *file)
{
        struct dentry *dir = file->dir;
        struct dentry *child;

        if (dir) {
                spin_lock(&dir->d_lock);        /* probably unneeded */
                list_for_each_entry(child, &dir->d_subdirs, d_child) {
                        if (d_really_is_positive(child))        /* probably unneeded */
                                d_inode(child)->i_private = NULL;
                }
                spin_unlock(&dir->d_lock);

                tracefs_remove_recursive(dir);
        }

        list_del(&file->list);
        remove_subsystem(file->system);
        free_event_filter(file->filter);
        kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
                              const char *sub, const char *event, int set)
{
        struct trace_event_file *file;
        struct trace_event_call *call;
        const char *name;
        int ret = -EINVAL;

        list_for_each_entry(file, &tr->events, list) {

                call = file->event_call;
                name = trace_event_name(call);

                if (!name || !call->class || !call->class->reg)
                        continue;

                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;

                if (match &&
                    strcmp(match, name) != 0 &&
                    strcmp(match, call->class->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->class->system) != 0)
                        continue;

                if (event && strcmp(event, name) != 0)
                        continue;

                ftrace_event_enable_disable(file, set);

                ret = 0;
        }

        return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
                                  const char *sub, const char *event, int set)
{
        int ret;

        mutex_lock(&event_mutex);
        ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
        mutex_unlock(&event_mutex);

        return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;
        int ret;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        ret = __ftrace_set_clr_event(tr, match, sub, event, set);

        /* Put back the colon to allow this to be called again */
        if (buf)
                *(buf - 1) = ':';

        return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        struct trace_array *tr = top_trace_array();

        if (!tr)
                return -ENODEV;

        return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE          127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_parser parser;
        struct seq_file *m = file->private_data;
        struct trace_array *tr = m->private;
        ssize_t read, ret;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);

        if (read >= 0 && trace_parser_loaded(&parser)) {
                int set = 1;

                if (*parser.buffer == '!')
                        set = 0;

                parser.buffer[parser.idx] = 0;

                ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
                if (ret)
                        goto out_put;
        }

        ret = read;

 out_put:
        trace_parser_put(&parser);

        return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_file *file = v;
        struct trace_event_call *call;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                call = file->event_call;
                /*
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
                if (call->class && call->class->reg &&
                    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                        return file;
        }

        return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct trace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        file = list_entry(&tr->events, struct trace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = t_next(m, file, &l);
                if (!file)
                        break;
        }
        return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_file *file = v;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                if (file->flags & EVENT_FILE_FL_ENABLED)
                        return file;
        }

        return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        file = list_entry(&tr->events, struct trace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = s_next(m, file, &l);
                if (!file)
                        break;
        }
        return file;
}

static int t_show(struct seq_file *m, void *v)
{
        struct trace_event_file *file = v;
        struct trace_event_call *call = file->event_call;

        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->class->system);
        seq_printf(m, "%s\n", trace_event_name(call));

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static void *p_start(struct seq_file *m, loff_t *pos)
        __acquires(RCU)
{
        struct trace_pid_list *pid_list;
        struct trace_array *tr = m->private;

        /*
         * Grab the mutex, to keep calls to p_next() having the same
         * tr->filtered_pids as p_start() has.
         * If we just passed the tr->filtered_pids around, then RCU would
         * have been enough, but doing that makes things more complex.
         */
        mutex_lock(&event_mutex);
        rcu_read_lock_sched();

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        if (!pid_list || *pos >= pid_list->nr_pids)
                return NULL;

        return (void *)&pid_list->pids[*pos];
}

static void p_stop(struct seq_file *m, void *p)
        __releases(RCU)
{
        rcu_read_unlock_sched();
        mutex_unlock(&event_mutex);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_array *tr = m->private;
        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);

        (*pos)++;

        if (*pos >= pid_list->nr_pids)
                return NULL;

        return (void *)&pid_list->pids[*pos];
}

static int p_show(struct seq_file *m, void *v)
{
        pid_t *pid = v;

        seq_printf(m, "%d\n", *pid);
        return 0;
}

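/*
 * Read handler for an event's "enable" file: "0" or "1" reflects the
 * enabled state, with a trailing "*" when the event is in soft mode
 * or soft disabled.
 */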
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct trace_event_file *file;
        unsigned long flags;
        char buf[4] = "0";

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (likely(file))
                flags = file->flags;
        mutex_unlock(&event_mutex);

        if (!file)
                return -ENODEV;

        if (flags & EVENT_FILE_FL_ENABLED &&
            !(flags & EVENT_FILE_FL_SOFT_DISABLED))
                strcpy(buf, "1");

        if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
            flags & EVENT_FILE_FL_SOFT_MODE)
                strcat(buf, "*");

        strcat(buf, "\n");

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct trace_event_file *file;
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                ret = -ENODEV;
                mutex_lock(&event_mutex);
                file = event_file_data(filp);
                if (likely(file))
                        ret = ftrace_event_enable_disable(file, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct trace_event_call *call;
        struct trace_event_file *file;
        struct trace_array *tr = dir->tr;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                call = file->event_call;
                if (!trace_event_name(call) || !call->class || !call->class->reg)
                        continue;

                if (system && strcmp(call->class->system, system->name) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set
                 * or if all events are cleared, or if we have
                 * a mixture.
                 */
                set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        const char *name = NULL;
        unsigned long val;
        ssize_t ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        /*
         * Opening of "enable" adds a ref count to system,
         * so the name is safe to use.
         */
        if (system)
                name = system->name;

        ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

out:
        *ppos += cnt;

        return ret;
}

enum {
        FORMAT_HEADER           = 1,
        FORMAT_FIELD_SEPERATOR  = 2,
        FORMAT_PRINTFMT         = 3,
};

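/*
 * Iterator for the "format" file: header, then the common fields, a
 * separator, the event's own fields, and finally the print format.
 * The field lists are walked via node->prev because list_add() stores
 * the fields in reverse definition order.
 */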
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_call *call = event_file_data(m->private);
        struct list_head *common_head = &ftrace_common_fields;
        struct list_head *head = trace_get_fields(call);
        struct list_head *node = v;

        (*pos)++;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                node = common_head;
                break;

        case FORMAT_FIELD_SEPERATOR:
                node = head;
                break;

        case FORMAT_PRINTFMT:
                /* all done */
                return NULL;
        }

        node = node->prev;
        if (node == common_head)
                return (void *)FORMAT_FIELD_SEPERATOR;
        else if (node == head)
                return (void *)FORMAT_PRINTFMT;
        else
                return node;
}

static int f_show(struct seq_file *m, void *v)
{
        struct trace_event_call *call = event_file_data(m->private);
        struct ftrace_event_field *field;
        const char *array_descriptor;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                seq_printf(m, "name: %s\n", trace_event_name(call));
                seq_printf(m, "ID: %d\n", call->event.type);
                seq_puts(m, "format:\n");
                return 0;

        case FORMAT_FIELD_SEPERATOR:
                seq_putc(m, '\n');
                return 0;

        case FORMAT_PRINTFMT:
                seq_printf(m, "\nprint fmt: %s\n",
                           call->print_fmt);
                return 0;
        }

        field = list_entry(v, struct ftrace_event_field, link);
        /*
         * Smartly shows the array type(except dynamic array).
         * Normal:
         *      field:TYPE VAR
         * If TYPE := TYPE[LEN], it is shown:
         *      field:TYPE VAR[LEN]
         */
        array_descriptor = strchr(field->type, '[');

        if (!strncmp(field->type, "__data_loc", 10))
                array_descriptor = NULL;

        if (!array_descriptor)
                seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           field->type, field->name, field->offset,
                           field->size, !!field->is_signed);
        else
                seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           (int)(array_descriptor - field->type),
                           field->type, field->name,
                           array_descriptor, field->offset,
                           field->size, !!field->is_signed);

        return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
        void *p = (void *)FORMAT_HEADER;
        loff_t l = 0;

        /* ->stop() is called even if ->start() fails */
        mutex_lock(&event_mutex);
        if (!event_file_data(m->private))
                return ERR_PTR(-ENODEV);

        while (l < *pos && p)
                p = f_next(m, p, &l);

        return p;
}

static void f_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
        .start          = f_start,
        .next           = f_next,
        .stop           = f_stop,
        .show           = f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
        struct seq_file *m;
        int ret;

        ret = seq_open(file, &trace_format_seq_ops);
        if (ret < 0)
                return ret;

        m = file->private_data;
        m->private = file;

        return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int id = (long)event_file_data(filp);
        char buf[32];
        int len;

        if (*ppos)
                return 0;

        if (unlikely(!id))
                return -ENODEV;

        len = sprintf(buf, "%d\n", id);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct trace_event_file *file;
        struct trace_seq *s;
        int r = -ENODEV;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (file)
                print_event_filter(file, s);
        mutex_unlock(&event_mutex);

        if (file)
                r = simple_read_from_buffer(ubuf, cnt, ppos,
                                            s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct trace_event_file *file;
        char *buf;
        int err = -ENODEV;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = memdup_user_nul(ubuf, cnt);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (file)
                err = apply_event_filter(file, buf);
        mutex_unlock(&event_mutex);

        kfree(buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static LIST_HEAD(event_subsystems);

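/*
 * Opening a subsystem's "filter" or "enable" file must pin both the
 * subsystem directory and its trace array, since either could go away
 * while the file is held open. Verify the dir still exists first.
 */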
static int subsystem_open(struct inode *inode, struct file *filp)
{
        struct event_subsystem *system = NULL;
        struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
        struct trace_array *tr;
        int ret;

        if (tracing_is_disabled())
                return -ENODEV;

        /* Make sure the system still exists */
        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                list_for_each_entry(dir, &tr->systems, list) {
                        if (dir == inode->i_private) {
                                /* Don't open systems with no events */
                                if (dir->nr_events) {
                                        __get_system_dir(dir);
                                        system = dir->subsystem;
                                }
                                goto exit_loop;
                        }
                }
        }
 exit_loop:
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);

        if (!system)
                return -ENODEV;

        /* Some versions of gcc think dir can be uninitialized here */
        WARN_ON(!dir);

        /* Still need to increment the ref count of the system */
        if (trace_array_get(tr) < 0) {
                put_system(dir);
                return -ENODEV;
        }

        ret = tracing_open_generic(inode, filp);
        if (ret < 0) {
                trace_array_put(tr);
                put_system(dir);
        }

        return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
        struct trace_subsystem_dir *dir;
        struct trace_array *tr = inode->i_private;
        int ret;

        if (tracing_is_disabled())
                return -ENODEV;

        if (trace_array_get(tr) < 0)
                return -ENODEV;

        /* Make a temporary dir that has no system but points to tr */
        dir = kzalloc(sizeof(*dir), GFP_KERNEL);
        if (!dir) {
                trace_array_put(tr);
                return -ENOMEM;
        }

        dir->tr = tr;

        ret = tracing_open_generic(inode, filp);
        if (ret < 0) {
                trace_array_put(tr);
                kfree(dir);
                return ret;
        }

        filp->private_data = dir;

        return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
        struct trace_subsystem_dir *dir = file->private_data;

        trace_array_put(dir->tr);

        /*
         * If dir->subsystem is NULL, then this is a temporary
         * descriptor that was made for a trace_array to enable
         * all subsystems.
         */
        if (dir->subsystem)
                put_system(dir);
        else
                kfree(dir);

        return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = memdup_user_nul(ubuf, cnt);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        err = apply_subsystem_event_filter(dir, buf);
        kfree(buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

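/*
 * The pid list is stored in pages: capacity is the allocation size
 * (PAGE_SIZE << order) divided by sizeof(pid_t).
 */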
static int max_pids(struct trace_pid_list *pid_list)
{
        return (PAGE_SIZE << pid_list->order) / sizeof(pid_t);
}

static void ignore_task_cpu(void *data)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        /*
         * This function is called by on_each_cpu() while the
         * event_mutex is held.
         */
        pid_list = rcu_dereference_protected(tr->filtered_pids,
                                             mutex_is_locked(&event_mutex));

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, current));
}

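/*
 * Write handler for "set_event_pid": parse pids from user space into a
 * new list (growing the page allocation as needed), sort it and drop
 * duplicates, then publish it with rcu_assign_pointer(). The first
 * write also registers the sched tracepoint probes that maintain the
 * per-cpu ignore_pid flag.
 */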
static ssize_t
ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        struct seq_file *m = filp->private_data;
        struct trace_array *tr = m->private;
        struct trace_pid_list *filtered_pids = NULL;
        struct trace_pid_list *pid_list = NULL;
        struct trace_event_file *file;
        struct trace_parser parser;
        unsigned long val;
        loff_t this_pos;
        ssize_t read = 0;
        ssize_t ret = 0;
        pid_t pid;
        int i;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        mutex_lock(&event_mutex);
        /*
         * Load as many pids into the array before doing a
         * swap from the tr->filtered_pids to the new list.
         */
        while (cnt > 0) {

                this_pos = 0;

                ret = trace_get_user(&parser, ubuf, cnt, &this_pos);
                if (ret < 0 || !trace_parser_loaded(&parser))
                        break;

                read += ret;
                ubuf += ret;
                cnt -= ret;

                parser.buffer[parser.idx] = 0;

                ret = -EINVAL;
                if (kstrtoul(parser.buffer, 0, &val))
                        break;
                if (val > INT_MAX)
                        break;

                pid = (pid_t)val;

                ret = -ENOMEM;
                if (!pid_list) {
                        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
                        if (!pid_list)
                                break;

                        filtered_pids = rcu_dereference_protected(tr->filtered_pids,
                                                        lockdep_is_held(&event_mutex));
                        if (filtered_pids)
                                pid_list->order = filtered_pids->order;
                        else
                                pid_list->order = 0;

                        pid_list->pids = (void *)__get_free_pages(GFP_KERNEL,
                                                                  pid_list->order);
                        if (!pid_list->pids)
                                break;

                        if (filtered_pids) {
                                pid_list->nr_pids = filtered_pids->nr_pids;
                                memcpy(pid_list->pids, filtered_pids->pids,
                                       pid_list->nr_pids * sizeof(pid_t));
                        } else
                                pid_list->nr_pids = 0;
                }

                if (pid_list->nr_pids >= max_pids(pid_list)) {
                        pid_t *pid_page;

                        pid_page = (void *)__get_free_pages(GFP_KERNEL,
                                                            pid_list->order + 1);
                        if (!pid_page)
                                break;
                        memcpy(pid_page, pid_list->pids,
                               pid_list->nr_pids * sizeof(pid_t));
                        free_pages((unsigned long)pid_list->pids, pid_list->order);

                        pid_list->order++;
                        pid_list->pids = pid_page;
                }

                pid_list->pids[pid_list->nr_pids++] = pid;
                trace_parser_clear(&parser);
                ret = 0;
        }
        trace_parser_put(&parser);

        if (ret < 0) {
                if (pid_list)
                        free_pages((unsigned long)pid_list->pids, pid_list->order);
                kfree(pid_list);
                mutex_unlock(&event_mutex);
                return ret;
        }

        if (!pid_list) {
                mutex_unlock(&event_mutex);
                return ret;
        }

        sort(pid_list->pids, pid_list->nr_pids, sizeof(pid_t), cmp_pid, NULL);

        /* Remove duplicates */
        for (i = 1; i < pid_list->nr_pids; i++) {
                int start = i;

                while (i < pid_list->nr_pids &&
                       pid_list->pids[i - 1] == pid_list->pids[i])
                        i++;

                if (start != i) {
                        if (i < pid_list->nr_pids) {
                                memmove(&pid_list->pids[start], &pid_list->pids[i],
                                        (pid_list->nr_pids - i) * sizeof(pid_t));
                                pid_list->nr_pids -= i - start;
                                i = start;
                        } else
                                pid_list->nr_pids = start;
                }
        }

        rcu_assign_pointer(tr->filtered_pids, pid_list);

        list_for_each_entry(file, &tr->events, list) {
                set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
        }

        if (filtered_pids) {
                synchronize_sched();

                free_pages((unsigned long)filtered_pids->pids, filtered_pids->order);
                kfree(filtered_pids);
        } else {
                /*
                 * Register a probe that is called before all other probes
                 * to set ignore_pid if next or prev do not match.
1735                  * Register a probe that is called after all other probes
1736                  * to only keep ignore_pid set if next pid matches.
1737                  */
1738                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
1739                                                  tr, INT_MAX);
1740                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
1741                                                  tr, 0);
1742
1743                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
1744                                                  tr, INT_MAX);
1745                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1746                                                  tr, 0);
1747
1748                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1749                                                      tr, INT_MAX);
1750                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1751                                                      tr, 0);
1752
1753                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1754                                                  tr, INT_MAX);
1755                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1756                                                  tr, 0);
1757         }
1758
1759         /*
1760          * Ignoring of pids is done at task switch. But we have to
1761          * check for those tasks that are currently running.
1762          * Always do this in case a pid was appended or removed.
1763          */
1764         on_each_cpu(ignore_task_cpu, tr, 1);
1765
1766         mutex_unlock(&event_mutex);
1767
1768         ret = read;
1769         *ppos += read;
1770
1771         return ret;
1772 }
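
/*
 * A usage sketch for the write handler above (assuming the usual
 * tracefs mount point; pids are whitespace separated):
 *
 *	# echo 123 456 > /sys/kernel/debug/tracing/set_event_pid
 *
 * Each token is parsed as a pid, duplicates are dropped, and the
 * sorted list replaces tr->filtered_pids under RCU.
 */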
1773
1774 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1775 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1776 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
1777 static int ftrace_event_release(struct inode *inode, struct file *file);
1778
1779 static const struct seq_operations show_event_seq_ops = {
1780         .start = t_start,
1781         .next = t_next,
1782         .show = t_show,
1783         .stop = t_stop,
1784 };
1785
1786 static const struct seq_operations show_set_event_seq_ops = {
1787         .start = s_start,
1788         .next = s_next,
1789         .show = t_show,
1790         .stop = t_stop,
1791 };
1792
1793 static const struct seq_operations show_set_pid_seq_ops = {
1794         .start = p_start,
1795         .next = p_next,
1796         .show = p_show,
1797         .stop = p_stop,
1798 };
1799
1800 static const struct file_operations ftrace_avail_fops = {
1801         .open = ftrace_event_avail_open,
1802         .read = seq_read,
1803         .llseek = seq_lseek,
1804         .release = seq_release,
1805 };
1806
1807 static const struct file_operations ftrace_set_event_fops = {
1808         .open = ftrace_event_set_open,
1809         .read = seq_read,
1810         .write = ftrace_event_write,
1811         .llseek = seq_lseek,
1812         .release = ftrace_event_release,
1813 };
1814
1815 static const struct file_operations ftrace_set_event_pid_fops = {
1816         .open = ftrace_event_set_pid_open,
1817         .read = seq_read,
1818         .write = ftrace_event_pid_write,
1819         .llseek = seq_lseek,
1820         .release = ftrace_event_release,
1821 };
1822
1823 static const struct file_operations ftrace_enable_fops = {
1824         .open = tracing_open_generic,
1825         .read = event_enable_read,
1826         .write = event_enable_write,
1827         .llseek = default_llseek,
1828 };
1829
1830 static const struct file_operations ftrace_event_format_fops = {
1831         .open = trace_format_open,
1832         .read = seq_read,
1833         .llseek = seq_lseek,
1834         .release = seq_release,
1835 };
1836
1837 static const struct file_operations ftrace_event_id_fops = {
1838         .read = event_id_read,
1839         .llseek = default_llseek,
1840 };
1841
1842 static const struct file_operations ftrace_event_filter_fops = {
1843         .open = tracing_open_generic,
1844         .read = event_filter_read,
1845         .write = event_filter_write,
1846         .llseek = default_llseek,
1847 };
1848
1849 static const struct file_operations ftrace_subsystem_filter_fops = {
1850         .open = subsystem_open,
1851         .read = subsystem_filter_read,
1852         .write = subsystem_filter_write,
1853         .llseek = default_llseek,
1854         .release = subsystem_release,
1855 };
1856
1857 static const struct file_operations ftrace_system_enable_fops = {
1858         .open = subsystem_open,
1859         .read = system_enable_read,
1860         .write = system_enable_write,
1861         .llseek = default_llseek,
1862         .release = subsystem_release,
1863 };
1864
1865 static const struct file_operations ftrace_tr_enable_fops = {
1866         .open = system_tr_open,
1867         .read = system_enable_read,
1868         .write = system_enable_write,
1869         .llseek = default_llseek,
1870         .release = subsystem_release,
1871 };
1872
1873 static const struct file_operations ftrace_show_header_fops = {
1874         .open = tracing_open_generic,
1875         .read = show_header,
1876         .llseek = default_llseek,
1877 };
1878
1879 static int
1880 ftrace_event_open(struct inode *inode, struct file *file,
1881                   const struct seq_operations *seq_ops)
1882 {
1883         struct seq_file *m;
1884         int ret;
1885
1886         ret = seq_open(file, seq_ops);
1887         if (ret < 0)
1888                 return ret;
1889         m = file->private_data;
1890         /* copy tr over to seq ops */
1891         m->private = inode->i_private;
1892
1893         return ret;
1894 }
1895
1896 static int ftrace_event_release(struct inode *inode, struct file *file)
1897 {
1898         struct trace_array *tr = inode->i_private;
1899
1900         trace_array_put(tr);
1901
1902         return seq_release(inode, file);
1903 }
1904
1905 static int
1906 ftrace_event_avail_open(struct inode *inode, struct file *file)
1907 {
1908         const struct seq_operations *seq_ops = &show_event_seq_ops;
1909
1910         return ftrace_event_open(inode, file, seq_ops);
1911 }
1912
1913 static int
1914 ftrace_event_set_open(struct inode *inode, struct file *file)
1915 {
1916         const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1917         struct trace_array *tr = inode->i_private;
1918         int ret;
1919
1920         if (trace_array_get(tr) < 0)
1921                 return -ENODEV;
1922
1923         if ((file->f_mode & FMODE_WRITE) &&
1924             (file->f_flags & O_TRUNC))
1925                 ftrace_clear_events(tr);
1926
1927         ret = ftrace_event_open(inode, file, seq_ops);
1928         if (ret < 0)
1929                 trace_array_put(tr);
1930         return ret;
1931 }
1932
1933 static int
1934 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
1935 {
1936         const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
1937         struct trace_array *tr = inode->i_private;
1938         int ret;
1939
1940         if (trace_array_get(tr) < 0)
1941                 return -ENODEV;
1942
1943         if ((file->f_mode & FMODE_WRITE) &&
1944             (file->f_flags & O_TRUNC))
1945                 ftrace_clear_event_pids(tr);
1946
1947         ret = ftrace_event_open(inode, file, seq_ops);
1948         if (ret < 0)
1949                 trace_array_put(tr);
1950         return ret;
1951 }
1952
1953 static struct event_subsystem *
1954 create_new_subsystem(const char *name)
1955 {
1956         struct event_subsystem *system;
1957
1958         /* need to create new entry */
1959         system = kmalloc(sizeof(*system), GFP_KERNEL);
1960         if (!system)
1961                 return NULL;
1962
1963         system->ref_count = 1;
1964
1965         /* Only allocate if dynamic (kprobes and modules) */
1966         system->name = kstrdup_const(name, GFP_KERNEL);
1967         if (!system->name)
1968                 goto out_free;
1969
1970         system->filter = NULL;
1971
1972         system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1973         if (!system->filter)
1974                 goto out_free;
1975
1976         list_add(&system->list, &event_subsystems);
1977
1978         return system;
1979
1980  out_free:
1981         kfree_const(system->name);
1982         kfree(system);
1983         return NULL;
1984 }
1985
1986 static struct dentry *
1987 event_subsystem_dir(struct trace_array *tr, const char *name,
1988                     struct trace_event_file *file, struct dentry *parent)
1989 {
1990         struct trace_subsystem_dir *dir;
1991         struct event_subsystem *system;
1992         struct dentry *entry;
1993
1994         /* First see if we already created this dir */
1995         list_for_each_entry(dir, &tr->systems, list) {
1996                 system = dir->subsystem;
1997                 if (strcmp(system->name, name) == 0) {
1998                         dir->nr_events++;
1999                         file->system = dir;
2000                         return dir->entry;
2001                 }
2002         }
2003
2004         /* Now see if the system itself exists. */
2005         list_for_each_entry(system, &event_subsystems, list) {
2006                 if (strcmp(system->name, name) == 0)
2007                         break;
2008         }
2009         /* Reset system variable when not found */
2010         if (&system->list == &event_subsystems)
2011                 system = NULL;
2012
2013         dir = kmalloc(sizeof(*dir), GFP_KERNEL);
2014         if (!dir)
2015                 goto out_fail;
2016
2017         if (!system) {
2018                 system = create_new_subsystem(name);
2019                 if (!system)
2020                         goto out_free;
2021         } else
2022                 __get_system(system);
2023
2024         dir->entry = tracefs_create_dir(name, parent);
2025         if (!dir->entry) {
2026                 pr_warn("Failed to create system directory %s\n", name);
2027                 __put_system(system);
2028                 goto out_free;
2029         }
2030
2031         dir->tr = tr;
2032         dir->ref_count = 1;
2033         dir->nr_events = 1;
2034         dir->subsystem = system;
2035         file->system = dir;
2036
2037         entry = tracefs_create_file("filter", 0644, dir->entry, dir,
2038                                     &ftrace_subsystem_filter_fops);
2039         if (!entry) {
2040                 kfree(system->filter);
2041                 system->filter = NULL;
2042                 pr_warn("Could not create tracefs '%s/filter' entry\n", name);
2043         }
2044
2045         trace_create_file("enable", 0644, dir->entry, dir,
2046                           &ftrace_system_enable_fops);
2047
2048         list_add(&dir->list, &tr->systems);
2049
2050         return dir->entry;
2051
2052  out_free:
2053         kfree(dir);
2054  out_fail:
2055         /* Only print this message if we failed on a memory allocation */
2056         if (!dir || !system)
2057                 pr_warn("No memory to create event subsystem %s\n", name);
2058         return NULL;
2059 }
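
/*
 * For reference, each subsystem directory created above carries its
 * own control files:
 *
 *	events/<system>/enable
 *	events/<system>/filter
 */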
2060
2061 static int
2062 event_create_dir(struct dentry *parent, struct trace_event_file *file)
2063 {
2064         struct trace_event_call *call = file->event_call;
2065         struct trace_array *tr = file->tr;
2066         struct list_head *head;
2067         struct dentry *d_events;
2068         const char *name;
2069         int ret;
2070
2071         /*
2072          * If the trace point header did not define TRACE_SYSTEM
2073          * then the system would be called "TRACE_SYSTEM".
2074          */
2075         if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
2076                 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
2077                 if (!d_events)
2078                         return -ENOMEM;
2079         } else
2080                 d_events = parent;
2081
2082         name = trace_event_name(call);
2083         file->dir = tracefs_create_dir(name, d_events);
2084         if (!file->dir) {
2085                 pr_warn("Could not create tracefs '%s' directory\n", name);
2086                 return -1;
2087         }
2088
2089         if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2090                 trace_create_file("enable", 0644, file->dir, file,
2091                                   &ftrace_enable_fops);
2092
2093 #ifdef CONFIG_PERF_EVENTS
2094         if (call->event.type && call->class->reg)
2095                 trace_create_file("id", 0444, file->dir,
2096                                   (void *)(long)call->event.type,
2097                                   &ftrace_event_id_fops);
2098 #endif
2099
2100         /*
2101          * Other events may have the same class. Only update
2102          * the fields if they are not already defined.
2103          */
2104         head = trace_get_fields(call);
2105         if (list_empty(head)) {
2106                 ret = call->class->define_fields(call);
2107                 if (ret < 0) {
2108                         pr_warn("Could not initialize trace point events/%s\n",
2109                                 name);
2110                         return -1;
2111                 }
2112         }
2113         trace_create_file("filter", 0644, file->dir, file,
2114                           &ftrace_event_filter_fops);
2115
2116         /*
2117          * Only event directories that can be enabled should have
2118          * triggers.
2119          */
2120         if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2121                 trace_create_file("trigger", 0644, file->dir, file,
2122                                   &event_trigger_fops);
2123
2124         trace_create_file("format", 0444, file->dir, call,
2125                           &ftrace_event_format_fops);
2126
2127         return 0;
2128 }
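
/*
 * The per-event directory created above ends up looking roughly like
 * this under tracefs ("id" only with CONFIG_PERF_EVENTS, "enable" and
 * "trigger" only for events that may be enabled):
 *
 *	events/<system>/<event>/enable
 *	events/<system>/<event>/filter
 *	events/<system>/<event>/trigger
 *	events/<system>/<event>/format
 *	events/<system>/<event>/id
 */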
2129
2130 static void remove_event_from_tracers(struct trace_event_call *call)
2131 {
2132         struct trace_event_file *file;
2133         struct trace_array *tr;
2134
2135         do_for_each_event_file_safe(tr, file) {
2136                 if (file->event_call != call)
2137                         continue;
2138
2139                 remove_event_file_dir(file);
2140                 /*
2141                  * The do_for_each_event_file_safe() is
2142                  * a double loop. After finding the call for this
2143                  * trace_array, we use break to jump to the next
2144                  * trace_array.
2145                  */
2146                 break;
2147         } while_for_each_event_file();
2148 }
2149
2150 static void event_remove(struct trace_event_call *call)
2151 {
2152         struct trace_array *tr;
2153         struct trace_event_file *file;
2154
2155         do_for_each_event_file(tr, file) {
2156                 if (file->event_call != call)
2157                         continue;
2158                 ftrace_event_enable_disable(file, 0);
2159                 /*
2160                  * The do_for_each_event_file() is
2161                  * a double loop. After finding the call for this
2162                  * trace_array, we use break to jump to the next
2163                  * trace_array.
2164                  */
2165                 break;
2166         } while_for_each_event_file();
2167
2168         if (call->event.funcs)
2169                 __unregister_trace_event(&call->event);
2170         remove_event_from_tracers(call);
2171         list_del(&call->list);
2172 }
2173
2174 static int event_init(struct trace_event_call *call)
2175 {
2176         int ret = 0;
2177         const char *name;
2178
2179         name = trace_event_name(call);
2180         if (WARN_ON(!name))
2181                 return -EINVAL;
2182
2183         if (call->class->raw_init) {
2184                 ret = call->class->raw_init(call);
2185                 if (ret < 0 && ret != -ENOSYS)
2186                         pr_warn("Could not initialize trace events/%s\n", name);
2187         }
2188
2189         return ret;
2190 }
2191
2192 static int
2193 __register_event(struct trace_event_call *call, struct module *mod)
2194 {
2195         int ret;
2196
2197         ret = event_init(call);
2198         if (ret < 0)
2199                 return ret;
2200
2201         list_add(&call->list, &ftrace_events);
2202         call->mod = mod;
2203
2204         return 0;
2205 }
2206
2207 static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
2208 {
2209         int rlen;
2210         int elen;
2211
2212         /* Find the length of the enum value as a string */
2213         elen = snprintf(ptr, 0, "%ld", map->enum_value);
2214         /* Make sure there's enough room to replace the string with the value */
2215         if (len < elen)
2216                 return NULL;
2217
2218         snprintf(ptr, elen + 1, "%ld", map->enum_value);
2219
2220         /* Get the rest of the string of ptr */
2221         rlen = strlen(ptr + len);
2222         memmove(ptr + elen, ptr + len, rlen);
2223         /* Make sure we end the new string */
2224         ptr[elen + rlen] = 0;
2225
2226         return ptr + elen;
2227 }
2228
2229 static void update_event_printk(struct trace_event_call *call,
2230                                 struct trace_enum_map *map)
2231 {
2232         char *ptr;
2233         int quote = 0;
2234         int len = strlen(map->enum_string);
2235
2236         for (ptr = call->print_fmt; *ptr; ptr++) {
2237                 if (*ptr == '\\') {
2238                         ptr++;
2239                         /* paranoid */
2240                         if (!*ptr)
2241                                 break;
2242                         continue;
2243                 }
2244                 if (*ptr == '"') {
2245                         quote ^= 1;
2246                         continue;
2247                 }
2248                 if (quote)
2249                         continue;
2250                 if (isdigit(*ptr)) {
2251                         /* skip numbers */
2252                         do {
2253                                 ptr++;
2254                                 /* Check for alpha chars like ULL */
2255                         } while (isalnum(*ptr));
2256                         if (!*ptr)
2257                                 break;
2258                         /*
2259                          * A number must have some kind of delimiter after
2260                          * it, and we can ignore that too.
2261                          */
2262                         continue;
2263                 }
2264                 if (isalpha(*ptr) || *ptr == '_') {
2265                         if (strncmp(map->enum_string, ptr, len) == 0 &&
2266                             !isalnum(ptr[len]) && ptr[len] != '_') {
2267                                 ptr = enum_replace(ptr, map, len);
2268                                 /* Hmm, enum string smaller than value */
2269                                 if (WARN_ON_ONCE(!ptr))
2270                                         return;
2271                                 /*
2272                                  * No need to decrement here, as enum_replace()
2273                  * returns the pointer to the character past
2274                  * the enum, and two enums cannot be placed
2275                                  * back to back without something in between.
2276                                  * We can skip that something in between.
2277                                  */
2278                                 continue;
2279                         }
2280                 skip_more:
2281                         do {
2282                                 ptr++;
2283                         } while (isalnum(*ptr) || *ptr == '_');
2284                         if (!*ptr)
2285                                 break;
2286                         /*
2287                          * If what comes after this variable is a '.' or
2288                          * '->' then we can continue to ignore that string.
2289                          */
2290                         if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2291                                 ptr += *ptr == '.' ? 1 : 2;
2292                                 if (!*ptr)
2293                                         break;
2294                                 goto skip_more;
2295                         }
2296                         /*
2297                          * Once again, we can skip the delimiter that came
2298                          * after the string.
2299                          */
2300                         continue;
2301                 }
2302         }
2303 }
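
/*
 * A worked example with hypothetical names: given map->enum_string =
 * "ZONE_NORMAL" and map->enum_value = 2, a print_fmt fragment such as
 *
 *	__print_symbolic(REC->zone, { ZONE_NORMAL, "Normal" })
 *
 * is rewritten in place by enum_replace() to
 *
 *	__print_symbolic(REC->zone, { 2, "Normal" })
 *
 * with the tail of the string moved left over the shrunken token.
 */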
2304
2305 void trace_event_enum_update(struct trace_enum_map **map, int len)
2306 {
2307         struct trace_event_call *call, *p;
2308         const char *last_system = NULL;
2309         int last_i;
2310         int i;
2311
2312         down_write(&trace_event_sem);
2313         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2314                 /* events are usually grouped together with systems */
2315                 if (!last_system || call->class->system != last_system) {
2316                         last_i = 0;
2317                         last_system = call->class->system;
2318                 }
2319
2320                 for (i = last_i; i < len; i++) {
2321                         if (call->class->system == map[i]->system) {
2322                                 /* Save the first system if need be */
2323                                 if (!last_i)
2324                                         last_i = i;
2325                                 update_event_printk(call, map[i]);
2326                         }
2327                 }
2328         }
2329         up_write(&trace_event_sem);
2330 }
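
/*
 * The maps processed above come from event headers; a sketch of how
 * one is declared (TRACE_DEFINE_ENUM() generates a trace_enum_map
 * entry pairing the enum name with its value):
 *
 *	TRACE_DEFINE_ENUM(ZONE_NORMAL);
 */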
2331
2332 static struct trace_event_file *
2333 trace_create_new_event(struct trace_event_call *call,
2334                        struct trace_array *tr)
2335 {
2336         struct trace_event_file *file;
2337
2338         file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2339         if (!file)
2340                 return NULL;
2341
2342         file->event_call = call;
2343         file->tr = tr;
2344         atomic_set(&file->sm_ref, 0);
2345         atomic_set(&file->tm_ref, 0);
2346         INIT_LIST_HEAD(&file->triggers);
2347         list_add(&file->list, &tr->events);
2348
2349         return file;
2350 }
2351
2352 /* Add an event to a trace directory */
2353 static int
2354 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
2355 {
2356         struct trace_event_file *file;
2357
2358         file = trace_create_new_event(call, tr);
2359         if (!file)
2360                 return -ENOMEM;
2361
2362         return event_create_dir(tr->event_dir, file);
2363 }
2364
2365 /*
2366  * Just create a descriptor for early init. A descriptor is required
2367  * for enabling events at boot. We want to enable events before
2368  * the filesystem is initialized.
2369  */
2370 static __init int
2371 __trace_early_add_new_event(struct trace_event_call *call,
2372                             struct trace_array *tr)
2373 {
2374         struct trace_event_file *file;
2375
2376         file = trace_create_new_event(call, tr);
2377         if (!file)
2378                 return -ENOMEM;
2379
2380         return 0;
2381 }
2382
2383 struct ftrace_module_file_ops;
2384 static void __add_event_to_tracers(struct trace_event_call *call);
2385
2386 /* Add an additional event_call dynamically */
2387 int trace_add_event_call(struct trace_event_call *call)
2388 {
2389         int ret;
2390         mutex_lock(&trace_types_lock);
2391         mutex_lock(&event_mutex);
2392
2393         ret = __register_event(call, NULL);
2394         if (ret >= 0)
2395                 __add_event_to_tracers(call);
2396
2397         mutex_unlock(&event_mutex);
2398         mutex_unlock(&trace_types_lock);
2399         return ret;
2400 }
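
/*
 * A minimal registration sketch, as done by dynamic event sources
 * such as the kprobe event code ("my_call" stands in for a fully
 * initialized trace_event_call):
 *
 *	ret = trace_add_event_call(my_call);
 *	if (ret < 0)
 *		pr_warn("Could not register event\n");
 */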
2401
2402 /*
2403  * Must be called with trace_types_lock, event_mutex and
2404  * trace_event_sem held.
2405  */
2406 static void __trace_remove_event_call(struct trace_event_call *call)
2407 {
2408         event_remove(call);
2409         trace_destroy_fields(call);
2410         free_event_filter(call->filter);
2411         call->filter = NULL;
2412 }
2413
2414 static int probe_remove_event_call(struct trace_event_call *call)
2415 {
2416         struct trace_array *tr;
2417         struct trace_event_file *file;
2418
2419 #ifdef CONFIG_PERF_EVENTS
2420         if (call->perf_refcount)
2421                 return -EBUSY;
2422 #endif
2423         do_for_each_event_file(tr, file) {
2424                 if (file->event_call != call)
2425                         continue;
2426                 /*
2427                  * We can't rely on the ftrace_event_enable_disable(enable => 0)
2428                  * we are about to do; EVENT_FILE_FL_SOFT_MODE can suppress
2429                  * TRACE_REG_UNREGISTER.
2430                  */
2431                 if (file->flags & EVENT_FILE_FL_ENABLED)
2432                         return -EBUSY;
2433                 /*
2434                  * The do_for_each_event_file() is
2435                  * a double loop. After finding the call for this
2436                  * trace_array, we use break to jump to the next
2437                  * trace_array.
2438                  */
2439                 break;
2440         } while_for_each_event_file();
2441
2442         __trace_remove_event_call(call);
2443
2444         return 0;
2445 }
2446
2447 /* Remove an event_call */
2448 int trace_remove_event_call(struct trace_event_call *call)
2449 {
2450         int ret;
2451
2452         mutex_lock(&trace_types_lock);
2453         mutex_lock(&event_mutex);
2454         down_write(&trace_event_sem);
2455         ret = probe_remove_event_call(call);
2456         up_write(&trace_event_sem);
2457         mutex_unlock(&event_mutex);
2458         mutex_unlock(&trace_types_lock);
2459
2460         return ret;
2461 }
2462
2463 #define for_each_event(event, start, end)                       \
2464         for (event = start;                                     \
2465              (unsigned long)event < (unsigned long)end;         \
2466              event++)
2467
2468 #ifdef CONFIG_MODULES
2469
2470 static void trace_module_add_events(struct module *mod)
2471 {
2472         struct trace_event_call **call, **start, **end;
2473
2474         if (!mod->num_trace_events)
2475                 return;
2476
2477         /* Don't add infrastructure for mods without tracepoints */
2478         if (trace_module_has_bad_taint(mod)) {
2479                 pr_err("%s: module has bad taint, not creating trace events\n",
2480                        mod->name);
2481                 return;
2482         }
2483
2484         start = mod->trace_events;
2485         end = mod->trace_events + mod->num_trace_events;
2486
2487         for_each_event(call, start, end) {
2488                 __register_event(*call, mod);
2489                 __add_event_to_tracers(*call);
2490         }
2491 }
2492
2493 static void trace_module_remove_events(struct module *mod)
2494 {
2495         struct trace_event_call *call, *p;
2496         bool clear_trace = false;
2497
2498         down_write(&trace_event_sem);
2499         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2500                 if (call->mod == mod) {
2501                         if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
2502                                 clear_trace = true;
2503                         __trace_remove_event_call(call);
2504                 }
2505         }
2506         up_write(&trace_event_sem);
2507
2508         /*
2509          * It is safest to reset the ring buffer if the module being unloaded
2510          * registered any events that were used. The only worry is if
2511          * a new module gets loaded, and takes on the same id as the events
2512          * of this module. When printing out the buffer, traced events left
2513          * over from this module may be passed to the new module events and
2514          * unexpected results may occur.
2515          */
2516         if (clear_trace)
2517                 tracing_reset_all_online_cpus();
2518 }
2519
2520 static int trace_module_notify(struct notifier_block *self,
2521                                unsigned long val, void *data)
2522 {
2523         struct module *mod = data;
2524
2525         mutex_lock(&trace_types_lock);
2526         mutex_lock(&event_mutex);
2527         switch (val) {
2528         case MODULE_STATE_COMING:
2529                 trace_module_add_events(mod);
2530                 break;
2531         case MODULE_STATE_GOING:
2532                 trace_module_remove_events(mod);
2533                 break;
2534         }
2535         mutex_unlock(&event_mutex);
2536         mutex_unlock(&trace_types_lock);
2537
2538         return 0;
2539 }
2540
2541 static struct notifier_block trace_module_nb = {
2542         .notifier_call = trace_module_notify,
2543         .priority = 1, /* higher than trace.c module notify */
2544 };
2545 #endif /* CONFIG_MODULES */
2546
2547 /* Create a new event directory structure for a trace directory. */
2548 static void
2549 __trace_add_event_dirs(struct trace_array *tr)
2550 {
2551         struct trace_event_call *call;
2552         int ret;
2553
2554         list_for_each_entry(call, &ftrace_events, list) {
2555                 ret = __trace_add_new_event(call, tr);
2556                 if (ret < 0)
2557                         pr_warn("Could not create directory for event %s\n",
2558                                 trace_event_name(call));
2559         }
2560 }
2561
2562 struct trace_event_file *
2563 find_event_file(struct trace_array *tr, const char *system,  const char *event)
2564 {
2565         struct trace_event_file *file;
2566         struct trace_event_call *call;
2567         const char *name;
2568
2569         list_for_each_entry(file, &tr->events, list) {
2570
2571                 call = file->event_call;
2572                 name = trace_event_name(call);
2573
2574                 if (!name || !call->class || !call->class->reg)
2575                         continue;
2576
2577                 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2578                         continue;
2579
2580                 if (strcmp(event, name) == 0 &&
2581                     strcmp(system, call->class->system) == 0)
2582                         return file;
2583         }
2584         return NULL;
2585 }
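
/*
 * Lookup sketch (callers such as event_enable_func() below take
 * event_mutex before calling):
 *
 *	file = find_event_file(tr, "sched", "sched_switch");
 */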
2586
2587 #ifdef CONFIG_DYNAMIC_FTRACE
2588
2589 /* Avoid typos */
2590 #define ENABLE_EVENT_STR        "enable_event"
2591 #define DISABLE_EVENT_STR       "disable_event"
2592
2593 struct event_probe_data {
2594         struct trace_event_file *file;
2595         unsigned long                   count;
2596         int                             ref;
2597         bool                            enable;
2598 };
2599
2600 static void
2601 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2602 {
2603         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2604         struct event_probe_data *data = *pdata;
2605
2606         if (!data)
2607                 return;
2608
2609         if (data->enable)
2610                 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2611         else
2612                 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2613 }
2614
2615 static void
2616 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2617 {
2618         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2619         struct event_probe_data *data = *pdata;
2620
2621         if (!data)
2622                 return;
2623
2624         if (!data->count)
2625                 return;
2626
2627         /* Skip if the event is in a state we want to switch to */
2628         if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
2629                 return;
2630
2631         if (data->count != -1)
2632                 (data->count)--;
2633
2634         event_enable_probe(ip, parent_ip, _data);
2635 }
2636
2637 static int
2638 event_enable_print(struct seq_file *m, unsigned long ip,
2639                       struct ftrace_probe_ops *ops, void *_data)
2640 {
2641         struct event_probe_data *data = _data;
2642
2643         seq_printf(m, "%ps:", (void *)ip);
2644
2645         seq_printf(m, "%s:%s:%s",
2646                    data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2647                    data->file->event_call->class->system,
2648                    trace_event_name(data->file->event_call));
2649
2650         if (data->count == -1)
2651                 seq_puts(m, ":unlimited\n");
2652         else
2653                 seq_printf(m, ":count=%ld\n", data->count);
2654
2655         return 0;
2656 }
2657
2658 static int
2659 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2660                   void **_data)
2661 {
2662         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2663         struct event_probe_data *data = *pdata;
2664
2665         data->ref++;
2666         return 0;
2667 }
2668
2669 static void
2670 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2671                   void **_data)
2672 {
2673         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2674         struct event_probe_data *data = *pdata;
2675
2676         if (WARN_ON_ONCE(data->ref <= 0))
2677                 return;
2678
2679         data->ref--;
2680         if (!data->ref) {
2681                 /* Remove the SOFT_MODE flag */
2682                 __ftrace_event_enable_disable(data->file, 0, 1);
2683                 module_put(data->file->event_call->mod);
2684                 kfree(data);
2685         }
2686         *pdata = NULL;
2687 }
2688
2689 static struct ftrace_probe_ops event_enable_probe_ops = {
2690         .func                   = event_enable_probe,
2691         .print                  = event_enable_print,
2692         .init                   = event_enable_init,
2693         .free                   = event_enable_free,
2694 };
2695
2696 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2697         .func                   = event_enable_count_probe,
2698         .print                  = event_enable_print,
2699         .init                   = event_enable_init,
2700         .free                   = event_enable_free,
2701 };
2702
2703 static struct ftrace_probe_ops event_disable_probe_ops = {
2704         .func                   = event_enable_probe,
2705         .print                  = event_enable_print,
2706         .init                   = event_enable_init,
2707         .free                   = event_enable_free,
2708 };
2709
2710 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2711         .func                   = event_enable_count_probe,
2712         .print                  = event_enable_print,
2713         .init                   = event_enable_init,
2714         .free                   = event_enable_free,
2715 };
2716
2717 static int
2718 event_enable_func(struct ftrace_hash *hash,
2719                   char *glob, char *cmd, char *param, int enabled)
2720 {
2721         struct trace_array *tr = top_trace_array();
2722         struct trace_event_file *file;
2723         struct ftrace_probe_ops *ops;
2724         struct event_probe_data *data;
2725         const char *system;
2726         const char *event;
2727         char *number;
2728         bool enable;
2729         int ret;
2730
2731         if (!tr)
2732                 return -ENODEV;
2733
2734         /* hash funcs only work with set_ftrace_filter */
2735         if (!enabled || !param)
2736                 return -EINVAL;
2737
2738         system = strsep(&param, ":");
2739         if (!param)
2740                 return -EINVAL;
2741
2742         event = strsep(&param, ":");
2743
2744         mutex_lock(&event_mutex);
2745
2746         ret = -EINVAL;
2747         file = find_event_file(tr, system, event);
2748         if (!file)
2749                 goto out;
2750
2751         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2752
2753         if (enable)
2754                 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2755         else
2756                 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2757
2758         if (glob[0] == '!') {
2759                 unregister_ftrace_function_probe_func(glob+1, ops);
2760                 ret = 0;
2761                 goto out;
2762         }
2763
2764         ret = -ENOMEM;
2765         data = kzalloc(sizeof(*data), GFP_KERNEL);
2766         if (!data)
2767                 goto out;
2768
2769         data->enable = enable;
2770         data->count = -1;
2771         data->file = file;
2772
2773         if (!param)
2774                 goto out_reg;
2775
2776         number = strsep(&param, ":");
2777
2778         ret = -EINVAL;
2779         if (!strlen(number))
2780                 goto out_free;
2781
2782         /*
2783          * We use the callback data field (which is a pointer)
2784          * as our counter.
2785          */
2786         ret = kstrtoul(number, 0, &data->count);
2787         if (ret)
2788                 goto out_free;
2789
2790  out_reg:
2791         /* Don't let event modules unload while probe registered */
2792         ret = try_module_get(file->event_call->mod);
2793         if (!ret) {
2794                 ret = -EBUSY;
2795                 goto out_free;
2796         }
2797
2798         ret = __ftrace_event_enable_disable(file, 1, 1);
2799         if (ret < 0)
2800                 goto out_put;
2801         ret = register_ftrace_function_probe(glob, ops, data);
2802         /*
2803          * The above returns on success the # of functions enabled,
2804          * but if it didn't find any functions it returns zero.
2805          * Consider no functions a failure too.
2806          */
2807         if (!ret) {
2808                 ret = -ENOENT;
2809                 goto out_disable;
2810         } else if (ret < 0)
2811                 goto out_disable;
2812         /* Just return zero, not the number of enabled functions */
2813         ret = 0;
2814  out:
2815         mutex_unlock(&event_mutex);
2816         return ret;
2817
2818  out_disable:
2819         __ftrace_event_enable_disable(file, 0, 1);
2820  out_put:
2821         module_put(file->event_call->mod);
2822  out_free:
2823         kfree(data);
2824         goto out;
2825 }
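
/*
 * Example usage of the commands parsed above, written to
 * set_ftrace_filter (path assumes the usual tracefs mount):
 *
 *	# echo 'schedule:enable_event:sched:sched_switch:5' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 *
 * This enables sched:sched_switch the first 5 times schedule() is
 * hit; omit ":5" for unlimited, and prefix the glob with '!' to
 * remove the probe again.
 */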
2826
2827 static struct ftrace_func_command event_enable_cmd = {
2828         .name                   = ENABLE_EVENT_STR,
2829         .func                   = event_enable_func,
2830 };
2831
2832 static struct ftrace_func_command event_disable_cmd = {
2833         .name                   = DISABLE_EVENT_STR,
2834         .func                   = event_enable_func,
2835 };
2836
2837 static __init int register_event_cmds(void)
2838 {
2839         int ret;
2840
2841         ret = register_ftrace_command(&event_enable_cmd);
2842         if (WARN_ON(ret < 0))
2843                 return ret;
2844         ret = register_ftrace_command(&event_disable_cmd);
2845         if (WARN_ON(ret < 0))
2846                 unregister_ftrace_command(&event_enable_cmd);
2847         return ret;
2848 }
2849 #else
2850 static inline int register_event_cmds(void) { return 0; }
2851 #endif /* CONFIG_DYNAMIC_FTRACE */
2852
2853 /*
2854  * The top level array has already had its trace_event_file
2855  * descriptors created in order to allow for early events to
2856  * be recorded. This function is called after the tracefs has been
2857  * initialized, and we now have to create the files associated
2858  * to the events.
2859  */
2860 static __init void
2861 __trace_early_add_event_dirs(struct trace_array *tr)
2862 {
2863         struct trace_event_file *file;
2864         int ret;
2865
2866
2867         list_for_each_entry(file, &tr->events, list) {
2868                 ret = event_create_dir(tr->event_dir, file);
2869                 if (ret < 0)
2870                         pr_warn("Could not create directory for event %s\n",
2871                                 trace_event_name(file->event_call));
2872         }
2873 }
2874
2875 /*
2876  * For early boot up, the top trace array needs to have
2877  * a list of events that can be enabled. This must be done before
2878  * the filesystem is set up in order to allow events to be traced
2879  * early.
2880  */
2881 static __init void
2882 __trace_early_add_events(struct trace_array *tr)
2883 {
2884         struct trace_event_call *call;
2885         int ret;
2886
2887         list_for_each_entry(call, &ftrace_events, list) {
2888                 /* Early boot up should not have any modules loaded */
2889                 if (WARN_ON_ONCE(call->mod))
2890                         continue;
2891
2892                 ret = __trace_early_add_new_event(call, tr);
2893                 if (ret < 0)
2894                         pr_warn("Could not create early event %s\n",
2895                                 trace_event_name(call));
2896         }
2897 }
2898
2899 /* Remove the event directory structure for a trace directory. */
2900 static void
2901 __trace_remove_event_dirs(struct trace_array *tr)
2902 {
2903         struct trace_event_file *file, *next;
2904
2905         list_for_each_entry_safe(file, next, &tr->events, list)
2906                 remove_event_file_dir(file);
2907 }
2908
2909 static void __add_event_to_tracers(struct trace_event_call *call)
2910 {
2911         struct trace_array *tr;
2912
2913         list_for_each_entry(tr, &ftrace_trace_arrays, list)
2914                 __trace_add_new_event(call, tr);
2915 }
2916
2917 extern struct trace_event_call *__start_ftrace_events[];
2918 extern struct trace_event_call *__stop_ftrace_events[];
2919
2920 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2921
2922 static __init int setup_trace_event(char *str)
2923 {
2924         strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2925         ring_buffer_expanded = true;
2926         tracing_selftest_disabled = true;
2927
2928         return 1;
2929 }
2930 __setup("trace_event=", setup_trace_event);
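
/*
 * Example: events can be enabled from the boot command line as a
 * comma separated list, e.g.:
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 */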
2931
2932 /* Expects to have event_mutex held when called */
2933 static int
2934 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2935 {
2936         struct dentry *d_events;
2937         struct dentry *entry;
2938
2939         entry = tracefs_create_file("set_event", 0644, parent,
2940                                     tr, &ftrace_set_event_fops);
2941         if (!entry) {
2942                 pr_warn("Could not create tracefs 'set_event' entry\n");
2943                 return -ENOMEM;
2944         }
2945
2946         d_events = tracefs_create_dir("events", parent);
2947         if (!d_events) {
2948                 pr_warn("Could not create tracefs 'events' directory\n");
2949                 return -ENOMEM;
2950         }
2951
2952         entry = tracefs_create_file("set_event_pid", 0644, parent,
2953                                     tr, &ftrace_set_event_pid_fops);
2954
2955         /* ring buffer internal formats */
2956         trace_create_file("header_page", 0444, d_events,
2957                           ring_buffer_print_page_header,
2958                           &ftrace_show_header_fops);
2959
2960         trace_create_file("header_event", 0444, d_events,
2961                           ring_buffer_print_entry_header,
2962                           &ftrace_show_header_fops);
2963
2964         trace_create_file("enable", 0644, d_events,
2965                           tr, &ftrace_tr_enable_fops);
2966
2967         tr->event_dir = d_events;
2968
2969         return 0;
2970 }
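
/*
 * The resulting top-level layout in each instance directory is
 * roughly:
 *
 *	set_event
 *	set_event_pid
 *	events/{header_page,header_event,enable}
 *	events/<system>/...
 */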
2971
2972 /**
2973  * event_trace_add_tracer - add an instance of a trace_array to events
2974  * @parent: The parent dentry to place the files/directories for events in
2975  * @tr: The trace array associated with these events
2976  *
2977  * When a new instance is created, it needs to set up its events
2978  * directory, as well as other files associated with events. It also
2979  * creates the event hierarchy in the @parent/events directory.
2980  *
2981  * Returns 0 on success.
2982  */
2983 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2984 {
2985         int ret;
2986
2987         mutex_lock(&event_mutex);
2988
2989         ret = create_event_toplevel_files(parent, tr);
2990         if (ret)
2991                 goto out_unlock;
2992
2993         down_write(&trace_event_sem);
2994         __trace_add_event_dirs(tr);
2995         up_write(&trace_event_sem);
2996
2997  out_unlock:
2998         mutex_unlock(&event_mutex);
2999
3000         return ret;
3001 }
3002
3003 /*
3004  * The top trace array already had its file descriptors created.
3005  * Now the files themselves need to be created.
3006  */
3007 static __init int
3008 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
3009 {
3010         int ret;
3011
3012         mutex_lock(&event_mutex);
3013
3014         ret = create_event_toplevel_files(parent, tr);
3015         if (ret)
3016                 goto out_unlock;
3017
3018         down_write(&trace_event_sem);
3019         __trace_early_add_event_dirs(tr);
3020         up_write(&trace_event_sem);
3021
3022  out_unlock:
3023         mutex_unlock(&event_mutex);
3024
3025         return ret;
3026 }
3027
3028 int event_trace_del_tracer(struct trace_array *tr)
3029 {
3030         mutex_lock(&event_mutex);
3031
3032         /* Disable any event triggers and associated soft-disabled events */
3033         clear_event_triggers(tr);
3034
3035         /* Clear the pid list */
3036         __ftrace_clear_event_pids(tr);
3037
3038         /* Disable any running events */
3039         __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
3040
3041         /* Access to events is done within rcu_read_lock_sched() */
3042         synchronize_sched();
3043
3044         down_write(&trace_event_sem);
3045         __trace_remove_event_dirs(tr);
3046         tracefs_remove_recursive(tr->event_dir);
3047         up_write(&trace_event_sem);
3048
3049         tr->event_dir = NULL;
3050
3051         mutex_unlock(&event_mutex);
3052
3053         return 0;
3054 }
3055
3056 static __init int event_trace_memsetup(void)
3057 {
3058         field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
3059         file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
3060         return 0;
3061 }
3062
3063 static __init void
3064 early_enable_events(struct trace_array *tr, bool disable_first)
3065 {
3066         char *buf = bootup_event_buf;
3067         char *token;
3068         int ret;
3069
3070         while (true) {
3071                 token = strsep(&buf, ",");
3072
3073                 if (!token)
3074                         break;
3075
3076                 if (*token) {
3077                         /* Restarting syscalls requires that we stop them first */
3078                         if (disable_first)
3079                                 ftrace_set_clr_event(tr, token, 0);
3080
3081                         ret = ftrace_set_clr_event(tr, token, 1);
3082                         if (ret)
3083                                 pr_warn("Failed to enable trace event: %s\n", token);
3084                 }
3085
3086                 /* Put back the comma to allow this to be called again */
3087                 if (buf)
3088                         *(buf - 1) = ',';
3089         }
3090 }
3091
3092 static __init int event_trace_enable(void)
3093 {
3094         struct trace_array *tr = top_trace_array();
3095         struct trace_event_call **iter, *call;
3096         int ret;
3097
3098         if (!tr)
3099                 return -ENODEV;
3100
3101         for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3102
3103                 call = *iter;
3104                 ret = event_init(call);
3105                 if (!ret)
3106                         list_add(&call->list, &ftrace_events);
3107         }
3108
3109         /*
3110          * We need the top trace array to have a working set of trace
3111          * points at early init, before the debug files and directories
3112          * are created. Create the file entries now, and attach them
3113          * to the actual file dentries later.
3114          */
3115         __trace_early_add_events(tr);
3116
3117         early_enable_events(tr, false);
3118
3119         trace_printk_start_comm();
3120
3121         register_event_cmds();
3122
3123         register_trigger_cmds();
3124
3125         return 0;
3126 }
3127
3128 /*
3129  * event_trace_enable() is called from trace_event_init() first to
3130  * initialize events and perhaps start any events that are on the
3131  * command line. Unfortunately, there are some events that will not
3132  * start this early, like the system call tracepoints that need
3133  * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. As event_trace_enable()
3134  * is called before pid 1 starts, that flag is never set, so the
3135  * syscall tracepoints are never reached, even though the events are
3136  * enabled (and record nothing). Hence the events are enabled again here.
3137  */
3138 static __init int event_trace_enable_again(void)
3139 {
3140         struct trace_array *tr;
3141
3142         tr = top_trace_array();
3143         if (!tr)
3144                 return -ENODEV;
3145
3146         early_enable_events(tr, true);
3147
3148         return 0;
3149 }
3150
3151 early_initcall(event_trace_enable_again);
3152
3153 static __init int event_trace_init(void)
3154 {
3155         struct trace_array *tr;
3156         struct dentry *d_tracer;
3157         struct dentry *entry;
3158         int ret;
3159
3160         tr = top_trace_array();
3161         if (!tr)
3162                 return -ENODEV;
3163
3164         d_tracer = tracing_init_dentry();
3165         if (IS_ERR(d_tracer))
3166                 return 0;
3167
3168         entry = tracefs_create_file("available_events", 0444, d_tracer,
3169                                     tr, &ftrace_avail_fops);
3170         if (!entry)
3171                 pr_warn("Could not create tracefs 'available_events' entry\n");
3172
3173         if (trace_define_generic_fields())
3174                 pr_warn("tracing: Failed to allocate generic fields");
3175
3176         if (trace_define_common_fields())
3177                 pr_warn("tracing: Failed to allocate common fields");
3178
3179         ret = early_event_add_tracer(d_tracer, tr);
3180         if (ret)
3181                 return ret;
3182
3183 #ifdef CONFIG_MODULES
3184         ret = register_module_notifier(&trace_module_nb);
3185         if (ret)
3186                 pr_warn("Failed to register trace events module notifier\n");
3187 #endif
3188         return 0;
3189 }
3190
3191 void __init trace_event_init(void)
3192 {
3193         event_trace_memsetup();
3194         init_ftrace_syscalls();
3195         event_trace_enable();
3196 }
3197
3198 fs_initcall(event_trace_init);
3199
3200 #ifdef CONFIG_FTRACE_STARTUP_TEST
3201
3202 static DEFINE_SPINLOCK(test_spinlock);
3203 static DEFINE_SPINLOCK(test_spinlock_irq);
3204 static DEFINE_MUTEX(test_mutex);
3205
3206 static __init void test_work(struct work_struct *dummy)
3207 {
3208         spin_lock(&test_spinlock);
3209         spin_lock_irq(&test_spinlock_irq);
3210         udelay(1);
3211         spin_unlock_irq(&test_spinlock_irq);
3212         spin_unlock(&test_spinlock);
3213
3214         mutex_lock(&test_mutex);
3215         msleep(1);
3216         mutex_unlock(&test_mutex);
3217 }
3218
3219 static __init int event_test_thread(void *unused)
3220 {
3221         void *test_malloc;
3222
3223         test_malloc = kmalloc(1234, GFP_KERNEL);
3224         if (!test_malloc)
3225                 pr_info("failed to kmalloc\n");
3226
3227         schedule_on_each_cpu(test_work);
3228
3229         kfree(test_malloc);
3230
3231         set_current_state(TASK_INTERRUPTIBLE);
3232         while (!kthread_should_stop()) {
3233                 schedule();
3234                 set_current_state(TASK_INTERRUPTIBLE);
3235         }
3236         __set_current_state(TASK_RUNNING);
3237
3238         return 0;
3239 }
3240
3241 /*
3242  * Do various things that may trigger events.
3243  */
3244 static __init void event_test_stuff(void)
3245 {
3246         struct task_struct *test_thread;
3247
3248         test_thread = kthread_run(event_test_thread, NULL, "test-events");
3249         msleep(1);
3250         kthread_stop(test_thread);
3251 }
3252
3253 /*
3254  * For every trace event defined, we will test each trace point separately,
3255  * and then by groups, and finally all trace points.
3256  */
3257 static __init void event_trace_self_tests(void)
3258 {
3259         struct trace_subsystem_dir *dir;
3260         struct trace_event_file *file;
3261         struct trace_event_call *call;
3262         struct event_subsystem *system;
3263         struct trace_array *tr;
3264         int ret;
3265
3266         tr = top_trace_array();
3267         if (!tr)
3268                 return;
3269
3270         pr_info("Running tests on trace events:\n");
3271
3272         list_for_each_entry(file, &tr->events, list) {
3273
3274                 call = file->event_call;
3275
3276                 /* Only test those that have a probe */
3277                 if (!call->class || !call->class->probe)
3278                         continue;
3279
3280 /*
3281  * Testing syscall events here is pretty useless, but
3282  * we still do it if configured, though it is time consuming.
3283  * What we really need is a user thread to perform the
3284  * syscalls as we test.
3285  */
3286 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
3287                 if (call->class->system &&
3288                     strcmp(call->class->system, "syscalls") == 0)
3289                         continue;
3290 #endif

                pr_info("Testing event %s: ", trace_event_name(call));

                /*
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
                if (file->flags & EVENT_FILE_FL_ENABLED) {
                        pr_warn("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }

                ftrace_event_enable_disable(file, 1);
                event_test_stuff();
                ftrace_event_enable_disable(file, 0);

                pr_cont("OK\n");
        }

        /* Now test at the subsystem level */

        pr_info("Running tests on trace event systems:\n");

        list_for_each_entry(dir, &tr->systems, list) {

                system = dir->subsystem;

                /* the ftrace system is special, skip it */
                if (strcmp(system->name, "ftrace") == 0)
                        continue;

                pr_info("Testing event system %s: ", system->name);

                ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
                if (WARN_ON_ONCE(ret)) {
                        pr_warn("error enabling system %s\n",
                                system->name);
                        continue;
                }

                event_test_stuff();

                ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
                if (WARN_ON_ONCE(ret)) {
                        pr_warn("error disabling system %s\n",
                                system->name);
                        continue;
                }

                pr_cont("OK\n");
        }

        /* Test with all events enabled */

        pr_info("Running tests on all trace events:\n");
        pr_info("Testing all events: ");

        ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error enabling all events\n");
                return;
        }

        event_test_stuff();

        /* Disable all the events again */
        ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error disabling all events\n");
                return;
        }

        pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static struct trace_array *event_tr;

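/*
 * ftrace callback used when the self tests are rerun with the function
 * tracer enabled: record a TRACE_FN entry for every traced function.
 * The per CPU ftrace_test_event_disable counter keeps the callback
 * from recursing into itself.
 */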
static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        pc = preempt_count();
        preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

        if (disabled != 1)
                goto out;

        local_save_flags(flags);

        event = trace_current_buffer_lock_reserve(&buffer,
                                                  TRACE_FN, sizeof(*entry),
                                                  flags, pc);
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;

        trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc);

 out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
        preempt_enable_notrace();
}

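/*
 * FTRACE_OPS_FL_RECURSION_SAFE tells ftrace that this callback handles
 * its own recursion protection (the per CPU counter above), so the
 * generic recursion guard can be skipped.
 */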
static struct ftrace_ops trace_ops __initdata = {
        .func = function_test_events_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static __init void event_trace_self_test_with_function(void)
{
        int ret;

        event_tr = top_trace_array();
        if (WARN_ON(!event_tr))
                return;
        ret = register_ftrace_function(&trace_ops);
        if (WARN_ON(ret < 0)) {
                pr_info("Failed to enable function tracer for event tests\n");
                return;
        }
        pr_info("Running tests again, along with the function tracer\n");
        event_trace_self_tests();
        unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif /* CONFIG_FUNCTION_TRACER */

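/*
 * Run the self tests from a late initcall so that all events and the
 * function tracer are registered first. The tests are skipped when the
 * selftests were disabled, e.g. because a tracer was selected on the
 * boot command line.
 */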
static __init int event_trace_self_tests_init(void)
{
        if (!tracing_selftest_disabled) {
                event_trace_self_tests();
                event_trace_self_test_with_function();
        }

        return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */