1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kprobes-based tracing events
4  *
5  * Created by Masami Hiramatsu <mhiramat@redhat.com>
6  *
7  */
8 #define pr_fmt(fmt)     "trace_kprobe: " fmt
9
10 #include <linux/security.h>
11 #include <linux/module.h>
12 #include <linux/uaccess.h>
13 #include <linux/rculist.h>
14 #include <linux/error-injection.h>
15
16 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
17
18 #include "trace_dynevent.h"
19 #include "trace_kprobe_selftest.h"
20 #include "trace_probe.h"
21 #include "trace_probe_tmpl.h"
22
23 #define KPROBE_EVENT_SYSTEM "kprobes"
24 #define KRETPROBE_MAXACTIVE_MAX 4096
25
26 /* Kprobe events defined early on the kernel command line */
27 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
28
29 static int __init set_kprobe_boot_events(char *str)
30 {
31         strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
32         disable_tracing_selftest("running kprobe events");
33
34         return 0;
35 }
36 __setup("kprobe_event=", set_kprobe_boot_events);
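/*
 * Boot-time example (a sketch; see Documentation/trace/kprobetrace.rst):
 * probe definitions are semicolon-delimited and use ',' in place of the
 * usual spaces, e.g.
 *   kprobe_event=p:myprobe,do_sys_open,$arg1,$arg2
 */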
37
38 static int trace_kprobe_create(int argc, const char **argv);
39 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
40 static int trace_kprobe_release(struct dyn_event *ev);
41 static bool trace_kprobe_is_busy(struct dyn_event *ev);
42 static bool trace_kprobe_match(const char *system, const char *event,
43                         int argc, const char **argv, struct dyn_event *ev);
44
45 static struct dyn_event_operations trace_kprobe_ops = {
46         .create = trace_kprobe_create,
47         .show = trace_kprobe_show,
48         .is_busy = trace_kprobe_is_busy,
49         .free = trace_kprobe_release,
50         .match = trace_kprobe_match,
51 };
52
53 /*
54  * Kprobe event core functions
55  */
56 struct trace_kprobe {
57         struct dyn_event        devent;
58         struct kretprobe        rp;     /* Use rp.kp for kprobe use */
59         unsigned long __percpu *nhit;
60         const char              *symbol;        /* symbol name */
61         struct trace_probe      tp;
62 };
63
64 static bool is_trace_kprobe(struct dyn_event *ev)
65 {
66         return ev->ops == &trace_kprobe_ops;
67 }
68
69 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
70 {
71         return container_of(ev, struct trace_kprobe, devent);
72 }
73
74 /**
75  * for_each_trace_kprobe - iterate over the trace_kprobe list
76  * @pos:        the struct trace_kprobe * for each entry
77  * @dpos:       the struct dyn_event * to use as a loop cursor
78  */
79 #define for_each_trace_kprobe(pos, dpos)        \
80         for_each_dyn_event(dpos)                \
81                 if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
82
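/*
 * Usage sketch (hypothetical; the same pattern appears in
 * find_trace_kprobe() below):
 *
 *	struct dyn_event *pos;
 *	struct trace_kprobe *tk;
 *
 *	for_each_trace_kprobe(tk, pos)
 *		pr_info("%s\n", trace_probe_name(&tk->tp));
 */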
83 #define SIZEOF_TRACE_KPROBE(n)                          \
84         (offsetof(struct trace_kprobe, tp.args) +       \
85         (sizeof(struct probe_arg) * (n)))
86
87 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
88 {
89         return tk->rp.handler != NULL;
90 }
91
92 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
93 {
94         return tk->symbol ? tk->symbol : "unknown";
95 }
96
97 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
98 {
99         return tk->rp.kp.offset;
100 }
101
102 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
103 {
104         return !!(kprobe_gone(&tk->rp.kp));
105 }
106
107 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
108                                                  struct module *mod)
109 {
110         int len = strlen(module_name(mod));
111         const char *name = trace_kprobe_symbol(tk);
112
113         return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
114 }
115
116 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
117 {
118         char *p;
119         bool ret;
120
121         if (!tk->symbol)
122                 return false;
123         p = strchr(tk->symbol, ':');
124         if (!p)
125                 return true;
126         *p = '\0';
127         mutex_lock(&module_mutex);
128         ret = !!find_module(tk->symbol);
129         mutex_unlock(&module_mutex);
130         *p = ':';
131
132         return ret;
133 }
134
135 static bool trace_kprobe_is_busy(struct dyn_event *ev)
136 {
137         struct trace_kprobe *tk = to_trace_kprobe(ev);
138
139         return trace_probe_is_enabled(&tk->tp);
140 }
141
142 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
143                                             int argc, const char **argv)
144 {
145         char buf[MAX_ARGSTR_LEN + 1];
146
147         if (!argc)
148                 return true;
149
150         if (!tk->symbol)
151                 snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
152         else if (tk->rp.kp.offset)
153                 snprintf(buf, sizeof(buf), "%s+%u",
154                          trace_kprobe_symbol(tk), tk->rp.kp.offset);
155         else
156                 snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
157         if (strcmp(buf, argv[0]))
158                 return false;
159         argc--; argv++;
160
161         return trace_probe_match_command_args(&tk->tp, argc, argv);
162 }
163
164 static bool trace_kprobe_match(const char *system, const char *event,
165                         int argc, const char **argv, struct dyn_event *ev)
166 {
167         struct trace_kprobe *tk = to_trace_kprobe(ev);
168
169         return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
170             (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
171             trace_kprobe_match_command_head(tk, argc, argv);
172 }
173
174 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
175 {
176         unsigned long nhit = 0;
177         int cpu;
178
179         for_each_possible_cpu(cpu)
180                 nhit += *per_cpu_ptr(tk->nhit, cpu);
181
182         return nhit;
183 }
184
185 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
186 {
187         return !(list_empty(&tk->rp.kp.list) &&
188                  hlist_unhashed(&tk->rp.kp.hlist));
189 }
190
191 /* Return 0 if it fails to find the symbol address */
192 static nokprobe_inline
193 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
194 {
195         unsigned long addr;
196
197         if (tk->symbol) {
198                 addr = (unsigned long)
199                         kallsyms_lookup_name(trace_kprobe_symbol(tk));
200                 if (addr)
201                         addr += tk->rp.kp.offset;
202         } else {
203                 addr = (unsigned long)tk->rp.kp.addr;
204         }
205         return addr;
206 }
207
208 static nokprobe_inline struct trace_kprobe *
209 trace_kprobe_primary_from_call(struct trace_event_call *call)
210 {
211         struct trace_probe *tp;
212
213         tp = trace_probe_primary_from_call(call);
214         if (WARN_ON_ONCE(!tp))
215                 return NULL;
216
217         return container_of(tp, struct trace_kprobe, tp);
218 }
219
220 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
221 {
222         struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
223
224         return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
225                         tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
226                         tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
227 }
228
229 bool trace_kprobe_error_injectable(struct trace_event_call *call)
230 {
231         struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
232
233         return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
234                false;
235 }
236
237 static int register_kprobe_event(struct trace_kprobe *tk);
238 static int unregister_kprobe_event(struct trace_kprobe *tk);
239
240 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
241 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
242                                 struct pt_regs *regs);
243
244 static void free_trace_kprobe(struct trace_kprobe *tk)
245 {
246         if (tk) {
247                 trace_probe_cleanup(&tk->tp);
248                 kfree(tk->symbol);
249                 free_percpu(tk->nhit);
250                 kfree(tk);
251         }
252 }
253
254 /*
255  * Allocate a new trace_kprobe and initialize it (including kprobes).
256  */
257 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
258                                              const char *event,
259                                              void *addr,
260                                              const char *symbol,
261                                              unsigned long offs,
262                                              int maxactive,
263                                              int nargs, bool is_return)
264 {
265         struct trace_kprobe *tk;
266         int ret = -ENOMEM;
267
268         tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
269         if (!tk)
270                 return ERR_PTR(ret);
271
272         tk->nhit = alloc_percpu(unsigned long);
273         if (!tk->nhit)
274                 goto error;
275
276         if (symbol) {
277                 tk->symbol = kstrdup(symbol, GFP_KERNEL);
278                 if (!tk->symbol)
279                         goto error;
280                 tk->rp.kp.symbol_name = tk->symbol;
281                 tk->rp.kp.offset = offs;
282         } else
283                 tk->rp.kp.addr = addr;
284
285         if (is_return)
286                 tk->rp.handler = kretprobe_dispatcher;
287         else
288                 tk->rp.kp.pre_handler = kprobe_dispatcher;
289
290         tk->rp.maxactive = maxactive;
291         INIT_HLIST_NODE(&tk->rp.kp.hlist);
292         INIT_LIST_HEAD(&tk->rp.kp.list);
293
294         ret = trace_probe_init(&tk->tp, event, group, false);
295         if (ret < 0)
296                 goto error;
297
298         dyn_event_init(&tk->devent, &trace_kprobe_ops);
299         return tk;
300 error:
301         free_trace_kprobe(tk);
302         return ERR_PTR(ret);
303 }
304
305 static struct trace_kprobe *find_trace_kprobe(const char *event,
306                                               const char *group)
307 {
308         struct dyn_event *pos;
309         struct trace_kprobe *tk;
310
311         for_each_trace_kprobe(tk, pos)
312                 if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
313                     strcmp(trace_probe_group_name(&tk->tp), group) == 0)
314                         return tk;
315         return NULL;
316 }
317
318 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
319 {
320         int ret = 0;
321
322         if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
323                 if (trace_kprobe_is_return(tk))
324                         ret = enable_kretprobe(&tk->rp);
325                 else
326                         ret = enable_kprobe(&tk->rp.kp);
327         }
328
329         return ret;
330 }
331
332 static void __disable_trace_kprobe(struct trace_probe *tp)
333 {
334         struct trace_probe *pos;
335         struct trace_kprobe *tk;
336
337         list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
338                 tk = container_of(pos, struct trace_kprobe, tp);
339                 if (!trace_kprobe_is_registered(tk))
340                         continue;
341                 if (trace_kprobe_is_return(tk))
342                         disable_kretprobe(&tk->rp);
343                 else
344                         disable_kprobe(&tk->rp.kp);
345         }
346 }
347
348 /*
349  * Enable trace_probe.
350  * If @file is NULL, enable the "perf" handler; else enable the "trace" handler.
351  */
352 static int enable_trace_kprobe(struct trace_event_call *call,
353                                 struct trace_event_file *file)
354 {
355         struct trace_probe *pos, *tp;
356         struct trace_kprobe *tk;
357         bool enabled;
358         int ret = 0;
359
360         tp = trace_probe_primary_from_call(call);
361         if (WARN_ON_ONCE(!tp))
362                 return -ENODEV;
363         enabled = trace_probe_is_enabled(tp);
364
365         /* This also changes "enabled" state */
366         if (file) {
367                 ret = trace_probe_add_file(tp, file);
368                 if (ret)
369                         return ret;
370         } else
371                 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
372
373         if (enabled)
374                 return 0;
375
376         list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
377                 tk = container_of(pos, struct trace_kprobe, tp);
378                 if (trace_kprobe_has_gone(tk))
379                         continue;
380                 ret = __enable_trace_kprobe(tk);
381                 if (ret)
382                         break;
383                 enabled = true;
384         }
385
386         if (ret) {
387                 /* Failed to enable one of them. Roll back all */
388                 if (enabled)
389                         __disable_trace_kprobe(tp);
390                 if (file)
391                         trace_probe_remove_file(tp, file);
392                 else
393                         trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
394         }
395
396         return ret;
397 }
398
399 /*
400  * Disable trace_probe.
401  * If @file is NULL, disable the "perf" handler; else disable the "trace" handler.
402  */
403 static int disable_trace_kprobe(struct trace_event_call *call,
404                                 struct trace_event_file *file)
405 {
406         struct trace_probe *tp;
407
408         tp = trace_probe_primary_from_call(call);
409         if (WARN_ON_ONCE(!tp))
410                 return -ENODEV;
411
412         if (file) {
413                 if (!trace_probe_get_file_link(tp, file))
414                         return -ENOENT;
415                 if (!trace_probe_has_single_file(tp))
416                         goto out;
417                 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
418         } else
419                 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
420
421         if (!trace_probe_is_enabled(tp))
422                 __disable_trace_kprobe(tp);
423
424  out:
425         if (file)
426                 /*
427                  * Synchronization is done in the function below. For the perf
428                  * event case, file == NULL and perf_trace_event_unreg() calls
429                  * tracepoint_synchronize_unregister() to synchronize the
430                  * event, so we don't need to care about it here.
431                  */
432                 trace_probe_remove_file(tp, file);
433
434         return 0;
435 }
436
437 #if defined(CONFIG_DYNAMIC_FTRACE) && \
438         !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
439 static bool __within_notrace_func(unsigned long addr)
440 {
441         unsigned long offset, size;
442
443         if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
444                 return false;
445
446         /* Get the entry address of the target function */
447         addr -= offset;
448
449         /*
450          * Since ftrace_location_range() does inclusive range check, we need
451          * to subtract 1 byte from the end address.
452          */
453         return !ftrace_location_range(addr, addr + size - 1);
454 }
455
456 static bool within_notrace_func(struct trace_kprobe *tk)
457 {
458         unsigned long addr = trace_kprobe_address(tk);
459         char symname[KSYM_NAME_LEN], *p;
460
461         if (!__within_notrace_func(addr))
462                 return false;
463
464         /* Check if the address is on a suffixed-symbol */
465         if (!lookup_symbol_name(addr, symname)) {
466                 p = strchr(symname, '.');
467                 if (!p)
468                         return true;
469                 *p = '\0';
470                 addr = (unsigned long)kprobe_lookup_name(symname, 0);
471                 if (addr)
472                         return __within_notrace_func(addr);
473         }
474
475         return true;
476 }
477 #else
478 #define within_notrace_func(tk) (false)
479 #endif
480
481 /* Internal register function - just handle k*probes and flags */
482 static int __register_trace_kprobe(struct trace_kprobe *tk)
483 {
484         int i, ret;
485
486         ret = security_locked_down(LOCKDOWN_KPROBES);
487         if (ret)
488                 return ret;
489
490         if (trace_kprobe_is_registered(tk))
491                 return -EINVAL;
492
493         if (within_notrace_func(tk)) {
494                 pr_warn("Could not probe notrace function %s\n",
495                         trace_kprobe_symbol(tk));
496                 return -EINVAL;
497         }
498
499         for (i = 0; i < tk->tp.nr_args; i++) {
500                 ret = traceprobe_update_arg(&tk->tp.args[i]);
501                 if (ret)
502                         return ret;
503         }
504
505         /* Set/clear disabled flag according to tp->flags */
506         if (trace_probe_is_enabled(&tk->tp))
507                 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
508         else
509                 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
510
511         if (trace_kprobe_is_return(tk))
512                 ret = register_kretprobe(&tk->rp);
513         else
514                 ret = register_kprobe(&tk->rp.kp);
515
516         return ret;
517 }
518
519 /* Internal unregister function - just handle k*probes and flags */
520 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
521 {
522         if (trace_kprobe_is_registered(tk)) {
523                 if (trace_kprobe_is_return(tk))
524                         unregister_kretprobe(&tk->rp);
525                 else
526                         unregister_kprobe(&tk->rp.kp);
527                 /* Cleanup kprobe for reuse and mark it unregistered */
528                 INIT_HLIST_NODE(&tk->rp.kp.hlist);
529                 INIT_LIST_HEAD(&tk->rp.kp.list);
530                 if (tk->rp.kp.symbol_name)
531                         tk->rp.kp.addr = NULL;
532         }
533 }
534
535 /* Unregister a trace_probe and probe_event */
536 static int unregister_trace_kprobe(struct trace_kprobe *tk)
537 {
538         /* If other probes are on the event, just unregister kprobe */
539         if (trace_probe_has_sibling(&tk->tp))
540                 goto unreg;
541
542         /* An enabled event cannot be unregistered */
543         if (trace_probe_is_enabled(&tk->tp))
544                 return -EBUSY;
545
546         /* Will fail if probe is being used by ftrace or perf */
547         if (unregister_kprobe_event(tk))
548                 return -EBUSY;
549
550 unreg:
551         __unregister_trace_kprobe(tk);
552         dyn_event_remove(&tk->devent);
553         trace_probe_unlink(&tk->tp);
554
555         return 0;
556 }
557
558 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
559                                          struct trace_kprobe *comp)
560 {
561         struct trace_probe_event *tpe = orig->tp.event;
562         struct trace_probe *pos;
563         int i;
564
565         list_for_each_entry(pos, &tpe->probes, list) {
566                 orig = container_of(pos, struct trace_kprobe, tp);
567                 if (strcmp(trace_kprobe_symbol(orig),
568                            trace_kprobe_symbol(comp)) ||
569                     trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
570                         continue;
571
572                 /*
573                  * trace_probe_compare_arg_type() ensured that nr_args and
574                  * each argument name and type are the same. Let's compare comm.
575                  */
576                 for (i = 0; i < orig->tp.nr_args; i++) {
577                         if (strcmp(orig->tp.args[i].comm,
578                                    comp->tp.args[i].comm))
579                                 break;
580                 }
581
582                 if (i == orig->tp.nr_args)
583                         return true;
584         }
585
586         return false;
587 }
588
589 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
590 {
591         int ret;
592
593         ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
594         if (ret) {
595                 /* Note that arguments start at index = 2 */
596                 trace_probe_log_set_index(ret + 1);
597                 trace_probe_log_err(0, DIFF_ARG_TYPE);
598                 return -EEXIST;
599         }
600         if (trace_kprobe_has_same_kprobe(to, tk)) {
601                 trace_probe_log_set_index(0);
602                 trace_probe_log_err(0, SAME_PROBE);
603                 return -EEXIST;
604         }
605
606         /* Append to existing event */
607         ret = trace_probe_append(&tk->tp, &to->tp);
608         if (ret)
609                 return ret;
610
611         /* Register k*probe */
612         ret = __register_trace_kprobe(tk);
613         if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
614                 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
615                 ret = 0;
616         }
617
618         if (ret)
619                 trace_probe_unlink(&tk->tp);
620         else
621                 dyn_event_add(&tk->devent);
622
623         return ret;
624 }
625
626 /* Register a trace_probe and probe_event */
627 static int register_trace_kprobe(struct trace_kprobe *tk)
628 {
629         struct trace_kprobe *old_tk;
630         int ret;
631
632         mutex_lock(&event_mutex);
633
634         old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
635                                    trace_probe_group_name(&tk->tp));
636         if (old_tk) {
637                 if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
638                         trace_probe_log_set_index(0);
639                         trace_probe_log_err(0, DIFF_PROBE_TYPE);
640                         ret = -EEXIST;
641                 } else {
642                         ret = append_trace_kprobe(tk, old_tk);
643                 }
644                 goto end;
645         }
646
647         /* Register new event */
648         ret = register_kprobe_event(tk);
649         if (ret) {
650                 pr_warn("Failed to register probe event(%d)\n", ret);
651                 goto end;
652         }
653
654         /* Register k*probe */
655         ret = __register_trace_kprobe(tk);
656         if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
657                 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
658                 ret = 0;
659         }
660
661         if (ret < 0)
662                 unregister_kprobe_event(tk);
663         else
664                 dyn_event_add(&tk->devent);
665
666 end:
667         mutex_unlock(&event_mutex);
668         return ret;
669 }
670
671 /* Module notifier callback, checking events on the module */
672 static int trace_kprobe_module_callback(struct notifier_block *nb,
673                                        unsigned long val, void *data)
674 {
675         struct module *mod = data;
676         struct dyn_event *pos;
677         struct trace_kprobe *tk;
678         int ret;
679
680         if (val != MODULE_STATE_COMING)
681                 return NOTIFY_DONE;
682
683         /* Update probes on the coming module */
684         mutex_lock(&event_mutex);
685         for_each_trace_kprobe(tk, pos) {
686                 if (trace_kprobe_within_module(tk, mod)) {
687                         /* Don't need to check busy - this should have gone. */
688                         __unregister_trace_kprobe(tk);
689                         ret = __register_trace_kprobe(tk);
690                         if (ret)
691                                 pr_warn("Failed to re-register probe %s on %s: %d\n",
692                                         trace_probe_name(&tk->tp),
693                                         module_name(mod), ret);
694                 }
695         }
696         mutex_unlock(&event_mutex);
697
698         return NOTIFY_DONE;
699 }
700
701 static struct notifier_block trace_kprobe_module_nb = {
702         .notifier_call = trace_kprobe_module_callback,
703         .priority = 1   /* Invoked after kprobe module callback */
704 };
705
706 /* Convert certain expected symbols into '_' when generating event names */
707 static inline void sanitize_event_name(char *name)
708 {
709         while (*name++ != '\0')
710                 if (*name == ':' || *name == '.')
711                         *name = '_';
712 }
713
714 static int trace_kprobe_create(int argc, const char *argv[])
715 {
716         /*
717          * Argument syntax:
718          *  - Add kprobe:
719          *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
720          *  - Add kretprobe:
721          *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
722          *    Or
723  *      p[:[GRP/]EVENT] [MOD:]KSYM[+0]%return [FETCHARGS]
724          *
725          * Fetch args:
726          *  $retval     : fetch return value
727          *  $stack      : fetch stack address
728          *  $stackN     : fetch Nth of stack (N:0-)
729          *  $comm       : fetch current task comm
730          *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
731          *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
732          *  %REG        : fetch register REG
733          * Dereferencing memory fetch:
734          *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
735          * Alias name of args:
736          *  NAME=FETCHARG : set NAME as alias of FETCHARG.
737          * Type of args:
738          *  FETCHARG:TYPE : use TYPE instead of unsigned long.
739          */
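        /*
         * Example commands (illustrative; the event names are arbitrary
         * and the fetch args are x86-64 flavored), as written to the
         * tracefs kprobe_events file:
         *   p:myprobe do_sys_open dfd=%di filename=%si
         *   r:myretprobe do_sys_open ret=$retval
         */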
740         struct trace_kprobe *tk = NULL;
741         int i, len, ret = 0;
742         bool is_return = false;
743         char *symbol = NULL, *tmp = NULL;
744         const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
745         int maxactive = 0;
746         long offset = 0;
747         void *addr = NULL;
748         char buf[MAX_EVENT_NAME_LEN];
749         unsigned int flags = TPARG_FL_KERNEL;
750
751         switch (argv[0][0]) {
752         case 'r':
753                 is_return = true;
754                 break;
755         case 'p':
756                 break;
757         default:
758                 return -ECANCELED;
759         }
760         if (argc < 2)
761                 return -ECANCELED;
762
763         trace_probe_log_init("trace_kprobe", argc, argv);
764
765         event = strchr(&argv[0][1], ':');
766         if (event)
767                 event++;
768
769         if (isdigit(argv[0][1])) {
770                 if (!is_return) {
771                         trace_probe_log_err(1, MAXACT_NO_KPROBE);
772                         goto parse_error;
773                 }
774                 if (event)
775                         len = event - &argv[0][1] - 1;
776                 else
777                         len = strlen(&argv[0][1]);
778                 if (len > MAX_EVENT_NAME_LEN - 1) {
779                         trace_probe_log_err(1, BAD_MAXACT);
780                         goto parse_error;
781                 }
782                 memcpy(buf, &argv[0][1], len);
783                 buf[len] = '\0';
784                 ret = kstrtouint(buf, 0, &maxactive);
785                 if (ret || !maxactive) {
786                         trace_probe_log_err(1, BAD_MAXACT);
787                         goto parse_error;
788                 }
789                 /* kretprobe instances are iterated over via a list. The
790                  * maximum should stay reasonable.
791                  */
792                 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
793                         trace_probe_log_err(1, MAXACT_TOO_BIG);
794                         goto parse_error;
795                 }
796         }
797
798         /* Try to parse an address. If that fails, try to read the
799          * input as a symbol. */
800         if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
801                 trace_probe_log_set_index(1);
802                 /* Check whether uprobe event specified */
803                 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
804                         ret = -ECANCELED;
805                         goto error;
806                 }
807                 /* a symbol specified */
808                 symbol = kstrdup(argv[1], GFP_KERNEL);
809                 if (!symbol)
810                         return -ENOMEM;
811
812                 tmp = strchr(symbol, '%');
813                 if (tmp) {
814                         if (!strcmp(tmp, "%return")) {
815                                 *tmp = '\0';
816                                 is_return = true;
817                         } else {
818                                 trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
819                                 goto parse_error;
820                         }
821                 }
822
823                 /* TODO: support .init module functions */
824                 ret = traceprobe_split_symbol_offset(symbol, &offset);
825                 if (ret || offset < 0 || offset > UINT_MAX) {
826                         trace_probe_log_err(0, BAD_PROBE_ADDR);
827                         goto parse_error;
828                 }
829                 if (is_return)
830                         flags |= TPARG_FL_RETURN;
831                 ret = kprobe_on_func_entry(NULL, symbol, offset);
832                 if (ret == 0)
833                         flags |= TPARG_FL_FENTRY;
834         /* Defer the ENOENT case until registering the kprobe */
835                 if (ret == -EINVAL && is_return) {
836                         trace_probe_log_err(0, BAD_RETPROBE);
837                         goto parse_error;
838                 }
839         }
840
841         trace_probe_log_set_index(0);
842         if (event) {
843                 ret = traceprobe_parse_event_name(&event, &group, buf,
844                                                   event - argv[0]);
845                 if (ret)
846                         goto parse_error;
847         } else {
848                 /* Make a new event name */
849                 if (symbol)
850                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
851                                  is_return ? 'r' : 'p', symbol, offset);
852                 else
853                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
854                                  is_return ? 'r' : 'p', addr);
855                 sanitize_event_name(buf);
856                 event = buf;
857         }
858
859         /* setup a probe */
860         tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
861                                argc - 2, is_return);
862         if (IS_ERR(tk)) {
863                 ret = PTR_ERR(tk);
864                 /* This must return -ENOMEM, else there is a bug */
865                 WARN_ON_ONCE(ret != -ENOMEM);
866                 goto out;       /* We know tk is not allocated */
867         }
868         argc -= 2; argv += 2;
869
870         /* parse arguments */
871         for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
872                 tmp = kstrdup(argv[i], GFP_KERNEL);
873                 if (!tmp) {
874                         ret = -ENOMEM;
875                         goto error;
876                 }
877
878                 trace_probe_log_set_index(i + 2);
879                 ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
880                 kfree(tmp);
881                 if (ret)
882                         goto error;     /* This can be -ENOMEM */
883         }
884
885         ret = traceprobe_set_print_fmt(&tk->tp, is_return);
886         if (ret < 0)
887                 goto error;
888
889         ret = register_trace_kprobe(tk);
890         if (ret) {
891                 trace_probe_log_set_index(1);
892                 if (ret == -EILSEQ)
893                         trace_probe_log_err(0, BAD_INSN_BNDRY);
894                 else if (ret == -ENOENT)
895                         trace_probe_log_err(0, BAD_PROBE_ADDR);
896                 else if (ret != -ENOMEM && ret != -EEXIST)
897                         trace_probe_log_err(0, FAIL_REG_PROBE);
898                 goto error;
899         }
900
901 out:
902         trace_probe_log_clear();
903         kfree(symbol);
904         return ret;
905
906 parse_error:
907         ret = -EINVAL;
908 error:
909         free_trace_kprobe(tk);
910         goto out;
911 }
912
913 static int create_or_delete_trace_kprobe(int argc, char **argv)
914 {
915         int ret;
916
917         if (argv[0][0] == '-')
918                 return dyn_event_release(argc, argv, &trace_kprobe_ops);
919
920         ret = trace_kprobe_create(argc, (const char **)argv);
921         return ret == -ECANCELED ? -EINVAL : ret;
922 }
923
924 static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
925 {
926         return trace_run_command(cmd->seq.buffer, create_or_delete_trace_kprobe);
927 }
928
929 /**
930  * kprobe_event_cmd_init - Initialize a kprobe event command object
931  * @cmd: A pointer to the dynevent_cmd struct representing the new event
932  * @buf: A pointer to the buffer used to build the command
933  * @maxlen: The length of the buffer passed in @buf
934  *
935  * Initialize a kprobe event command object.  Use this before
936  * calling any of the other kprobe_event functions.
937  */
938 void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
939 {
940         dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
941                           trace_kprobe_run_command);
942 }
943 EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
944
945 /**
946  * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
947  * @cmd: A pointer to the dynevent_cmd struct representing the new event
948  * @name: The name of the kprobe event
949  * @loc: The location of the kprobe event
950  * @kretprobe: Is this a return probe?
951  * @args: Variable number of arg (pairs), one pair for each field
952  *
953  * NOTE: Users normally won't want to call this function directly, but
954  * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
955  * adds a NULL to the end of the arg list.  If this function is used
956  * directly, make sure the last arg in the variable arg list is NULL.
957  *
958  * Generate a kprobe event command to be executed by
959  * kprobe_event_gen_cmd_end().  This function can be used to generate the
960  * complete command or only the first part of it; in the latter case,
961  * kprobe_event_add_fields() can be used to add more fields following this.
962  *
963  * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
964  * function returns -EINVAL if @loc == NULL.
965  *
966  * Return: 0 if successful, error otherwise.
967  */
968 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
969                                  const char *name, const char *loc, ...)
970 {
971         char buf[MAX_EVENT_NAME_LEN];
972         struct dynevent_arg arg;
973         va_list args;
974         int ret;
975
976         if (cmd->type != DYNEVENT_TYPE_KPROBE)
977                 return -EINVAL;
978
979         if (!loc)
980                 return -EINVAL;
981
982         if (kretprobe)
983                 snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
984         else
985                 snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
986
987         ret = dynevent_str_add(cmd, buf);
988         if (ret)
989                 return ret;
990
991         dynevent_arg_init(&arg, 0);
992         arg.str = loc;
993         ret = dynevent_arg_add(cmd, &arg, NULL);
994         if (ret)
995                 return ret;
996
997         va_start(args, loc);
998         for (;;) {
999                 const char *field;
1000
1001                 field = va_arg(args, const char *);
1002                 if (!field)
1003                         break;
1004
1005                 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1006                         ret = -EINVAL;
1007                         break;
1008                 }
1009
1010                 arg.str = field;
1011                 ret = dynevent_arg_add(cmd, &arg, NULL);
1012                 if (ret)
1013                         break;
1014         }
1015         va_end(args);
1016
1017         return ret;
1018 }
1019 EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
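/*
 * Minimal usage sketch (the event name, probe location, and fetch args
 * below are illustrative and x86-flavored; error handling is elided).
 * The same pattern appears in samples/kprobe_event_gen_test.c:
 *
 *	struct dynevent_cmd cmd;
 *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	int ret;
 *
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = kprobe_event_gen_cmd_start(&cmd, "test_probe", "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */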
1020
1021 /**
1022  * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
1023  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1024  * @args: Variable number of arg (pairs), one pair for each field
1025  *
1026  * NOTE: Users normally won't want to call this function directly, but
1027  * rather use the kprobe_event_add_fields() wrapper, which
1028  * automatically adds a NULL to the end of the arg list.  If this
1029  * function is used directly, make sure the last arg in the variable
1030  * arg list is NULL.
1031  *
1032  * Add probe fields to an existing kprobe command using a variable
1033  * list of args.  Fields are added in the same order they're listed.
1034  *
1035  * Return: 0 if successful, error otherwise.
1036  */
1037 int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
1038 {
1039         struct dynevent_arg arg;
1040         va_list args;
1041         int ret = 0;
1042
1043         if (cmd->type != DYNEVENT_TYPE_KPROBE)
1044                 return -EINVAL;
1045
1046         dynevent_arg_init(&arg, 0);
1047
1048         va_start(args, cmd);
1049         for (;;) {
1050                 const char *field;
1051
1052                 field = va_arg(args, const char *);
1053                 if (!field)
1054                         break;
1055
1056                 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1057                         ret = -EINVAL;
1058                         break;
1059                 }
1060
1061                 arg.str = field;
1062                 ret = dynevent_arg_add(cmd, &arg, NULL);
1063                 if (ret)
1064                         break;
1065         }
1066         va_end(args);
1067
1068         return ret;
1069 }
1070 EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
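/*
 * Sketch (continues a command begun with kprobe_event_gen_cmd_start();
 * the fetch args are illustrative, x86-flavored examples):
 *
 *	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
 */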
1071
1072 /**
1073  * kprobe_event_delete - Delete a kprobe event
1074  * @name: The name of the kprobe event to delete
1075  *
1076  * Delete a kprobe event with the given @name from kernel code rather
1077  * than directly from the command line.
1078  *
1079  * Return: 0 if successful, error otherwise.
1080  */
1081 int kprobe_event_delete(const char *name)
1082 {
1083         char buf[MAX_EVENT_NAME_LEN];
1084
1085         snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
1086
1087         return trace_run_command(buf, create_or_delete_trace_kprobe);
1088 }
1089 EXPORT_SYMBOL_GPL(kprobe_event_delete);
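/*
 * E.g., to remove the event generated in the sketch above (the name is
 * illustrative):
 *
 *	ret = kprobe_event_delete("test_probe");
 */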
1090
1091 static int trace_kprobe_release(struct dyn_event *ev)
1092 {
1093         struct trace_kprobe *tk = to_trace_kprobe(ev);
1094         int ret = unregister_trace_kprobe(tk);
1095
1096         if (!ret)
1097                 free_trace_kprobe(tk);
1098         return ret;
1099 }
1100
1101 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
1102 {
1103         struct trace_kprobe *tk = to_trace_kprobe(ev);
1104         int i;
1105
1106         seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
1107         if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
1108                 seq_printf(m, "%d", tk->rp.maxactive);
1109         seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
1110                                 trace_probe_name(&tk->tp));
1111
1112         if (!tk->symbol)
1113                 seq_printf(m, " 0x%p", tk->rp.kp.addr);
1114         else if (tk->rp.kp.offset)
1115                 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
1116                            tk->rp.kp.offset);
1117         else
1118                 seq_printf(m, " %s", trace_kprobe_symbol(tk));
1119
1120         for (i = 0; i < tk->tp.nr_args; i++)
1121                 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
1122         seq_putc(m, '\n');
1123
1124         return 0;
1125 }
1126
1127 static int probes_seq_show(struct seq_file *m, void *v)
1128 {
1129         struct dyn_event *ev = v;
1130
1131         if (!is_trace_kprobe(ev))
1132                 return 0;
1133
1134         return trace_kprobe_show(m, ev);
1135 }
1136
1137 static const struct seq_operations probes_seq_op = {
1138         .start  = dyn_event_seq_start,
1139         .next   = dyn_event_seq_next,
1140         .stop   = dyn_event_seq_stop,
1141         .show   = probes_seq_show
1142 };
1143
1144 static int probes_open(struct inode *inode, struct file *file)
1145 {
1146         int ret;
1147
1148         ret = security_locked_down(LOCKDOWN_TRACEFS);
1149         if (ret)
1150                 return ret;
1151
1152         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1153                 ret = dyn_events_release_all(&trace_kprobe_ops);
1154                 if (ret < 0)
1155                         return ret;
1156         }
1157
1158         return seq_open(file, &probes_seq_op);
1159 }
1160
1161 static ssize_t probes_write(struct file *file, const char __user *buffer,
1162                             size_t count, loff_t *ppos)
1163 {
1164         return trace_parse_run_command(file, buffer, count, ppos,
1165                                        create_or_delete_trace_kprobe);
1166 }
1167
1168 static const struct file_operations kprobe_events_ops = {
1169         .owner          = THIS_MODULE,
1170         .open           = probes_open,
1171         .read           = seq_read,
1172         .llseek         = seq_lseek,
1173         .release        = seq_release,
1174         .write          = probes_write,
1175 };
1176
1177 /* Probes profiling interfaces */
1178 static int probes_profile_seq_show(struct seq_file *m, void *v)
1179 {
1180         struct dyn_event *ev = v;
1181         struct trace_kprobe *tk;
1182
1183         if (!is_trace_kprobe(ev))
1184                 return 0;
1185
1186         tk = to_trace_kprobe(ev);
1187         seq_printf(m, "  %-44s %15lu %15lu\n",
1188                    trace_probe_name(&tk->tp),
1189                    trace_kprobe_nhit(tk),
1190                    tk->rp.kp.nmissed);
1191
1192         return 0;
1193 }
1194
1195 static const struct seq_operations profile_seq_op = {
1196         .start  = dyn_event_seq_start,
1197         .next   = dyn_event_seq_next,
1198         .stop   = dyn_event_seq_stop,
1199         .show   = probes_profile_seq_show
1200 };
1201
1202 static int profile_open(struct inode *inode, struct file *file)
1203 {
1204         int ret;
1205
1206         ret = security_locked_down(LOCKDOWN_TRACEFS);
1207         if (ret)
1208                 return ret;
1209
1210         return seq_open(file, &profile_seq_op);
1211 }
1212
1213 static const struct file_operations kprobe_profile_ops = {
1214         .owner          = THIS_MODULE,
1215         .open           = profile_open,
1216         .read           = seq_read,
1217         .llseek         = seq_lseek,
1218         .release        = seq_release,
1219 };
1220
1221 /* Kprobe-specific fetch functions */
1222
1223 /* Return the length of the string -- including the null terminator byte */
1224 static nokprobe_inline int
1225 fetch_store_strlen_user(unsigned long addr)
1226 {
1227         const void __user *uaddr =  (__force const void __user *)addr;
1228
1229         return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
1230 }
1231
1232 /* Return the length of the string -- including the null terminator byte */
1233 static nokprobe_inline int
1234 fetch_store_strlen(unsigned long addr)
1235 {
1236         int ret, len = 0;
1237         u8 c;
1238
1239 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1240         if (addr < TASK_SIZE)
1241                 return fetch_store_strlen_user(addr);
1242 #endif
1243
1244         do {
1245                 ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
1246                 len++;
1247         } while (c && ret == 0 && len < MAX_STRING_SIZE);
1248
1249         return (ret < 0) ? ret : len;
1250 }
1251
1252 /*
1253  * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
1254  * with max length and relative data location.
1255  */
1256 static nokprobe_inline int
1257 fetch_store_string_user(unsigned long addr, void *dest, void *base)
1258 {
1259         const void __user *uaddr =  (__force const void __user *)addr;
1260         int maxlen = get_loc_len(*(u32 *)dest);
1261         void *__dest;
1262         long ret;
1263
1264         if (unlikely(!maxlen))
1265                 return -ENOMEM;
1266
1267         __dest = get_loc_data(dest, base);
1268
1269         ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
1270         if (ret >= 0)
1271                 *(u32 *)dest = make_data_loc(ret, __dest - base);
1272
1273         return ret;
1274 }
1275
1276 /*
1277  * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
1278  * length and relative data location.
1279  */
1280 static nokprobe_inline int
1281 fetch_store_string(unsigned long addr, void *dest, void *base)
1282 {
1283         int maxlen = get_loc_len(*(u32 *)dest);
1284         void *__dest;
1285         long ret;
1286
1287 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1288         if ((unsigned long)addr < TASK_SIZE)
1289                 return fetch_store_string_user(addr, dest, base);
1290 #endif
1291
1292         if (unlikely(!maxlen))
1293                 return -ENOMEM;
1294
1295         __dest = get_loc_data(dest, base);
1296
1297         /*
1298          * Try to fetch the string again, since it can be changed while
1299          * we are probing.
1300          */
1301         ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
1302         if (ret >= 0)
1303                 *(u32 *)dest = make_data_loc(ret, __dest - base);
1304
1305         return ret;
1306 }
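/*
 * Layout sketch of the u32 data_loc word set by the string fetchers
 * above (as encoded by make_data_loc() in trace_probe.h): the upper
 * 16 bits carry the string length and the lower 16 bits the offset of
 * the string data relative to the entry base, so a reader recovers the
 * string at (char *)base + get_loc_offs(data_loc).
 */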
1307
1308 static nokprobe_inline int
1309 probe_mem_read_user(void *dest, void *src, size_t size)
1310 {
1311         const void __user *uaddr =  (__force const void __user *)src;
1312
1313         return copy_from_user_nofault(dest, uaddr, size);
1314 }
1315
1316 static nokprobe_inline int
1317 probe_mem_read(void *dest, void *src, size_t size)
1318 {
1319 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1320         if ((unsigned long)src < TASK_SIZE)
1321                 return probe_mem_read_user(dest, src, size);
1322 #endif
1323         return copy_from_kernel_nofault(dest, src, size);
1324 }
1325
1326 /* Note that we don't verify it, since the code does not come from user space */
1327 static int
1328 process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
1329                    void *base)
1330 {
1331         unsigned long val;
1332
1333 retry:
1334         /* 1st stage: get value from context */
1335         switch (code->op) {
1336         case FETCH_OP_REG:
1337                 val = regs_get_register(regs, code->param);
1338                 break;
1339         case FETCH_OP_STACK:
1340                 val = regs_get_kernel_stack_nth(regs, code->param);
1341                 break;
1342         case FETCH_OP_STACKP:
1343                 val = kernel_stack_pointer(regs);
1344                 break;
1345         case FETCH_OP_RETVAL:
1346                 val = regs_return_value(regs);
1347                 break;
1348         case FETCH_OP_IMM:
1349                 val = code->immediate;
1350                 break;
1351         case FETCH_OP_COMM:
1352                 val = (unsigned long)current->comm;
1353                 break;
1354         case FETCH_OP_DATA:
1355                 val = (unsigned long)code->data;
1356                 break;
1357 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1358         case FETCH_OP_ARG:
1359                 val = regs_get_kernel_argument(regs, code->param);
1360                 break;
1361 #endif
1362         case FETCH_NOP_SYMBOL:  /* Ignore a placeholder */
1363                 code++;
1364                 goto retry;
1365         default:
1366                 return -EILSEQ;
1367         }
1368         code++;
1369
1370         return process_fetch_insn_bottom(code, val, dest, base);
1371 }
1372 NOKPROBE_SYMBOL(process_fetch_insn)
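/*
 * Illustrative fetch program (a sketch; real programs are generated by
 * the argument parser in trace_probe.c): a spec like "+8(%di):u32"
 * compiles to roughly
 *
 *	{ .op = FETCH_OP_REG,    .param  = <regnum of di> } <- 1st stage above
 *	{ .op = FETCH_OP_DEREF,  .offset = 8 }              <- bottom half
 *	{ .op = FETCH_OP_ST_MEM, .size   = 4 }
 *	{ .op = FETCH_OP_END }
 */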
1373
1374 /* Kprobe handler */
1375 static nokprobe_inline void
1376 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1377                     struct trace_event_file *trace_file)
1378 {
1379         struct kprobe_trace_entry_head *entry;
1380         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1381         struct trace_event_buffer fbuffer;
1382         int dsize;
1383
1384         WARN_ON(call != trace_file->event_call);
1385
1386         if (trace_trigger_soft_disabled(trace_file))
1387                 return;
1388
1389         local_save_flags(fbuffer.flags);
1390         fbuffer.pc = preempt_count();
1391         fbuffer.trace_file = trace_file;
1392
1393         dsize = __get_data_size(&tk->tp, regs);
1394
1395         fbuffer.event =
1396                 trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
1397                                         call->event.type,
1398                                         sizeof(*entry) + tk->tp.size + dsize,
1399                                         fbuffer.flags, fbuffer.pc);
1400         if (!fbuffer.event)
1401                 return;
1402
1403         fbuffer.regs = regs;
1404         entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1405         entry->ip = (unsigned long)tk->rp.kp.addr;
1406         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1407
1408         trace_event_buffer_commit(&fbuffer);
1409 }
1410
1411 static void
1412 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1413 {
1414         struct event_file_link *link;
1415
1416         trace_probe_for_each_link_rcu(link, &tk->tp)
1417                 __kprobe_trace_func(tk, regs, link->file);
1418 }
1419 NOKPROBE_SYMBOL(kprobe_trace_func);
1420
1421 /* Kretprobe handler */
1422 static nokprobe_inline void
1423 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1424                        struct pt_regs *regs,
1425                        struct trace_event_file *trace_file)
1426 {
1427         struct kretprobe_trace_entry_head *entry;
1428         struct trace_event_buffer fbuffer;
1429         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1430         int dsize;
1431
1432         WARN_ON(call != trace_file->event_call);
1433
1434         if (trace_trigger_soft_disabled(trace_file))
1435                 return;
1436
1437         local_save_flags(fbuffer.flags);
1438         fbuffer.pc = preempt_count();
1439         fbuffer.trace_file = trace_file;
1440
1441         dsize = __get_data_size(&tk->tp, regs);
1442         fbuffer.event =
1443                 trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
1444                                         call->event.type,
1445                                         sizeof(*entry) + tk->tp.size + dsize,
1446                                         fbuffer.flags, fbuffer.pc);
1447         if (!fbuffer.event)
1448                 return;
1449
1450         fbuffer.regs = regs;
1451         entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1452         entry->func = (unsigned long)tk->rp.kp.addr;
1453         entry->ret_ip = (unsigned long)ri->ret_addr;
1454         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1455
1456         trace_event_buffer_commit(&fbuffer);
1457 }
1458
1459 static void
1460 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1461                      struct pt_regs *regs)
1462 {
1463         struct event_file_link *link;
1464
1465         trace_probe_for_each_link_rcu(link, &tk->tp)
1466                 __kretprobe_trace_func(tk, ri, regs, link->file);
1467 }
1468 NOKPROBE_SYMBOL(kretprobe_trace_func);
1469
1470 /* Event entry printers */
1471 static enum print_line_t
1472 print_kprobe_event(struct trace_iterator *iter, int flags,
1473                    struct trace_event *event)
1474 {
1475         struct kprobe_trace_entry_head *field;
1476         struct trace_seq *s = &iter->seq;
1477         struct trace_probe *tp;
1478
1479         field = (struct kprobe_trace_entry_head *)iter->ent;
1480         tp = trace_probe_primary_from_call(
1481                 container_of(event, struct trace_event_call, event));
1482         if (WARN_ON_ONCE(!tp))
1483                 goto out;
1484
1485         trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1486
1487         if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1488                 goto out;
1489
1490         trace_seq_putc(s, ')');
1491
1492         if (print_probe_args(s, tp->args, tp->nr_args,
1493                              (u8 *)&field[1], field) < 0)
1494                 goto out;
1495
1496         trace_seq_putc(s, '\n');
1497  out:
1498         return trace_handle_return(s);
1499 }
1500
1501 static enum print_line_t
1502 print_kretprobe_event(struct trace_iterator *iter, int flags,
1503                       struct trace_event *event)
1504 {
1505         struct kretprobe_trace_entry_head *field;
1506         struct trace_seq *s = &iter->seq;
1507         struct trace_probe *tp;
1508
1509         field = (struct kretprobe_trace_entry_head *)iter->ent;
1510         tp = trace_probe_primary_from_call(
1511                 container_of(event, struct trace_event_call, event));
1512         if (WARN_ON_ONCE(!tp))
1513                 goto out;
1514
1515         trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1516
1517         if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1518                 goto out;
1519
1520         trace_seq_puts(s, " <- ");
1521
1522         if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1523                 goto out;
1524
1525         trace_seq_putc(s, ')');
1526
1527         if (print_probe_args(s, tp->args, tp->nr_args,
1528                              (u8 *)&field[1], field) < 0)
1529                 goto out;
1530
1531         trace_seq_putc(s, '\n');
1532
1533  out:
1534         return trace_handle_return(s);
1535 }
1536
1537
1538 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1539 {
1540         int ret;
1541         struct kprobe_trace_entry_head field;
1542         struct trace_probe *tp;
1543
1544         tp = trace_probe_primary_from_call(event_call);
1545         if (WARN_ON_ONCE(!tp))
1546                 return -ENOENT;
1547
1548         DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1549
1550         return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1551 }
1552
1553 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1554 {
1555         int ret;
1556         struct kretprobe_trace_entry_head field;
1557         struct trace_probe *tp;
1558
1559         tp = trace_probe_primary_from_call(event_call);
1560         if (WARN_ON_ONCE(!tp))
1561                 return -ENOENT;
1562
1563         DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1564         DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1565
1566         return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1567 }

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (bpf_prog_array_valid(call)) {
                unsigned long orig_ip = instruction_pointer(regs);
                int ret;

                ret = trace_call_bpf(call, regs);

                /*
                 * Check whether the BPF program modified the pc of
                 * pt_regs; if it did, return 1 so that we skip the
                 * single stepping.
                 */
                if (orig_ip != instruction_pointer(regs))
                        return 1;
                if (!ret)
                        return 0;
        }

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return 0;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return 0;

        entry->ip = (unsigned long)tk->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
        return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);
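
/*
 * A worked example of the size arithmetic above (numbers assumed):
 * perf's raw sample area carries a u32 size field ahead of the data,
 * so for __size = 45, ALIGN(45 + 4, 8) = 56 and size = 56 - 4 = 52,
 * i.e. the u32 header plus payload together land on a u64 boundary.
 * The same computation is repeated in kretprobe_perf_func() below.
 */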

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                    struct pt_regs *regs)
{
        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
        struct kretprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
                return;

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;

        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);

int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
                        const char **symbol, u64 *probe_offset,
                        u64 *probe_addr, bool perf_type_tracepoint)
{
        const char *pevent = trace_event_name(event->tp_event);
        const char *group = event->tp_event->class->system;
        struct trace_kprobe *tk;

        if (perf_type_tracepoint)
                tk = find_trace_kprobe(pevent, group);
        else
                tk = trace_kprobe_primary_from_call(event->tp_event);
        if (!tk)
                return -EINVAL;

        *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
                                              : BPF_FD_TYPE_KPROBE;
        if (tk->symbol) {
                *symbol = tk->symbol;
                *probe_offset = tk->rp.kp.offset;
                *probe_addr = 0;
        } else {
                *symbol = NULL;
                *probe_offset = 0;
                *probe_addr = (unsigned long)tk->rp.kp.addr;
        }
        return 0;
}
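
/*
 * Hedged usage sketch (error handling elided): this helper serves the
 * bpf_task_fd_query() path, reporting either a symbol+offset pair or a
 * raw address (never both) for a kprobe-backed perf event. A caller
 * might do, roughly:
 *
 *      u32 fd_type;
 *      const char *sym;
 *      u64 off, addr;
 *
 *      if (!bpf_get_kprobe_info(event, &fd_type, &sym, &off, &addr, false))
 *              pr_info("%s: %s+%llu addr=0x%llx\n",
 *                      fd_type == BPF_FD_TYPE_KRETPROBE ? "kretprobe"
 *                                                       : "kprobe",
 *                      sym ? sym : "(addr)", off, addr);
 */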
#endif  /* CONFIG_PERF_EVENTS */

/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
                           enum trace_reg type, void *data)
{
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return enable_trace_kprobe(event, file);
        case TRACE_REG_UNREGISTER:
                return disable_trace_kprobe(event, file);

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return enable_trace_kprobe(event, NULL);
        case TRACE_REG_PERF_UNREGISTER:
                return disable_trace_kprobe(event, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
        int ret = 0;

        raw_cpu_inc(*tk->nhit);

        if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
                kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
        if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
                ret = kprobe_perf_func(tk, regs);
#endif
        return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);
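
/*
 * Note on the return value above: a nonzero return from a kprobe
 * pre-handler tells the kprobes core that the handler has already
 * changed regs->ip (here, a BPF program overriding the probed
 * function), so the saved instruction must not be single-stepped.
 */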

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct kretprobe *rp = get_kretprobe(ri);
        struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp);

        raw_cpu_inc(*tk->nhit);

        if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
                kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
        if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
                kretprobe_perf_func(tk, ri, regs);
#endif
        return 0;       /* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
        .trace          = print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
        .trace          = print_kprobe_event
};

static struct trace_event_fields kretprobe_fields_array[] = {
        { .type = TRACE_FUNCTION_TYPE,
          .define_fields = kretprobe_event_define_fields },
        {}
};

static struct trace_event_fields kprobe_fields_array[] = {
        { .type = TRACE_FUNCTION_TYPE,
          .define_fields = kprobe_event_define_fields },
        {}
};

static inline void init_trace_event_call(struct trace_kprobe *tk)
{
        struct trace_event_call *call = trace_probe_event_call(&tk->tp);

        if (trace_kprobe_is_return(tk)) {
                call->event.funcs = &kretprobe_funcs;
                call->class->fields_array = kretprobe_fields_array;
        } else {
                call->event.funcs = &kprobe_funcs;
                call->class->fields_array = kprobe_fields_array;
        }

        call->flags = TRACE_EVENT_FL_KPROBE;
        call->class->reg = kprobe_register;
}

static int register_kprobe_event(struct trace_kprobe *tk)
{
        init_trace_event_call(tk);

        return trace_probe_register_event_call(&tk->tp);
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
        return trace_probe_unregister_event_call(&tk->tp);
}

#ifdef CONFIG_PERF_EVENTS
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
                          bool is_return)
{
        struct trace_kprobe *tk;
        int ret;
        char *event;

        /*
         * Local trace_kprobes are not added to dyn_event, so they are never
         * searched in find_trace_kprobe(). Therefore, there is no concern
         * about duplicate names here.
         */
        event = func ? func : "DUMMY_EVENT";

        tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
                                offs, 0 /* maxactive */, 0 /* nargs */,
                                is_return);

        if (IS_ERR(tk)) {
                pr_info("Failed to allocate trace_probe.(%d)\n",
                        (int)PTR_ERR(tk));
                return ERR_CAST(tk);
        }

        init_trace_event_call(tk);

        if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
                ret = -ENOMEM;
                goto error;
        }

        ret = __register_trace_kprobe(tk);
        if (ret < 0)
                goto error;

        return trace_probe_event_call(&tk->tp);
error:
        free_trace_kprobe(tk);
        return ERR_PTR(ret);
}
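
/*
 * Hedged usage sketch (error handling elided): this is the path for
 * kprobes created via perf_event_open() rather than tracefs. A caller
 * pairs it with destroy_local_trace_kprobe() below, roughly:
 *
 *      struct trace_event_call *call;
 *
 *      call = create_local_trace_kprobe("do_sys_open", NULL, 0, false);
 *      if (!IS_ERR(call)) {
 *              ...
 *              destroy_local_trace_kprobe(call);
 *      }
 */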

void destroy_local_trace_kprobe(struct trace_event_call *event_call)
{
        struct trace_kprobe *tk;

        tk = trace_kprobe_primary_from_call(event_call);
        if (unlikely(!tk))
                return;

        if (trace_probe_is_enabled(&tk->tp)) {
                WARN_ON(1);
                return;
        }

        __unregister_trace_kprobe(tk);

        free_trace_kprobe(tk);
}
#endif /* CONFIG_PERF_EVENTS */

static __init void enable_boot_kprobe_events(void)
{
        struct trace_array *tr = top_trace_array();
        struct trace_event_file *file;
        struct trace_kprobe *tk;
        struct dyn_event *pos;

        mutex_lock(&event_mutex);
        for_each_trace_kprobe(tk, pos) {
                list_for_each_entry(file, &tr->events, list)
                        if (file->event_call == trace_probe_event_call(&tk->tp))
                                trace_event_enable_disable(file, 1, 0);
        }
        mutex_unlock(&event_mutex);
}

static __init void setup_boot_kprobe_events(void)
{
        char *p, *cmd = kprobe_boot_events_buf;
        int ret;

        strreplace(kprobe_boot_events_buf, ',', ' ');

        while (cmd && *cmd != '\0') {
                p = strchr(cmd, ';');
                if (p)
                        *p++ = '\0';

                ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
                if (ret)
                        pr_warn("Failed to add event(%d): %s\n", ret, cmd);

                cmd = p;
        }

        enable_boot_kprobe_events();
}
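
/*
 * Boot-time example (event spec hypothetical): on the kernel command
 * line, ',' stands in for spaces and ';' separates probe definitions,
 * so
 *
 *      kprobe_event=p,vfs_read,$arg1;r,vfs_read,$retval
 *
 * is rewritten above to "p vfs_read $arg1" and "r vfs_read $retval"
 * before being handed to create_or_delete_trace_kprobe().
 */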

/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * kprobe events in postcore_initcall without tracefs.
 */
static __init int init_kprobe_trace_early(void)
{
        int ret;

        ret = dyn_event_register(&trace_kprobe_ops);
        if (ret)
                return ret;

        if (register_module_notifier(&trace_kprobe_module_nb))
                return -EINVAL;

        return 0;
}
core_initcall(init_kprobe_trace_early);

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
        int ret;
        struct dentry *entry;

        ret = tracing_init_dentry();
        if (ret)
                return 0;

        /* Event list interface */
        entry = tracefs_create_file("kprobe_events", 0644, NULL,
                                    NULL, &kprobe_events_ops);
        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_events' entry\n");

        /* Profile interface */
        entry = tracefs_create_file("kprobe_profile", 0444, NULL,
                                    NULL, &kprobe_profile_ops);
        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_profile' entry\n");

        setup_boot_kprobe_events();

        return 0;
}
fs_initcall(init_kprobe_trace);
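
/*
 * Runtime example of the two files created above (probe name and
 * counts hypothetical; paths assume tracefs mounted at
 * /sys/kernel/tracing):
 *
 *      # echo 'p:myprobe do_sys_open' >> /sys/kernel/tracing/kprobe_events
 *      # cat /sys/kernel/tracing/kprobe_profile
 *        myprobe                                       13               0
 *
 * kprobe_profile reports, per event: name, hit count, and missed-probe
 * count.
 */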

#ifdef CONFIG_FTRACE_STARTUP_TEST
static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
        struct trace_event_file *file;

        list_for_each_entry(file, &tr->events, list)
                if (file->event_call == trace_probe_event_call(&tk->tp))
                        return file;

        return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
        int ret, warn = 0;
        int (*target)(int, int, int, int, int, int);
        struct trace_kprobe *tk;
        struct trace_event_file *file;

        if (tracing_is_disabled())
                return -ENODEV;

        if (tracing_selftest_disabled)
                return 0;

        target = kprobe_trace_selftest_target;

        pr_info("Testing kprobe tracing: ");

        ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
                                create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function entry.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(
                                        trace_probe_event_call(&tk->tp), file);
                }
        }

        ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
                                create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function return.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting 2nd new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(
                                        trace_probe_event_call(&tk->tp), file);
                }
        }

        if (warn)
                goto end;

        ret = target(1, 2, 3, 4, 5, 6);

        /*
         * Not expecting an error here: the check only prevents the
         * optimizer from removing the call to target(), as otherwise
         * the call has no side effects and would never be performed.
         */
        if (ret != 21)
                warn++;

        /* Disable trace points before removing them */
        tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(
                                trace_probe_event_call(&tk->tp), file);
        }

        tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting 2nd test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe2 hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(
                                trace_probe_event_call(&tk->tp), file);
        }

        ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

        ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

end:
        ret = dyn_events_release_all(&trace_kprobe_ops);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on cleaning up probes.\n");
                warn++;
        }
        /*
         * Wait for the optimizer work to finish. Otherwise it might fiddle
         * with probes in already-freed __init text.
         */
        wait_for_kprobe_optimizer();
        if (warn)
                pr_cont("NG: Some tests failed. Please check them.\n");
        else
                pr_cont("OK\n");
        return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif  /* CONFIG_FTRACE_STARTUP_TEST */