// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 */
#define pr_fmt(fmt)     "trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
        struct list_head        list;
        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
        unsigned long __percpu *nhit;
        const char              *symbol;        /* symbol name */
        struct trace_probe      tp;
};

#define SIZEOF_TRACE_KPROBE(n)                          \
        (offsetof(struct trace_kprobe, tp.args) +       \
        (sizeof(struct probe_arg) * (n)))
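
/*
 * For example, SIZEOF_TRACE_KPROBE(2) is the offset of the trailing
 * tp.args[] array plus room for two struct probe_arg entries; this is
 * how alloc_trace_kprobe() below sizes its kzalloc() allocation.
 */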

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
        return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
        return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
        return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
        return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
                                                 struct module *mod)
{
        int len = strlen(mod->name);
        const char *name = trace_kprobe_symbol(tk);
        return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
        char *p;
        bool ret;

        if (!tk->symbol)
                return false;
        p = strchr(tk->symbol, ':');
        if (!p)
                return true;
        *p = '\0';
        mutex_lock(&module_mutex);
        ret = !!find_module(tk->symbol);
        mutex_unlock(&module_mutex);
        *p = ':';

        return ret;
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
        unsigned long nhit = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                nhit += *per_cpu_ptr(tk->nhit, cpu);

        return nhit;
}

/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
        unsigned long addr;

        if (tk->symbol) {
                addr = (unsigned long)
                        kallsyms_lookup_name(trace_kprobe_symbol(tk));
                if (addr)
                        addr += tk->rp.kp.offset;
        } else {
                addr = (unsigned long)tk->rp.kp.addr;
        }
        return addr;
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

        return kprobe_on_func_entry(tk->rp.kp.addr,
                        tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
                        tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

        return within_error_injection_list(trace_kprobe_address(tk));
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
                                             const char *event,
                                             void *addr,
                                             const char *symbol,
                                             unsigned long offs,
                                             int maxactive,
                                             int nargs, bool is_return)
{
        struct trace_kprobe *tk;
        int ret = -ENOMEM;

        tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
        if (!tk)
                return ERR_PTR(ret);

        tk->nhit = alloc_percpu(unsigned long);
        if (!tk->nhit)
                goto error;

        if (symbol) {
                tk->symbol = kstrdup(symbol, GFP_KERNEL);
                if (!tk->symbol)
                        goto error;
                tk->rp.kp.symbol_name = tk->symbol;
                tk->rp.kp.offset = offs;
        } else
                tk->rp.kp.addr = addr;

        if (is_return)
                tk->rp.handler = kretprobe_dispatcher;
        else
                tk->rp.kp.pre_handler = kprobe_dispatcher;

        tk->rp.maxactive = maxactive;

        if (!event || !is_good_name(event)) {
                ret = -EINVAL;
                goto error;
        }

        tk->tp.call.class = &tk->tp.class;
        tk->tp.call.name = kstrdup(event, GFP_KERNEL);
        if (!tk->tp.call.name)
                goto error;

        if (!group || !is_good_name(group)) {
                ret = -EINVAL;
                goto error;
        }

        tk->tp.class.system = kstrdup(group, GFP_KERNEL);
        if (!tk->tp.class.system)
                goto error;

        INIT_LIST_HEAD(&tk->list);
        INIT_LIST_HEAD(&tk->tp.files);
        return tk;
error:
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
        free_percpu(tk->nhit);
        kfree(tk);
        return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
        int i;

        for (i = 0; i < tk->tp.nr_args; i++)
                traceprobe_free_probe_arg(&tk->tp.args[i]);

        kfree(tk->tp.call.class->system);
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
        free_percpu(tk->nhit);
        kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
                                              const char *group)
{
        struct trace_kprobe *tk;

        list_for_each_entry(tk, &probe_list, list)
                if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
                    strcmp(tk->tp.call.class->system, group) == 0)
                        return tk;
        return NULL;
}

static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
        int ret = 0;

        if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
                if (trace_kprobe_is_return(tk))
                        ret = enable_kretprobe(&tk->rp);
                else
                        ret = enable_kprobe(&tk->rp.kp);
        }

        return ret;
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
        struct event_file_link *link;
        int ret = 0;

        if (file) {
                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link) {
                        ret = -ENOMEM;
                        goto out;
                }

                link->file = file;
                list_add_tail_rcu(&link->list, &tk->tp.files);

                tk->tp.flags |= TP_FLAG_TRACE;
                ret = __enable_trace_kprobe(tk);
                if (ret) {
                        list_del_rcu(&link->list);
                        kfree(link);
                        tk->tp.flags &= ~TP_FLAG_TRACE;
                }

        } else {
                tk->tp.flags |= TP_FLAG_PROFILE;
                ret = __enable_trace_kprobe(tk);
                if (ret)
                        tk->tp.flags &= ~TP_FLAG_PROFILE;
        }
 out:
        return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
        struct event_file_link *link = NULL;
        int wait = 0;
        int ret = 0;

        if (file) {
                link = find_event_file_link(&tk->tp, file);
                if (!link) {
                        ret = -EINVAL;
                        goto out;
                }

                list_del_rcu(&link->list);
                wait = 1;
                if (!list_empty(&tk->tp.files))
                        goto out;

                tk->tp.flags &= ~TP_FLAG_TRACE;
        } else
                tk->tp.flags &= ~TP_FLAG_PROFILE;

        if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
                if (trace_kprobe_is_return(tk))
                        disable_kretprobe(&tk->rp);
                else
                        disable_kprobe(&tk->rp.kp);
                wait = 1;
        }

        /*
         * If tk is not added to any list, it must be a local trace_kprobe
         * created with perf_event_open. We don't need to wait for these
         * trace_kprobes.
         */
        if (list_empty(&tk->list))
                wait = 0;
 out:
        if (wait) {
                /*
                 * Synchronize with kprobe_trace_func/kretprobe_trace_func
                 * to ensure the probe is really disabled (all running
                 * handlers have finished). This is needed not only for
                 * the kfree() below, but also for the caller:
                 * trace_remove_event_call() relies on it when releasing
                 * event_call related objects, which are accessed in
                 * kprobe_trace_func/kretprobe_trace_func.
                 */
                synchronize_rcu();
                kfree(link);    /* Ignored if link == NULL */
        }

        return ret;
}

#if defined(CONFIG_KPROBES_ON_FTRACE) && \
        !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool within_notrace_func(struct trace_kprobe *tk)
{
        unsigned long offset, size, addr;

        addr = trace_kprobe_address(tk);
        if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
                return false;

        /* Get the entry address of the target function */
        addr -= offset;

        /*
         * Since ftrace_location_range() performs an inclusive range check,
         * we need to subtract 1 byte from the end address.
         */
        return !ftrace_location_range(addr, addr + size - 1);
}
#else
#define within_notrace_func(tk) (false)
#endif

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
        int i, ret;

        if (trace_probe_is_registered(&tk->tp))
                return -EINVAL;

        if (within_notrace_func(tk)) {
                pr_warn("Could not probe notrace function %s\n",
                        trace_kprobe_symbol(tk));
                return -EINVAL;
        }

        for (i = 0; i < tk->tp.nr_args; i++) {
                ret = traceprobe_update_arg(&tk->tp.args[i]);
                if (ret)
                        return ret;
        }

        /* Set/clear disabled flag according to tp->flag */
        if (trace_probe_is_enabled(&tk->tp))
                tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
        else
                tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

        if (trace_kprobe_is_return(tk))
                ret = register_kretprobe(&tk->rp);
        else
                ret = register_kprobe(&tk->rp.kp);

        if (ret == 0) {
                tk->tp.flags |= TP_FLAG_REGISTERED;
        } else if (ret == -EILSEQ) {
                pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
                        tk->rp.kp.addr);
                ret = -EINVAL;
        }
        return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
        if (trace_probe_is_registered(&tk->tp)) {
                if (trace_kprobe_is_return(tk))
                        unregister_kretprobe(&tk->rp);
                else
                        unregister_kprobe(&tk->rp.kp);
                tk->tp.flags &= ~TP_FLAG_REGISTERED;
                /* Cleanup kprobe for reuse */
                if (tk->rp.kp.symbol_name)
                        tk->rp.kp.addr = NULL;
        }
}

/* Unregister a trace_probe and probe_event: must be called with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
        /* Enabled event can not be unregistered */
        if (trace_probe_is_enabled(&tk->tp))
                return -EBUSY;

        /* Will fail if probe is being used by ftrace or perf */
        if (unregister_kprobe_event(tk))
                return -EBUSY;

        __unregister_trace_kprobe(tk);
        list_del(&tk->list);

        return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
        struct trace_kprobe *old_tk;
        int ret;

        mutex_lock(&probe_lock);

        /* Delete the old event if one with the same name exists */
        old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
                        tk->tp.call.class->system);
        if (old_tk) {
                ret = unregister_trace_kprobe(old_tk);
                if (ret < 0)
                        goto end;
                free_trace_kprobe(old_tk);
        }

        /* Register new event */
        ret = register_kprobe_event(tk);
        if (ret) {
                pr_warn("Failed to register probe event(%d)\n", ret);
                goto end;
        }

        /* Register k*probe */
        ret = __register_trace_kprobe(tk);
        if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
                pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
                ret = 0;
        }

        if (ret < 0)
                unregister_kprobe_event(tk);
        else
                list_add_tail(&tk->list, &probe_list);

end:
        mutex_unlock(&probe_lock);
        return ret;
}

/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
                                       unsigned long val, void *data)
{
        struct module *mod = data;
        struct trace_kprobe *tk;
        int ret;

        if (val != MODULE_STATE_COMING)
                return NOTIFY_DONE;

        /* Update probes on coming module */
        mutex_lock(&probe_lock);
        list_for_each_entry(tk, &probe_list, list) {
                if (trace_kprobe_within_module(tk, mod)) {
                        /* No need to check busy - this probe should already have gone. */
                        __unregister_trace_kprobe(tk);
                        ret = __register_trace_kprobe(tk);
                        if (ret)
                                pr_warn("Failed to re-register probe %s on %s: %d\n",
                                        trace_event_name(&tk->tp.call),
                                        mod->name, ret);
                }
        }
        mutex_unlock(&probe_lock);

        return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
        .notifier_call = trace_kprobe_module_callback,
        .priority = 1   /* Invoked after kprobe module callback */
};

/* Convert ':' and '.' (expected in symbol names) into '_' when generating event names */
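/*
 * e.g. an auto-generated event name like "p_ext4:ext4_sync_file_0"
 * (hypothetical symbol, for illustration) becomes
 * "p_ext4_ext4_sync_file_0".
 */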
static inline void sanitize_event_name(char *name)
{
        while (*name++ != '\0')
                if (*name == ':' || *name == '.')
                        *name = '_';
}

static int create_trace_kprobe(int argc, char **argv)
{
        /*
         * Argument syntax:
         *  - Add kprobe:
         *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
         *  - Add kretprobe:
         *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
         * Fetch args:
         *  $retval     : fetch return value
         *  $stack      : fetch stack address
         *  $stackN     : fetch Nth of stack (N:0-)
         *  $comm       : fetch current task comm
         *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
         *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
         *  %REG        : fetch register REG
         * Dereferencing memory fetch:
         *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
         * Alias name of args:
         *  NAME=FETCHARG : set NAME as alias of FETCHARG.
         * Type of args:
         *  FETCHARG:TYPE : use TYPE instead of unsigned long.
         */
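        /*
         * Typical usage via tracefs, following the syntax above (the
         * symbol and x86 register names are for illustration only):
         *
         *   echo 'p:myprobe do_sys_open dfd=%ax filename=+0(%si):string' \
         *           >> /sys/kernel/debug/tracing/kprobe_events
         *   echo 'r:myretprobe do_sys_open $retval' \
         *           >> /sys/kernel/debug/tracing/kprobe_events
         *   echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
         */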
        struct trace_kprobe *tk;
        int i, ret = 0;
        bool is_return = false, is_delete = false;
        char *symbol = NULL, *event = NULL, *group = NULL;
        int maxactive = 0;
        char *arg;
        long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];
        unsigned int flags = TPARG_FL_KERNEL;

        /* argc must be >= 1 */
        if (argv[0][0] == 'p')
                is_return = false;
        else if (argv[0][0] == 'r') {
                is_return = true;
                flags |= TPARG_FL_RETURN;
        } else if (argv[0][0] == '-')
                is_delete = true;
        else {
                pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
                return -EINVAL;
        }

        event = strchr(&argv[0][1], ':');
        if (event) {
                event[0] = '\0';
                event++;
        }
        if (is_return && isdigit(argv[0][1])) {
                ret = kstrtouint(&argv[0][1], 0, &maxactive);
                if (ret) {
                        pr_info("Failed to parse maxactive.\n");
                        return ret;
                }
                /*
                 * kretprobe instances are iterated over via a list. The
                 * maximum should stay reasonable.
                 */
                if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
                        pr_info("Maxactive is too big (%d > %d).\n",
                                maxactive, KRETPROBE_MAXACTIVE_MAX);
                        return -E2BIG;
                }
        }

        if (event) {
                char *slash;

                slash = strchr(event, '/');
                if (slash) {
                        group = event;
                        event = slash + 1;
                        slash[0] = '\0';
                        if (strlen(group) == 0) {
                                pr_info("Group name is not specified\n");
                                return -EINVAL;
                        }
                }
                if (strlen(event) == 0) {
                        pr_info("Event name is not specified\n");
                        return -EINVAL;
                }
        }
        if (!group)
                group = KPROBE_EVENT_SYSTEM;

        if (is_delete) {
                if (!event) {
                        pr_info("Delete command needs an event name.\n");
                        return -EINVAL;
                }
                mutex_lock(&probe_lock);
                tk = find_trace_kprobe(event, group);
                if (!tk) {
                        mutex_unlock(&probe_lock);
                        pr_info("Event %s/%s doesn't exist.\n", group, event);
                        return -ENOENT;
                }
                /* delete an event */
                ret = unregister_trace_kprobe(tk);
                if (ret == 0)
                        free_trace_kprobe(tk);
                mutex_unlock(&probe_lock);
                return ret;
        }

        if (argc < 2) {
                pr_info("Probe point is not specified.\n");
                return -EINVAL;
        }

        /*
         * Try to parse an address. If that fails, try to read the
         * input as a symbol.
         */
        if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
                /* a symbol specified */
                symbol = argv[1];
                /* TODO: support .init module functions */
                ret = traceprobe_split_symbol_offset(symbol, &offset);
                if (ret || offset < 0 || offset > UINT_MAX) {
                        pr_info("Failed to parse either an address or a symbol.\n");
                        return ret;
                }
                if (kprobe_on_func_entry(NULL, symbol, offset))
                        flags |= TPARG_FL_FENTRY;
                if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
                        pr_info("Given offset is not valid for return probe.\n");
                        return -EINVAL;
                }
        }
        argc -= 2; argv += 2;

        /* setup a probe */
        if (!event) {
                /* Make a new event name */
                if (symbol)
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
                                 is_return ? 'r' : 'p', symbol, offset);
                else
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
                                 is_return ? 'r' : 'p', addr);
                sanitize_event_name(buf);
                event = buf;
        }
        tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
                               argc, is_return);
        if (IS_ERR(tk)) {
                pr_info("Failed to allocate trace_probe.(%d)\n",
                        (int)PTR_ERR(tk));
                return PTR_ERR(tk);
        }

        /* parse arguments */
        ret = 0;
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
                struct probe_arg *parg = &tk->tp.args[i];

                /* Increment count for freeing args in error case */
                tk->tp.nr_args++;

                /* Parse argument name */
                arg = strchr(argv[i], '=');
                if (arg) {
                        *arg++ = '\0';
                        parg->name = kstrdup(argv[i], GFP_KERNEL);
                } else {
                        arg = argv[i];
                        /* If argument name is omitted, set "argN" */
                        snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
                        parg->name = kstrdup(buf, GFP_KERNEL);
                }

                if (!parg->name) {
                        pr_info("Failed to allocate argument[%d] name.\n", i);
                        ret = -ENOMEM;
                        goto error;
                }

                if (!is_good_name(parg->name)) {
                        pr_info("Invalid argument[%d] name: %s\n",
                                i, parg->name);
                        ret = -EINVAL;
                        goto error;
                }

                if (traceprobe_conflict_field_name(parg->name,
                                                        tk->tp.args, i)) {
                        pr_info("Argument[%d] name '%s' conflicts with "
                                "another field.\n", i, argv[i]);
                        ret = -EINVAL;
                        goto error;
                }

                /* Parse fetch argument */
                ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
                                                 flags);
                if (ret) {
                        pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
                        goto error;
                }
        }

        ret = register_trace_kprobe(tk);
        if (ret)
                goto error;
        return 0;

error:
        free_trace_kprobe(tk);
        return ret;
}

static int release_all_trace_kprobes(void)
{
        struct trace_kprobe *tk;
        int ret = 0;

        mutex_lock(&probe_lock);
        /* Ensure no probe is in use. */
        list_for_each_entry(tk, &probe_list, list)
                if (trace_probe_is_enabled(&tk->tp)) {
                        ret = -EBUSY;
                        goto end;
                }
        /* TODO: Use batch unregistration */
        while (!list_empty(&probe_list)) {
                tk = list_entry(probe_list.next, struct trace_kprobe, list);
                ret = unregister_trace_kprobe(tk);
                if (ret)
                        goto end;
                free_trace_kprobe(tk);
        }

end:
        mutex_unlock(&probe_lock);

        return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&probe_lock);
        return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
        mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
        struct trace_kprobe *tk = v;
        int i;

        seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
        seq_printf(m, ":%s/%s", tk->tp.call.class->system,
                        trace_event_name(&tk->tp.call));

        if (!tk->symbol)
                seq_printf(m, " 0x%p", tk->rp.kp.addr);
        else if (tk->rp.kp.offset)
                seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
                           tk->rp.kp.offset);
        else
                seq_printf(m, " %s", trace_kprobe_symbol(tk));

        for (i = 0; i < tk->tp.nr_args; i++)
                seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
        seq_putc(m, '\n');

        return 0;
}

static const struct seq_operations probes_seq_op = {
        .start  = probes_seq_start,
        .next   = probes_seq_next,
        .stop   = probes_seq_stop,
        .show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
        int ret;

        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = release_all_trace_kprobes();
                if (ret < 0)
                        return ret;
        }

        return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        return trace_parse_run_command(file, buffer, count, ppos,
                                       create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
        .owner          = THIS_MODULE,
        .open           = probes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .write          = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
        struct trace_kprobe *tk = v;

        seq_printf(m, "  %-44s %15lu %15lu\n",
                   trace_event_name(&tk->tp.call),
                   trace_kprobe_nhit(tk),
                   tk->rp.kp.nmissed);

        return 0;
}

static const struct seq_operations profile_seq_op = {
        .start  = probes_seq_start,
        .next   = probes_seq_next,
        .stop   = probes_seq_stop,
        .show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
        .owner          = THIS_MODULE,
        .open           = profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

/* Kprobe specific fetch functions */

/* Return the length of the string, including the terminating NUL byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
        mm_segment_t old_fs;
        int ret, len = 0;
        u8 c;

        old_fs = get_fs();
        set_fs(KERNEL_DS);
        pagefault_disable();

        do {
                ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
                len++;
        } while (c && ret == 0 && len < MAX_STRING_SIZE);

        pagefault_enable();
        set_fs(old_fs);

        return (ret < 0) ? ret : len;
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
        int maxlen = get_loc_len(*(u32 *)dest);
        u8 *dst = get_loc_data(dest, base);
        long ret;

        if (unlikely(!maxlen))
                return -ENOMEM;
        /*
         * Try to get string again, since the string can be changed while
         * probing.
         */
        ret = strncpy_from_unsafe(dst, (void *)addr, maxlen);

        if (ret >= 0)
                *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
        return ret;
}

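/*
 * For reference, a sketch of the data-location encoding assumed above
 * (see the make_data_loc()/get_loc_len() helpers in trace_probe.h):
 * the u32 packs the string length in the high 16 bits and the
 * buffer-relative offset in the low 16 bits, roughly
 * (len << 16) | (offs & 0xffff).
 */
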
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
        return probe_kernel_read(dest, src, size);
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
                   void *base)
{
        unsigned long val;

retry:
        /* 1st stage: get value from context */
        switch (code->op) {
        case FETCH_OP_REG:
                val = regs_get_register(regs, code->param);
                break;
        case FETCH_OP_STACK:
                val = regs_get_kernel_stack_nth(regs, code->param);
                break;
        case FETCH_OP_STACKP:
                val = kernel_stack_pointer(regs);
                break;
        case FETCH_OP_RETVAL:
                val = regs_return_value(regs);
                break;
        case FETCH_OP_IMM:
                val = code->immediate;
                break;
        case FETCH_OP_COMM:
                val = (unsigned long)current->comm;
                break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
        case FETCH_OP_ARG:
                val = regs_get_kernel_argument(regs, code->param);
                break;
#endif
        case FETCH_NOP_SYMBOL:  /* Ignore a place holder */
                code++;
                goto retry;
        default:
                return -EILSEQ;
        }
        code++;

        return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn);

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
                    struct trace_event_file *trace_file)
{
        struct kprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, dsize, pc;
        unsigned long irq_flags;
        struct trace_event_call *call = &tk->tp.call;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        dsize = __get_data_size(&tk->tp, regs);
        size = sizeof(*entry) + tk->tp.size + dsize;

        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type,
                                                size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->ip = (unsigned long)tk->rp.kp.addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

        event_trigger_unlock_commit_regs(trace_file, buffer, event,
                                         entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct event_file_link *link;

        list_for_each_entry_rcu(link, &tk->tp.files, list)
                __kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                       struct pt_regs *regs,
                       struct trace_event_file *trace_file)
{
        struct kretprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, pc, dsize;
        unsigned long irq_flags;
        struct trace_event_call *call = &tk->tp.call;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        dsize = __get_data_size(&tk->tp, regs);
        size = sizeof(*entry) + tk->tp.size + dsize;

        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type,
                                                size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

        event_trigger_unlock_commit_regs(trace_file, buffer, event,
                                         entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                     struct pt_regs *regs)
{
        struct event_file_link *link;

        list_for_each_entry_rcu(link, &tk->tp.files, list)
                __kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct kprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;

        field = (struct kprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);

        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        if (print_probe_args(s, tp->args, tp->nr_args,
                             (u8 *)&field[1], field) < 0)
                goto out;

        trace_seq_putc(s, '\n');
 out:
        return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
                      struct trace_event *event)
{
        struct kretprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;

        field = (struct kretprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);

        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_puts(s, " <- ");

        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        if (print_probe_args(s, tp->args, tp->nr_args,
                             (u8 *)&field[1], field) < 0)
                goto out;

        trace_seq_putc(s, '\n');

 out:
        return trace_handle_return(s);
}

static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret;
        struct kprobe_trace_entry_head field;
        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

        return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret;
        struct kretprobe_trace_entry_head field;
        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

        return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct trace_event_call *call = &tk->tp.call;
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (bpf_prog_array_valid(call)) {
                unsigned long orig_ip = instruction_pointer(regs);
                int ret;

                ret = trace_call_bpf(call, regs);

                /*
                 * We need to check and see if we modified the pc of the
                 * pt_regs, and if so return 1 so that we don't do the
                 * single stepping.
                 */
                if (orig_ip != instruction_pointer(regs))
                        return 1;
                if (!ret)
                        return 0;
        }

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return 0;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return 0;

        entry->ip = (unsigned long)tk->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
        return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                    struct pt_regs *regs)
{
        struct trace_event_call *call = &tk->tp.call;
        struct kretprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
                return;

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;

        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);

int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
                        const char **symbol, u64 *probe_offset,
                        u64 *probe_addr, bool perf_type_tracepoint)
{
        const char *pevent = trace_event_name(event->tp_event);
        const char *group = event->tp_event->class->system;
        struct trace_kprobe *tk;

        if (perf_type_tracepoint)
                tk = find_trace_kprobe(pevent, group);
        else
                tk = event->tp_event->data;
        if (!tk)
                return -EINVAL;

        *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
                                              : BPF_FD_TYPE_KPROBE;
        if (tk->symbol) {
                *symbol = tk->symbol;
                *probe_offset = tk->rp.kp.offset;
                *probe_addr = 0;
        } else {
                *symbol = NULL;
                *probe_offset = 0;
                *probe_addr = (unsigned long)tk->rp.kp.addr;
        }
        return 0;
}
#endif  /* CONFIG_PERF_EVENTS */

/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * locklessly, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
                           enum trace_reg type, void *data)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return enable_trace_kprobe(tk, file);
        case TRACE_REG_UNREGISTER:
                return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return enable_trace_kprobe(tk, NULL);
        case TRACE_REG_PERF_UNREGISTER:
                return disable_trace_kprobe(tk, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
        int ret = 0;

        raw_cpu_inc(*tk->nhit);

        if (tk->tp.flags & TP_FLAG_TRACE)
                kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
                ret = kprobe_perf_func(tk, regs);
#endif
        return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

        raw_cpu_inc(*tk->nhit);

        if (tk->tp.flags & TP_FLAG_TRACE)
                kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
                kretprobe_perf_func(tk, ri, regs);
#endif
        return 0;       /* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
        .trace          = print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
        .trace          = print_kprobe_event
};

static inline void init_trace_event_call(struct trace_kprobe *tk,
                                         struct trace_event_call *call)
{
        INIT_LIST_HEAD(&call->class->fields);
        if (trace_kprobe_is_return(tk)) {
                call->event.funcs = &kretprobe_funcs;
                call->class->define_fields = kretprobe_event_define_fields;
        } else {
                call->event.funcs = &kprobe_funcs;
                call->class->define_fields = kprobe_event_define_fields;
        }

        call->flags = TRACE_EVENT_FL_KPROBE;
        call->class->reg = kprobe_register;
        call->data = tk;
}

static int register_kprobe_event(struct trace_kprobe *tk)
{
        struct trace_event_call *call = &tk->tp.call;
        int ret = 0;

        init_trace_event_call(tk, call);

        if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
                return -ENOMEM;
        ret = register_trace_event(&call->event);
        if (!ret) {
                kfree(call->print_fmt);
                return -ENODEV;
        }
        ret = trace_add_event_call(call);
        if (ret) {
                pr_info("Failed to register kprobe event: %s\n",
                        trace_event_name(call));
                kfree(call->print_fmt);
                unregister_trace_event(&call->event);
        }
        return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
        int ret;

        /* tp->event is unregistered in trace_remove_event_call() */
        ret = trace_remove_event_call(&tk->tp.call);
        if (!ret)
                kfree(tk->tp.call.print_fmt);
        return ret;
}

#ifdef CONFIG_PERF_EVENTS
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
                          bool is_return)
{
        struct trace_kprobe *tk;
        int ret;
        char *event;

        /*
         * Local trace_kprobes are not added to probe_list, so they are never
         * searched in find_trace_kprobe(). Therefore, there is no concern
         * about duplicate names here.
         */
        event = func ? func : "DUMMY_EVENT";

        tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
                                offs, 0 /* maxactive */, 0 /* nargs */,
                                is_return);

        if (IS_ERR(tk)) {
                pr_info("Failed to allocate trace_probe.(%d)\n",
                        (int)PTR_ERR(tk));
                return ERR_CAST(tk);
        }

        init_trace_event_call(tk, &tk->tp.call);

        if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
                ret = -ENOMEM;
                goto error;
        }

        ret = __register_trace_kprobe(tk);
        if (ret < 0) {
                kfree(tk->tp.call.print_fmt);
                goto error;
        }

        return &tk->tp.call;
error:
        free_trace_kprobe(tk);
        return ERR_PTR(ret);
}

void destroy_local_trace_kprobe(struct trace_event_call *event_call)
{
        struct trace_kprobe *tk;

        tk = container_of(event_call, struct trace_kprobe, tp.call);

        if (trace_probe_is_enabled(&tk->tp)) {
                WARN_ON(1);
                return;
        }

        __unregister_trace_kprobe(tk);

        kfree(tk->tp.call.print_fmt);
        free_trace_kprobe(tk);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        if (register_module_notifier(&trace_kprobe_module_nb))
                return -EINVAL;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
                                    NULL, &kprobe_events_ops);

        /* Event list interface */
        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_events' entry\n");

        /* Profile interface */
        entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
                                    NULL, &kprobe_profile_ops);

        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
        return 0;
}
fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST
static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
        struct trace_event_file *file;

        list_for_each_entry(file, &tr->events, list)
                if (file->event_call == &tk->tp.call)
                        return file;

        return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this locklessly.
 */
static __init int kprobe_trace_self_tests_init(void)
{
        int ret, warn = 0;
        int (*target)(int, int, int, int, int, int);
        struct trace_kprobe *tk;
        struct trace_event_file *file;

        if (tracing_is_disabled())
                return -ENODEV;

        target = kprobe_trace_selftest_target;

        pr_info("Testing kprobe tracing: ");

        ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
                                "$stack $stack0 +0($stack)",
                                create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function entry.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(tk, file);
                }
        }

        ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
                                "$retval", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function return.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting 2nd new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(tk, file);
                }
        }

        if (warn)
                goto end;

        ret = target(1, 2, 3, 4, 5, 6);

        /*
         * Not expecting an error here, the check is only to prevent the
         * optimizer from removing the call to target() as otherwise there
         * are no side-effects and the call is never performed.
         */
        if (ret != 21)
                warn++;

        /* Disable trace points before removing them */
        tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(tk, file);
        }

        tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting 2nd test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe2 hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(tk, file);
        }

        ret = trace_run_command("-:testprobe", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

        ret = trace_run_command("-:testprobe2", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

end:
        release_all_trace_kprobes();
        /*
         * Wait for the optimizer work to finish. Otherwise it might fiddle
         * with probes in already freed __init text.
         */
        wait_for_kprobe_optimizer();
        if (warn)
                pr_cont("NG: Some tests failed. Please check them.\n");
        else
                pr_cont("OK\n");
        return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif