// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 */
#define pr_fmt(fmt)     "trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include "trace_dynevent.h"
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
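/*
 * Upper bound for a user-specified maxactive on kretprobes; kretprobe
 * instances are iterated over via a list, so the maximum must stay
 * reasonable (enforced in trace_kprobe_create()).
 */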
#define KRETPROBE_MAXACTIVE_MAX 4096

static int trace_kprobe_create(int argc, const char **argv);
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev);
static bool trace_kprobe_match(const char *system, const char *event,
                               struct dyn_event *ev);

static struct dyn_event_operations trace_kprobe_ops = {
        .create = trace_kprobe_create,
        .show = trace_kprobe_show,
        .is_busy = trace_kprobe_is_busy,
        .free = trace_kprobe_release,
        .match = trace_kprobe_match,
};

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
        struct dyn_event        devent;
        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
        unsigned long __percpu *nhit;
        const char              *symbol;        /* symbol name */
        struct trace_probe      tp;
};

static bool is_trace_kprobe(struct dyn_event *ev)
{
        return ev->ops == &trace_kprobe_ops;
}

static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
{
        return container_of(ev, struct trace_kprobe, devent);
}

/**
 * for_each_trace_kprobe - iterate over the trace_kprobe list
 * @pos:        the struct trace_kprobe * for each entry
 * @dpos:       the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_kprobe(pos, dpos)        \
        for_each_dyn_event(dpos)                \
                if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))

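/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *      struct dyn_event *pos;
 *      struct trace_kprobe *tk;
 *
 *      for_each_trace_kprobe(tk, pos)
 *              pr_info("kprobe event on %s\n", trace_kprobe_symbol(tk));
 */
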
#define SIZEOF_TRACE_KPROBE(n)                          \
        (offsetof(struct trace_kprobe, tp.args) +       \
        (sizeof(struct probe_arg) * (n)))

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
        return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
        return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
        return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
        return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
                                                 struct module *mod)
{
        int len = strlen(mod->name);
        const char *name = trace_kprobe_symbol(tk);
        return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
        char *p;
        bool ret;

        if (!tk->symbol)
                return false;
        p = strchr(tk->symbol, ':');
        if (!p)
                return true;
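        /* Temporarily cut "MOD:SYM" at the ':' so find_module() sees just MOD */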
        *p = '\0';
        mutex_lock(&module_mutex);
        ret = !!find_module(tk->symbol);
        mutex_unlock(&module_mutex);
        *p = ':';

        return ret;
}

static bool trace_kprobe_is_busy(struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);

        return trace_probe_is_enabled(&tk->tp);
}

static bool trace_kprobe_match(const char *system, const char *event,
                               struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);

        return strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
            (!system || strcmp(tk->tp.call.class->system, system) == 0);
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
        unsigned long nhit = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                nhit += *per_cpu_ptr(tk->nhit, cpu);

        return nhit;
}

/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
        unsigned long addr;

        if (tk->symbol) {
                addr = (unsigned long)
                        kallsyms_lookup_name(trace_kprobe_symbol(tk));
                if (addr)
                        addr += tk->rp.kp.offset;
        } else {
                addr = (unsigned long)tk->rp.kp.addr;
        }
        return addr;
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

        return kprobe_on_func_entry(tk->rp.kp.addr,
                        tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
                        tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

        return within_error_injection_list(trace_kprobe_address(tk));
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
                                             const char *event,
                                             void *addr,
                                             const char *symbol,
                                             unsigned long offs,
                                             int maxactive,
                                             int nargs, bool is_return)
{
        struct trace_kprobe *tk;
        int ret = -ENOMEM;

        tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
        if (!tk)
                return ERR_PTR(ret);

        tk->nhit = alloc_percpu(unsigned long);
        if (!tk->nhit)
                goto error;

        if (symbol) {
                tk->symbol = kstrdup(symbol, GFP_KERNEL);
                if (!tk->symbol)
                        goto error;
                tk->rp.kp.symbol_name = tk->symbol;
                tk->rp.kp.offset = offs;
        } else
                tk->rp.kp.addr = addr;

        if (is_return)
                tk->rp.handler = kretprobe_dispatcher;
        else
                tk->rp.kp.pre_handler = kprobe_dispatcher;

        tk->rp.maxactive = maxactive;

        if (!event || !is_good_name(event)) {
                ret = -EINVAL;
                goto error;
        }

        tk->tp.call.class = &tk->tp.class;
        tk->tp.call.name = kstrdup(event, GFP_KERNEL);
        if (!tk->tp.call.name)
                goto error;

        if (!group || !is_good_name(group)) {
                ret = -EINVAL;
                goto error;
        }

        tk->tp.class.system = kstrdup(group, GFP_KERNEL);
        if (!tk->tp.class.system)
                goto error;

        dyn_event_init(&tk->devent, &trace_kprobe_ops);
        INIT_LIST_HEAD(&tk->tp.files);
        return tk;
error:
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
        free_percpu(tk->nhit);
        kfree(tk);
        return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
        int i;

        if (!tk)
                return;

        for (i = 0; i < tk->tp.nr_args; i++)
                traceprobe_free_probe_arg(&tk->tp.args[i]);

        kfree(tk->tp.call.class->system);
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
        free_percpu(tk->nhit);
        kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
                                              const char *group)
{
        struct dyn_event *pos;
        struct trace_kprobe *tk;

        for_each_trace_kprobe(tk, pos)
                if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
                    strcmp(tk->tp.call.class->system, group) == 0)
                        return tk;
        return NULL;
}

static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
        int ret = 0;

        if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
                if (trace_kprobe_is_return(tk))
                        ret = enable_kretprobe(&tk->rp);
                else
                        ret = enable_kprobe(&tk->rp.kp);
        }

        return ret;
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler, otherwise enable
 * the "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
        struct event_file_link *link;
        int ret = 0;

        if (file) {
                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link) {
                        ret = -ENOMEM;
                        goto out;
                }

                link->file = file;
                list_add_tail_rcu(&link->list, &tk->tp.files);

                tk->tp.flags |= TP_FLAG_TRACE;
                ret = __enable_trace_kprobe(tk);
                if (ret) {
                        list_del_rcu(&link->list);
                        kfree(link);
                        tk->tp.flags &= ~TP_FLAG_TRACE;
                }

        } else {
                tk->tp.flags |= TP_FLAG_PROFILE;
                ret = __enable_trace_kprobe(tk);
                if (ret)
                        tk->tp.flags &= ~TP_FLAG_PROFILE;
        }
 out:
        return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler, otherwise disable
 * the "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
        struct event_file_link *link = NULL;
        int wait = 0;
        int ret = 0;

        if (file) {
                link = find_event_file_link(&tk->tp, file);
                if (!link) {
                        ret = -EINVAL;
                        goto out;
                }

                list_del_rcu(&link->list);
                wait = 1;
                if (!list_empty(&tk->tp.files))
                        goto out;

                tk->tp.flags &= ~TP_FLAG_TRACE;
        } else
                tk->tp.flags &= ~TP_FLAG_PROFILE;

        if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
                if (trace_kprobe_is_return(tk))
                        disable_kretprobe(&tk->rp);
                else
                        disable_kprobe(&tk->rp.kp);
                wait = 1;
        }

        /*
         * If tk is not added to any list, it must be a local trace_kprobe
         * created with perf_event_open. We don't need to wait for these
         * trace_kprobes.
         */
        if (list_empty(&tk->devent.list))
                wait = 0;
 out:
        if (wait) {
                /*
                 * Synchronize with kprobe_trace_func/kretprobe_trace_func
                 * to ensure the probe is disabled (all running handlers
                 * have finished). This is not only for kfree(), but also
                 * for the caller, trace_remove_event_call(), which relies
                 * on it when releasing event_call related objects that are
                 * accessed in kprobe_trace_func/kretprobe_trace_func.
                 */
                synchronize_rcu();
                kfree(link);    /* Ignored if link == NULL */
        }

        return ret;
}

#if defined(CONFIG_KPROBES_ON_FTRACE) && \
        !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool within_notrace_func(struct trace_kprobe *tk)
{
        unsigned long offset, size, addr;

        addr = trace_kprobe_address(tk);
        if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
                return false;

        /* Get the entry address of the target function */
        addr -= offset;

        /*
         * Since ftrace_location_range() does inclusive range check, we need
         * to subtract 1 byte from the end address.
         */
        return !ftrace_location_range(addr, addr + size - 1);
}
#else
#define within_notrace_func(tk) (false)
#endif

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
        int i, ret;

        if (trace_probe_is_registered(&tk->tp))
                return -EINVAL;

        if (within_notrace_func(tk)) {
                pr_warn("Could not probe notrace function %s\n",
                        trace_kprobe_symbol(tk));
                return -EINVAL;
        }

        for (i = 0; i < tk->tp.nr_args; i++) {
                ret = traceprobe_update_arg(&tk->tp.args[i]);
                if (ret)
                        return ret;
        }

        /* Set/clear disabled flag according to tp->flags */
        if (trace_probe_is_enabled(&tk->tp))
                tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
        else
                tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

        if (trace_kprobe_is_return(tk))
                ret = register_kretprobe(&tk->rp);
        else
                ret = register_kprobe(&tk->rp.kp);

        if (ret == 0) {
                tk->tp.flags |= TP_FLAG_REGISTERED;
        } else if (ret == -EILSEQ) {
                pr_warn("Probing address (0x%p) is not at an instruction boundary.\n",
                        tk->rp.kp.addr);
                ret = -EINVAL;
        }
        return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
        if (trace_probe_is_registered(&tk->tp)) {
                if (trace_kprobe_is_return(tk))
                        unregister_kretprobe(&tk->rp);
                else
                        unregister_kprobe(&tk->rp.kp);
                tk->tp.flags &= ~TP_FLAG_REGISTERED;
                /* Cleanup kprobe for reuse */
                if (tk->rp.kp.symbol_name)
                        tk->rp.kp.addr = NULL;
        }
}

/* Unregister a trace_probe and probe_event */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
        /* Enabled events cannot be unregistered */
        if (trace_probe_is_enabled(&tk->tp))
                return -EBUSY;

        /* Will fail if probe is being used by ftrace or perf */
        if (unregister_kprobe_event(tk))
                return -EBUSY;

        __unregister_trace_kprobe(tk);
        dyn_event_remove(&tk->devent);

        return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
        struct trace_kprobe *old_tk;
        int ret;

        mutex_lock(&event_mutex);

        /* Delete the old event if one with the same name exists */
        old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
                        tk->tp.call.class->system);
        if (old_tk) {
                ret = unregister_trace_kprobe(old_tk);
                if (ret < 0)
                        goto end;
                free_trace_kprobe(old_tk);
        }

        /* Register new event */
        ret = register_kprobe_event(tk);
        if (ret) {
                pr_warn("Failed to register probe event (%d)\n", ret);
                goto end;
        }

        /* Register k*probe */
        ret = __register_trace_kprobe(tk);
        if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
                pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
                ret = 0;
        }

        if (ret < 0)
                unregister_kprobe_event(tk);
        else
                dyn_event_add(&tk->devent);

end:
        mutex_unlock(&event_mutex);
        return ret;
}

/* Module notifier callback, checking events on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
                                       unsigned long val, void *data)
{
        struct module *mod = data;
        struct dyn_event *pos;
        struct trace_kprobe *tk;
        int ret;

        if (val != MODULE_STATE_COMING)
                return NOTIFY_DONE;

        /* Update probes on coming module */
        mutex_lock(&event_mutex);
        for_each_trace_kprobe(tk, pos) {
                if (trace_kprobe_within_module(tk, mod)) {
                        /* No need to check busy - this probe should have gone. */
                        __unregister_trace_kprobe(tk);
                        ret = __register_trace_kprobe(tk);
                        if (ret)
                                pr_warn("Failed to re-register probe %s on %s: %d\n",
                                        trace_event_name(&tk->tp.call),
                                        mod->name, ret);
                }
        }
        mutex_unlock(&event_mutex);

        return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
        .notifier_call = trace_kprobe_module_callback,
        .priority = 1   /* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
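/* e.g. a generated name "p_btrfs:btrfs_sync_fs_0" becomes "p_btrfs_btrfs_sync_fs_0" */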
static inline void sanitize_event_name(char *name)
{
        while (*name++ != '\0')
                if (*name == ':' || *name == '.')
                        *name = '_';
}

static int trace_kprobe_create(int argc, const char *argv[])
{
        /*
         * Argument syntax:
         *  - Add kprobe:
         *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
         *  - Add kretprobe:
         *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
         * Fetch args:
         *  $retval     : fetch return value
         *  $stack      : fetch stack address
         *  $stackN     : fetch Nth entry of stack (N >= 0)
         *  $comm       : fetch current task comm
         *  @ADDR       : fetch memory at ADDR (ADDR must be a kernel address)
         *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
         *  %REG        : fetch register REG
         * Dereferencing memory fetch:
         *  +|-offs(ARG) : fetch memory at address ARG +|- offs.
         * Alias name of args:
         *  NAME=FETCHARG : set NAME as an alias of FETCHARG.
         * Type of args:
         *  FETCHARG:TYPE : use TYPE instead of unsigned long.
         */
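        /*
         * For example (sample definitions in the style of
         * Documentation/trace/kprobetrace.rst):
         *
         *  p:myprobe do_sys_open dfd=%ax filename=%dx
         *  r:myretprobe do_sys_open $retval
         */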
        struct trace_kprobe *tk;
        int i, len, ret = 0;
        bool is_return = false;
        char *symbol = NULL, *tmp = NULL;
        const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
        int maxactive = 0;
        long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];
        unsigned int flags = TPARG_FL_KERNEL;

        switch (argv[0][0]) {
        case 'r':
                is_return = true;
                flags |= TPARG_FL_RETURN;
                break;
        case 'p':
                break;
        default:
                return -ECANCELED;
        }
        if (argc < 2)
                return -ECANCELED;

        event = strchr(&argv[0][1], ':');
        if (event)
                event++;

        if (is_return && isdigit(argv[0][1])) {
                if (event)
                        len = event - &argv[0][1] - 1;
                else
                        len = strlen(&argv[0][1]);
                if (len > MAX_EVENT_NAME_LEN - 1)
                        return -E2BIG;
                memcpy(buf, &argv[0][1], len);
                buf[len] = '\0';
                ret = kstrtouint(buf, 0, &maxactive);
                if (ret) {
                        pr_info("Failed to parse maxactive.\n");
                        return ret;
                }
                /*
                 * kretprobe instances are iterated over via a list; the
                 * maximum should stay reasonable.
                 */
                if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
                        pr_info("Maxactive is too big (%d > %d).\n",
                                maxactive, KRETPROBE_MAXACTIVE_MAX);
                        return -E2BIG;
                }
        }

        /*
         * Try to parse an address. If that fails, try to read the
         * input as a symbol.
         */
        if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
                /* Check whether uprobe event specified */
                if (strchr(argv[1], '/') && strchr(argv[1], ':'))
                        return -ECANCELED;
                /* a symbol specified */
                symbol = kstrdup(argv[1], GFP_KERNEL);
                if (!symbol)
                        return -ENOMEM;
                /* TODO: support .init module functions */
                ret = traceprobe_split_symbol_offset(symbol, &offset);
                if (ret || offset < 0 || offset > UINT_MAX) {
                        pr_info("Failed to parse either an address or a symbol.\n");
                        goto out;
                }
                if (kprobe_on_func_entry(NULL, symbol, offset))
                        flags |= TPARG_FL_FENTRY;
                if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
                        pr_info("Given offset is not valid for return probe.\n");
                        ret = -EINVAL;
                        goto out;
                }
        }
        argc -= 2; argv += 2;

        if (event) {
                ret = traceprobe_parse_event_name(&event, &group, buf);
                if (ret)
                        goto out;
        } else {
                /* Make a new event name */
                if (symbol)
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
                                 is_return ? 'r' : 'p', symbol, offset);
                else
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
                                 is_return ? 'r' : 'p', addr);
                sanitize_event_name(buf);
                event = buf;
        }

        /* setup a probe */
        tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
                               argc, is_return);
        if (IS_ERR(tk)) {
                pr_info("Failed to allocate trace_probe (%d)\n",
                        (int)PTR_ERR(tk));
                ret = PTR_ERR(tk);
                goto out;
        }

        /* parse arguments */
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
                tmp = kstrdup(argv[i], GFP_KERNEL);
                if (!tmp) {
                        ret = -ENOMEM;
                        goto error;
                }

                ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
                kfree(tmp);
                if (ret)
                        goto error;
        }

        ret = register_trace_kprobe(tk);
        if (ret)
                goto error;
out:
        kfree(symbol);
        return ret;

error:
        free_trace_kprobe(tk);
        goto out;
}

static int create_or_delete_trace_kprobe(int argc, char **argv)
{
        int ret;

        if (argv[0][0] == '-')
                return dyn_event_release(argc, argv, &trace_kprobe_ops);

        ret = trace_kprobe_create(argc, (const char **)argv);
        return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_kprobe_release(struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);
        int ret = unregister_trace_kprobe(tk);

        if (!ret)
                free_trace_kprobe(tk);
        return ret;
}

static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);
        int i;

        seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
        seq_printf(m, ":%s/%s", tk->tp.call.class->system,
                        trace_event_name(&tk->tp.call));

        if (!tk->symbol)
                seq_printf(m, " 0x%p", tk->rp.kp.addr);
        else if (tk->rp.kp.offset)
                seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
                           tk->rp.kp.offset);
        else
                seq_printf(m, " %s", trace_kprobe_symbol(tk));

        for (i = 0; i < tk->tp.nr_args; i++)
                seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
        seq_putc(m, '\n');

        return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;

        if (!is_trace_kprobe(ev))
                return 0;

        return trace_kprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
        int ret;

        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&trace_kprobe_ops);
                if (ret < 0)
                        return ret;
        }

        return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        return trace_parse_run_command(file, buffer, count, ppos,
                                       create_or_delete_trace_kprobe);
}
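
/*
 * For example, from userspace (paths assume tracefs is mounted at
 * /sys/kernel/debug/tracing):
 *
 *      echo 'p:myprobe do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
 *      echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
 */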

static const struct file_operations kprobe_events_ops = {
        .owner          = THIS_MODULE,
        .open           = probes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .write          = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;
        struct trace_kprobe *tk;

        if (!is_trace_kprobe(ev))
                return 0;

        tk = to_trace_kprobe(ev);
        seq_printf(m, "  %-44s %15lu %15lu\n",
                   trace_event_name(&tk->tp.call),
                   trace_kprobe_nhit(tk),
                   tk->rp.kp.nmissed);

        return 0;
}
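
/*
 * With the format above, a kprobe_profile line reads, for a hypothetical
 * event named "myprobe" with 10 hits and 0 misses:
 *
 *   myprobe                                                  10               0
 */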

static const struct seq_operations profile_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
        .owner          = THIS_MODULE,
        .open           = profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

/* Kprobe specific fetch functions */

/* Return the length of the string, including the terminating NUL byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
        int ret, len = 0;
        u8 c;

        do {
                ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
                len++;
        } while (c && ret == 0 && len < MAX_STRING_SIZE);

        return (ret < 0) ? ret : len;
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
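        /*
         * The u32 at *dest uses the __data_loc convention: data length in
         * the upper 16 bits, buffer-relative offset in the lower 16 bits
         * (see make_data_loc()/get_loc_len() in trace_probe.h).
         */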
        int maxlen = get_loc_len(*(u32 *)dest);
        u8 *dst = get_loc_data(dest, base);
        long ret;

        if (unlikely(!maxlen))
                return -ENOMEM;
        /*
         * Try to get string again, since the string can be changed while
         * probing.
         */
        ret = strncpy_from_unsafe(dst, (void *)addr, maxlen);

        if (ret >= 0)
                *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
        return ret;
}

static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
        return probe_kernel_read(dest, src, size);
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
                   void *base)
{
        unsigned long val;

retry:
        /* 1st stage: get value from context */
        switch (code->op) {
        case FETCH_OP_REG:
                val = regs_get_register(regs, code->param);
                break;
        case FETCH_OP_STACK:
                val = regs_get_kernel_stack_nth(regs, code->param);
                break;
        case FETCH_OP_STACKP:
                val = kernel_stack_pointer(regs);
                break;
        case FETCH_OP_RETVAL:
                val = regs_return_value(regs);
                break;
        case FETCH_OP_IMM:
                val = code->immediate;
                break;
        case FETCH_OP_COMM:
                val = (unsigned long)current->comm;
                break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
        case FETCH_OP_ARG:
                val = regs_get_kernel_argument(regs, code->param);
                break;
#endif
        case FETCH_NOP_SYMBOL:  /* Ignore a place holder */
                code++;
                goto retry;
        default:
                return -EILSEQ;
        }
        code++;

        return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn);

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
                    struct trace_event_file *trace_file)
{
        struct kprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, dsize, pc;
        unsigned long irq_flags;
        struct trace_event_call *call = &tk->tp.call;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        dsize = __get_data_size(&tk->tp, regs);
        size = sizeof(*entry) + tk->tp.size + dsize;

        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type,
                                                size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->ip = (unsigned long)tk->rp.kp.addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

        event_trigger_unlock_commit_regs(trace_file, buffer, event,
                                         entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct event_file_link *link;

        list_for_each_entry_rcu(link, &tk->tp.files, list)
                __kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                       struct pt_regs *regs,
                       struct trace_event_file *trace_file)
{
        struct kretprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, pc, dsize;
        unsigned long irq_flags;
        struct trace_event_call *call = &tk->tp.call;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        dsize = __get_data_size(&tk->tp, regs);
        size = sizeof(*entry) + tk->tp.size + dsize;

        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type,
                                                size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

        event_trigger_unlock_commit_regs(trace_file, buffer, event,
                                         entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                     struct pt_regs *regs)
{
        struct event_file_link *link;

        list_for_each_entry_rcu(link, &tk->tp.files, list)
                __kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct kprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;

        field = (struct kprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);

        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        if (print_probe_args(s, tp->args, tp->nr_args,
                             (u8 *)&field[1], field) < 0)
                goto out;

        trace_seq_putc(s, '\n');
 out:
        return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
                      struct trace_event *event)
{
        struct kretprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;

        field = (struct kretprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);

        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_puts(s, " <- ");

        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        if (print_probe_args(s, tp->args, tp->nr_args,
                             (u8 *)&field[1], field) < 0)
                goto out;

        trace_seq_putc(s, '\n');

 out:
        return trace_handle_return(s);
}

static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret;
        struct kprobe_trace_entry_head field;
        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

        return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret;
        struct kretprobe_trace_entry_head field;
        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

        return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct trace_event_call *call = &tk->tp.call;
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (bpf_prog_array_valid(call)) {
                unsigned long orig_ip = instruction_pointer(regs);
                int ret;

                ret = trace_call_bpf(call, regs);

                /*
                 * We need to check and see if we modified the pc of the
                 * pt_regs, and if so return 1 so that we don't do the
                 * single stepping.
                 */
                if (orig_ip != instruction_pointer(regs))
                        return 1;
                if (!ret)
                        return 0;
        }

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return 0;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return 0;

        entry->ip = (unsigned long)tk->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
        return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                    struct pt_regs *regs)
{
        struct trace_event_call *call = &tk->tp.call;
        struct kretprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
                return;

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;

        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);

int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
                        const char **symbol, u64 *probe_offset,
                        u64 *probe_addr, bool perf_type_tracepoint)
{
        const char *pevent = trace_event_name(event->tp_event);
        const char *group = event->tp_event->class->system;
        struct trace_kprobe *tk;

        if (perf_type_tracepoint)
                tk = find_trace_kprobe(pevent, group);
        else
                tk = event->tp_event->data;
        if (!tk)
                return -EINVAL;

        *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
                                              : BPF_FD_TYPE_KPROBE;
        if (tk->symbol) {
                *symbol = tk->symbol;
                *probe_offset = tk->rp.kp.offset;
                *probe_addr = 0;
        } else {
                *symbol = NULL;
                *probe_offset = 0;
                *probe_addr = (unsigned long)tk->rp.kp.addr;
        }
        return 0;
}
#endif  /* CONFIG_PERF_EVENTS */

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
                           enum trace_reg type, void *data)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return enable_trace_kprobe(tk, file);
        case TRACE_REG_UNREGISTER:
                return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return enable_trace_kprobe(tk, NULL);
        case TRACE_REG_PERF_UNREGISTER:
                return disable_trace_kprobe(tk, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
        int ret = 0;

        raw_cpu_inc(*tk->nhit);

        if (tk->tp.flags & TP_FLAG_TRACE)
                kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
                ret = kprobe_perf_func(tk, regs);
#endif
        return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

        raw_cpu_inc(*tk->nhit);

        if (tk->tp.flags & TP_FLAG_TRACE)
                kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
                kretprobe_perf_func(tk, ri, regs);
#endif
        return 0;       /* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
        .trace          = print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
        .trace          = print_kprobe_event
};

static inline void init_trace_event_call(struct trace_kprobe *tk,
                                         struct trace_event_call *call)
{
        INIT_LIST_HEAD(&call->class->fields);
        if (trace_kprobe_is_return(tk)) {
                call->event.funcs = &kretprobe_funcs;
                call->class->define_fields = kretprobe_event_define_fields;
        } else {
                call->event.funcs = &kprobe_funcs;
                call->class->define_fields = kprobe_event_define_fields;
        }

        call->flags = TRACE_EVENT_FL_KPROBE;
        call->class->reg = kprobe_register;
        call->data = tk;
}

static int register_kprobe_event(struct trace_kprobe *tk)
{
        struct trace_event_call *call = &tk->tp.call;
        int ret = 0;

        init_trace_event_call(tk, call);

        if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
                return -ENOMEM;
        ret = register_trace_event(&call->event);
        if (!ret) {
                kfree(call->print_fmt);
                return -ENODEV;
        }
        ret = trace_add_event_call(call);
        if (ret) {
                pr_info("Failed to register kprobe event: %s\n",
                        trace_event_name(call));
                kfree(call->print_fmt);
                unregister_trace_event(&call->event);
        }
        return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
        int ret;

        /* tp->event is unregistered in trace_remove_event_call() */
        ret = trace_remove_event_call(&tk->tp.call);
        if (!ret)
                kfree(tk->tp.call.print_fmt);
        return ret;
}

#ifdef CONFIG_PERF_EVENTS
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
                          bool is_return)
{
        struct trace_kprobe *tk;
        int ret;
        char *event;

        /*
         * local trace_kprobes are not added to dyn_event, so they are never
         * searched in find_trace_kprobe(). Therefore, there is no concern
         * about duplicate names here.
         */
        event = func ? func : "DUMMY_EVENT";

        tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
                                offs, 0 /* maxactive */, 0 /* nargs */,
                                is_return);

        if (IS_ERR(tk)) {
                pr_info("Failed to allocate trace_probe (%d)\n",
                        (int)PTR_ERR(tk));
                return ERR_CAST(tk);
        }

        init_trace_event_call(tk, &tk->tp.call);

        if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
                ret = -ENOMEM;
                goto error;
        }

        ret = __register_trace_kprobe(tk);
        if (ret < 0) {
                kfree(tk->tp.call.print_fmt);
                goto error;
        }

        return &tk->tp.call;
error:
        free_trace_kprobe(tk);
        return ERR_PTR(ret);
}

void destroy_local_trace_kprobe(struct trace_event_call *event_call)
{
        struct trace_kprobe *tk;

        tk = container_of(event_call, struct trace_kprobe, tp.call);

        if (trace_probe_is_enabled(&tk->tp)) {
                WARN_ON(1);
                return;
        }

        __unregister_trace_kprobe(tk);

        kfree(tk->tp.call.print_fmt);
        free_trace_kprobe(tk);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;
        int ret;

        ret = dyn_event_register(&trace_kprobe_ops);
        if (ret)
                return ret;

        if (register_module_notifier(&trace_kprobe_module_nb))
                return -EINVAL;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
                                    NULL, &kprobe_events_ops);

        /* Event list interface */
        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_events' entry\n");

        /* Profile interface */
        entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
                                    NULL, &kprobe_profile_ops);

        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
        return 0;
}
fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST
static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
        struct trace_event_file *file;

        list_for_each_entry(file, &tr->events, list)
                if (file->event_call == &tk->tp.call)
                        return file;

        return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this locklessly.
 */
static __init int kprobe_trace_self_tests_init(void)
{
        int ret, warn = 0;
        int (*target)(int, int, int, int, int, int);
        struct trace_kprobe *tk;
        struct trace_event_file *file;

        if (tracing_is_disabled())
                return -ENODEV;

        target = kprobe_trace_selftest_target;

        pr_info("Testing kprobe tracing: ");

        ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
                                create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function entry.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(tk, file);
                }
        }

        ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
                                create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function return.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting 2nd new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(tk, file);
                }
        }

        if (warn)
                goto end;

        ret = target(1, 2, 3, 4, 5, 6);

        /*
         * Not expecting an error here; the check is only to prevent the
         * optimizer from removing the call to target(), as otherwise there
         * are no side-effects and the call is never performed.
         */
        if (ret != 21)
                warn++;

        /* Disable trace points before removing them */
        tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(tk, file);
        }

        tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting 2nd test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe2 hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(tk, file);
        }

        ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

        ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

end:
        ret = dyn_events_release_all(&trace_kprobe_ops);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on cleaning up probes.\n");
                warn++;
        }
        /*
         * Wait for the optimizer work to finish. Otherwise it might fiddle
         * with probes in already freed __init text.
         */
        wait_for_kprobe_optimizer();
        if (warn)
                pr_cont("NG: Some tests failed. Please check them.\n");
        else
                pr_cont("OK\n");
        return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif