kernel/trace/trace_kprobe.c
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define pr_fmt(fmt)     "trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/*
 * Kprobe event core functions
 */
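/*
 * A trace_kprobe ties a kprobe or kretprobe to a trace_probe, which
 * carries the trace event definition and its fetch arguments. The same
 * struct serves both probe flavours: rp.kp is used directly for plain
 * kprobes, and rp.handler is only set for return probes (which is what
 * trace_kprobe_is_return() below keys off).
 */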
struct trace_kprobe {
        struct list_head        list;
        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
        unsigned long __percpu *nhit;
        const char              *symbol;        /* symbol name */
        struct trace_probe      tp;
};

#define SIZEOF_TRACE_KPROBE(n)                          \
        (offsetof(struct trace_kprobe, tp.args) +       \
        (sizeof(struct probe_arg) * (n)))
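/*
 * tp.args is the flexible array at the end of struct trace_probe, so
 * e.g. SIZEOF_TRACE_KPROBE(2) is the offset of that trailing array
 * plus room for two probe_arg entries.
 */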

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
        return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
        return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
        return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
        return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
                                                 struct module *mod)
{
        int len = strlen(mod->name);
        const char *name = trace_kprobe_symbol(tk);
        return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
        return !!strchr(trace_kprobe_symbol(tk), ':');
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
        unsigned long nhit = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                nhit += *per_cpu_ptr(tk->nhit, cpu);

        return nhit;
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
        char            *symbol;
        long            offset;
        unsigned long   addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
        sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

        if (sc->addr)
                sc->addr += sc->offset;

        return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
        kfree(sc->symbol);
        kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
        struct symbol_cache *sc;

        if (!sym || strlen(sym) == 0)
                return NULL;

        sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->symbol = kstrdup(sym, GFP_KERNEL);
        if (!sc->symbol) {
                kfree(sc);
                return NULL;
        }
        sc->offset = offset;
        update_symbol_cache(sc);

        return sc;
}
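
/*
 * The lookup above is done once at allocation time; if symbol addresses
 * can change later (e.g. across a module load), callers presumably
 * refresh the cached address via update_symbol_cache() so that sc->addr
 * keeps tracking sc->symbol + sc->offset.
 */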

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)                                        \
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,          \
                                          void *offset, void *dest)     \
{                                                                       \
        *(type *)dest = (type)regs_get_kernel_stack_nth(regs,           \
                                (unsigned int)((unsigned long)offset)); \
}                                                                       \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
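
/*
 * FETCH_FUNC_NAME(stack, type) (from trace_probe.h) expands to
 * fetch_stack_##type, so DEFINE_FETCH_stack(u32), for instance, defines
 * fetch_stack_u32(), which reads the Nth entry of the kernel stack.
 */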

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string      NULL
#define fetch_stack_string_size NULL

#define DEFINE_FETCH_memory(type)                                       \
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,         \
                                          void *addr, void *dest)       \
{                                                                       \
        type retval;                                                    \
        if (probe_kernel_address(addr, retval))                         \
                *(type *)dest = 0;                                      \
        else                                                            \
                *(type *)dest = retval;                                 \
}                                                                       \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
                                            void *addr, void *dest)
{
        int maxlen = get_rloc_len(*(u32 *)dest);
        u8 *dst = get_rloc_data(dest);
        long ret;

        if (!maxlen)
                return;

        /*
         * Try to get string again, since the string can be changed while
         * probing.
         */
        ret = strncpy_from_unsafe(dst, addr, maxlen);

        if (ret < 0) {  /* Failed to fetch string */
                dst[0] = '\0';
                *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
        } else {
                *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
        }
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of the string -- including the terminating null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
                                                 void *addr, void *dest)
{
        mm_segment_t old_fs;
        int ret, len = 0;
        u8 c;

        old_fs = get_fs();
        set_fs(KERNEL_DS);
        pagefault_disable();

        do {
                ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
                len++;
        } while (c && ret == 0 && len < MAX_STRING_SIZE);

        pagefault_enable();
        set_fs(old_fs);

        if (ret < 0)    /* Failed to check the length */
                *(u32 *)dest = 0;
        else
                *(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)                                       \
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{                                                                       \
        struct symbol_cache *sc = data;                                 \
        if (sc->addr)                                                   \
                fetch_memory_##type(regs, (void *)sc->addr, dest);      \
        else                                                            \
                *(type *)dest = 0;                                      \
}                                                                       \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8            NULL
#define fetch_file_offset_u16           NULL
#define fetch_file_offset_u32           NULL
#define fetch_file_offset_u64           NULL
#define fetch_file_offset_string        NULL
#define fetch_file_offset_string_size   NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
        /* Special types */
        [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
                                        sizeof(u32), 1, "__data_loc char[]"),
        [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
                                        string_size, sizeof(u32), 0, "u32"),
        /* Basic types */
        ASSIGN_FETCH_TYPE(u8,  u8,  0),
        ASSIGN_FETCH_TYPE(u16, u16, 0),
        ASSIGN_FETCH_TYPE(u32, u32, 0),
        ASSIGN_FETCH_TYPE(u64, u64, 0),
        ASSIGN_FETCH_TYPE(s8,  u8,  1),
        ASSIGN_FETCH_TYPE(s16, u16, 1),
        ASSIGN_FETCH_TYPE(s32, u32, 1),
        ASSIGN_FETCH_TYPE(s64, u64, 1),
        ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
        ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
        ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
        ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

        ASSIGN_FETCH_TYPE_END
};
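
/*
 * Illustrative use of these types in a probe definition (register names
 * are architecture specific; x86-64 shown):
 *
 *   p:myopen do_sys_open dfd=%di:s32 path=+0(%si):string flags=%dx:x32
 *
 * Here s32, string and x32 select entries from the table above instead
 * of the default unsigned long.
 */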

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
                                             const char *event,
                                             void *addr,
                                             const char *symbol,
                                             unsigned long offs,
                                             int maxactive,
                                             int nargs, bool is_return)
{
        struct trace_kprobe *tk;
        int ret = -ENOMEM;

        tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
        if (!tk)
                return ERR_PTR(ret);

        tk->nhit = alloc_percpu(unsigned long);
        if (!tk->nhit)
                goto error;

        if (symbol) {
                tk->symbol = kstrdup(symbol, GFP_KERNEL);
                if (!tk->symbol)
                        goto error;
                tk->rp.kp.symbol_name = tk->symbol;
                tk->rp.kp.offset = offs;
        } else
                tk->rp.kp.addr = addr;

        if (is_return)
                tk->rp.handler = kretprobe_dispatcher;
        else
                tk->rp.kp.pre_handler = kprobe_dispatcher;

        tk->rp.maxactive = maxactive;

        if (!event || !is_good_name(event)) {
                ret = -EINVAL;
                goto error;
        }

        tk->tp.call.class = &tk->tp.class;
        tk->tp.call.name = kstrdup(event, GFP_KERNEL);
        if (!tk->tp.call.name)
                goto error;

        if (!group || !is_good_name(group)) {
                ret = -EINVAL;
                goto error;
        }

        tk->tp.class.system = kstrdup(group, GFP_KERNEL);
        if (!tk->tp.class.system)
                goto error;

        INIT_LIST_HEAD(&tk->list);
        INIT_LIST_HEAD(&tk->tp.files);
        return tk;
error:
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
        free_percpu(tk->nhit);
        kfree(tk);
        return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
        int i;

        for (i = 0; i < tk->tp.nr_args; i++)
                traceprobe_free_probe_arg(&tk->tp.args[i]);

        kfree(tk->tp.call.class->system);
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
        free_percpu(tk->nhit);
        kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
                                              const char *group)
{
        struct trace_kprobe *tk;

        list_for_each_entry(tk, &probe_list, list)
                if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
                    strcmp(tk->tp.call.class->system, group) == 0)
                        return tk;
        return NULL;
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable
 * the "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
        int ret = 0;

        if (file) {
                struct event_file_link *link;

                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link) {
                        ret = -ENOMEM;
                        goto out;
                }

                link->file = file;
                list_add_tail_rcu(&link->list, &tk->tp.files);

                tk->tp.flags |= TP_FLAG_TRACE;
        } else
                tk->tp.flags |= TP_FLAG_PROFILE;

        if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
                if (trace_kprobe_is_return(tk))
                        ret = enable_kretprobe(&tk->rp);
                else
                        ret = enable_kprobe(&tk->rp.kp);
        }
 out:
        return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable
 * the "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
        struct event_file_link *link = NULL;
        int wait = 0;
        int ret = 0;

        if (file) {
                link = find_event_file_link(&tk->tp, file);
                if (!link) {
                        ret = -EINVAL;
                        goto out;
                }

                list_del_rcu(&link->list);
                wait = 1;
                if (!list_empty(&tk->tp.files))
                        goto out;

                tk->tp.flags &= ~TP_FLAG_TRACE;
        } else
                tk->tp.flags &= ~TP_FLAG_PROFILE;

        if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
                if (trace_kprobe_is_return(tk))
                        disable_kretprobe(&tk->rp);
                else
                        disable_kprobe(&tk->rp.kp);
                wait = 1;
        }
 out:
        if (wait) {
                /*
                 * Synchronize with kprobe_trace_func/kretprobe_trace_func
                 * to ensure the probe is really disabled (all running
                 * handlers have finished). This matters not only for the
                 * kfree() below but also for the caller:
                 * trace_remove_event_call() relies on it when releasing
                 * event_call related objects, which are accessed in
                 * kprobe_trace_func/kretprobe_trace_func.
                 */
                synchronize_sched();
                kfree(link);    /* Ignored if link == NULL */
        }

        return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
        int i, ret;

        if (trace_probe_is_registered(&tk->tp))
                return -EINVAL;

        for (i = 0; i < tk->tp.nr_args; i++)
                traceprobe_update_arg(&tk->tp.args[i]);

        /* Set/clear disabled flag according to tp->flags */
        if (trace_probe_is_enabled(&tk->tp))
                tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
        else
                tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

        if (trace_kprobe_is_return(tk))
                ret = register_kretprobe(&tk->rp);
        else
                ret = register_kprobe(&tk->rp.kp);

        if (ret == 0)
                tk->tp.flags |= TP_FLAG_REGISTERED;
        else {
                pr_warn("Could not insert probe at %s+%lu: %d\n",
                        trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
                if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
                        pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
                        ret = 0;
                } else if (ret == -EILSEQ) {
                        pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
                                tk->rp.kp.addr);
                        ret = -EINVAL;
                }
        }

        return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
        if (trace_probe_is_registered(&tk->tp)) {
                if (trace_kprobe_is_return(tk))
                        unregister_kretprobe(&tk->rp);
                else
                        unregister_kprobe(&tk->rp.kp);
                tk->tp.flags &= ~TP_FLAG_REGISTERED;
                /* Cleanup kprobe for reuse */
                if (tk->rp.kp.symbol_name)
                        tk->rp.kp.addr = NULL;
        }
}

/* Unregister a trace_probe and probe_event: must be called with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
        /* An enabled event cannot be unregistered */
        if (trace_probe_is_enabled(&tk->tp))
                return -EBUSY;

        /* Will fail if probe is being used by ftrace or perf */
        if (unregister_kprobe_event(tk))
                return -EBUSY;

        __unregister_trace_kprobe(tk);
        list_del(&tk->list);

        return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
        struct trace_kprobe *old_tk;
        int ret;

        mutex_lock(&probe_lock);

        /* Delete the old event if one with the same name exists */
        old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
                        tk->tp.call.class->system);
        if (old_tk) {
                ret = unregister_trace_kprobe(old_tk);
                if (ret < 0)
                        goto end;
                free_trace_kprobe(old_tk);
        }

        /* Register new event */
        ret = register_kprobe_event(tk);
        if (ret) {
                pr_warn("Failed to register probe event(%d)\n", ret);
                goto end;
        }

        /* Register k*probe */
        ret = __register_trace_kprobe(tk);
        if (ret < 0)
                unregister_kprobe_event(tk);
        else
                list_add_tail(&tk->list, &probe_list);

end:
        mutex_unlock(&probe_lock);
        return ret;
}

/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
                                       unsigned long val, void *data)
{
        struct module *mod = data;
        struct trace_kprobe *tk;
        int ret;

        if (val != MODULE_STATE_COMING)
                return NOTIFY_DONE;

        /* Update probes on coming module */
        mutex_lock(&probe_lock);
        list_for_each_entry(tk, &probe_list, list) {
                if (trace_kprobe_within_module(tk, mod)) {
                        /* Don't need to check busy - this should have gone. */
                        __unregister_trace_kprobe(tk);
                        ret = __register_trace_kprobe(tk);
                        if (ret)
                                pr_warn("Failed to re-register probe %s on %s: %d\n",
                                        trace_event_name(&tk->tp.call),
                                        mod->name, ret);
                }
        }
        mutex_unlock(&probe_lock);

        return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
        .notifier_call = trace_kprobe_module_callback,
        .priority = 1   /* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
        while (*name++ != '\0')
                if (*name == ':' || *name == '.')
                        *name = '_';
}

static int create_trace_kprobe(int argc, char **argv)
{
        /*
         * Argument syntax:
         *  - Add kprobe:
         *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
         *  - Add kretprobe:
         *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
         * Fetch args:
         *  $retval     : fetch return value
         *  $stack      : fetch stack address
         *  $stackN     : fetch Nth of stack (N:0-)
         *  $comm       : fetch current task comm
         *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
         *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
         *  %REG        : fetch register REG
         * Dereferencing memory fetch:
         *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
         * Alias name of args:
         *  NAME=FETCHARG : set NAME as alias of FETCHARG.
         * Type of args:
         *  FETCHARG:TYPE : use TYPE instead of unsigned long.
         */
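        /*
         * Illustrative commands that reach this parser through the
         * kprobe_events tracefs file (register names are architecture
         * specific):
         *
         *   echo 'p:myprobe do_sys_open dfd=%ax' >> kprobe_events
         *   echo 'r:myretprobe do_sys_open $retval' >> kprobe_events
         *   echo '-:myprobe' >> kprobe_events
         */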
        struct trace_kprobe *tk;
        int i, ret = 0;
        bool is_return = false, is_delete = false;
        char *symbol = NULL, *event = NULL, *group = NULL;
        int maxactive = 0;
        char *arg;
        unsigned long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];

        /* argc must be >= 1 */
        if (argv[0][0] == 'p')
                is_return = false;
        else if (argv[0][0] == 'r')
                is_return = true;
        else if (argv[0][0] == '-')
                is_delete = true;
        else {
                pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
                return -EINVAL;
        }

        event = strchr(&argv[0][1], ':');
        if (event) {
                event[0] = '\0';
                event++;
        }
        if (is_return && isdigit(argv[0][1])) {
                ret = kstrtouint(&argv[0][1], 0, &maxactive);
                if (ret) {
                        pr_info("Failed to parse maxactive.\n");
                        return ret;
                }
                /*
                 * kretprobe instances are iterated over via a list. The
                 * maximum should stay reasonable.
                 */
                if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
                        pr_info("Maxactive is too big (%d > %d).\n",
                                maxactive, KRETPROBE_MAXACTIVE_MAX);
                        return -E2BIG;
                }
        }

        if (event) {
                if (strchr(event, '/')) {
                        group = event;
                        event = strchr(group, '/') + 1;
                        event[-1] = '\0';
                        if (strlen(group) == 0) {
                                pr_info("Group name is not specified\n");
                                return -EINVAL;
                        }
                }
                if (strlen(event) == 0) {
                        pr_info("Event name is not specified\n");
                        return -EINVAL;
                }
        }
        if (!group)
                group = KPROBE_EVENT_SYSTEM;

        if (is_delete) {
                if (!event) {
                        pr_info("Delete command needs an event name.\n");
                        return -EINVAL;
                }
                mutex_lock(&probe_lock);
                tk = find_trace_kprobe(event, group);
                if (!tk) {
                        mutex_unlock(&probe_lock);
                        pr_info("Event %s/%s doesn't exist.\n", group, event);
                        return -ENOENT;
                }
                /* delete an event */
                ret = unregister_trace_kprobe(tk);
                if (ret == 0)
                        free_trace_kprobe(tk);
                mutex_unlock(&probe_lock);
                return ret;
        }

        if (argc < 2) {
                pr_info("Probe point is not specified.\n");
                return -EINVAL;
        }

        /*
         * Try to parse an address. If that fails, try to read the
         * input as a symbol.
         */
        if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
                /* a symbol specified */
                symbol = argv[1];
                /* TODO: support .init module functions */
                ret = traceprobe_split_symbol_offset(symbol, &offset);
                if (ret) {
                        pr_info("Failed to parse either an address or a symbol.\n");
                        return ret;
                }
                if (offset && is_return &&
                    !kprobe_on_func_entry(NULL, symbol, offset)) {
                        pr_info("Given offset is not valid for return probe.\n");
                        return -EINVAL;
                }
        }
        argc -= 2; argv += 2;

        /* setup a probe */
        if (!event) {
                /* Make a new event name */
                if (symbol)
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
                                 is_return ? 'r' : 'p', symbol, offset);
                else
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
                                 is_return ? 'r' : 'p', addr);
                sanitize_event_name(buf);
                event = buf;
        }
        tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
                               argc, is_return);
        if (IS_ERR(tk)) {
                pr_info("Failed to allocate trace_probe.(%d)\n",
                        (int)PTR_ERR(tk));
                return PTR_ERR(tk);
        }

        /* parse arguments */
        ret = 0;
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
                struct probe_arg *parg = &tk->tp.args[i];

                /* Increment count for freeing args in error case */
                tk->tp.nr_args++;

                /* Parse argument name */
                arg = strchr(argv[i], '=');
                if (arg) {
                        *arg++ = '\0';
                        parg->name = kstrdup(argv[i], GFP_KERNEL);
                } else {
                        arg = argv[i];
                        /* If argument name is omitted, set "argN" */
                        snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
                        parg->name = kstrdup(buf, GFP_KERNEL);
                }

                if (!parg->name) {
                        pr_info("Failed to allocate argument[%d] name.\n", i);
                        ret = -ENOMEM;
                        goto error;
                }

                if (!is_good_name(parg->name)) {
                        pr_info("Invalid argument[%d] name: %s\n",
                                i, parg->name);
                        ret = -EINVAL;
                        goto error;
                }

                if (traceprobe_conflict_field_name(parg->name,
                                                        tk->tp.args, i)) {
                        pr_info("Argument[%d] name '%s' conflicts with another field.\n",
                                i, argv[i]);
                        ret = -EINVAL;
                        goto error;
                }

                /* Parse fetch argument */
                ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
                                                is_return, true,
                                                kprobes_fetch_type_table);
                if (ret) {
                        pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
                        goto error;
                }
        }

        ret = register_trace_kprobe(tk);
        if (ret)
                goto error;
        return 0;

error:
        free_trace_kprobe(tk);
        return ret;
}

static int release_all_trace_kprobes(void)
{
        struct trace_kprobe *tk;
        int ret = 0;

        mutex_lock(&probe_lock);
        /* Ensure no probe is in use. */
        list_for_each_entry(tk, &probe_list, list)
                if (trace_probe_is_enabled(&tk->tp)) {
                        ret = -EBUSY;
                        goto end;
                }
        /* TODO: Use batch unregistration */
        while (!list_empty(&probe_list)) {
                tk = list_entry(probe_list.next, struct trace_kprobe, list);
                ret = unregister_trace_kprobe(tk);
                if (ret)
                        goto end;
                free_trace_kprobe(tk);
        }

end:
        mutex_unlock(&probe_lock);

        return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&probe_lock);
        return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
        mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
        struct trace_kprobe *tk = v;
        int i;

        seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
        seq_printf(m, ":%s/%s", tk->tp.call.class->system,
                        trace_event_name(&tk->tp.call));

        if (!tk->symbol)
                seq_printf(m, " 0x%p", tk->rp.kp.addr);
        else if (tk->rp.kp.offset)
                seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
                           tk->rp.kp.offset);
        else
                seq_printf(m, " %s", trace_kprobe_symbol(tk));

        for (i = 0; i < tk->tp.nr_args; i++)
                seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
        seq_putc(m, '\n');

        return 0;
}

static const struct seq_operations probes_seq_op = {
        .start  = probes_seq_start,
        .next   = probes_seq_next,
        .stop   = probes_seq_stop,
        .show   = probes_seq_show
};

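/*
 * Opening kprobe_events for writing with O_TRUNC (e.g. a plain
 * "echo > kprobe_events") removes every probe first; this fails with
 * -EBUSY if any probe is still enabled.
 */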
static int probes_open(struct inode *inode, struct file *file)
{
        int ret;

        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = release_all_trace_kprobes();
                if (ret < 0)
                        return ret;
        }

        return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        return trace_parse_run_command(file, buffer, count, ppos,
                                       create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
        .owner          = THIS_MODULE,
        .open           = probes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .write          = probes_write,
};

/* Probes profiling interfaces */
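/*
 * kprobe_profile emits one line per probe: the event name, the hit
 * count, and the number of missed invocations, e.g. (illustrative):
 *
 *   myprobe                                                  5               0
 */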
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
        struct trace_kprobe *tk = v;

        seq_printf(m, "  %-44s %15lu %15lu\n",
                   trace_event_name(&tk->tp.call),
                   trace_kprobe_nhit(tk),
                   tk->rp.kp.nmissed);

        return 0;
}

static const struct seq_operations profile_seq_op = {
        .start  = probes_seq_start,
        .next   = probes_seq_next,
        .stop   = probes_seq_stop,
        .show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
        .owner          = THIS_MODULE,
        .open           = profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
                    struct trace_event_file *trace_file)
{
        struct kprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, dsize, pc;
        unsigned long irq_flags;
        struct trace_event_call *call = &tk->tp.call;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        dsize = __get_data_size(&tk->tp, regs);
        size = sizeof(*entry) + tk->tp.size + dsize;

        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type,
                                                size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->ip = (unsigned long)tk->rp.kp.addr;
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

        event_trigger_unlock_commit_regs(trace_file, buffer, event,
                                         entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct event_file_link *link;

        list_for_each_entry_rcu(link, &tk->tp.files, list)
                __kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                       struct pt_regs *regs,
                       struct trace_event_file *trace_file)
{
        struct kretprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, pc, dsize;
        unsigned long irq_flags;
        struct trace_event_call *call = &tk->tp.call;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        dsize = __get_data_size(&tk->tp, regs);
        size = sizeof(*entry) + tk->tp.size + dsize;

        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type,
                                                size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

        event_trigger_unlock_commit_regs(trace_file, buffer, event,
                                         entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                     struct pt_regs *regs)
{
        struct event_file_link *link;

        list_for_each_entry_rcu(link, &tk->tp.files, list)
                __kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct kprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;
        u8 *data;
        int i;

        field = (struct kprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);

        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        data = (u8 *)&field[1];
        for (i = 0; i < tp->nr_args; i++)
                if (!tp->args[i].type->print(s, tp->args[i].name,
                                             data + tp->args[i].offset, field))
                        goto out;

        trace_seq_putc(s, '\n');
 out:
        return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
                      struct trace_event *event)
{
        struct kretprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;
        u8 *data;
        int i;

        field = (struct kretprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);

        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_puts(s, " <- ");

        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        data = (u8 *)&field[1];
        for (i = 0; i < tp->nr_args; i++)
                if (!tp->args[i].type->print(s, tp->args[i].name,
                                             data + tp->args[i].offset, field))
                        goto out;

        trace_seq_putc(s, '\n');

 out:
        return trace_handle_return(s);
}

static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret, i;
        struct kprobe_trace_entry_head field;
        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
        /* Set argument names as fields */
        for (i = 0; i < tk->tp.nr_args; i++) {
                struct probe_arg *parg = &tk->tp.args[i];

                ret = trace_define_field(event_call, parg->type->fmttype,
                                         parg->name,
                                         sizeof(field) + parg->offset,
                                         parg->type->size,
                                         parg->type->is_signed,
                                         FILTER_OTHER);
                if (ret)
                        return ret;
        }
        return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret, i;
        struct kretprobe_trace_entry_head field;
        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
        /* Set argument names as fields */
        for (i = 0; i < tk->tp.nr_args; i++) {
                struct probe_arg *parg = &tk->tp.args[i];

                ret = trace_define_field(event_call, parg->type->fmttype,
                                         parg->name,
                                         sizeof(field) + parg->offset,
                                         parg->type->size,
                                         parg->type->is_signed,
                                         FILTER_OTHER);
                if (ret)
                        return ret;
        }
        return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct trace_event_call *call = &tk->tp.call;
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
                return;

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
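        /*
         * Round the record up so that, together with the u32 size header
         * perf prepends, the payload stays u64-aligned; the header size
         * is then subtracted back out (the usual perf_trace_buf idiom).
         */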
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;

        entry->ip = (unsigned long)tk->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                    struct pt_regs *regs)
{
        struct trace_event_call *call = &tk->tp.call;
        struct kretprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
                return;

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;

        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif  /* CONFIG_PERF_EVENTS */

/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
 * locklessly, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
                           enum trace_reg type, void *data)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return enable_trace_kprobe(tk, file);
        case TRACE_REG_UNREGISTER:
                return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return enable_trace_kprobe(tk, NULL);
        case TRACE_REG_PERF_UNREGISTER:
                return disable_trace_kprobe(tk, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);

        raw_cpu_inc(*tk->nhit);

        if (tk->tp.flags & TP_FLAG_TRACE)
                kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
                kprobe_perf_func(tk, regs);
#endif
        return 0;       /* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

        raw_cpu_inc(*tk->nhit);

        if (tk->tp.flags & TP_FLAG_TRACE)
                kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
                kretprobe_perf_func(tk, ri, regs);
#endif
        return 0;       /* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
        .trace          = print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
        .trace          = print_kprobe_event
};

static int register_kprobe_event(struct trace_kprobe *tk)
{
        struct trace_event_call *call = &tk->tp.call;
        int ret;

        /* Initialize trace_event_call */
        INIT_LIST_HEAD(&call->class->fields);
        if (trace_kprobe_is_return(tk)) {
                call->event.funcs = &kretprobe_funcs;
                call->class->define_fields = kretprobe_event_define_fields;
        } else {
                call->event.funcs = &kprobe_funcs;
                call->class->define_fields = kprobe_event_define_fields;
        }
        if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
                return -ENOMEM;
        ret = register_trace_event(&call->event);
        if (!ret) {
                kfree(call->print_fmt);
                return -ENODEV;
        }
        call->flags = TRACE_EVENT_FL_KPROBE;
        call->class->reg = kprobe_register;
        call->data = tk;
        ret = trace_add_event_call(call);
        if (ret) {
                pr_info("Failed to register kprobe event: %s\n",
                        trace_event_name(call));
                kfree(call->print_fmt);
                unregister_trace_event(&call->event);
        }
        return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
        int ret;

        /* tp->event is unregistered in trace_remove_event_call() */
        ret = trace_remove_event_call(&tk->tp.call);
        if (!ret)
                kfree(tk->tp.call.print_fmt);
        return ret;
}

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        if (register_module_notifier(&trace_kprobe_module_nb))
                return -EINVAL;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
                                    NULL, &kprobe_events_ops);

        /* Event list interface */
        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_events' entry\n");

        /* Profile interface */
        entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
                                    NULL, &kprobe_profile_ops);

        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
        return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table. 'noinline' makes sure that there
 * isn't an inlined version used by the test method below.
 */
static __used __init noinline int
kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
{
        return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
        struct trace_event_file *file;

        list_for_each_entry(file, &tr->events, list)
                if (file->event_call == &tk->tp.call)
                        return file;

        return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this locklessly.
 */
static __init int kprobe_trace_self_tests_init(void)
{
        int ret, warn = 0;
        int (*target)(int, int, int, int, int, int);
        struct trace_kprobe *tk;
        struct trace_event_file *file;

        if (tracing_is_disabled())
                return -ENODEV;

        target = kprobe_trace_selftest_target;

        pr_info("Testing kprobe tracing: ");

        ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
                                "$stack $stack0 +0($stack)",
                                create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function entry.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(tk, file);
                }
        }

        ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
                                "$retval", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function return.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting 2nd new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(tk, file);
                }
        }

        if (warn)
                goto end;

        ret = target(1, 2, 3, 4, 5, 6);

        /*
         * Not expecting an error here; the check is only to prevent the
         * optimizer from removing the call to target(), as otherwise there
         * are no side-effects and the call is never performed.
         */
        if (ret != 21)
                warn++;

        /* Disable trace points before removing them */
        tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(tk, file);
        }

        tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting 2nd test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe2 hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(tk, file);
        }

        ret = trace_run_command("-:testprobe", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

        ret = trace_run_command("-:testprobe2", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

end:
        release_all_trace_kprobes();
        /*
         * Wait for the optimizer work to finish. Otherwise it might fiddle
         * with probes in already freed __init text.
         */
        wait_for_kprobe_optimizer();
        if (warn)
                pr_cont("NG: Some tests failed. Please check them.\n");
        else
                pr_cont("OK\n");
        return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif