Linux 6.9-rc4
[sfrench/cifs-2.6.git] / kernel / trace / trace_kprobe.c
index 7d736248a070b2f633349856345e2013d4a3cd34..14099cc17fc9ed5bfad51b5be14504ef790e50a6 100644 (file)
@@ -7,25 +7,42 @@
  */
 #define pr_fmt(fmt)    "trace_kprobe: " fmt
 
+#include <linux/bpf-cgroup.h>
+#include <linux/security.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/rculist.h>
 #include <linux/error-injection.h>
 
+#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
+
 #include "trace_dynevent.h"
 #include "trace_kprobe_selftest.h"
 #include "trace_probe.h"
 #include "trace_probe_tmpl.h"
+#include "trace_probe_kernel.h"
 
 #define KPROBE_EVENT_SYSTEM "kprobes"
 #define KRETPROBE_MAXACTIVE_MAX 4096
 
-static int trace_kprobe_create(int argc, const char **argv);
+/* Kprobe early definition from command line */
+static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
+
+static int __init set_kprobe_boot_events(char *str)
+{
+       strscpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
+       disable_tracing_selftest("running kprobe events");
+
+       return 1;
+}
+__setup("kprobe_event=", set_kprobe_boot_events);
+
+static int trace_kprobe_create(const char *raw_command);
 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
 static int trace_kprobe_release(struct dyn_event *ev);
 static bool trace_kprobe_is_busy(struct dyn_event *ev);
 static bool trace_kprobe_match(const char *system, const char *event,
-                              struct dyn_event *ev);
+                       int argc, const char **argv, struct dyn_event *ev);
 
 static struct dyn_event_operations trace_kprobe_ops = {
        .create = trace_kprobe_create,
@@ -65,10 +82,6 @@ static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
        for_each_dyn_event(dpos)                \
                if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
 
-#define SIZEOF_TRACE_KPROBE(n)                         \
-       (offsetof(struct trace_kprobe, tp.args) +       \
-       (sizeof(struct probe_arg) * (n)))
-
 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
 {
        return tk->rp.handler != NULL;
@@ -86,15 +99,16 @@ static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk
 
 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
 {
-       return !!(kprobe_gone(&tk->rp.kp));
+       return kprobe_gone(&tk->rp.kp);
 }
 
 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
                                                 struct module *mod)
 {
-       int len = strlen(mod->name);
+       int len = strlen(module_name(mod));
        const char *name = trace_kprobe_symbol(tk);
-       return strncmp(mod->name, name, len) == 0 && name[len] == ':';
+
+       return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
 }
 
 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
@@ -108,9 +122,9 @@ static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
        if (!p)
                return true;
        *p = '\0';
-       mutex_lock(&module_mutex);
+       rcu_read_lock_sched();
        ret = !!find_module(tk->symbol);
-       mutex_unlock(&module_mutex);
+       rcu_read_unlock_sched();
        *p = ':';
 
        return ret;
@@ -123,13 +137,37 @@ static bool trace_kprobe_is_busy(struct dyn_event *ev)
        return trace_probe_is_enabled(&tk->tp);
 }
 
+static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
+                                           int argc, const char **argv)
+{
+       char buf[MAX_ARGSTR_LEN + 1];
+
+       if (!argc)
+               return true;
+
+       if (!tk->symbol)
+               snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
+       else if (tk->rp.kp.offset)
+               snprintf(buf, sizeof(buf), "%s+%u",
+                        trace_kprobe_symbol(tk), tk->rp.kp.offset);
+       else
+               snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
+       if (strcmp(buf, argv[0]))
+               return false;
+       argc--; argv++;
+
+       return trace_probe_match_command_args(&tk->tp, argc, argv);
+}
+
 static bool trace_kprobe_match(const char *system, const char *event,
-                              struct dyn_event *ev)
+                       int argc, const char **argv, struct dyn_event *ev)
 {
        struct trace_kprobe *tk = to_trace_kprobe(ev);
 
-       return strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
-           (!system || strcmp(tk->tp.call.class->system, system) == 0);
+       return (event[0] == '\0' ||
+               strcmp(trace_probe_name(&tk->tp), event) == 0) &&
+           (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
+           trace_kprobe_match_command_head(tk, argc, argv);
 }
 
 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
@@ -143,6 +181,12 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
        return nhit;
 }
 
+static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
+{
+       return !(list_empty(&tk->rp.kp.list) &&
+                hlist_unhashed(&tk->rp.kp.hlist));
+}
+
 /* Return 0 if it fails to find the symbol address */
 static nokprobe_inline
 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
@@ -160,20 +204,33 @@ unsigned long trace_kprobe_address(struct trace_kprobe *tk)
        return addr;
 }
 
+static nokprobe_inline struct trace_kprobe *
+trace_kprobe_primary_from_call(struct trace_event_call *call)
+{
+       struct trace_probe *tp;
+
+       tp = trace_probe_primary_from_call(call);
+       if (WARN_ON_ONCE(!tp))
+               return NULL;
+
+       return container_of(tp, struct trace_kprobe, tp);
+}
+
 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 {
-       struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+       struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
 
-       return kprobe_on_func_entry(tk->rp.kp.addr,
+       return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
                        tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
-                       tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
+                       tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
 }
 
 bool trace_kprobe_error_injectable(struct trace_event_call *call)
 {
-       struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+       struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
 
-       return within_error_injection_list(trace_kprobe_address(tk));
+       return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
+              false;
 }
 
 static int register_kprobe_event(struct trace_kprobe *tk);
@@ -183,6 +240,16 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);
 
+static void free_trace_kprobe(struct trace_kprobe *tk)
+{
+       if (tk) {
+               trace_probe_cleanup(&tk->tp);
+               kfree(tk->symbol);
+               free_percpu(tk->nhit);
+               kfree(tk);
+       }
+}
+
 /*
  * Allocate new trace_probe and initialize it (including kprobes).
  */
@@ -197,7 +264,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
        struct trace_kprobe *tk;
        int ret = -ENOMEM;
 
-       tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
+       tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
        if (!tk)
                return ERR_PTR(ret);
 
@@ -220,49 +287,20 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
                tk->rp.kp.pre_handler = kprobe_dispatcher;
 
        tk->rp.maxactive = maxactive;
+       INIT_HLIST_NODE(&tk->rp.kp.hlist);
+       INIT_LIST_HEAD(&tk->rp.kp.list);
 
-       if (!event || !group) {
-               ret = -EINVAL;
-               goto error;
-       }
-
-       tk->tp.call.class = &tk->tp.class;
-       tk->tp.call.name = kstrdup(event, GFP_KERNEL);
-       if (!tk->tp.call.name)
-               goto error;
-
-       tk->tp.class.system = kstrdup(group, GFP_KERNEL);
-       if (!tk->tp.class.system)
+       ret = trace_probe_init(&tk->tp, event, group, false, nargs);
+       if (ret < 0)
                goto error;
 
        dyn_event_init(&tk->devent, &trace_kprobe_ops);
-       INIT_LIST_HEAD(&tk->tp.files);
        return tk;
 error:
-       kfree(tk->tp.call.name);
-       kfree(tk->symbol);
-       free_percpu(tk->nhit);
-       kfree(tk);
+       free_trace_kprobe(tk);
        return ERR_PTR(ret);
 }
 
-static void free_trace_kprobe(struct trace_kprobe *tk)
-{
-       int i;
-
-       if (!tk)
-               return;
-
-       for (i = 0; i < tk->tp.nr_args; i++)
-               traceprobe_free_probe_arg(&tk->tp.args[i]);
-
-       kfree(tk->tp.call.class->system);
-       kfree(tk->tp.call.name);
-       kfree(tk->symbol);
-       free_percpu(tk->nhit);
-       kfree(tk);
-}
-
 static struct trace_kprobe *find_trace_kprobe(const char *event,
                                              const char *group)
 {
@@ -270,8 +308,8 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
        struct trace_kprobe *tk;
 
        for_each_trace_kprobe(tk, pos)
-               if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
-                   strcmp(tk->tp.call.class->system, group) == 0)
+               if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
+                   strcmp(trace_probe_group_name(&tk->tp), group) == 0)
                        return tk;
        return NULL;
 }
@@ -280,7 +318,7 @@ static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
 {
        int ret = 0;
 
-       if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
+       if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
                if (trace_kprobe_is_return(tk))
                        ret = enable_kretprobe(&tk->rp);
                else
@@ -290,41 +328,67 @@ static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
        return ret;
 }
 
+static void __disable_trace_kprobe(struct trace_probe *tp)
+{
+       struct trace_kprobe *tk;
+
+       list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
+               if (!trace_kprobe_is_registered(tk))
+                       continue;
+               if (trace_kprobe_is_return(tk))
+                       disable_kretprobe(&tk->rp);
+               else
+                       disable_kprobe(&tk->rp.kp);
+       }
+}
+
 /*
  * Enable trace_probe
  * if the file is NULL, enable "perf" handler, or enable "trace" handler.
  */
-static int
-enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
+static int enable_trace_kprobe(struct trace_event_call *call,
+                               struct trace_event_file *file)
 {
-       struct event_file_link *link;
+       struct trace_probe *tp;
+       struct trace_kprobe *tk;
+       bool enabled;
        int ret = 0;
 
-       if (file) {
-               link = kmalloc(sizeof(*link), GFP_KERNEL);
-               if (!link) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
+       tp = trace_probe_primary_from_call(call);
+       if (WARN_ON_ONCE(!tp))
+               return -ENODEV;
+       enabled = trace_probe_is_enabled(tp);
 
-               link->file = file;
-               list_add_tail_rcu(&link->list, &tk->tp.files);
+       /* This also changes "enabled" state */
+       if (file) {
+               ret = trace_probe_add_file(tp, file);
+               if (ret)
+                       return ret;
+       } else
+               trace_probe_set_flag(tp, TP_FLAG_PROFILE);
 
-               tk->tp.flags |= TP_FLAG_TRACE;
-               ret = __enable_trace_kprobe(tk);
-               if (ret) {
-                       list_del_rcu(&link->list);
-                       kfree(link);
-                       tk->tp.flags &= ~TP_FLAG_TRACE;
-               }
+       if (enabled)
+               return 0;
 
-       } else {
-               tk->tp.flags |= TP_FLAG_PROFILE;
+       list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
+               if (trace_kprobe_has_gone(tk))
+                       continue;
                ret = __enable_trace_kprobe(tk);
                if (ret)
-                       tk->tp.flags &= ~TP_FLAG_PROFILE;
+                       break;
+               enabled = true;
        }
- out:
+
+       if (ret) {
+               /* Failed to enable one of them. Roll back all */
+               if (enabled)
+                       __disable_trace_kprobe(tp);
+               if (file)
+                       trace_probe_remove_file(tp, file);
+               else
+                       trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
+       }
+
        return ret;
 }
 
@@ -332,68 +396,46 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
  * Disable trace_probe
  * if the file is NULL, disable "perf" handler, or disable "trace" handler.
  */
-static int
-disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
+static int disable_trace_kprobe(struct trace_event_call *call,
+                               struct trace_event_file *file)
 {
-       struct event_file_link *link = NULL;
-       int wait = 0;
-       int ret = 0;
+       struct trace_probe *tp;
 
-       if (file) {
-               link = find_event_file_link(&tk->tp, file);
-               if (!link) {
-                       ret = -EINVAL;
-                       goto out;
-               }
+       tp = trace_probe_primary_from_call(call);
+       if (WARN_ON_ONCE(!tp))
+               return -ENODEV;
 
-               list_del_rcu(&link->list);
-               wait = 1;
-               if (!list_empty(&tk->tp.files))
+       if (file) {
+               if (!trace_probe_get_file_link(tp, file))
+                       return -ENOENT;
+               if (!trace_probe_has_single_file(tp))
                        goto out;
-
-               tk->tp.flags &= ~TP_FLAG_TRACE;
+               trace_probe_clear_flag(tp, TP_FLAG_TRACE);
        } else
-               tk->tp.flags &= ~TP_FLAG_PROFILE;
+               trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
 
-       if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
-               if (trace_kprobe_is_return(tk))
-                       disable_kretprobe(&tk->rp);
-               else
-                       disable_kprobe(&tk->rp.kp);
-               wait = 1;
-       }
+       if (!trace_probe_is_enabled(tp))
+               __disable_trace_kprobe(tp);
 
-       /*
-        * if tk is not added to any list, it must be a local trace_kprobe
-        * created with perf_event_open. We don't need to wait for these
-        * trace_kprobes
-        */
-       if (list_empty(&tk->devent.list))
-               wait = 0;
  out:
-       if (wait) {
+       if (file)
                /*
-                * Synchronize with kprobe_trace_func/kretprobe_trace_func
-                * to ensure disabled (all running handlers are finished).
-                * This is not only for kfree(), but also the caller,
-                * trace_remove_event_call() supposes it for releasing
-                * event_call related objects, which will be accessed in
-                * the kprobe_trace_func/kretprobe_trace_func.
+                * Synchronization is done in below function. For perf event,
+                * file == NULL and perf_trace_event_unreg() calls
+                * tracepoint_synchronize_unregister() to ensure synchronize
+                * event. We don't need to care about it.
                 */
-               synchronize_rcu();
-               kfree(link);    /* Ignored if link == NULL */
-       }
+               trace_probe_remove_file(tp, file);
 
-       return ret;
+       return 0;
 }
 
-#if defined(CONFIG_KPROBES_ON_FTRACE) && \
+#if defined(CONFIG_DYNAMIC_FTRACE) && \
        !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
-static bool within_notrace_func(struct trace_kprobe *tk)
+static bool __within_notrace_func(unsigned long addr)
 {
-       unsigned long offset, size, addr;
+       unsigned long offset, size;
 
-       addr = trace_kprobe_address(tk);
        if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
                return false;
 
@@ -406,6 +448,28 @@ static bool within_notrace_func(struct trace_kprobe *tk)
         */
        return !ftrace_location_range(addr, addr + size - 1);
 }
+
+static bool within_notrace_func(struct trace_kprobe *tk)
+{
+       unsigned long addr = trace_kprobe_address(tk);
+       char symname[KSYM_NAME_LEN], *p;
+
+       if (!__within_notrace_func(addr))
+               return false;
+
+       /* Check if the address is on a suffixed-symbol */
+       if (!lookup_symbol_name(addr, symname)) {
+               p = strchr(symname, '.');
+               if (!p)
+                       return true;
+               *p = '\0';
+               addr = (unsigned long)kprobe_lookup_name(symname, 0);
+               if (addr)
+                       return __within_notrace_func(addr);
+       }
+
+       return true;
+}
 #else
 #define within_notrace_func(tk)        (false)
 #endif
@@ -415,12 +479,16 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
 {
        int i, ret;
 
-       if (trace_probe_is_registered(&tk->tp))
+       ret = security_locked_down(LOCKDOWN_KPROBES);
+       if (ret)
+               return ret;
+
+       if (trace_kprobe_is_registered(tk))
                return -EINVAL;
 
        if (within_notrace_func(tk)) {
-               pr_warn("Could not probe notrace function %s\n",
-                       trace_kprobe_symbol(tk));
+               pr_warn("Could not probe notrace function %ps\n",
+                       (void *)trace_kprobe_address(tk));
                return -EINVAL;
        }
 
@@ -441,21 +509,20 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
        else
                ret = register_kprobe(&tk->rp.kp);
 
-       if (ret == 0)
-               tk->tp.flags |= TP_FLAG_REGISTERED;
        return ret;
 }
 
 /* Internal unregister function - just handle k*probes and flags */
 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
 {
-       if (trace_probe_is_registered(&tk->tp)) {
+       if (trace_kprobe_is_registered(tk)) {
                if (trace_kprobe_is_return(tk))
                        unregister_kretprobe(&tk->rp);
                else
                        unregister_kprobe(&tk->rp.kp);
-               tk->tp.flags &= ~TP_FLAG_REGISTERED;
-               /* Cleanup kprobe for reuse */
+               /* Cleanup kprobe for reuse and mark it unregistered */
+               INIT_HLIST_NODE(&tk->rp.kp.hlist);
+               INIT_LIST_HEAD(&tk->rp.kp.list);
                if (tk->rp.kp.symbol_name)
                        tk->rp.kp.addr = NULL;
        }
@@ -464,20 +531,96 @@ static void __unregister_trace_kprobe(struct trace_kprobe *tk)
 /* Unregister a trace_probe and probe_event */
 static int unregister_trace_kprobe(struct trace_kprobe *tk)
 {
+       /* If other probes are on the event, just unregister kprobe */
+       if (trace_probe_has_sibling(&tk->tp))
+               goto unreg;
+
        /* Enabled event can not be unregistered */
        if (trace_probe_is_enabled(&tk->tp))
                return -EBUSY;
 
+       /* If there's a reference to the dynamic event */
+       if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
+               return -EBUSY;
+
        /* Will fail if probe is being used by ftrace or perf */
        if (unregister_kprobe_event(tk))
                return -EBUSY;
 
+unreg:
        __unregister_trace_kprobe(tk);
        dyn_event_remove(&tk->devent);
+       trace_probe_unlink(&tk->tp);
 
        return 0;
 }
 
+static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
+                                        struct trace_kprobe *comp)
+{
+       struct trace_probe_event *tpe = orig->tp.event;
+       int i;
+
+       list_for_each_entry(orig, &tpe->probes, tp.list) {
+               if (strcmp(trace_kprobe_symbol(orig),
+                          trace_kprobe_symbol(comp)) ||
+                   trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
+                       continue;
+
+               /*
+                * trace_probe_compare_arg_type() ensured that nr_args and
+                * each argument name and type are same. Let's compare comm.
+                */
+               for (i = 0; i < orig->tp.nr_args; i++) {
+                       if (strcmp(orig->tp.args[i].comm,
+                                  comp->tp.args[i].comm))
+                               break;
+               }
+
+               if (i == orig->tp.nr_args)
+                       return true;
+       }
+
+       return false;
+}
+
+static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
+{
+       int ret;
+
+       ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
+       if (ret) {
+               /* Note that argument starts index = 2 */
+               trace_probe_log_set_index(ret + 1);
+               trace_probe_log_err(0, DIFF_ARG_TYPE);
+               return -EEXIST;
+       }
+       if (trace_kprobe_has_same_kprobe(to, tk)) {
+               trace_probe_log_set_index(0);
+               trace_probe_log_err(0, SAME_PROBE);
+               return -EEXIST;
+       }
+
+       /* Append to existing event */
+       ret = trace_probe_append(&tk->tp, &to->tp);
+       if (ret)
+               return ret;
+
+       /* Register k*probe */
+       ret = __register_trace_kprobe(tk);
+       if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
+               pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
+               ret = 0;
+       }
+
+       if (ret)
+               trace_probe_unlink(&tk->tp);
+       else
+               dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
+
+       return ret;
+}
+
 /* Register a trace_probe and probe_event */
 static int register_trace_kprobe(struct trace_kprobe *tk)
 {
@@ -486,20 +629,27 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
 
        mutex_lock(&event_mutex);
 
-       /* Delete old (same name) event if exist */
-       old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
-                       tk->tp.call.class->system);
+       old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
+                                  trace_probe_group_name(&tk->tp));
        if (old_tk) {
-               ret = unregister_trace_kprobe(old_tk);
-               if (ret < 0)
-                       goto end;
-               free_trace_kprobe(old_tk);
+               if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
+                       trace_probe_log_set_index(0);
+                       trace_probe_log_err(0, DIFF_PROBE_TYPE);
+                       ret = -EEXIST;
+               } else {
+                       ret = append_trace_kprobe(tk, old_tk);
+               }
+               goto end;
        }
 
        /* Register new event */
        ret = register_kprobe_event(tk);
        if (ret) {
-               pr_warn("Failed to register probe event(%d)\n", ret);
+               if (ret == -EEXIST) {
+                       trace_probe_log_set_index(0);
+                       trace_probe_log_err(0, EVENT_EXIST);
+               } else
+                       pr_warn("Failed to register probe event(%d)\n", ret);
                goto end;
        }
 
@@ -513,7 +663,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
        if (ret < 0)
                unregister_kprobe_event(tk);
        else
-               dyn_event_add(&tk->devent);
+               dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
 
 end:
        mutex_unlock(&event_mutex);
@@ -541,8 +691,8 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
                        ret = __register_trace_kprobe(tk);
                        if (ret)
                                pr_warn("Failed to re-register probe %s on %s: %d\n",
-                                       trace_event_name(&tk->tp.call),
-                                       mod->name, ret);
+                                       trace_probe_name(&tk->tp),
+                                       module_name(mod), ret);
                }
        }
        mutex_unlock(&event_mutex);
@@ -555,22 +705,55 @@ static struct notifier_block trace_kprobe_module_nb = {
        .priority = 1   /* Invoked after kprobe module callback */
 };
 
-/* Convert certain expected symbols into '_' when generating event names */
-static inline void sanitize_event_name(char *name)
+static int count_symbols(void *data, unsigned long unused)
+{
+       unsigned int *count = data;
+
+       (*count)++;
+
+       return 0;
+}
+
+struct sym_count_ctx {
+       unsigned int count;
+       const char *name;
+};
+
+static int count_mod_symbols(void *data, const char *name, unsigned long unused)
+{
+       struct sym_count_ctx *ctx = data;
+
+       if (strcmp(name, ctx->name) == 0)
+               ctx->count++;
+
+       return 0;
+}
+
+static unsigned int number_of_same_symbols(char *func_name)
 {
-       while (*name++ != '\0')
-               if (*name == ':' || *name == '.')
-                       *name = '_';
+       struct sym_count_ctx ctx = { .count = 0, .name = func_name };
+
+       kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
+
+       module_kallsyms_on_each_symbol(NULL, count_mod_symbols, &ctx);
+
+       return ctx.count;
 }
 
-static int trace_kprobe_create(int argc, const char *argv[])
+static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
+                                     struct pt_regs *regs);
+
+static int __trace_kprobe_create(int argc, const char *argv[])
 {
        /*
         * Argument syntax:
         *  - Add kprobe:
-        *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
+        *      p[:[GRP/][EVENT]] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
         *  - Add kretprobe:
-        *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
+        *      r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]KSYM[+0] [FETCHARGS]
+        *    Or
+        *      p[:[GRP/][EVENT]] [MOD:]KSYM[+0]%return [FETCHARGS]
+        *
         * Fetch args:
         *  $retval     : fetch return value
         *  $stack      : fetch stack address
@@ -587,20 +770,23 @@ static int trace_kprobe_create(int argc, const char *argv[])
         *  FETCHARG:TYPE : use TYPE instead of unsigned long.
         */
        struct trace_kprobe *tk = NULL;
-       int i, len, ret = 0;
+       int i, len, new_argc = 0, ret = 0;
        bool is_return = false;
        char *symbol = NULL, *tmp = NULL;
+       const char **new_argv = NULL;
        const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
+       enum probe_print_type ptype;
        int maxactive = 0;
        long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];
-       unsigned int flags = TPARG_FL_KERNEL;
+       char gbuf[MAX_EVENT_NAME_LEN];
+       char abuf[MAX_BTF_ARGS_LEN];
+       struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL };
 
        switch (argv[0][0]) {
        case 'r':
                is_return = true;
-               flags |= TPARG_FL_RETURN;
                break;
        case 'p':
                break;
@@ -618,7 +804,7 @@ static int trace_kprobe_create(int argc, const char *argv[])
 
        if (isdigit(argv[0][1])) {
                if (!is_return) {
-                       trace_probe_log_err(1, MAXACT_NO_KPROBE);
+                       trace_probe_log_err(1, BAD_MAXACT_TYPE);
                        goto parse_error;
                }
                if (event)
@@ -658,27 +844,70 @@ static int trace_kprobe_create(int argc, const char *argv[])
                symbol = kstrdup(argv[1], GFP_KERNEL);
                if (!symbol)
                        return -ENOMEM;
+
+               tmp = strchr(symbol, '%');
+               if (tmp) {
+                       if (!strcmp(tmp, "%return")) {
+                               *tmp = '\0';
+                               is_return = true;
+                       } else {
+                               trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
+                               goto parse_error;
+                       }
+               }
+
                /* TODO: support .init module functions */
                ret = traceprobe_split_symbol_offset(symbol, &offset);
                if (ret || offset < 0 || offset > UINT_MAX) {
                        trace_probe_log_err(0, BAD_PROBE_ADDR);
                        goto parse_error;
                }
-               if (kprobe_on_func_entry(NULL, symbol, offset))
-                       flags |= TPARG_FL_FENTRY;
-               if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
+               if (is_return)
+                       ctx.flags |= TPARG_FL_RETURN;
+               ret = kprobe_on_func_entry(NULL, symbol, offset);
+               if (ret == 0 && !is_return)
+                       ctx.flags |= TPARG_FL_FENTRY;
+               /* Defer the ENOENT case until register kprobe */
+               if (ret == -EINVAL && is_return) {
                        trace_probe_log_err(0, BAD_RETPROBE);
                        goto parse_error;
                }
        }
 
+       if (symbol && !strchr(symbol, ':')) {
+               unsigned int count;
+
+               count = number_of_same_symbols(symbol);
+               if (count > 1) {
+                       /*
+                        * Users should use ADDR to remove the ambiguity of
+                        * using KSYM only.
+                        */
+                       trace_probe_log_err(0, NON_UNIQ_SYMBOL);
+                       ret = -EADDRNOTAVAIL;
+
+                       goto error;
+               } else if (count == 0) {
+                       /*
+                        * We can return ENOENT earlier than when register the
+                        * kprobe.
+                        */
+                       trace_probe_log_err(0, BAD_PROBE_ADDR);
+                       ret = -ENOENT;
+
+                       goto error;
+               }
+       }
+
        trace_probe_log_set_index(0);
        if (event) {
-               ret = traceprobe_parse_event_name(&event, &group, buf,
+               ret = traceprobe_parse_event_name(&event, &group, gbuf,
                                                  event - argv[0]);
                if (ret)
                        goto parse_error;
-       } else {
+       }
+
+       if (!event) {
                /* Make a new event name */
                if (symbol)
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
@@ -690,31 +919,48 @@ static int trace_kprobe_create(int argc, const char *argv[])
                event = buf;
        }
 
+       argc -= 2; argv += 2;
+       ctx.funcname = symbol;
+       new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
+                                              abuf, MAX_BTF_ARGS_LEN, &ctx);
+       if (IS_ERR(new_argv)) {
+               ret = PTR_ERR(new_argv);
+               new_argv = NULL;
+               goto out;
+       }
+       if (new_argv) {
+               argc = new_argc;
+               argv = new_argv;
+       }
+
        /* setup a probe */
        tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
-                              argc - 2, is_return);
+                               argc, is_return);
        if (IS_ERR(tk)) {
                ret = PTR_ERR(tk);
                /* This must return -ENOMEM, else there is a bug */
                WARN_ON_ONCE(ret != -ENOMEM);
                goto out;       /* We know tk is not allocated */
        }
-       argc -= 2; argv += 2;
 
        /* parse arguments */
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
-               tmp = kstrdup(argv[i], GFP_KERNEL);
-               if (!tmp) {
-                       ret = -ENOMEM;
-                       goto error;
-               }
-
                trace_probe_log_set_index(i + 2);
-               ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
-               kfree(tmp);
+               ctx.offset = 0;
+               ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], &ctx);
                if (ret)
                        goto error;     /* This can be -ENOMEM */
        }
+       /* entry handler for kretprobe */
+       if (is_return && tk->tp.entry_arg) {
+               tk->rp.entry_handler = trace_kprobe_entry_handler;
+               tk->rp.data_size = traceprobe_get_entry_data_size(&tk->tp);
+       }
+
+       ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
+       ret = traceprobe_set_print_fmt(&tk->tp, ptype);
+       if (ret < 0)
+               goto error;
 
        ret = register_trace_kprobe(tk);
        if (ret) {
@@ -723,13 +969,15 @@ static int trace_kprobe_create(int argc, const char *argv[])
                        trace_probe_log_err(0, BAD_INSN_BNDRY);
                else if (ret == -ENOENT)
                        trace_probe_log_err(0, BAD_PROBE_ADDR);
-               else if (ret != -ENOMEM)
+               else if (ret != -ENOMEM && ret != -EEXIST)
                        trace_probe_log_err(0, FAIL_REG_PROBE);
                goto error;
        }
 
 out:
+       traceprobe_finish_parse(&ctx);
        trace_probe_log_clear();
+       kfree(new_argv);
        kfree(symbol);
        return ret;
 
@@ -740,17 +988,189 @@ error:
        goto out;
 }
 
-static int create_or_delete_trace_kprobe(int argc, char **argv)
+static int trace_kprobe_create(const char *raw_command)
+{
+       return trace_probe_create(raw_command, __trace_kprobe_create);
+}
+
+static int create_or_delete_trace_kprobe(const char *raw_command)
 {
        int ret;
 
-       if (argv[0][0] == '-')
-               return dyn_event_release(argc, argv, &trace_kprobe_ops);
+       if (raw_command[0] == '-')
+               return dyn_event_release(raw_command, &trace_kprobe_ops);
 
-       ret = trace_kprobe_create(argc, (const char **)argv);
+       ret = trace_kprobe_create(raw_command);
        return ret == -ECANCELED ? -EINVAL : ret;
 }
 
+static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
+{
+       return create_or_delete_trace_kprobe(cmd->seq.buffer);
+}
+
+/**
+ * kprobe_event_cmd_init - Initialize a kprobe event command object
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @buf: A pointer to the buffer used to build the command
+ * @maxlen: The length of the buffer passed in @buf
+ *
+ * Initialize a kprobe event command object.  Use this before
+ * calling any of the other kprobe_event functions.
+ */
+void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
+{
+       dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
+                         trace_kprobe_run_command);
+}
+EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
+
+/**
+ * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @kretprobe: Is this a return probe?
+ * @name: The name of the kprobe event
+ * @loc: The location of the kprobe event
+ * @...: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
+ * adds a NULL to the end of the arg list.  If this function is used
+ * directly, make sure the last arg in the variable arg list is NULL.
+ *
+ * Generate a kprobe event command to be executed by
+ * kprobe_event_gen_cmd_end().  This function can be used to generate the
+ * complete command or only the first part of it; in the latter case,
+ * kprobe_event_add_fields() can be used to add more fields following this.
+ *
+ * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
+ * returns -EINVAL if @loc == NULL.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
+                                const char *name, const char *loc, ...)
+{
+       char buf[MAX_EVENT_NAME_LEN];
+       struct dynevent_arg arg;
+       va_list args;
+       int ret;
+
+       if (cmd->type != DYNEVENT_TYPE_KPROBE)
+               return -EINVAL;
+
+       if (!loc)
+               return -EINVAL;
+
+       if (kretprobe)
+               snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
+       else
+               snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
+
+       ret = dynevent_str_add(cmd, buf);
+       if (ret)
+               return ret;
+
+       dynevent_arg_init(&arg, 0);
+       arg.str = loc;
+       ret = dynevent_arg_add(cmd, &arg, NULL);
+       if (ret)
+               return ret;
+
+       va_start(args, loc);
+       for (;;) {
+               const char *field;
+
+               field = va_arg(args, const char *);
+               if (!field)
+                       break;
+
+               if (++cmd->n_fields > MAX_TRACE_ARGS) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               arg.str = field;
+               ret = dynevent_arg_add(cmd, &arg, NULL);
+               if (ret)
+                       break;
+       }
+       va_end(args);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
+
+/**
+ * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @...: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the kprobe_event_add_fields() wrapper, which
+ * automatically adds a NULL to the end of the arg list.  If this
+ * function is used directly, make sure the last arg in the variable
+ * arg list is NULL.
+ *
+ * Add probe fields to an existing kprobe command using a variable
+ * list of args.  Fields are added in the same order they're listed.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
+{
+       struct dynevent_arg arg;
+       va_list args;
+       int ret = 0;
+
+       if (cmd->type != DYNEVENT_TYPE_KPROBE)
+               return -EINVAL;
+
+       dynevent_arg_init(&arg, 0);
+
+       va_start(args, cmd);
+       for (;;) {
+               const char *field;
+
+               field = va_arg(args, const char *);
+               if (!field)
+                       break;
+
+               if (++cmd->n_fields > MAX_TRACE_ARGS) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               arg.str = field;
+               ret = dynevent_arg_add(cmd, &arg, NULL);
+               if (ret)
+                       break;
+       }
+       va_end(args);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
+
+/**
+ * kprobe_event_delete - Delete a kprobe event
+ * @name: The name of the kprobe event to delete
+ *
+ * Delete a kprobe event with the given @name from kernel code rather
+ * than directly from the command line.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int kprobe_event_delete(const char *name)
+{
+       char buf[MAX_EVENT_NAME_LEN];
+
+       snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
+
+       return create_or_delete_trace_kprobe(buf);
+}
+EXPORT_SYMBOL_GPL(kprobe_event_delete);
+
 static int trace_kprobe_release(struct dyn_event *ev)
 {
        struct trace_kprobe *tk = to_trace_kprobe(ev);
@@ -767,8 +1187,10 @@ static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
        int i;
 
        seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
-       seq_printf(m, ":%s/%s", tk->tp.call.class->system,
-                       trace_event_name(&tk->tp.call));
+       if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
+               seq_printf(m, "%d", tk->rp.maxactive);
+       seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
+                               trace_probe_name(&tk->tp));
 
        if (!tk->symbol)
                seq_printf(m, " 0x%p", tk->rp.kp.addr);
@@ -806,6 +1228,10 @@ static int probes_open(struct inode *inode, struct file *file)
 {
        int ret;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&trace_kprobe_ops);
                if (ret < 0)
@@ -831,20 +1257,28 @@ static const struct file_operations kprobe_events_ops = {
        .write          = probes_write,
 };
 
+static unsigned long trace_kprobe_missed(struct trace_kprobe *tk)
+{
+       return trace_kprobe_is_return(tk) ?
+               tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
+}
+
 /* Probes profiling interfaces */
 static int probes_profile_seq_show(struct seq_file *m, void *v)
 {
        struct dyn_event *ev = v;
        struct trace_kprobe *tk;
+       unsigned long nmissed;
 
        if (!is_trace_kprobe(ev))
                return 0;
 
        tk = to_trace_kprobe(ev);
+       nmissed = trace_kprobe_missed(tk);
        seq_printf(m, "  %-44s %15lu %15lu\n",
-                  trace_event_name(&tk->tp.call),
+                  trace_probe_name(&tk->tp),
                   trace_kprobe_nhit(tk),
-                  tk->rp.kp.nmissed);
+                  nmissed);
 
        return 0;
 }
@@ -858,6 +1292,12 @@ static const struct seq_operations profile_seq_op = {
 
 static int profile_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        return seq_open(file, &profile_seq_op);
 }
 
@@ -869,59 +1309,14 @@ static const struct file_operations kprobe_profile_ops = {
        .release        = seq_release,
 };
 
-/* Kprobe specific fetch functions */
-
-/* Return the length of string -- including null terminal byte */
-static nokprobe_inline int
-fetch_store_strlen(unsigned long addr)
-{
-       int ret, len = 0;
-       u8 c;
-
-       do {
-               ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
-               len++;
-       } while (c && ret == 0 && len < MAX_STRING_SIZE);
-
-       return (ret < 0) ? ret : len;
-}
-
-/*
- * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
- * length and relative data location.
- */
-static nokprobe_inline int
-fetch_store_string(unsigned long addr, void *dest, void *base)
-{
-       int maxlen = get_loc_len(*(u32 *)dest);
-       u8 *dst = get_loc_data(dest, base);
-       long ret;
-
-       if (unlikely(!maxlen))
-               return -ENOMEM;
-       /*
-        * Try to get string again, since the string can be changed while
-        * probing.
-        */
-       ret = strncpy_from_unsafe(dst, (void *)addr, maxlen);
-
-       if (ret >= 0)
-               *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
-       return ret;
-}
-
-static nokprobe_inline int
-probe_mem_read(void *dest, void *src, size_t size)
-{
-       return probe_kernel_read(dest, src, size);
-}
-
 /* Note that we don't verify it, since the code does not come from user space */
 static int
-process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
-                  void *base)
+process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
+                  void *dest, void *base)
 {
+       struct pt_regs *regs = rec;
        unsigned long val;
+       int ret;
 
 retry:
        /* 1st stage: get value from context */
@@ -938,22 +1333,21 @@ retry:
        case FETCH_OP_RETVAL:
                val = regs_return_value(regs);
                break;
-       case FETCH_OP_IMM:
-               val = code->immediate;
-               break;
-       case FETCH_OP_COMM:
-               val = (unsigned long)current->comm;
-               break;
 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
        case FETCH_OP_ARG:
                val = regs_get_kernel_argument(regs, code->param);
                break;
+       case FETCH_OP_EDATA:
+               val = *(unsigned long *)((unsigned long)edata + code->offset);
+               break;
 #endif
        case FETCH_NOP_SYMBOL:  /* Ignore a place holder */
                code++;
                goto retry;
        default:
-               return -EILSEQ;
+               ret = process_common_fetch_insn(code, &val);
+               if (ret < 0)
+                       return ret;
        }
        code++;
 
@@ -967,35 +1361,27 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
                    struct trace_event_file *trace_file)
 {
        struct kprobe_trace_entry_head *entry;
-       struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
-       int size, dsize, pc;
-       unsigned long irq_flags;
-       struct trace_event_call *call = &tk->tp.call;
+       struct trace_event_call *call = trace_probe_event_call(&tk->tp);
+       struct trace_event_buffer fbuffer;
+       int dsize;
 
        WARN_ON(call != trace_file->event_call);
 
        if (trace_trigger_soft_disabled(trace_file))
                return;
 
-       local_save_flags(irq_flags);
-       pc = preempt_count();
-
-       dsize = __get_data_size(&tk->tp, regs);
-       size = sizeof(*entry) + tk->tp.size + dsize;
+       dsize = __get_data_size(&tk->tp, regs, NULL);
 
-       event = trace_event_buffer_lock_reserve(&buffer, trace_file,
-                                               call->event.type,
-                                               size, irq_flags, pc);
-       if (!event)
+       entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+                                          sizeof(*entry) + tk->tp.size + dsize);
+       if (!entry)
                return;
 
-       entry = ring_buffer_event_data(event);
+       fbuffer.regs = regs;
        entry->ip = (unsigned long)tk->rp.kp.addr;
-       store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
+       store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);
 
-       event_trigger_unlock_commit_regs(trace_file, buffer, event,
-                                        entry, irq_flags, pc, regs);
+       trace_event_buffer_commit(&fbuffer);
 }
 
 static void
@@ -1003,48 +1389,65 @@ kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
 {
        struct event_file_link *link;
 
-       list_for_each_entry_rcu(link, &tk->tp.files, list)
+       trace_probe_for_each_link_rcu(link, &tk->tp)
                __kprobe_trace_func(tk, regs, link->file);
 }
 NOKPROBE_SYMBOL(kprobe_trace_func);
 
 /* Kretprobe handler */
+
+static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
+                                     struct pt_regs *regs)
+{
+       struct kretprobe *rp = get_kretprobe(ri);
+       struct trace_kprobe *tk;
+
+       /*
+        * There is a small chance that get_kretprobe(ri) returns NULL when
+        * the kretprobe is unregistered on another CPU between kretprobe's
+        * trampoline_handler and this function.
+        */
+       if (unlikely(!rp))
+               return -ENOENT;
+
+       tk = container_of(rp, struct trace_kprobe, rp);
+
+       /* store argument values into ri->data as entry data */
+       if (tk->tp.entry_arg)
+               store_trace_entry_data(ri->data, &tk->tp, regs);
+
+       return 0;
+}
+
+
 static nokprobe_inline void
 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                       struct pt_regs *regs,
                       struct trace_event_file *trace_file)
 {
        struct kretprobe_trace_entry_head *entry;
-       struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
-       int size, pc, dsize;
-       unsigned long irq_flags;
-       struct trace_event_call *call = &tk->tp.call;
+       struct trace_event_buffer fbuffer;
+       struct trace_event_call *call = trace_probe_event_call(&tk->tp);
+       int dsize;
 
        WARN_ON(call != trace_file->event_call);
 
        if (trace_trigger_soft_disabled(trace_file))
                return;
 
-       local_save_flags(irq_flags);
-       pc = preempt_count();
-
-       dsize = __get_data_size(&tk->tp, regs);
-       size = sizeof(*entry) + tk->tp.size + dsize;
+       dsize = __get_data_size(&tk->tp, regs, ri->data);
 
-       event = trace_event_buffer_lock_reserve(&buffer, trace_file,
-                                               call->event.type,
-                                               size, irq_flags, pc);
-       if (!event)
+       entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+                                          sizeof(*entry) + tk->tp.size + dsize);
+       if (!entry)
                return;
 
-       entry = ring_buffer_event_data(event);
+       fbuffer.regs = regs;
        entry->func = (unsigned long)tk->rp.kp.addr;
-       entry->ret_ip = (unsigned long)ri->ret_addr;
-       store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
+       entry->ret_ip = get_kretprobe_retaddr(ri);
+       store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);
 
-       event_trigger_unlock_commit_regs(trace_file, buffer, event,
-                                        entry, irq_flags, pc, regs);
+       trace_event_buffer_commit(&fbuffer);
 }
 
 static void
@@ -1053,7 +1456,7 @@ kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 {
        struct event_file_link *link;
 
-       list_for_each_entry_rcu(link, &tk->tp.files, list)
+       trace_probe_for_each_link_rcu(link, &tk->tp)
                __kretprobe_trace_func(tk, ri, regs, link->file);
 }
 NOKPROBE_SYMBOL(kretprobe_trace_func);
@@ -1068,16 +1471,19 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
        struct trace_probe *tp;
 
        field = (struct kprobe_trace_entry_head *)iter->ent;
-       tp = container_of(event, struct trace_probe, call.event);
+       tp = trace_probe_primary_from_call(
+               container_of(event, struct trace_event_call, event));
+       if (WARN_ON_ONCE(!tp))
+               goto out;
 
-       trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
+       trace_seq_printf(s, "%s: (", trace_probe_name(tp));
 
        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;
 
        trace_seq_putc(s, ')');
 
-       if (print_probe_args(s, tp->args, tp->nr_args,
+       if (trace_probe_print_args(s, tp->args, tp->nr_args,
                             (u8 *)&field[1], field) < 0)
                goto out;
 
@@ -1095,9 +1501,12 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
        struct trace_probe *tp;
 
        field = (struct kretprobe_trace_entry_head *)iter->ent;
-       tp = container_of(event, struct trace_probe, call.event);
+       tp = trace_probe_primary_from_call(
+               container_of(event, struct trace_event_call, event));
+       if (WARN_ON_ONCE(!tp))
+               goto out;
 
-       trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
+       trace_seq_printf(s, "%s: (", trace_probe_name(tp));
 
        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;
@@ -1109,7 +1518,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
 
        trace_seq_putc(s, ')');
 
-       if (print_probe_args(s, tp->args, tp->nr_args,
+       if (trace_probe_print_args(s, tp->args, tp->nr_args,
                             (u8 *)&field[1], field) < 0)
                goto out;
 
@@ -1124,23 +1533,31 @@ static int kprobe_event_define_fields(struct trace_event_call *event_call)
 {
        int ret;
        struct kprobe_trace_entry_head field;
-       struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
+       struct trace_probe *tp;
+
+       tp = trace_probe_primary_from_call(event_call);
+       if (WARN_ON_ONCE(!tp))
+               return -ENOENT;
 
        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
 
-       return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
+       return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
 }
 
 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
 {
        int ret;
        struct kretprobe_trace_entry_head field;
-       struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
+       struct trace_probe *tp;
+
+       tp = trace_probe_primary_from_call(event_call);
+       if (WARN_ON_ONCE(!tp))
+               return -ENOENT;
 
        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
 
-       return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
+       return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
 }
 
 #ifdef CONFIG_PERF_EVENTS
@@ -1149,7 +1566,7 @@ static int kretprobe_event_define_fields(struct trace_event_call *event_call)
 static int
 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 {
-       struct trace_event_call *call = &tk->tp.call;
+       struct trace_event_call *call = trace_probe_event_call(&tk->tp);
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
@@ -1176,7 +1593,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
        if (hlist_empty(head))
                return 0;
 
-       dsize = __get_data_size(&tk->tp, regs);
+       dsize = __get_data_size(&tk->tp, regs, NULL);
        __size = sizeof(*entry) + tk->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
@@ -1187,7 +1604,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 
        entry->ip = (unsigned long)tk->rp.kp.addr;
        memset(&entry[1], 0, dsize);
-       store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
+       store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
        return 0;
@@ -1199,7 +1616,7 @@ static void
 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                    struct pt_regs *regs)
 {
-       struct trace_event_call *call = &tk->tp.call;
+       struct trace_event_call *call = trace_probe_event_call(&tk->tp);
        struct kretprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
@@ -1212,7 +1629,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
        if (hlist_empty(head))
                return;
 
-       dsize = __get_data_size(&tk->tp, regs);
+       dsize = __get_data_size(&tk->tp, regs, ri->data);
        __size = sizeof(*entry) + tk->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
@@ -1222,8 +1639,8 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                return;
 
        entry->func = (unsigned long)tk->rp.kp.addr;
-       entry->ret_ip = (unsigned long)ri->ret_addr;
-       store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
+       entry->ret_ip = get_kretprobe_retaddr(ri);
+       store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
 }
@@ -1231,7 +1648,8 @@ NOKPROBE_SYMBOL(kretprobe_perf_func);
 
 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
                        const char **symbol, u64 *probe_offset,
-                       u64 *probe_addr, bool perf_type_tracepoint)
+                       u64 *probe_addr, unsigned long *missed,
+                       bool perf_type_tracepoint)
 {
        const char *pevent = trace_event_name(event->tp_event);
        const char *group = event->tp_event->class->system;
@@ -1240,21 +1658,18 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
        if (perf_type_tracepoint)
                tk = find_trace_kprobe(pevent, group);
        else
-               tk = event->tp_event->data;
+               tk = trace_kprobe_primary_from_call(event->tp_event);
        if (!tk)
                return -EINVAL;
 
        *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
                                              : BPF_FD_TYPE_KPROBE;
-       if (tk->symbol) {
-               *symbol = tk->symbol;
-               *probe_offset = tk->rp.kp.offset;
-               *probe_addr = 0;
-       } else {
-               *symbol = NULL;
-               *probe_offset = 0;
-               *probe_addr = (unsigned long)tk->rp.kp.addr;
-       }
+       *probe_offset = tk->rp.kp.offset;
+       *probe_addr = kallsyms_show_value(current_cred()) ?
+                     (unsigned long)tk->rp.kp.addr : 0;
+       *symbol = tk->symbol;
+       if (missed)
+               *missed = trace_kprobe_missed(tk);
        return 0;
 }
 #endif /* CONFIG_PERF_EVENTS */
@@ -1268,20 +1683,19 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
 static int kprobe_register(struct trace_event_call *event,
                           enum trace_reg type, void *data)
 {
-       struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
        struct trace_event_file *file = data;
 
        switch (type) {
        case TRACE_REG_REGISTER:
-               return enable_trace_kprobe(tk, file);
+               return enable_trace_kprobe(event, file);
        case TRACE_REG_UNREGISTER:
-               return disable_trace_kprobe(tk, file);
+               return disable_trace_kprobe(event, file);
 
 #ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
-               return enable_trace_kprobe(tk, NULL);
+               return enable_trace_kprobe(event, NULL);
        case TRACE_REG_PERF_UNREGISTER:
-               return disable_trace_kprobe(tk, NULL);
+               return disable_trace_kprobe(event, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
@@ -1299,10 +1713,10 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 
        raw_cpu_inc(*tk->nhit);
 
-       if (tk->tp.flags & TP_FLAG_TRACE)
+       if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
                kprobe_trace_func(tk, regs);
 #ifdef CONFIG_PERF_EVENTS
-       if (tk->tp.flags & TP_FLAG_PROFILE)
+       if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
                ret = kprobe_perf_func(tk, regs);
 #endif
        return ret;
@@ -1312,17 +1726,27 @@ NOKPROBE_SYMBOL(kprobe_dispatcher);
 static int
 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
-       struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
+       struct kretprobe *rp = get_kretprobe(ri);
+       struct trace_kprobe *tk;
+
+       /*
+        * There is a small chance that get_kretprobe(ri) returns NULL when
+        * the kretprobe is unregistered on another CPU between kretprobe's
+        * trampoline_handler and this function.
+        */
+       if (unlikely(!rp))
+               return 0;
 
+       tk = container_of(rp, struct trace_kprobe, rp);
        raw_cpu_inc(*tk->nhit);
 
-       if (tk->tp.flags & TP_FLAG_TRACE)
+       if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
                kretprobe_trace_func(tk, ri, regs);
 #ifdef CONFIG_PERF_EVENTS
-       if (tk->tp.flags & TP_FLAG_PROFILE)
+       if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
                kretprobe_perf_func(tk, ri, regs);
 #endif
-       return 0;       /* We don't tweek kernel, so just return 0 */
+       return 0;       /* We don't tweak kernel, so just return 0 */
 }
 NOKPROBE_SYMBOL(kretprobe_dispatcher);
 
@@ -1334,68 +1758,76 @@ static struct trace_event_functions kprobe_funcs = {
        .trace          = print_kprobe_event
 };
 
-static inline void init_trace_event_call(struct trace_kprobe *tk,
-                                        struct trace_event_call *call)
+static struct trace_event_fields kretprobe_fields_array[] = {
+       { .type = TRACE_FUNCTION_TYPE,
+         .define_fields = kretprobe_event_define_fields },
+       {}
+};
+
+static struct trace_event_fields kprobe_fields_array[] = {
+       { .type = TRACE_FUNCTION_TYPE,
+         .define_fields = kprobe_event_define_fields },
+       {}
+};
+
+static inline void init_trace_event_call(struct trace_kprobe *tk)
 {
-       INIT_LIST_HEAD(&call->class->fields);
+       struct trace_event_call *call = trace_probe_event_call(&tk->tp);
+
        if (trace_kprobe_is_return(tk)) {
                call->event.funcs = &kretprobe_funcs;
-               call->class->define_fields = kretprobe_event_define_fields;
+               call->class->fields_array = kretprobe_fields_array;
        } else {
                call->event.funcs = &kprobe_funcs;
-               call->class->define_fields = kprobe_event_define_fields;
+               call->class->fields_array = kprobe_fields_array;
        }
 
        call->flags = TRACE_EVENT_FL_KPROBE;
        call->class->reg = kprobe_register;
-       call->data = tk;
 }
 
 static int register_kprobe_event(struct trace_kprobe *tk)
 {
-       struct trace_event_call *call = &tk->tp.call;
-       int ret = 0;
+       init_trace_event_call(tk);
 
-       init_trace_event_call(tk, call);
-
-       if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
-               return -ENOMEM;
-       ret = register_trace_event(&call->event);
-       if (!ret) {
-               kfree(call->print_fmt);
-               return -ENODEV;
-       }
-       ret = trace_add_event_call(call);
-       if (ret) {
-               pr_info("Failed to register kprobe event: %s\n",
-                       trace_event_name(call));
-               kfree(call->print_fmt);
-               unregister_trace_event(&call->event);
-       }
-       return ret;
+       return trace_probe_register_event_call(&tk->tp);
 }
 
 static int unregister_kprobe_event(struct trace_kprobe *tk)
 {
-       int ret;
-
-       /* tp->event is unregistered in trace_remove_event_call() */
-       ret = trace_remove_event_call(&tk->tp.call);
-       if (!ret)
-               kfree(tk->tp.call.print_fmt);
-       return ret;
+       return trace_probe_unregister_event_call(&tk->tp);
 }
 
 #ifdef CONFIG_PERF_EVENTS
+
 /* create a trace_kprobe, but don't add it to global lists */
 struct trace_event_call *
 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
                          bool is_return)
 {
+       enum probe_print_type ptype;
        struct trace_kprobe *tk;
        int ret;
        char *event;
 
+       if (func) {
+               unsigned int count;
+
+               count = number_of_same_symbols(func);
+               if (count > 1)
+                       /*
+                        * Users should use addr to remove the ambiguity of
+                        * using func only.
+                        */
+                       return ERR_PTR(-EADDRNOTAVAIL);
+               else if (count == 0)
+                       /*
+                        * We can return ENOENT earlier than when register the
+                        * kprobe.
+                        */
+                       return ERR_PTR(-ENOENT);
+       }
+
        /*
         * local trace_kprobes are not added to dyn_event, so they are never
         * searched in find_trace_kprobe(). Therefore, there is no concern of
@@ -1413,20 +1845,20 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
                return ERR_CAST(tk);
        }
 
-       init_trace_event_call(tk, &tk->tp.call);
+       init_trace_event_call(tk);
 
-       if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
+       ptype = trace_kprobe_is_return(tk) ?
+               PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
+       if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
                ret = -ENOMEM;
                goto error;
        }
 
        ret = __register_trace_kprobe(tk);
-       if (ret < 0) {
-               kfree(tk->tp.call.print_fmt);
+       if (ret < 0)
                goto error;
-       }
 
-       return &tk->tp.call;
+       return trace_probe_event_call(&tk->tp);
 error:
        free_trace_kprobe(tk);
        return ERR_PTR(ret);
@@ -1436,7 +1868,9 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call)
 {
        struct trace_kprobe *tk;
 
-       tk = container_of(event_call, struct trace_kprobe, tp.call);
+       tk = trace_kprobe_primary_from_call(event_call);
+       if (unlikely(!tk))
+               return;
 
        if (trace_probe_is_enabled(&tk->tp)) {
                WARN_ON(1);
@@ -1445,16 +1879,54 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call)
 
        __unregister_trace_kprobe(tk);
 
-       kfree(tk->tp.call.print_fmt);
        free_trace_kprobe(tk);
 }
 #endif /* CONFIG_PERF_EVENTS */
 
-/* Make a tracefs interface for controlling probe points */
-static __init int init_kprobe_trace(void)
+static __init void enable_boot_kprobe_events(void)
+{
+       struct trace_array *tr = top_trace_array();
+       struct trace_event_file *file;
+       struct trace_kprobe *tk;
+       struct dyn_event *pos;
+
+       mutex_lock(&event_mutex);
+       for_each_trace_kprobe(tk, pos) {
+               list_for_each_entry(file, &tr->events, list)
+                       if (file->event_call == trace_probe_event_call(&tk->tp))
+                               trace_event_enable_disable(file, 1, 0);
+       }
+       mutex_unlock(&event_mutex);
+}
+
+static __init void setup_boot_kprobe_events(void)
+{
+       char *p, *cmd = kprobe_boot_events_buf;
+       int ret;
+
+       strreplace(kprobe_boot_events_buf, ',', ' ');
+
+       while (cmd && *cmd != '\0') {
+               p = strchr(cmd, ';');
+               if (p)
+                       *p++ = '\0';
+
+               ret = create_or_delete_trace_kprobe(cmd);
+               if (ret)
+                       pr_warn("Failed to add event(%d): %s\n", ret, cmd);
+
+               cmd = p;
+       }
+
+       enable_boot_kprobe_events();
+}
+
+/*
+ * Register dynevent at core_initcall. This allows kernel to setup kprobe
+ * events in postcore_initcall without tracefs.
+ */
+static __init int init_kprobe_trace_early(void)
 {
-       struct dentry *d_tracer;
-       struct dentry *entry;
        int ret;
 
        ret = dyn_event_register(&trace_kprobe_ops);
@@ -1464,23 +1936,29 @@ static __init int init_kprobe_trace(void)
        if (register_module_notifier(&trace_kprobe_module_nb))
                return -EINVAL;
 
-       d_tracer = tracing_init_dentry();
-       if (IS_ERR(d_tracer))
-               return 0;
+       return 0;
+}
+core_initcall(init_kprobe_trace_early);
 
-       entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
-                                   NULL, &kprobe_events_ops);
+/* Make a tracefs interface for controlling probe points */
+static __init int init_kprobe_trace(void)
+{
+       int ret;
+
+       ret = tracing_init_dentry();
+       if (ret)
+               return 0;
 
        /* Event list interface */
-       if (!entry)
-               pr_warn("Could not create tracefs 'kprobe_events' entry\n");
+       trace_create_file("kprobe_events", TRACE_MODE_WRITE,
+                         NULL, NULL, &kprobe_events_ops);
 
        /* Profile interface */
-       entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
-                                   NULL, &kprobe_profile_ops);
+       trace_create_file("kprobe_profile", TRACE_MODE_READ,
+                         NULL, NULL, &kprobe_profile_ops);
+
+       setup_boot_kprobe_events();
 
-       if (!entry)
-               pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
        return 0;
 }
 fs_initcall(init_kprobe_trace);
@@ -1493,7 +1971,7 @@ find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
        struct trace_event_file *file;
 
        list_for_each_entry(file, &tr->events, list)
-               if (file->event_call == &tk->tp.call)
+               if (file->event_call == trace_probe_event_call(&tk->tp))
                        return file;
 
        return NULL;
@@ -1513,12 +1991,14 @@ static __init int kprobe_trace_self_tests_init(void)
        if (tracing_is_disabled())
                return -ENODEV;
 
+       if (tracing_selftest_disabled)
+               return 0;
+
        target = kprobe_trace_selftest_target;
 
        pr_info("Testing kprobe tracing: ");
 
-       ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
-                               create_or_delete_trace_kprobe);
+       ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function entry.\n");
                warn++;
@@ -1534,12 +2014,12 @@ static __init int kprobe_trace_self_tests_init(void)
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
-                               enable_trace_kprobe(tk, file);
+                               enable_trace_kprobe(
+                                       trace_probe_event_call(&tk->tp), file);
                }
        }
 
-       ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
-                               create_or_delete_trace_kprobe);
+       ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function return.\n");
                warn++;
@@ -1555,7 +2035,8 @@ static __init int kprobe_trace_self_tests_init(void)
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
-                               enable_trace_kprobe(tk, file);
+                               enable_trace_kprobe(
+                                       trace_probe_event_call(&tk->tp), file);
                }
        }
 
@@ -1588,7 +2069,8 @@ static __init int kprobe_trace_self_tests_init(void)
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
-                       disable_trace_kprobe(tk, file);
+                       disable_trace_kprobe(
+                               trace_probe_event_call(&tk->tp), file);
        }
 
        tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
@@ -1606,16 +2088,17 @@ static __init int kprobe_trace_self_tests_init(void)
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
-                       disable_trace_kprobe(tk, file);
+                       disable_trace_kprobe(
+                               trace_probe_event_call(&tk->tp), file);
        }
 
-       ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
+       ret = create_or_delete_trace_kprobe("-:testprobe");
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }
 
-       ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
+       ret = create_or_delete_trace_kprobe("-:testprobe2");
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;