diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 69ee8ef12cee372f4f44fca053d2cc5b67182d0a..e9c5058a8efd8c7e96114df839192f2fd7046e28 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -23,7 +23,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
        return 0;
 }
 
-static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
+static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
 {
        struct ring_buffer_event *event;
        struct trace_entry *entry;
@@ -60,7 +60,7 @@ static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
  * Test the trace buffer to see if all the elements
  * are still sane.
  */
-static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
+static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
 {
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;
@@ -107,7 +107,7 @@ static int trace_selftest_test_probe1_cnt;
 static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
-                                           struct pt_regs *pt_regs)
+                                           struct ftrace_regs *fregs)
 {
        trace_selftest_test_probe1_cnt++;
 }
@@ -116,7 +116,7 @@ static int trace_selftest_test_probe2_cnt;
 static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
-                                           struct pt_regs *pt_regs)
+                                           struct ftrace_regs *fregs)
 {
        trace_selftest_test_probe2_cnt++;
 }
@@ -125,7 +125,7 @@ static int trace_selftest_test_probe3_cnt;
 static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
-                                           struct pt_regs *pt_regs)
+                                           struct ftrace_regs *fregs)
 {
        trace_selftest_test_probe3_cnt++;
 }
@@ -134,7 +134,7 @@ static int trace_selftest_test_global_cnt;
 static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
-                                           struct pt_regs *pt_regs)
+                                           struct ftrace_regs *fregs)
 {
        trace_selftest_test_global_cnt++;
 }
@@ -143,24 +143,21 @@ static int trace_selftest_test_dyn_cnt;
 static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip,
                                         struct ftrace_ops *op,
-                                        struct pt_regs *pt_regs)
+                                        struct ftrace_regs *fregs)
 {
        trace_selftest_test_dyn_cnt++;
 }
 
 static struct ftrace_ops test_probe1 = {
        .func                   = trace_selftest_test_probe1_func,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops test_probe2 = {
        .func                   = trace_selftest_test_probe2_func,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops test_probe3 = {
        .func                   = trace_selftest_test_probe3_func,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static void print_counts(void)
@@ -290,6 +287,40 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;
 
+       /* Remove trace function from probe 3 */
+       func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
+       len1 = strlen(func1_name);
+
+       ftrace_set_filter(&test_probe3, func1_name, len1, 0);
+
+       DYN_FTRACE_TEST_NAME();
+
+       print_counts();
+
+       if (trace_selftest_test_probe1_cnt != 3)
+               goto out_free;
+       if (trace_selftest_test_probe2_cnt != 2)
+               goto out_free;
+       if (trace_selftest_test_probe3_cnt != 4)
+               goto out_free;
+       if (cnt > 1) {
+               if (trace_selftest_test_global_cnt == 0)
+                       goto out_free;
+       }
+       if (trace_selftest_test_dyn_cnt == 0)
+               goto out_free;
+
+       DYN_FTRACE_TEST_NAME2();
+
+       print_counts();
+
+       if (trace_selftest_test_probe1_cnt != 3)
+               goto out_free;
+       if (trace_selftest_test_probe2_cnt != 3)
+               goto out_free;
+       if (trace_selftest_test_probe3_cnt != 5)
+               goto out_free;
+
        ret = 0;
  out_free:
        unregister_ftrace_function(dyn_ops);
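[Editorial note] The "!" prefix used in the new checks above is ftrace_set_filter()'s notch syntax: filter patterns are globs, a leading '!' removes matching functions from the ops' filter hash instead of adding them, and a non-zero reset argument clears the existing filter first. A minimal sketch of the same idiom outside the selftest; the ops, callback, setup function, and pattern names (my_ops, my_callback, my_filter_setup, "sched*", "schedule_idle") are only illustrative:

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/string.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Runs for every function left in the ops' filter. */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
};

static int __init my_filter_setup(void)
{
	char *pat = "sched*";

	/* Start by tracing everything that matches "sched*"... */
	ftrace_set_filter(&my_ops, pat, strlen(pat), 1);

	/* ...then notch one function back out, keeping the rest of the filter. */
	pat = "!schedule_idle";
	ftrace_set_filter(&my_ops, pat, strlen(pat), 0);

	return register_ftrace_function(&my_ops);
}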
@@ -362,7 +393,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
        msleep(100);
 
        /* we should have nothing in the buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
+       ret = trace_test_buffer(&tr->array_buffer, &count);
        if (ret)
                goto out;
 
@@ -383,7 +414,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
        ftrace_enabled = 0;
 
        /* check the trace buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
+       ret = trace_test_buffer(&tr->array_buffer, &count);
 
        ftrace_enabled = 1;
        tracing_start();
@@ -417,7 +448,7 @@ static int trace_selftest_recursion_cnt;
 static void trace_selftest_test_recursion_func(unsigned long ip,
                                               unsigned long pip,
                                               struct ftrace_ops *op,
-                                              struct pt_regs *pt_regs)
+                                              struct ftrace_regs *fregs)
 {
        /*
         * This function is registered without the recursion safe flag.
@@ -432,7 +463,7 @@ static void trace_selftest_test_recursion_func(unsigned long ip,
 static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                                                    unsigned long pip,
                                                    struct ftrace_ops *op,
-                                                   struct pt_regs *pt_regs)
+                                                   struct ftrace_regs *fregs)
 {
        /*
         * We said we would provide our own recursion. By calling
@@ -448,11 +479,11 @@ static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 
 static struct ftrace_ops test_rec_probe = {
        .func                   = trace_selftest_test_recursion_func,
+       .flags                  = FTRACE_OPS_FL_RECURSION,
 };
 
 static struct ftrace_ops test_recsafe_probe = {
        .func                   = trace_selftest_test_recursion_safe_func,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static int
@@ -492,8 +523,13 @@ trace_selftest_function_recursion(void)
        unregister_ftrace_function(&test_rec_probe);
 
        ret = -1;
-       if (trace_selftest_recursion_cnt != 1) {
-               pr_cont("*callback not called once (%d)* ",
+       /*
+        * Recursion allows for transitions between context,
+        * and may call the callback twice.
+        */
+       if (trace_selftest_recursion_cnt != 1 &&
+           trace_selftest_recursion_cnt != 2) {
+               pr_cont("*callback not called once (or twice) (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }
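[Editorial note] The relaxed count check above matches the recursion rework this hunk belongs to: ops registered with FTRACE_OPS_FL_RECURSION get recursion protection from the ftrace core, whose per-context bookkeeping can legitimately let the callback run once more after a context transition (hence "once or twice"), while ops without the flag are expected to guard themselves. A minimal sketch of a self-guarding callback using the ftrace_test_recursion_trylock()/ftrace_test_recursion_unlock() helpers; the callback and ops names are only illustrative:

#include <linux/ftrace.h>
#include <linux/trace_recursion.h>

static void careful_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	/* Bail out if we are already inside this callback in this context. */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	/* ... work that might itself hit traced functions ... */

	ftrace_test_recursion_unlock(bit);
}

/* No FTRACE_OPS_FL_RECURSION: the callback provides its own protection. */
static struct ftrace_ops careful_ops = {
	.func	= careful_callback,
};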
@@ -546,9 +582,11 @@ static enum {
 static void trace_selftest_test_regs_func(unsigned long ip,
                                          unsigned long pip,
                                          struct ftrace_ops *op,
-                                         struct pt_regs *pt_regs)
+                                         struct ftrace_regs *fregs)
 {
-       if (pt_regs)
+       struct pt_regs *regs = ftrace_get_regs(fregs);
+
+       if (regs)
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
        else
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
@@ -556,7 +594,7 @@ static void trace_selftest_test_regs_func(unsigned long ip,
 
 static struct ftrace_ops test_regs_probe = {
        .func           = trace_selftest_test_regs_func,
-       .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
+       .flags          = FTRACE_OPS_FL_SAVE_REGS,
 };
 
 static int
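[Editorial note] The signature conversions throughout this file follow the pattern shown in the hunk above: callbacks now take struct ftrace_regs and call ftrace_get_regs() to obtain full pt_regs, which is only non-NULL when the ops sets FTRACE_OPS_FL_SAVE_REGS and the architecture actually saved the complete register set. A minimal sketch of a callback in the new form; the callback and ops names are only illustrative:

#include <linux/ftrace.h>
#include <linux/printk.h>

static void regs_demo_callback(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	/* Full registers are only available with FTRACE_OPS_FL_SAVE_REGS. */
	if (regs)
		pr_info("traced %pS with full pt_regs\n", (void *)ip);
}

static struct ftrace_ops regs_demo_ops = {
	.func	= regs_demo_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};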
@@ -682,7 +720,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
        ftrace_enabled = 0;
 
        /* check the trace buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
+       ret = trace_test_buffer(&tr->array_buffer, &count);
 
        ftrace_enabled = 1;
        trace->reset(tr);
@@ -730,7 +768,7 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
-               if (ftrace_dump_on_oops) {
+               if (ftrace_dump_on_oops_enabled()) {
                        ftrace_dump(DUMP_ALL);
                        /* ftrace_dump() disables tracing */
                        tracing_on();
@@ -746,6 +784,10 @@ static struct fgraph_ops fgraph_ops __initdata  = {
        .retfunc                = &trace_graph_return,
 };
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+static struct ftrace_ops direct;
+#endif
+
 /*
  * Pretty much the same than for the function tracer from which the selftest
  * has been borrowed.
@@ -756,6 +798,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 {
        int ret;
        unsigned long count;
+       char *func_name __maybe_unused;
 
 #ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
@@ -768,7 +811,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
-       tracing_reset_online_cpus(&tr->trace_buffer);
+       tracing_reset_online_cpus(&tr->array_buffer);
        set_graph_array(tr);
        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
@@ -782,7 +825,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 
        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
-               tracing_selftest_disabled = true;
+               disable_tracing_selftest("recovering from a hang");
                ret = -1;
                goto out;
        }
@@ -790,7 +833,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
        tracing_stop();
 
        /* check the trace buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
+       ret = trace_test_buffer(&tr->array_buffer, &count);
 
        /* Need to also simulate the tr->reset to remove this fgraph_ops */
        tracing_stop_cmdline_record();
@@ -804,8 +847,72 @@ trace_selftest_startup_function_graph(struct tracer *trace,
                goto out;
        }
 
-       /* Don't test dynamic tracing, the function tracer already did */
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+       /*
+        * These tests can take some time to run. Make sure on non PREEMPT
+        * kernels, we do not trigger the softlockup detector.
+        */
+       cond_resched();
+
+       tracing_reset_online_cpus(&tr->array_buffer);
+       set_graph_array(tr);
 
+       /*
+        * Some archs *cough*PowerPC*cough* add characters to the
+        * start of the function names. We simply put a '*' to
+        * accommodate them.
+        */
+       func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
+       ftrace_set_global_filter(func_name, strlen(func_name), 1);
+
+       /*
+        * Register direct function together with graph tracer
+        * and make sure we get graph trace.
+        */
+       ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
+       ret = register_ftrace_direct(&direct,
+                                    (unsigned long)ftrace_stub_direct_tramp);
+       if (ret)
+               goto out;
+
+       cond_resched();
+
+       ret = register_ftrace_graph(&fgraph_ops);
+       if (ret) {
+               warn_failed_init_tracer(trace, ret);
+               goto out;
+       }
+
+       DYN_FTRACE_TEST_NAME();
+
+       count = 0;
+
+       tracing_stop();
+       /* check the trace buffer */
+       ret = trace_test_buffer(&tr->array_buffer, &count);
+
+       unregister_ftrace_graph(&fgraph_ops);
+
+       ret = unregister_ftrace_direct(&direct,
+                                      (unsigned long)ftrace_stub_direct_tramp,
+                                      true);
+       if (ret)
+               goto out;
+
+       cond_resched();
+
+       tracing_start();
+
+       if (!ret && !count) {
+               ret = -1;
+               goto out;
+       }
+
+       /* Enable tracing on all functions again */
+       ftrace_set_global_filter(NULL, 0, 1);
+#endif
+
+       /* Don't test dynamic tracing, the function tracer already did */
 out:
        /* Stop it if we failed */
        if (ret)
@@ -848,7 +955,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(&tr->trace_buffer, NULL);
+       ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
@@ -874,7 +981,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
        int ret;
 
        /*
-        * Now that the big kernel lock is no longer preemptable,
+        * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
@@ -910,7 +1017,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(&tr->trace_buffer, NULL);
+       ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
@@ -936,7 +1043,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        int ret;
 
        /*
-        * Now that the big kernel lock is no longer preemptable,
+        * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
@@ -976,7 +1083,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(&tr->trace_buffer, NULL);
+       ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;
 
@@ -1006,7 +1113,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(&tr->trace_buffer, NULL);
+       ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;
 
@@ -1136,7 +1243,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
-       ret = trace_test_buffer(&tr->trace_buffer, NULL);
+       ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
 
@@ -1177,7 +1284,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
+       ret = trace_test_buffer(&tr->array_buffer, &count);
        trace->reset(tr);
        tracing_start();