tracing: Remove mock up poll wait function
author Steven Rostedt (Red Hat) <rostedt@goodmis.org>
Tue, 29 Apr 2014 21:54:37 +0000 (17:54 -0400)
committer Steven Rostedt <rostedt@goodmis.org>
Wed, 30 Apr 2014 12:40:05 +0000 (08:40 -0400)
The ring buffer now has a built-in way to wake up readers when there's
data, using irq_work so that the wake up is safe to do from any
context. But the trace_pipe readers were still using the old "poor
man's" wait polling that checks every 1/10 of a second to see if it
should wake up a waiter. This makes the wake up latency excruciatingly
long. There's no need to do that anymore.

Completely remove the different wait_pipe callbacks from the tracers
and have them all use the default wait_on_pipe() now.

Reported-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
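
As background, the wakeup the ring buffer now relies on follows the
usual irq_work pattern: the producer queues an irq_work from whatever
context it happens to be in, the irq_work callback later runs in a
context where wake_up() is safe, and the reader sleeps on a waitqueue
instead of polling. Below is a minimal sketch of that pattern only; the
demo_* names are made up for illustration and this is not the ring
buffer's actual implementation.

    #include <linux/module.h>
    #include <linux/irq_work.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_waiters);
    static bool demo_data_ready;
    static struct irq_work demo_wake_work;

    /* irq_work callback: runs in a context where wake_up() is safe. */
    static void demo_wake_readers(struct irq_work *work)
    {
            wake_up_all(&demo_waiters);
    }

    /* Producer side: callable from any context, even under the rq lock. */
    static void __maybe_unused demo_notify_data(void)
    {
            demo_data_ready = true;
            irq_work_queue(&demo_wake_work);
    }

    /* Reader side: sleep until woken instead of polling every HZ/10. */
    static int __maybe_unused demo_wait_for_data(void)
    {
            return wait_event_interruptible(demo_waiters, demo_data_ready);
    }

    static int __init demo_init(void)
    {
            init_irq_work(&demo_wake_work, demo_wake_readers);
            return 0;
    }
    module_init(demo_init);
    MODULE_LICENSE("GPL");
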
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_functions.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_nop.c
kernel/trace/trace_sched_wakeup.c

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e058c6091e4543f737c1645ff63a2f473d6e098e..4c392c8238bf9592bce12114dd794ab27bf83ffa 100644
@@ -1085,7 +1085,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static void default_wait_pipe(struct trace_iterator *iter)
+static void wait_on_pipe(struct trace_iterator *iter)
 {
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
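
For reference, only the function name changes in this hunk; the rest of
the helper's body (outside the hunk's trailing context) is not touched
by the patch. Reconstructed roughly here for context, and not part of
the diff itself, it presumably just hands off to the ring buffer's own
irq_work based wait:

    static void wait_on_pipe(struct trace_iterator *iter)
    {
            /* Iterators are static, they should be filled or empty */
            if (trace_buffer_iter(iter, iter->cpu_file))
                    return;

            ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
    }
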
@@ -1202,8 +1202,6 @@ int register_tracer(struct tracer *type)
        else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;
-       if (!type->wait_pipe)
-               type->wait_pipe = default_wait_pipe;
 
        ret = run_tracer_selftest(type);
        if (ret < 0)
@@ -4207,25 +4205,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
        return trace_poll(iter, filp, poll_table);
 }
 
-/*
- * This is a make-shift waitqueue.
- * A tracer might use this callback on some rare cases:
- *
- *  1) the current tracer might hold the runqueue lock when it wakes up
- *     a reader, hence a deadlock (sched, function, and function graph tracers)
- *  2) the function tracers, trace all functions, we don't want
- *     the overhead of calling wake_up and friends
- *     (and tracing them too)
- *
- *     Anyway, this is really very primitive wakeup.
- */
-void poll_wait_pipe(struct trace_iterator *iter)
-{
-       set_current_state(TASK_INTERRUPTIBLE);
-       /* sleep for 100 msecs, and try again. */
-       schedule_timeout(HZ / 10);
-}
-
 /* Must be called with trace_types_lock mutex held. */
 static int tracing_wait_pipe(struct file *filp)
 {
@@ -4251,7 +4230,7 @@ static int tracing_wait_pipe(struct file *filp)
 
                mutex_unlock(&iter->mutex);
 
-               iter->trace->wait_pipe(iter);
+               wait_on_pipe(iter);
 
                mutex_lock(&iter->mutex);
 
@@ -5179,7 +5158,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
                                goto out_unlock;
                        }
                        mutex_unlock(&trace_types_lock);
-                       iter->trace->wait_pipe(iter);
+                       wait_on_pipe(iter);
                        mutex_lock(&trace_types_lock);
                        if (signal_pending(current)) {
                                size = -EINTR;
@@ -5390,7 +5369,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                        goto out;
                }
                mutex_unlock(&trace_types_lock);
-               iter->trace->wait_pipe(iter);
+               wait_on_pipe(iter);
                mutex_lock(&trace_types_lock);
                if (signal_pending(current)) {
                        ret = -EINTR;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8624b50414660cc2f43f1f83f0afdb699c8f5c9a..3b3e09e61f338dba18492ea1875005df35b40d58 100644
@@ -338,7 +338,6 @@ struct tracer_flags {
  * @stop: called when tracing is paused (echo 0 > tracing_enabled)
  * @open: called when the trace file is opened
  * @pipe_open: called when the trace_pipe file is opened
- * @wait_pipe: override how the user waits for traces on trace_pipe
  * @close: called when the trace file is released
  * @pipe_close: called when the trace_pipe file is released
  * @read: override the default read callback on trace_pipe
@@ -357,7 +356,6 @@ struct tracer {
        void                    (*stop)(struct trace_array *tr);
        void                    (*open)(struct trace_iterator *iter);
        void                    (*pipe_open)(struct trace_iterator *iter);
-       void                    (*wait_pipe)(struct trace_iterator *iter);
        void                    (*close)(struct trace_iterator *iter);
        void                    (*pipe_close)(struct trace_iterator *iter);
        ssize_t                 (*read)(struct trace_iterator *iter,
@@ -566,8 +564,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
-void poll_wait_pipe(struct trace_iterator *iter);
-
 void tracing_sched_switch_trace(struct trace_array *tr,
                                struct task_struct *prev,
                                struct task_struct *next,
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 2d9482b8f26a20d8f1926d83956fbe1302c7af35..57f0ec962d2caee0ebaf3702253e75575fde4ac2 100644
@@ -252,7 +252,6 @@ static struct tracer function_trace __tracer_data =
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
-       .wait_pipe      = poll_wait_pipe,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
        .allow_instances = true,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index deff11200261b52d888cd614126b4d4b4eba1de7..b86dd4d8c6a67e3e4dc4fafbdf71a4d2ea4bc3f1 100644
@@ -1505,7 +1505,6 @@ static struct tracer graph_trace __tracer_data = {
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
-       .wait_pipe      = poll_wait_pipe,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 69a5cc94c01a361f3a702f651a1efebc91903b30..fcf0a9e489162054cb2cf2c5f85b2eeeb35561bb 100644
@@ -91,7 +91,6 @@ struct tracer nop_trace __read_mostly =
        .name           = "nop",
        .init           = nop_trace_init,
        .reset          = nop_trace_reset,
-       .wait_pipe      = poll_wait_pipe,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_nop,
 #endif
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 1573c03640d23a101ff519692c90d468cdc7fce1..19bd8928ce944da6232c1f63708d723a2563020f 100644
@@ -705,7 +705,6 @@ static struct tracer wakeup_rt_tracer __read_mostly =
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
-       .wait_pipe      = poll_wait_pipe,
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
@@ -728,7 +727,6 @@ static struct tracer wakeup_dl_tracer __read_mostly =
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
-       .wait_pipe      = poll_wait_pipe,
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,