ftrace,kdb: Extend kdb to be able to dump the ftrace buffer
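Make the pieces of the trace iterator machinery that a debugger needs
available outside trace.c: tracing_buffer_mask loses its static
qualifier (and the for_each_tracing_cpu() definition moves out of this
file with it), find_next_entry_inc() becomes trace_find_next_entry_inc(),
tracing_iter_reset(), print_trace_line() and trace_printk_seq() are no
longer static, and a new trace_init_global_iter() seeds an iterator
with the global trace array.  The diff also carries the conversion of
the splice paths from fixed PIPE_BUFFERS arrays to descriptors grown
and shrunk with splice_grow_spd()/splice_shrink_spd().

The snippet below is only a sketch of the dump loop these exports make
possible.  It is modelled on __ftrace_dump() rather than on the real
kdb "ftdump" command, example_dump_loop() is an invented name, and a
debugger-side consumer would set up non-consuming ring buffer iterators
instead of relying on trace_consume():

        /* Illustrative only -- not the actual kdb code. */
        static void example_dump_loop(void)
        {
                static struct trace_iterator iter;
                int cpu;

                trace_init_global_iter(&iter);

                /* Keep the per-cpu writers quiet while the buffer is walked. */
                for_each_tracing_cpu(cpu)
                        atomic_inc(&iter.tr->data[cpu]->disabled);

                iter.iter_flags |= TRACE_FILE_LAT_FMT;
                iter.pos = -1;

                while (!trace_empty(&iter)) {
                        if (trace_find_next_entry_inc(&iter)) {
                                /* Format one event, then consume and print it. */
                                if (print_trace_line(&iter) != TRACE_TYPE_NO_CONSUME)
                                        trace_consume(&iter);
                        }
                        trace_printk_seq(&iter.seq);
                }

                for_each_tracing_cpu(cpu)
                        atomic_dec(&iter.tr->data[cpu]->disabled);
        }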
[sfrench/cifs-2.6.git] kernel/trace/trace.c
index 95d0b1a28f93848a0ff5beaf667faeb9e4892062..d6736b93dc2aed0a1934165cf945012306a2f516 100644
@@ -101,10 +101,7 @@ static inline void ftrace_enable_cpu(void)
        preempt_enable();
 }
 
-static cpumask_var_t __read_mostly     tracing_buffer_mask;
-
-#define for_each_tracing_cpu(cpu)      \
-       for_each_cpu(cpu, tracing_buffer_mask)
+cpumask_var_t __read_mostly    tracing_buffer_mask;
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -1539,11 +1536,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 }
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
-enum trace_file_type {
-       TRACE_FILE_LAT_FMT      = 1,
-       TRACE_FILE_ANNOTATE     = 2,
-};
-
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
        /* Don't allow ftrace to trace into the ring buffers */
@@ -1641,7 +1633,7 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 }
 
 /* Find the next real entry, and increment the iterator to the next entry */
-static void *find_next_entry_inc(struct trace_iterator *iter)
+void *trace_find_next_entry_inc(struct trace_iterator *iter)
 {
        iter->ent = __find_next_entry(iter, &iter->cpu,
                                      &iter->lost_events, &iter->ts);
@@ -1676,19 +1668,19 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
                return NULL;
 
        if (iter->idx < 0)
-               ent = find_next_entry_inc(iter);
+               ent = trace_find_next_entry_inc(iter);
        else
                ent = iter;
 
        while (ent && iter->idx < i)
-               ent = find_next_entry_inc(iter);
+               ent = trace_find_next_entry_inc(iter);
 
        iter->pos = *pos;
 
        return ent;
 }
 
-static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 {
        struct trace_array *tr = iter->tr;
        struct ring_buffer_event *event;
@@ -2049,7 +2041,7 @@ int trace_empty(struct trace_iterator *iter)
 }
 
 /*  Called with trace_event_read_lock() held. */
-static enum print_line_t print_trace_line(struct trace_iterator *iter)
+enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
        enum print_line_t ret;
 
@@ -3211,7 +3203,7 @@ waitagain:
 
        trace_event_read_lock();
        trace_access_lock(iter->cpu_file);
-       while (find_next_entry_inc(iter) != NULL) {
+       while (trace_find_next_entry_inc(iter) != NULL) {
                enum print_line_t ret;
                int len = iter->seq.len;
 
@@ -3294,7 +3286,7 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
                if (ret != TRACE_TYPE_NO_CONSUME)
                        trace_consume(iter);
                rem -= count;
-               if (!find_next_entry_inc(iter)) {
+               if (!trace_find_next_entry_inc(iter)) {
                        rem = 0;
                        iter->ent = NULL;
                        break;
@@ -3310,12 +3302,12 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                                        size_t len,
                                        unsigned int flags)
 {
-       struct page *pages[PIPE_BUFFERS];
-       struct partial_page partial[PIPE_BUFFERS];
+       struct page *pages_def[PIPE_DEF_BUFFERS];
+       struct partial_page partial_def[PIPE_DEF_BUFFERS];
        struct trace_iterator *iter = filp->private_data;
        struct splice_pipe_desc spd = {
-               .pages          = pages,
-               .partial        = partial,
+               .pages          = pages_def,
+               .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
@@ -3326,6 +3318,9 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
        size_t rem;
        unsigned int i;
 
+       if (splice_grow_spd(pipe, &spd))
+               return -ENOMEM;
+
        /* copy the tracer to avoid using a global lock all around */
        mutex_lock(&trace_types_lock);
        if (unlikely(old_tracer != current_trace && current_trace)) {
@@ -3347,7 +3342,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
        if (ret <= 0)
                goto out_err;
 
-       if (!iter->ent && !find_next_entry_inc(iter)) {
+       if (!iter->ent && !trace_find_next_entry_inc(iter)) {
                ret = -EFAULT;
                goto out_err;
        }
@@ -3356,23 +3351,23 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
        trace_access_lock(iter->cpu_file);
 
        /* Fill as many pages as possible. */
-       for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
-               pages[i] = alloc_page(GFP_KERNEL);
-               if (!pages[i])
+       for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
+               spd.pages[i] = alloc_page(GFP_KERNEL);
+               if (!spd.pages[i])
                        break;
 
                rem = tracing_fill_pipe_page(rem, iter);
 
                /* Copy the data into the page, so we can start over. */
                ret = trace_seq_to_buffer(&iter->seq,
-                                         page_address(pages[i]),
+                                         page_address(spd.pages[i]),
                                          iter->seq.len);
                if (ret < 0) {
-                       __free_page(pages[i]);
+                       __free_page(spd.pages[i]);
                        break;
                }
-               partial[i].offset = 0;
-               partial[i].len = iter->seq.len;
+               spd.partial[i].offset = 0;
+               spd.partial[i].len = iter->seq.len;
 
                trace_seq_init(&iter->seq);
        }
@@ -3383,12 +3378,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        spd.nr_pages = i;
 
-       return splice_to_pipe(pipe, &spd);
+       ret = splice_to_pipe(pipe, &spd);
+out:
+       splice_shrink_spd(pipe, &spd);
+       return ret;
 
 out_err:
        mutex_unlock(&iter->mutex);
-
-       return ret;
+       goto out;
 }
 
 static ssize_t
@@ -3781,11 +3778,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                            unsigned int flags)
 {
        struct ftrace_buffer_info *info = file->private_data;
-       struct partial_page partial[PIPE_BUFFERS];
-       struct page *pages[PIPE_BUFFERS];
+       struct partial_page partial_def[PIPE_DEF_BUFFERS];
+       struct page *pages_def[PIPE_DEF_BUFFERS];
        struct splice_pipe_desc spd = {
-               .pages          = pages,
-               .partial        = partial,
+               .pages          = pages_def,
+               .partial        = partial_def,
                .flags          = flags,
                .ops            = &buffer_pipe_buf_ops,
                .spd_release    = buffer_spd_release,
@@ -3794,22 +3791,28 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        int entries, size, i;
        size_t ret;
 
+       if (splice_grow_spd(pipe, &spd))
+               return -ENOMEM;
+
        if (*ppos & (PAGE_SIZE - 1)) {
                WARN_ONCE(1, "Ftrace: previous read must page-align\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        if (len & (PAGE_SIZE - 1)) {
                WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
-               if (len < PAGE_SIZE)
-                       return -EINVAL;
+               if (len < PAGE_SIZE) {
+                       ret = -EINVAL;
+                       goto out;
+               }
                len &= PAGE_MASK;
        }
 
        trace_access_lock(info->cpu);
        entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 
-       for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
+       for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
                struct page *page;
                int r;
 
@@ -3864,11 +3867,12 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                else
                        ret = 0;
                /* TODO: block */
-               return ret;
+               goto out;
        }
 
        ret = splice_to_pipe(pipe, &spd);
-
+out:
+       splice_shrink_spd(pipe, &spd);
        return ret;
 }
 
@@ -4402,7 +4406,7 @@ static struct notifier_block trace_die_notifier = {
  */
 #define KERN_TRACE             KERN_EMERG
 
-static void
+void
 trace_printk_seq(struct trace_seq *s)
 {
        /* Probably should print a warning here. */
@@ -4417,6 +4421,13 @@ trace_printk_seq(struct trace_seq *s)
        trace_seq_init(s);
 }
 
+void trace_init_global_iter(struct trace_iterator *iter)
+{
+       iter->tr = &global_trace;
+       iter->trace = current_trace;
+       iter->cpu_file = TRACE_PIPE_ALL_CPU;
+}
+
 static void
 __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
@@ -4442,8 +4453,10 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
        if (disable_tracing)
                ftrace_kill();
 
+       trace_init_global_iter(&iter);
+
        for_each_tracing_cpu(cpu) {
-               atomic_inc(&global_trace.data[cpu]->disabled);
+               atomic_inc(&iter.tr->data[cpu]->disabled);
        }
 
        old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -4492,7 +4505,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
                iter.iter_flags |= TRACE_FILE_LAT_FMT;
                iter.pos = -1;
 
-               if (find_next_entry_inc(&iter) != NULL) {
+               if (trace_find_next_entry_inc(&iter) != NULL) {
                        int ret;
 
                        ret = print_trace_line(&iter);
@@ -4514,7 +4527,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
                trace_flags |= old_userobj;
 
                for_each_tracing_cpu(cpu) {
-                       atomic_dec(&global_trace.data[cpu]->disabled);
+                       atomic_dec(&iter.tr->data[cpu]->disabled);
                }
                tracing_on();
        }
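
With the iterator seeded by trace_init_global_iter(), __ftrace_dump()
no longer reaches into global_trace directly: the per-cpu disabled
counters are taken through iter.tr instead.  That keeps the oops-time
dump and any debugger-side consumer of these helpers on the same
iterator-driven path, with the same per-cpu disable discipline while
the buffer is walked.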