1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h>        /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
17 #include <linux/fs.h>
18
19 #include "trace.h"
20
21 /* Global flag to disable all recording to ring buffers */
22 static int ring_buffers_off __read_mostly;
23
24 /**
25  * tracing_on - enable all tracing buffers
26  *
27  * This function enables all tracing buffers that may have been
28  * disabled with tracing_off.
29  */
30 void tracing_on(void)
31 {
32         ring_buffers_off = 0;
33 }
34
35 /**
36  * tracing_off - turn off all tracing buffers
37  *
38  * This function stops all tracing buffers from recording data.
39  * It does not disable any overhead the tracers themselves may
40  * be causing. This function simply causes all recording to
41  * the ring buffers to fail.
42  */
43 void tracing_off(void)
44 {
45         ring_buffers_off = 1;
46 }
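
/*
 * Usage sketch (illustrative, not part of the original file): a common
 * debugging pattern is to freeze the buffers the moment a failure is
 * detected, so the events leading up to it are preserved.  bad_condition
 * is a hypothetical predicate used only for this example.
 *
 *	if (unlikely(bad_condition)) {
 *		tracing_off();
 *		WARN_ON(1);
 *	}
 *
 * A later tracing_on() re-enables recording once the state has been
 * inspected.
 */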
47
48 /* Up this if you want to test the TIME_EXTENTS and normalization */
49 #define DEBUG_SHIFT 0
50
51 /* FIXME!!! */
52 u64 ring_buffer_time_stamp(int cpu)
53 {
54         /* shift to debug/test normalization and TIME_EXTENTS */
55         return sched_clock() << DEBUG_SHIFT;
56 }
57
58 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
59 {
60         /* Just stupid testing the normalize function and deltas */
61         *ts >>= DEBUG_SHIFT;
62 }
63
64 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
65 #define RB_ALIGNMENT_SHIFT      2
66 #define RB_ALIGNMENT            (1 << RB_ALIGNMENT_SHIFT)
67 #define RB_MAX_SMALL_DATA       28
68
69 enum {
70         RB_LEN_TIME_EXTEND = 8,
71         RB_LEN_TIME_STAMP = 16,
72 };
73
74 /* inline for ring buffer fast paths */
75 static inline unsigned
76 rb_event_length(struct ring_buffer_event *event)
77 {
78         unsigned length;
79
80         switch (event->type) {
81         case RINGBUF_TYPE_PADDING:
82                 /* undefined */
83                 return -1;
84
85         case RINGBUF_TYPE_TIME_EXTEND:
86                 return RB_LEN_TIME_EXTEND;
87
88         case RINGBUF_TYPE_TIME_STAMP:
89                 return RB_LEN_TIME_STAMP;
90
91         case RINGBUF_TYPE_DATA:
92                 if (event->len)
93                         length = event->len << RB_ALIGNMENT_SHIFT;
94                 else
95                         length = event->array[0];
96                 return length + RB_EVNT_HDR_SIZE;
97         default:
98                 BUG();
99         }
100         /* not hit */
101         return 0;
102 }
103
104 /**
105  * ring_buffer_event_length - return the length of the event
106  * @event: the event to get the length of
107  */
108 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
109 {
110         return rb_event_length(event);
111 }
112
113 /* inline for ring buffer fast paths */
114 static inline void *
115 rb_event_data(struct ring_buffer_event *event)
116 {
117         BUG_ON(event->type != RINGBUF_TYPE_DATA);
118         /* If length is in len field, then array[0] has the data */
119         if (event->len)
120                 return (void *)&event->array[0];
121         /* Otherwise length is in array[0] and array[1] has the data */
122         return (void *)&event->array[1];
123 }
124
125 /**
126  * ring_buffer_event_data - return the data of the event
127  * @event: the event to get the data from
128  */
129 void *ring_buffer_event_data(struct ring_buffer_event *event)
130 {
131         return rb_event_data(event);
132 }
133
134 #define for_each_buffer_cpu(buffer, cpu)                \
135         for_each_cpu_mask(cpu, buffer->cpumask)
136
137 #define TS_SHIFT        27
138 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
139 #define TS_DELTA_TEST   (~TS_MASK)
140
141 /*
142  * This hack stolen from mm/slob.c.
143  * We can store per page timing information in the page frame of the page.
144  * Thanks to Peter Zijlstra for suggesting this idea.
145  */
146 struct buffer_page {
147         u64              time_stamp;    /* page time stamp */
148         local_t          write;         /* index for next write */
149         local_t          commit;        /* write committed index */
150         unsigned         read;          /* index for next read */
151         struct list_head list;          /* list of free pages */
152         void *page;                     /* Actual data page */
153 };
154
155 /*
156  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
157  * this issue out.
158  */
159 static inline void free_buffer_page(struct buffer_page *bpage)
160 {
161         if (bpage->page)
162                 free_page((unsigned long)bpage->page);
163         kfree(bpage);
164 }
165
166 /*
167  * We need to fit the time_stamp delta into 27 bits.
168  */
169 static inline int test_time_stamp(u64 delta)
170 {
171         if (delta & TS_DELTA_TEST)
172                 return 1;
173         return 0;
174 }
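
/*
 * Worked example (sketch, not in the original source): sched_clock()
 * counts nanoseconds, so with TS_SHIFT == 27 any delta of 2^27 ns
 * (roughly 134 ms) or more sets bits outside TS_MASK and forces a
 * TIME_EXTEND event:
 *
 *	test_time_stamp(0x07ffffffULL);	returns 0, delta fits in 27 bits
 *	test_time_stamp(0x08000000ULL);	returns 1, needs a time extend
 */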
175
176 #define BUF_PAGE_SIZE PAGE_SIZE
177
178 /*
179  * If head_page == tail_page && head == tail, then the buffer is empty.
180  */
181 struct ring_buffer_per_cpu {
182         int                             cpu;
183         struct ring_buffer              *buffer;
184         spinlock_t                      lock;
185         struct lock_class_key           lock_key;
186         struct list_head                pages;
187         struct buffer_page              *head_page;     /* read from head */
188         struct buffer_page              *tail_page;     /* write to tail */
189         struct buffer_page              *commit_page;   /* committed pages */
190         struct buffer_page              *reader_page;
191         unsigned long                   overrun;
192         unsigned long                   entries;
193         u64                             write_stamp;
194         u64                             read_stamp;
195         atomic_t                        record_disabled;
196 };
197
198 struct ring_buffer {
199         unsigned long                   size;
200         unsigned                        pages;
201         unsigned                        flags;
202         int                             cpus;
203         cpumask_t                       cpumask;
204         atomic_t                        record_disabled;
205
206         struct mutex                    mutex;
207
208         struct ring_buffer_per_cpu      **buffers;
209 };
210
211 struct ring_buffer_iter {
212         struct ring_buffer_per_cpu      *cpu_buffer;
213         unsigned long                   head;
214         struct buffer_page              *head_page;
215         u64                             read_stamp;
216 };
217
218 #define RB_WARN_ON(buffer, cond)                                \
219         do {                                                    \
220                 if (unlikely(cond)) {                           \
221                         atomic_inc(&buffer->record_disabled);   \
222                         WARN_ON(1);                             \
223                 }                                               \
224         } while (0)
225
226 #define RB_WARN_ON_RET(buffer, cond)                            \
227         do {                                                    \
228                 if (unlikely(cond)) {                           \
229                         atomic_inc(&buffer->record_disabled);   \
230                         WARN_ON(1);                             \
231                         return -1;                              \
232                 }                                               \
233         } while (0)
234
235 #define RB_WARN_ON_ONCE(buffer, cond)                           \
236         do {                                                    \
237                 static int once;                                \
238                 if (unlikely(cond) && !once) {                  \
239                         once++;                                 \
240                         atomic_inc(&buffer->record_disabled);   \
241                         WARN_ON(1);                             \
242                 }                                               \
243         } while (0)
244
245 /**
246  * rb_check_pages - integrity check of buffer pages
247  * @cpu_buffer: CPU buffer with pages to test
248  *
249  * As a safety measure we check to make sure the data pages have not
250  * been corrupted.
251  */
252 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
253 {
254         struct list_head *head = &cpu_buffer->pages;
255         struct buffer_page *page, *tmp;
256
257         RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
258         RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
259
260         list_for_each_entry_safe(page, tmp, head, list) {
261                 RB_WARN_ON_RET(cpu_buffer,
262                                page->list.next->prev != &page->list);
263                 RB_WARN_ON_RET(cpu_buffer,
264                                page->list.prev->next != &page->list);
265         }
266
267         return 0;
268 }
269
270 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
271                              unsigned nr_pages)
272 {
273         struct list_head *head = &cpu_buffer->pages;
274         struct buffer_page *page, *tmp;
275         unsigned long addr;
276         LIST_HEAD(pages);
277         unsigned i;
278
279         for (i = 0; i < nr_pages; i++) {
280                 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
281                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
282                 if (!page)
283                         goto free_pages;
284                 list_add(&page->list, &pages);
285
286                 addr = __get_free_page(GFP_KERNEL);
287                 if (!addr)
288                         goto free_pages;
289                 page->page = (void *)addr;
290         }
291
292         list_splice(&pages, head);
293
294         rb_check_pages(cpu_buffer);
295
296         return 0;
297
298  free_pages:
299         list_for_each_entry_safe(page, tmp, &pages, list) {
300                 list_del_init(&page->list);
301                 free_buffer_page(page);
302         }
303         return -ENOMEM;
304 }
305
306 static struct ring_buffer_per_cpu *
307 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
308 {
309         struct ring_buffer_per_cpu *cpu_buffer;
310         struct buffer_page *page;
311         unsigned long addr;
312         int ret;
313
314         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
315                                   GFP_KERNEL, cpu_to_node(cpu));
316         if (!cpu_buffer)
317                 return NULL;
318
319         cpu_buffer->cpu = cpu;
320         cpu_buffer->buffer = buffer;
321         spin_lock_init(&cpu_buffer->lock);
322         INIT_LIST_HEAD(&cpu_buffer->pages);
323
324         page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
325                             GFP_KERNEL, cpu_to_node(cpu));
326         if (!page)
327                 goto fail_free_buffer;
328
329         cpu_buffer->reader_page = page;
330         addr = __get_free_page(GFP_KERNEL);
331         if (!addr)
332                 goto fail_free_reader;
333         page->page = (void *)addr;
334
335         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
336
337         ret = rb_allocate_pages(cpu_buffer, buffer->pages);
338         if (ret < 0)
339                 goto fail_free_reader;
340
341         cpu_buffer->head_page
342                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
343         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
344
345         return cpu_buffer;
346
347  fail_free_reader:
348         free_buffer_page(cpu_buffer->reader_page);
349
350  fail_free_buffer:
351         kfree(cpu_buffer);
352         return NULL;
353 }
354
355 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
356 {
357         struct list_head *head = &cpu_buffer->pages;
358         struct buffer_page *page, *tmp;
359
360         list_del_init(&cpu_buffer->reader_page->list);
361         free_buffer_page(cpu_buffer->reader_page);
362
363         list_for_each_entry_safe(page, tmp, head, list) {
364                 list_del_init(&page->list);
365                 free_buffer_page(page);
366         }
367         kfree(cpu_buffer);
368 }
369
370 /*
371  * Causes compile errors if the struct buffer_page gets bigger
372  * than the struct page.
373  */
374 extern int ring_buffer_page_too_big(void);
375
376 /**
377  * ring_buffer_alloc - allocate a new ring_buffer
378  * @size: the size in bytes that is needed.
379  * @flags: attributes to set for the ring buffer.
380  *
381  * Currently the only flag that is available is the RB_FL_OVERWRITE
382  * flag. This flag means that the buffer will overwrite old data
383  * when the buffer wraps. If this flag is not set, the buffer will
384  * drop data when the tail hits the head.
385  */
386 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
387 {
388         struct ring_buffer *buffer;
389         int bsize;
390         int cpu;
391
392         /* Paranoid! Optimizes out when all is well */
393         if (sizeof(struct buffer_page) > sizeof(struct page))
394                 ring_buffer_page_too_big();
395
396
397         /* keep it in its own cache line */
398         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
399                          GFP_KERNEL);
400         if (!buffer)
401                 return NULL;
402
403         buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
404         buffer->flags = flags;
405
406         /* need at least two pages */
407         if (buffer->pages == 1)
408                 buffer->pages++;
409
410         buffer->cpumask = cpu_possible_map;
411         buffer->cpus = nr_cpu_ids;
412
413         bsize = sizeof(void *) * nr_cpu_ids;
414         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
415                                   GFP_KERNEL);
416         if (!buffer->buffers)
417                 goto fail_free_buffer;
418
419         for_each_buffer_cpu(buffer, cpu) {
420                 buffer->buffers[cpu] =
421                         rb_allocate_cpu_buffer(buffer, cpu);
422                 if (!buffer->buffers[cpu])
423                         goto fail_free_buffers;
424         }
425
426         mutex_init(&buffer->mutex);
427
428         return buffer;
429
430  fail_free_buffers:
431         for_each_buffer_cpu(buffer, cpu) {
432                 if (buffer->buffers[cpu])
433                         rb_free_cpu_buffer(buffer->buffers[cpu]);
434         }
435         kfree(buffer->buffers);
436
437  fail_free_buffer:
438         kfree(buffer);
439         return NULL;
440 }
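
/*
 * Usage sketch (illustrative, not part of the original file): allocate a
 * buffer of roughly one megabyte per CPU that overwrites old events when
 * full, and free it when done.  The size is rounded up to whole pages.
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */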
441
442 /**
443  * ring_buffer_free - free a ring buffer.
444  * @buffer: the buffer to free.
445  */
446 void
447 ring_buffer_free(struct ring_buffer *buffer)
448 {
449         int cpu;
450
451         for_each_buffer_cpu(buffer, cpu)
452                 rb_free_cpu_buffer(buffer->buffers[cpu]);
453
454         kfree(buffer);
455 }
456
457 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
458
459 static void
460 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
461 {
462         struct buffer_page *page;
463         struct list_head *p;
464         unsigned i;
465
466         atomic_inc(&cpu_buffer->record_disabled);
467         synchronize_sched();
468
469         for (i = 0; i < nr_pages; i++) {
470                 BUG_ON(list_empty(&cpu_buffer->pages));
471                 p = cpu_buffer->pages.next;
472                 page = list_entry(p, struct buffer_page, list);
473                 list_del_init(&page->list);
474                 free_buffer_page(page);
475         }
476         BUG_ON(list_empty(&cpu_buffer->pages));
477
478         rb_reset_cpu(cpu_buffer);
479
480         rb_check_pages(cpu_buffer);
481
482         atomic_dec(&cpu_buffer->record_disabled);
483
484 }
485
486 static void
487 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
488                 struct list_head *pages, unsigned nr_pages)
489 {
490         struct buffer_page *page;
491         struct list_head *p;
492         unsigned i;
493
494         atomic_inc(&cpu_buffer->record_disabled);
495         synchronize_sched();
496
497         for (i = 0; i < nr_pages; i++) {
498                 BUG_ON(list_empty(pages));
499                 p = pages->next;
500                 page = list_entry(p, struct buffer_page, list);
501                 list_del_init(&page->list);
502                 list_add_tail(&page->list, &cpu_buffer->pages);
503         }
504         rb_reset_cpu(cpu_buffer);
505
506         rb_check_pages(cpu_buffer);
507
508         atomic_dec(&cpu_buffer->record_disabled);
509 }
510
511 /**
512  * ring_buffer_resize - resize the ring buffer
513  * @buffer: the buffer to resize.
514  * @size: the new size.
515  *
516  * The tracer is responsible for making sure that the buffer is
517  * not being used while changing the size.
518  * Note: We may be able to change the above requirement by using
519  *  RCU synchronizations.
520  *
521  * Minimum size is 2 * BUF_PAGE_SIZE.
522  *
523  * Returns the new, page-aligned size on success, or -ENOMEM if page
523  * allocation fails.
524  */
525 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
526 {
527         struct ring_buffer_per_cpu *cpu_buffer;
528         unsigned nr_pages, rm_pages, new_pages;
529         struct buffer_page *page, *tmp;
530         unsigned long buffer_size;
531         unsigned long addr;
532         LIST_HEAD(pages);
533         int i, cpu;
534
535         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
536         size *= BUF_PAGE_SIZE;
537         buffer_size = buffer->pages * BUF_PAGE_SIZE;
538
539         /* we need a minimum of two pages */
540         if (size < BUF_PAGE_SIZE * 2)
541                 size = BUF_PAGE_SIZE * 2;
542
543         if (size == buffer_size)
544                 return size;
545
546         mutex_lock(&buffer->mutex);
547
548         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
549
550         if (size < buffer_size) {
551
552                 /* easy case, just free pages */
553                 BUG_ON(nr_pages >= buffer->pages);
554
555                 rm_pages = buffer->pages - nr_pages;
556
557                 for_each_buffer_cpu(buffer, cpu) {
558                         cpu_buffer = buffer->buffers[cpu];
559                         rb_remove_pages(cpu_buffer, rm_pages);
560                 }
561                 goto out;
562         }
563
564         /*
565          * This is a bit more difficult. We only want to add pages
566          * when we can allocate enough for all CPUs. We do this
567          * by allocating all the pages and storing them on a local
568          * linked list. If we succeed in our allocation, then we
569          * add these pages to the cpu_buffers. Otherwise we just free
570          * them all and return -ENOMEM;
571          */
572         BUG_ON(nr_pages <= buffer->pages);
573         new_pages = nr_pages - buffer->pages;
574
575         for_each_buffer_cpu(buffer, cpu) {
576                 for (i = 0; i < new_pages; i++) {
577                         page = kzalloc_node(ALIGN(sizeof(*page),
578                                                   cache_line_size()),
579                                             GFP_KERNEL, cpu_to_node(cpu));
580                         if (!page)
581                                 goto free_pages;
582                         list_add(&page->list, &pages);
583                         addr = __get_free_page(GFP_KERNEL);
584                         if (!addr)
585                                 goto free_pages;
586                         page->page = (void *)addr;
587                 }
588         }
589
590         for_each_buffer_cpu(buffer, cpu) {
591                 cpu_buffer = buffer->buffers[cpu];
592                 rb_insert_pages(cpu_buffer, &pages, new_pages);
593         }
594
595         BUG_ON(!list_empty(&pages));
596
597  out:
598         buffer->pages = nr_pages;
599         mutex_unlock(&buffer->mutex);
600
601         return size;
602
603  free_pages:
604         list_for_each_entry_safe(page, tmp, &pages, list) {
605                 list_del_init(&page->list);
606                 free_buffer_page(page);
607         }
            mutex_unlock(&buffer->mutex);
608         return -ENOMEM;
609 }
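
/*
 * Usage sketch (illustrative only): grow the buffer to two megabytes per
 * CPU.  A negative return value means the resize failed and the existing
 * buffer is left as it was.
 *
 *	int ret;
 *
 *	ret = ring_buffer_resize(rb, 2 * 1024 * 1024);
 *	if (ret < 0)
 *		printk(KERN_WARNING "ring buffer resize failed\n");
 */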
610
611 static inline int rb_null_event(struct ring_buffer_event *event)
612 {
613         return event->type == RINGBUF_TYPE_PADDING;
614 }
615
616 static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
617 {
618         return page->page + index;
619 }
620
621 static inline struct ring_buffer_event *
622 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
623 {
624         return __rb_page_index(cpu_buffer->reader_page,
625                                cpu_buffer->reader_page->read);
626 }
627
628 static inline struct ring_buffer_event *
629 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
630 {
631         return __rb_page_index(cpu_buffer->head_page,
632                                cpu_buffer->head_page->read);
633 }
634
635 static inline struct ring_buffer_event *
636 rb_iter_head_event(struct ring_buffer_iter *iter)
637 {
638         return __rb_page_index(iter->head_page, iter->head);
639 }
640
641 static inline unsigned rb_page_write(struct buffer_page *bpage)
642 {
643         return local_read(&bpage->write);
644 }
645
646 static inline unsigned rb_page_commit(struct buffer_page *bpage)
647 {
648         return local_read(&bpage->commit);
649 }
650
651 /* Size is determined by what has been committed */
652 static inline unsigned rb_page_size(struct buffer_page *bpage)
653 {
654         return rb_page_commit(bpage);
655 }
656
657 static inline unsigned
658 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
659 {
660         return rb_page_commit(cpu_buffer->commit_page);
661 }
662
663 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
664 {
665         return rb_page_commit(cpu_buffer->head_page);
666 }
667
668 /*
669  * When the tail hits the head and the buffer is in overwrite mode,
670  * the head jumps to the next page and all content on the previous
671  * page is discarded. But before doing so, we update the overrun
672  * variable of the buffer.
673  */
674 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
675 {
676         struct ring_buffer_event *event;
677         unsigned long head;
678
679         for (head = 0; head < rb_head_size(cpu_buffer);
680              head += rb_event_length(event)) {
681
682                 event = __rb_page_index(cpu_buffer->head_page, head);
683                 BUG_ON(rb_null_event(event));
684                 /* Only count data entries */
685                 if (event->type != RINGBUF_TYPE_DATA)
686                         continue;
687                 cpu_buffer->overrun++;
688                 cpu_buffer->entries--;
689         }
690 }
691
692 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
693                                struct buffer_page **page)
694 {
695         struct list_head *p = (*page)->list.next;
696
697         if (p == &cpu_buffer->pages)
698                 p = p->next;
699
700         *page = list_entry(p, struct buffer_page, list);
701 }
702
703 static inline unsigned
704 rb_event_index(struct ring_buffer_event *event)
705 {
706         unsigned long addr = (unsigned long)event;
707
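        /*
         * BUF_PAGE_SIZE is currently defined as PAGE_SIZE, so the term
         * subtracted below is zero and the index is simply the event's
         * offset within its page; the subtraction only matters if part
         * of the page is ever set aside for a header.
         */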
708         return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
709 }
710
711 static inline int
712 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
713              struct ring_buffer_event *event)
714 {
715         unsigned long addr = (unsigned long)event;
716         unsigned long index;
717
718         index = rb_event_index(event);
719         addr &= PAGE_MASK;
720
721         return cpu_buffer->commit_page->page == (void *)addr &&
722                 rb_commit_index(cpu_buffer) == index;
723 }
724
725 static inline void
726 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
727                     struct ring_buffer_event *event)
728 {
729         unsigned long addr = (unsigned long)event;
730         unsigned long index;
731
732         index = rb_event_index(event);
733         addr &= PAGE_MASK;
734
735         while (cpu_buffer->commit_page->page != (void *)addr) {
736                 RB_WARN_ON(cpu_buffer,
737                            cpu_buffer->commit_page == cpu_buffer->tail_page);
738                 cpu_buffer->commit_page->commit =
739                         cpu_buffer->commit_page->write;
740                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
741                 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
742         }
743
744         /* Now set the commit to the event's index */
745         local_set(&cpu_buffer->commit_page->commit, index);
746 }
747
748 static inline void
749 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
750 {
751         /*
752          * We only race with interrupts and NMIs on this CPU.
753          * If we own the commit event, then we can commit
754          * all others that interrupted us, since the interruptions
755          * are in stack format (they finish before they come
756          * back to us). This allows us to do a simple loop to
757          * assign the commit to the tail.
758          */
759         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
760                 cpu_buffer->commit_page->commit =
761                         cpu_buffer->commit_page->write;
762                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
763                 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
764                 /* add barrier to keep gcc from optimizing too much */
765                 barrier();
766         }
767         while (rb_commit_index(cpu_buffer) !=
768                rb_page_write(cpu_buffer->commit_page)) {
769                 cpu_buffer->commit_page->commit =
770                         cpu_buffer->commit_page->write;
771                 barrier();
772         }
773 }
774
775 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
776 {
777         cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
778         cpu_buffer->reader_page->read = 0;
779 }
780
781 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
782 {
783         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
784
785         /*
786          * The iterator could be on the reader page (it starts there).
787          * But the head could have moved, since the reader was
788          * found. Check for this case and assign the iterator
789          * to the head page instead of next.
790          */
791         if (iter->head_page == cpu_buffer->reader_page)
792                 iter->head_page = cpu_buffer->head_page;
793         else
794                 rb_inc_page(cpu_buffer, &iter->head_page);
795
796         iter->read_stamp = iter->head_page->time_stamp;
797         iter->head = 0;
798 }
799
800 /**
801  * rb_update_event - update event type and data
802  * @event: the event to update
803  * @type: the type of event
804  * @length: the size of the event field in the ring buffer
805  *
806  * Update the type and data fields of the event. The length
807  * is the actual size that is written to the ring buffer,
808  * and with this, we can determine what to place into the
809  * data field.
810  */
811 static inline void
812 rb_update_event(struct ring_buffer_event *event,
813                          unsigned type, unsigned length)
814 {
815         event->type = type;
816
817         switch (type) {
818
819         case RINGBUF_TYPE_PADDING:
820                 break;
821
822         case RINGBUF_TYPE_TIME_EXTEND:
823                 event->len =
824                         (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
825                         >> RB_ALIGNMENT_SHIFT;
826                 break;
827
828         case RINGBUF_TYPE_TIME_STAMP:
829                 event->len =
830                         (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
831                         >> RB_ALIGNMENT_SHIFT;
832                 break;
833
834         case RINGBUF_TYPE_DATA:
835                 length -= RB_EVNT_HDR_SIZE;
836                 if (length > RB_MAX_SMALL_DATA) {
837                         event->len = 0;
838                         event->array[0] = length;
839                 } else
840                         event->len =
841                                 (length + (RB_ALIGNMENT-1))
842                                 >> RB_ALIGNMENT_SHIFT;
843                 break;
844         default:
845                 BUG();
846         }
847 }
848
849 static inline unsigned rb_calculate_event_length(unsigned length)
850 {
851         struct ring_buffer_event event; /* Used only for sizeof array */
852
853         /* zero length can cause confusion */
854         if (!length)
855                 length = 1;
856
857         if (length > RB_MAX_SMALL_DATA)
858                 length += sizeof(event.array[0]);
859
860         length += RB_EVNT_HDR_SIZE;
861         length = ALIGN(length, RB_ALIGNMENT);
862
863         return length;
864 }
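
/*
 * Worked example of the calculation above (sketch; assumes the usual
 * layout where the event header and array[0] are 4 bytes each, i.e. the
 * same as RB_ALIGNMENT):
 *
 *	rb_calculate_event_length(3)   = 3 + 4 (header)              = 7,   aligned to 8
 *	rb_calculate_event_length(100) = 100 + 4 (array[0]) + 4 (hdr) = 108, already aligned
 *
 * Payloads up to RB_MAX_SMALL_DATA encode their size in event->len in
 * RB_ALIGNMENT units; anything larger spills the size into array[0],
 * which is why the extra sizeof(event.array[0]) is added here.
 */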
865
866 static struct ring_buffer_event *
867 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
868                   unsigned type, unsigned long length, u64 *ts)
869 {
870         struct buffer_page *tail_page, *head_page, *reader_page;
871         unsigned long tail, write;
872         struct ring_buffer *buffer = cpu_buffer->buffer;
873         struct ring_buffer_event *event;
874         unsigned long flags;
875
876         tail_page = cpu_buffer->tail_page;
877         write = local_add_return(length, &tail_page->write);
878         tail = write - length;
879
880         /* See if we shot past the end of this buffer page */
881         if (write > BUF_PAGE_SIZE) {
882                 struct buffer_page *next_page = tail_page;
883
884                 spin_lock_irqsave(&cpu_buffer->lock, flags);
885
886                 rb_inc_page(cpu_buffer, &next_page);
887
888                 head_page = cpu_buffer->head_page;
889                 reader_page = cpu_buffer->reader_page;
890
891                 /* we grabbed the lock before incrementing */
892                 RB_WARN_ON(cpu_buffer, next_page == reader_page);
893
894                 /*
895                  * If for some reason, we had an interrupt storm that made
896                  * it all the way around the buffer, bail, and warn
897                  * about it.
898                  */
899                 if (unlikely(next_page == cpu_buffer->commit_page)) {
900                         WARN_ON_ONCE(1);
901                         goto out_unlock;
902                 }
903
904                 if (next_page == head_page) {
905                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
906                                 /* reset write */
907                                 if (tail <= BUF_PAGE_SIZE)
908                                         local_set(&tail_page->write, tail);
909                                 goto out_unlock;
910                         }
911
912                         /* tail_page has not moved yet? */
913                         if (tail_page == cpu_buffer->tail_page) {
914                                 /* count overflows */
915                                 rb_update_overflow(cpu_buffer);
916
917                                 rb_inc_page(cpu_buffer, &head_page);
918                                 cpu_buffer->head_page = head_page;
919                                 cpu_buffer->head_page->read = 0;
920                         }
921                 }
922
923                 /*
924                  * If the tail page is still the same as what we think
925                  * it is, then it is up to us to update the tail
926                  * pointer.
927                  */
928                 if (tail_page == cpu_buffer->tail_page) {
929                         local_set(&next_page->write, 0);
930                         local_set(&next_page->commit, 0);
931                         cpu_buffer->tail_page = next_page;
932
933                         /* reread the time stamp */
934                         *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
935                         cpu_buffer->tail_page->time_stamp = *ts;
936                 }
937
938                 /*
939                  * The actual tail page has moved forward.
940                  */
941                 if (tail < BUF_PAGE_SIZE) {
942                         /* Mark the rest of the page with padding */
943                         event = __rb_page_index(tail_page, tail);
944                         event->type = RINGBUF_TYPE_PADDING;
945                 }
946
947                 if (tail <= BUF_PAGE_SIZE)
948                         /* Set the write back to the previous setting */
949                         local_set(&tail_page->write, tail);
950
951                 /*
952                  * If this was a commit entry that failed,
953                  * increment that too
954                  */
955                 if (tail_page == cpu_buffer->commit_page &&
956                     tail == rb_commit_index(cpu_buffer)) {
957                         rb_set_commit_to_write(cpu_buffer);
958                 }
959
960                 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
961
962                 /* fail and let the caller try again */
963                 return ERR_PTR(-EAGAIN);
964         }
965
966         /* We reserved something on the buffer */
967
968         BUG_ON(write > BUF_PAGE_SIZE);
969
970         event = __rb_page_index(tail_page, tail);
971         rb_update_event(event, type, length);
972
973         /*
974          * If this is a commit and the tail is zero, then update
975          * this page's time stamp.
976          */
977         if (!tail && rb_is_commit(cpu_buffer, event))
978                 cpu_buffer->commit_page->time_stamp = *ts;
979
980         return event;
981
982  out_unlock:
983         spin_unlock_irqrestore(&cpu_buffer->lock, flags);
984         return NULL;
985 }
986
987 static int
988 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
989                   u64 *ts, u64 *delta)
990 {
991         struct ring_buffer_event *event;
992         static int once;
993         int ret;
994
995         if (unlikely(*delta > (1ULL << 59) && !once++)) {
996                 printk(KERN_WARNING "Delta way too big! %llu"
997                        " ts=%llu write stamp = %llu\n",
998                        (unsigned long long)*delta,
999                        (unsigned long long)*ts,
1000                        (unsigned long long)cpu_buffer->write_stamp);
1001                 WARN_ON(1);
1002         }
1003
1004         /*
1005          * The delta is too big; we need to add a
1006          * new timestamp.
1007          */
1008         event = __rb_reserve_next(cpu_buffer,
1009                                   RINGBUF_TYPE_TIME_EXTEND,
1010                                   RB_LEN_TIME_EXTEND,
1011                                   ts);
1012         if (!event)
1013                 return -EBUSY;
1014
1015         if (PTR_ERR(event) == -EAGAIN)
1016                 return -EAGAIN;
1017
1018         /* Only a committed time event can update the write stamp */
1019         if (rb_is_commit(cpu_buffer, event)) {
1020                 /*
1021                  * If this is the first on the page, then we need to
1022                  * update the page itself, and just put in a zero.
1023                  */
1024                 if (rb_event_index(event)) {
1025                         event->time_delta = *delta & TS_MASK;
1026                         event->array[0] = *delta >> TS_SHIFT;
1027                 } else {
1028                         cpu_buffer->commit_page->time_stamp = *ts;
1029                         event->time_delta = 0;
1030                         event->array[0] = 0;
1031                 }
1032                 cpu_buffer->write_stamp = *ts;
1033                 /* let the caller know this was the commit */
1034                 ret = 1;
1035         } else {
1036                 /* Darn, this is just wasted space */
1037                 event->time_delta = 0;
1038                 event->array[0] = 0;
1039                 ret = 0;
1040         }
1041
1042         *delta = 0;
1043
1044         return ret;
1045 }
1046
1047 static struct ring_buffer_event *
1048 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1049                       unsigned type, unsigned long length)
1050 {
1051         struct ring_buffer_event *event;
1052         u64 ts, delta;
1053         int commit = 0;
1054         int nr_loops = 0;
1055
1056  again:
1057         /*
1058          * We allow for interrupts to reenter here and do a trace.
1059          * If one does, it will cause this original code to loop
1060          * back here. Even with heavy interrupts happening, this
1061          * should only happen a few times in a row. If this happens
1062          * 1000 times in a row, there must be either an interrupt
1063          * storm or we have something buggy.
1064          * Bail!
1065          */
1066         if (unlikely(++nr_loops > 1000)) {
1067                 RB_WARN_ON(cpu_buffer, 1);
1068                 return NULL;
1069         }
1070
1071         ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1072
1073         /*
1074          * Only the first commit can update the timestamp.
1075          * Yes there is a race here. If an interrupt comes in
1076          * just after the conditional and it traces too, then it
1077          * will also check the deltas. More than one timestamp may
1078          * also be made. But only the entry that did the actual
1079          * commit will be something other than zero.
1080          */
1081         if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1082             rb_page_write(cpu_buffer->tail_page) ==
1083             rb_commit_index(cpu_buffer)) {
1084
1085                 delta = ts - cpu_buffer->write_stamp;
1086
1087                 /* make sure this delta is calculated here */
1088                 barrier();
1089
1090                 /* Did the write stamp get updated already? */
1091                 if (unlikely(ts < cpu_buffer->write_stamp))
1092                         delta = 0;
1093
1094                 if (test_time_stamp(delta)) {
1095
1096                         commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1097
1098                         if (commit == -EBUSY)
1099                                 return NULL;
1100
1101                         if (commit == -EAGAIN)
1102                                 goto again;
1103
1104                         RB_WARN_ON(cpu_buffer, commit < 0);
1105                 }
1106         } else
1107                 /* Non commits have zero deltas */
1108                 delta = 0;
1109
1110         event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1111         if (PTR_ERR(event) == -EAGAIN)
1112                 goto again;
1113
1114         if (!event) {
1115                 if (unlikely(commit))
1116                         /*
1117                          * Ouch! We needed a timestamp and it was committed. But
1118                          * we didn't get our event reserved.
1119                          */
1120                         rb_set_commit_to_write(cpu_buffer);
1121                 return NULL;
1122         }
1123
1124         /*
1125          * If the timestamp was committed, make the commit our entry
1126          * now so that we will update it when needed.
1127          */
1128         if (commit)
1129                 rb_set_commit_event(cpu_buffer, event);
1130         else if (!rb_is_commit(cpu_buffer, event))
1131                 delta = 0;
1132
1133         event->time_delta = delta;
1134
1135         return event;
1136 }
1137
1138 static DEFINE_PER_CPU(int, rb_need_resched);
1139
1140 /**
1141  * ring_buffer_lock_reserve - reserve a part of the buffer
1142  * @buffer: the ring buffer to reserve from
1143  * @length: the length of the data to reserve (excluding event header)
1144  * @flags: a pointer to save the interrupt flags
1145  *
1146  * Returns a reserved event on the ring buffer to copy directly to.
1147  * The user of this interface will need to get the body to write into
1148  * and can use the ring_buffer_event_data() interface.
1149  *
1150  * The length is the length of the data needed, not the event length
1151  * which also includes the event header.
1152  *
1153  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1154  * If NULL is returned, then nothing has been allocated or locked.
1155  */
1156 struct ring_buffer_event *
1157 ring_buffer_lock_reserve(struct ring_buffer *buffer,
1158                          unsigned long length,
1159                          unsigned long *flags)
1160 {
1161         struct ring_buffer_per_cpu *cpu_buffer;
1162         struct ring_buffer_event *event;
1163         int cpu, resched;
1164
1165         if (ring_buffers_off)
1166                 return NULL;
1167
1168         if (atomic_read(&buffer->record_disabled))
1169                 return NULL;
1170
1171         /* If we are tracing schedule, we don't want to recurse */
1172         resched = need_resched();
1173         preempt_disable_notrace();
1174
1175         cpu = raw_smp_processor_id();
1176
1177         if (!cpu_isset(cpu, buffer->cpumask))
1178                 goto out;
1179
1180         cpu_buffer = buffer->buffers[cpu];
1181
1182         if (atomic_read(&cpu_buffer->record_disabled))
1183                 goto out;
1184
1185         length = rb_calculate_event_length(length);
1186         if (length > BUF_PAGE_SIZE)
1187                 goto out;
1188
1189         event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1190         if (!event)
1191                 goto out;
1192
1193         /*
1194          * Need to store resched state on this cpu.
1195          * Only the first needs to.
1196          */
1197
1198         if (preempt_count() == 1)
1199                 per_cpu(rb_need_resched, cpu) = resched;
1200
1201         return event;
1202
1203  out:
1204         if (resched)
1205                 preempt_enable_no_resched_notrace();
1206         else
1207                 preempt_enable_notrace();
1208         return NULL;
1209 }
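
/*
 * Usage sketch (illustrative, not part of the original file): reserve room
 * for a payload, fill it in through ring_buffer_event_data(), then commit.
 * struct my_entry is a hypothetical payload type.
 *
 *	struct my_entry { unsigned long ip; };
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long irq_flags;
 *
 *	event = ring_buffer_lock_reserve(rb, sizeof(*entry), &irq_flags);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = (unsigned long)__builtin_return_address(0);
 *	ring_buffer_unlock_commit(rb, event, irq_flags);
 */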
1210
1211 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1212                       struct ring_buffer_event *event)
1213 {
1214         cpu_buffer->entries++;
1215
1216         /* Only process further if we own the commit */
1217         if (!rb_is_commit(cpu_buffer, event))
1218                 return;
1219
1220         cpu_buffer->write_stamp += event->time_delta;
1221
1222         rb_set_commit_to_write(cpu_buffer);
1223 }
1224
1225 /**
1226  * ring_buffer_unlock_commit - commit a reserved event
1227  * @buffer: The buffer to commit to
1228  * @event: The event pointer to commit.
1229  * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1230  *
1231  * This commits the data to the ring buffer, and releases any locks held.
1232  *
1233  * Must be paired with ring_buffer_lock_reserve.
1234  */
1235 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1236                               struct ring_buffer_event *event,
1237                               unsigned long flags)
1238 {
1239         struct ring_buffer_per_cpu *cpu_buffer;
1240         int cpu = raw_smp_processor_id();
1241
1242         cpu_buffer = buffer->buffers[cpu];
1243
1244         rb_commit(cpu_buffer, event);
1245
1246         /*
1247          * Only the last preempt count needs to restore preemption.
1248          */
1249         if (preempt_count() == 1) {
1250                 if (per_cpu(rb_need_resched, cpu))
1251                         preempt_enable_no_resched_notrace();
1252                 else
1253                         preempt_enable_notrace();
1254         } else
1255                 preempt_enable_no_resched_notrace();
1256
1257         return 0;
1258 }
1259
1260 /**
1261  * ring_buffer_write - write data to the buffer without reserving
1262  * @buffer: The ring buffer to write to.
1263  * @length: The length of the data being written (excluding the event header)
1264  * @data: The data to write to the buffer.
1265  *
1266  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1267  * one function. If you already have the data to write to the buffer, it
1268  * may be easier to simply call this function.
1269  *
1270  * Note, like ring_buffer_lock_reserve, the length is the length of the data
1271  * and not the length of the event which would hold the header.
1272  */
1273 int ring_buffer_write(struct ring_buffer *buffer,
1274                         unsigned long length,
1275                         void *data)
1276 {
1277         struct ring_buffer_per_cpu *cpu_buffer;
1278         struct ring_buffer_event *event;
1279         unsigned long event_length;
1280         void *body;
1281         int ret = -EBUSY;
1282         int cpu, resched;
1283
1284         if (ring_buffers_off)
1285                 return -EBUSY;
1286
1287         if (atomic_read(&buffer->record_disabled))
1288                 return -EBUSY;
1289
1290         resched = need_resched();
1291         preempt_disable_notrace();
1292
1293         cpu = raw_smp_processor_id();
1294
1295         if (!cpu_isset(cpu, buffer->cpumask))
1296                 goto out;
1297
1298         cpu_buffer = buffer->buffers[cpu];
1299
1300         if (atomic_read(&cpu_buffer->record_disabled))
1301                 goto out;
1302
1303         event_length = rb_calculate_event_length(length);
1304         event = rb_reserve_next_event(cpu_buffer,
1305                                       RINGBUF_TYPE_DATA, event_length);
1306         if (!event)
1307                 goto out;
1308
1309         body = rb_event_data(event);
1310
1311         memcpy(body, data, length);
1312
1313         rb_commit(cpu_buffer, event);
1314
1315         ret = 0;
1316  out:
1317         if (resched)
1318                 preempt_enable_no_resched_notrace();
1319         else
1320                 preempt_enable_notrace();
1321
1322         return ret;
1323 }
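
/*
 * Usage sketch (illustrative only, reusing the hypothetical struct my_entry
 * from the sketch above): when the data is already sitting in a local
 * variable, ring_buffer_write() reserves, copies and commits in one call.
 *
 *	struct my_entry sample = { .ip = 0 };
 *	int err;
 *
 *	err = ring_buffer_write(rb, sizeof(sample), &sample);
 *	if (err)
 *		return err;
 *
 * The return value is -EBUSY when recording is disabled or the reserve
 * fails, and 0 on success.
 */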
1324
1325 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1326 {
1327         struct buffer_page *reader = cpu_buffer->reader_page;
1328         struct buffer_page *head = cpu_buffer->head_page;
1329         struct buffer_page *commit = cpu_buffer->commit_page;
1330
1331         return reader->read == rb_page_commit(reader) &&
1332                 (commit == reader ||
1333                  (commit == head &&
1334                   head->read == rb_page_commit(commit)));
1335 }
1336
1337 /**
1338  * ring_buffer_record_disable - stop all writes into the buffer
1339  * @buffer: The ring buffer to stop writes to.
1340  *
1341  * This prevents all writes to the buffer. Any attempt to write
1342  * to the buffer after this will fail and return NULL.
1343  *
1344  * The caller should call synchronize_sched() after this.
1345  */
1346 void ring_buffer_record_disable(struct ring_buffer *buffer)
1347 {
1348         atomic_inc(&buffer->record_disabled);
1349 }
1350
1351 /**
1352  * ring_buffer_record_enable - enable writes to the buffer
1353  * @buffer: The ring buffer to enable writes
1354  *
1355  * Note, multiple disables will need the same number of enables
1356  * to truly enable the writing (much like preempt_disable).
1357  */
1358 void ring_buffer_record_enable(struct ring_buffer *buffer)
1359 {
1360         atomic_dec(&buffer->record_disabled);
1361 }
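
/*
 * Usage sketch (illustrative only): the disable/enable calls nest like
 * preempt_disable(), and as the comment above says, in-flight writers
 * should be flushed with synchronize_sched() before the buffer is touched:
 *
 *	ring_buffer_record_disable(rb);
 *	synchronize_sched();
 *	... read, reset or resize the buffer here ...
 *	ring_buffer_record_enable(rb);
 */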
1362
1363 /**
1364  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1365  * @buffer: The ring buffer to stop writes to.
1366  * @cpu: The CPU buffer to stop
1367  *
1368  * This prevents all writes to the buffer. Any attempt to write
1369  * to the buffer after this will fail and return NULL.
1370  *
1371  * The caller should call synchronize_sched() after this.
1372  */
1373 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1374 {
1375         struct ring_buffer_per_cpu *cpu_buffer;
1376
1377         if (!cpu_isset(cpu, buffer->cpumask))
1378                 return;
1379
1380         cpu_buffer = buffer->buffers[cpu];
1381         atomic_inc(&cpu_buffer->record_disabled);
1382 }
1383
1384 /**
1385  * ring_buffer_record_enable_cpu - enable writes to the buffer
1386  * @buffer: The ring buffer to enable writes
1387  * @cpu: The CPU to enable.
1388  *
1389  * Note, multiple disables will need the same number of enables
1390  * to truly enable the writing (much like preempt_disable).
1391  */
1392 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1393 {
1394         struct ring_buffer_per_cpu *cpu_buffer;
1395
1396         if (!cpu_isset(cpu, buffer->cpumask))
1397                 return;
1398
1399         cpu_buffer = buffer->buffers[cpu];
1400         atomic_dec(&cpu_buffer->record_disabled);
1401 }
1402
1403 /**
1404  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1405  * @buffer: The ring buffer
1406  * @cpu: The per CPU buffer to get the entries from.
1407  */
1408 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1409 {
1410         struct ring_buffer_per_cpu *cpu_buffer;
1411
1412         if (!cpu_isset(cpu, buffer->cpumask))
1413                 return 0;
1414
1415         cpu_buffer = buffer->buffers[cpu];
1416         return cpu_buffer->entries;
1417 }
1418
1419 /**
1420  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1421  * @buffer: The ring buffer
1422  * @cpu: The per CPU buffer to get the number of overruns from
1423  */
1424 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1425 {
1426         struct ring_buffer_per_cpu *cpu_buffer;
1427
1428         if (!cpu_isset(cpu, buffer->cpumask))
1429                 return 0;
1430
1431         cpu_buffer = buffer->buffers[cpu];
1432         return cpu_buffer->overrun;
1433 }
1434
1435 /**
1436  * ring_buffer_entries - get the number of entries in a buffer
1437  * @buffer: The ring buffer
1438  *
1439  * Returns the total number of entries in the ring buffer
1440  * (all CPU entries)
1441  */
1442 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1443 {
1444         struct ring_buffer_per_cpu *cpu_buffer;
1445         unsigned long entries = 0;
1446         int cpu;
1447
1448         /* if you care about this being correct, lock the buffer */
1449         for_each_buffer_cpu(buffer, cpu) {
1450                 cpu_buffer = buffer->buffers[cpu];
1451                 entries += cpu_buffer->entries;
1452         }
1453
1454         return entries;
1455 }
1456
1457 /**
1458  * ring_buffer_overruns - get the number of overruns in the buffer
1459  * @buffer: The ring buffer
1460  *
1461  * Returns the total number of overruns in the ring buffer
1462  * (all CPU entries)
1463  */
1464 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1465 {
1466         struct ring_buffer_per_cpu *cpu_buffer;
1467         unsigned long overruns = 0;
1468         int cpu;
1469
1470         /* if you care about this being correct, lock the buffer */
1471         for_each_buffer_cpu(buffer, cpu) {
1472                 cpu_buffer = buffer->buffers[cpu];
1473                 overruns += cpu_buffer->overrun;
1474         }
1475
1476         return overruns;
1477 }
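
/*
 * Usage sketch (illustrative only): report how many events are waiting and
 * how many were overwritten, across all CPUs.  As noted above, lock the
 * buffer if these numbers must be exact.
 *
 *	printk(KERN_INFO "ring buffer: %lu entries, %lu overruns\n",
 *	       ring_buffer_entries(rb), ring_buffer_overruns(rb));
 */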
1478
1479 /**
1480  * ring_buffer_iter_reset - reset an iterator
1481  * @iter: The iterator to reset
1482  *
1483  * Resets the iterator, so that it will start from the beginning
1484  * again.
1485  */
1486 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1487 {
1488         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1489
1490         /* Iterator usage is expected to have record disabled */
1491         if (list_empty(&cpu_buffer->reader_page->list)) {
1492                 iter->head_page = cpu_buffer->head_page;
1493                 iter->head = cpu_buffer->head_page->read;
1494         } else {
1495                 iter->head_page = cpu_buffer->reader_page;
1496                 iter->head = cpu_buffer->reader_page->read;
1497         }
1498         if (iter->head)
1499                 iter->read_stamp = cpu_buffer->read_stamp;
1500         else
1501                 iter->read_stamp = iter->head_page->time_stamp;
1502 }
1503
1504 /**
1505  * ring_buffer_iter_empty - check if an iterator has no more to read
1506  * @iter: The iterator to check
1507  */
1508 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1509 {
1510         struct ring_buffer_per_cpu *cpu_buffer;
1511
1512         cpu_buffer = iter->cpu_buffer;
1513
1514         return iter->head_page == cpu_buffer->commit_page &&
1515                 iter->head == rb_commit_index(cpu_buffer);
1516 }
1517
1518 static void
1519 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1520                      struct ring_buffer_event *event)
1521 {
1522         u64 delta;
1523
1524         switch (event->type) {
1525         case RINGBUF_TYPE_PADDING:
1526                 return;
1527
1528         case RINGBUF_TYPE_TIME_EXTEND:
1529                 delta = event->array[0];
1530                 delta <<= TS_SHIFT;
1531                 delta += event->time_delta;
1532                 cpu_buffer->read_stamp += delta;
1533                 return;
1534
1535         case RINGBUF_TYPE_TIME_STAMP:
1536                 /* FIXME: not implemented */
1537                 return;
1538
1539         case RINGBUF_TYPE_DATA:
1540                 cpu_buffer->read_stamp += event->time_delta;
1541                 return;
1542
1543         default:
1544                 BUG();
1545         }
1546         return;
1547 }
1548
1549 static void
1550 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1551                           struct ring_buffer_event *event)
1552 {
1553         u64 delta;
1554
1555         switch (event->type) {
1556         case RINGBUF_TYPE_PADDING:
1557                 return;
1558
1559         case RINGBUF_TYPE_TIME_EXTEND:
1560                 delta = event->array[0];
1561                 delta <<= TS_SHIFT;
1562                 delta += event->time_delta;
1563                 iter->read_stamp += delta;
1564                 return;
1565
1566         case RINGBUF_TYPE_TIME_STAMP:
1567                 /* FIXME: not implemented */
1568                 return;
1569
1570         case RINGBUF_TYPE_DATA:
1571                 iter->read_stamp += event->time_delta;
1572                 return;
1573
1574         default:
1575                 BUG();
1576         }
1577         return;
1578 }
1579
1580 static struct buffer_page *
1581 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1582 {
1583         struct buffer_page *reader = NULL;
1584         unsigned long flags;
1585         int nr_loops = 0;
1586
1587         spin_lock_irqsave(&cpu_buffer->lock, flags);
1588
1589  again:
1590         /*
1591          * This should normally only loop twice. But because the
1592          * start of the reader inserts an empty page, it causes
1593          * a case where we will loop three times. There should be no
1594          * reason to loop four times (that I know of).
1595          */
1596         if (unlikely(++nr_loops > 3)) {
1597                 RB_WARN_ON(cpu_buffer, 1);
1598                 reader = NULL;
1599                 goto out;
1600         }
1601
1602         reader = cpu_buffer->reader_page;
1603
1604         /* If there's more to read, return this page */
1605         if (cpu_buffer->reader_page->read < rb_page_size(reader))
1606                 goto out;
1607
1608         /* Never should we have an index greater than the size */
1609         RB_WARN_ON(cpu_buffer,
1610                    cpu_buffer->reader_page->read > rb_page_size(reader));
1611
1612         /* check if we caught up to the tail */
1613         reader = NULL;
1614         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1615                 goto out;
1616
1617         /*
1618          * Splice the empty reader page into the list around the head.
1619          * Reset the reader page to size zero.
1620          */
1621
1622         reader = cpu_buffer->head_page;
1623         cpu_buffer->reader_page->list.next = reader->list.next;
1624         cpu_buffer->reader_page->list.prev = reader->list.prev;
1625
1626         local_set(&cpu_buffer->reader_page->write, 0);
1627         local_set(&cpu_buffer->reader_page->commit, 0);
1628
1629         /* Make the reader page now replace the head */
1630         reader->list.prev->next = &cpu_buffer->reader_page->list;
1631         reader->list.next->prev = &cpu_buffer->reader_page->list;
1632
1633         /*
1634          * If the tail is on the page given to the reader, the head must
1635          * stay on the inserted page, otherwise advance it one page further.
1636          */
1637         cpu_buffer->head_page = cpu_buffer->reader_page;
1638
1639         if (cpu_buffer->commit_page != reader)
1640                 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1641
1642         /* Finally make the old head page the new reader page */
1643         cpu_buffer->reader_page = reader;
1644         rb_reset_reader_page(cpu_buffer);
1645
1646         goto again;
1647
1648  out:
1649         spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1650
1651         return reader;
1652 }
1653
1654 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1655 {
1656         struct ring_buffer_event *event;
1657         struct buffer_page *reader;
1658         unsigned length;
1659
1660         reader = rb_get_reader_page(cpu_buffer);
1661
1662         /* This function should not be called when buffer is empty */
1663         BUG_ON(!reader);
1664
1665         event = rb_reader_event(cpu_buffer);
1666
1667         if (event->type == RINGBUF_TYPE_DATA)
1668                 cpu_buffer->entries--;
1669
1670         rb_update_read_stamp(cpu_buffer, event);
1671
1672         length = rb_event_length(event);
1673         cpu_buffer->reader_page->read += length;
1674 }
1675
1676 static void rb_advance_iter(struct ring_buffer_iter *iter)
1677 {
1678         struct ring_buffer *buffer;
1679         struct ring_buffer_per_cpu *cpu_buffer;
1680         struct ring_buffer_event *event;
1681         unsigned length;
1682
1683         cpu_buffer = iter->cpu_buffer;
1684         buffer = cpu_buffer->buffer;
1685
1686         /*
1687          * Check if we are at the end of the buffer.
1688          */
1689         if (iter->head >= rb_page_size(iter->head_page)) {
1690                 BUG_ON(iter->head_page == cpu_buffer->commit_page);
1691                 rb_inc_iter(iter);
1692                 return;
1693         }
1694
1695         event = rb_iter_head_event(iter);
1696
1697         length = rb_event_length(event);
1698
1699         /*
1700          * This should not be called to advance the iterator head if we
1701          * are at the tail of the buffer.
1702          */
1703         BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
1704                (iter->head + length > rb_commit_index(cpu_buffer)));
1705
1706         rb_update_iter_read_stamp(iter, event);
1707
1708         iter->head += length;
1709
1710         /* check for end of page padding */
1711         if ((iter->head >= rb_page_size(iter->head_page)) &&
1712             (iter->head_page != cpu_buffer->commit_page))
1713                 rb_advance_iter(iter);
1714 }
1715
1716 /**
1717  * ring_buffer_peek - peek at the next event to be read
1718  * @buffer: The ring buffer to read
1719  * @cpu: The cpu to peek at
1720  * @ts: The timestamp counter of this event.
1721  *
1722  * This will return the event that will be read next, but does
1723  * not consume the data.
1724  */
1725 struct ring_buffer_event *
1726 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1727 {
1728         struct ring_buffer_per_cpu *cpu_buffer;
1729         struct ring_buffer_event *event;
1730         struct buffer_page *reader;
1731         int nr_loops = 0;
1732
1733         if (!cpu_isset(cpu, buffer->cpumask))
1734                 return NULL;
1735
1736         cpu_buffer = buffer->buffers[cpu];
1737
1738  again:
1739         /*
1740          * We repeat when a timestamp is encountered. It is possible
1741          * to get multiple timestamps from an interrupt entering just
1742          * as one timestamp is about to be written. The max times
1743          * that this can happen is the number of nested interrupts we
1744          * can have. Nesting interrupts 10 deep is clearly
1745          * an anomaly.
1746          */
1747         if (unlikely(++nr_loops > 10)) {
1748                 RB_WARN_ON(cpu_buffer, 1);
1749                 return NULL;
1750         }
1751
1752         reader = rb_get_reader_page(cpu_buffer);
1753         if (!reader)
1754                 return NULL;
1755
1756         event = rb_reader_event(cpu_buffer);
1757
1758         switch (event->type) {
1759         case RINGBUF_TYPE_PADDING:
1760                 RB_WARN_ON(cpu_buffer, 1);
1761                 rb_advance_reader(cpu_buffer);
1762                 return NULL;
1763
1764         case RINGBUF_TYPE_TIME_EXTEND:
1765                 /* Internal data, OK to advance */
1766                 rb_advance_reader(cpu_buffer);
1767                 goto again;
1768
1769         case RINGBUF_TYPE_TIME_STAMP:
1770                 /* FIXME: not implemented */
1771                 rb_advance_reader(cpu_buffer);
1772                 goto again;
1773
1774         case RINGBUF_TYPE_DATA:
1775                 if (ts) {
1776                         *ts = cpu_buffer->read_stamp + event->time_delta;
1777                         ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1778                 }
1779                 return event;
1780
1781         default:
1782                 BUG();
1783         }
1784
1785         return NULL;
1786 }
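/*
 * Illustrative sketch, not part of the original file: one way a caller
 * might use ring_buffer_peek() to look at the next event on a CPU without
 * consuming it.  The function name and the pr_debug() reporting are made
 * up for this example; ring_buffer_peek() and ring_buffer_event_length()
 * are the APIs defined in this file, and *ts is already normalized by
 * ring_buffer_peek() itself.
 */
static void __maybe_unused rb_example_peek(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        u64 ts;

        /* Does not advance the reader; peeking again returns the same event */
        event = ring_buffer_peek(buffer, cpu, &ts);
        if (!event)
                return; /* empty, or cpu is not part of this buffer's cpumask */

        pr_debug("next event on cpu %d: len=%u ts=%llu\n",
                 cpu, ring_buffer_event_length(event),
                 (unsigned long long)ts);
}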
1787
1788 /**
1789  * ring_buffer_iter_peek - peek at the next event to be read
1790  * @iter: The ring buffer iterator
1791  * @ts: The timestamp counter of this event.
1792  *
1793  * This will return the event that will be read next, but does
1794  * not increment the iterator.
1795  */
1796 struct ring_buffer_event *
1797 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1798 {
1799         struct ring_buffer *buffer;
1800         struct ring_buffer_per_cpu *cpu_buffer;
1801         struct ring_buffer_event *event;
1802         int nr_loops = 0;
1803
1804         if (ring_buffer_iter_empty(iter))
1805                 return NULL;
1806
1807         cpu_buffer = iter->cpu_buffer;
1808         buffer = cpu_buffer->buffer;
1809
1810  again:
1811         /*
1812          * We repeat when a timestamp is encountered. It is possible
1813          * to get multiple timestamps from an interrupt entering just
1814          * as one timestamp is about to be written. The max times
1815          * that this can happen is the number of nested interrupts we
1816          * can have. Nesting interrupts 10 deep is clearly
1817          * an anomaly.
1818          */
1819         if (unlikely(++nr_loops > 10)) {
1820                 RB_WARN_ON(cpu_buffer, 1);
1821                 return NULL;
1822         }
1823
1824         if (rb_per_cpu_empty(cpu_buffer))
1825                 return NULL;
1826
1827         event = rb_iter_head_event(iter);
1828
1829         switch (event->type) {
1830         case RINGBUF_TYPE_PADDING:
1831                 rb_inc_iter(iter);
1832                 goto again;
1833
1834         case RINGBUF_TYPE_TIME_EXTEND:
1835                 /* Internal data, OK to advance */
1836                 rb_advance_iter(iter);
1837                 goto again;
1838
1839         case RINGBUF_TYPE_TIME_STAMP:
1840                 /* FIXME: not implemented */
1841                 rb_advance_iter(iter);
1842                 goto again;
1843
1844         case RINGBUF_TYPE_DATA:
1845                 if (ts) {
1846                         *ts = iter->read_stamp + event->time_delta;
1847                         ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1848                 }
1849                 return event;
1850
1851         default:
1852                 BUG();
1853         }
1854
1855         return NULL;
1856 }
1857
1858 /**
1859  * ring_buffer_consume - return an event and consume it
1860  * @buffer: The ring buffer to get the next event from
1861  *
1862  * Returns the next event in the ring buffer and consumes it. This means
1863  * that sequential reads will keep returning a different event, and will
1864  * eventually empty the ring buffer if the producer is slower.
1865  */
1866 struct ring_buffer_event *
1867 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1868 {
1869         struct ring_buffer_per_cpu *cpu_buffer;
1870         struct ring_buffer_event *event;
1871
1872         if (!cpu_isset(cpu, buffer->cpumask))
1873                 return NULL;
1874
1875         event = ring_buffer_peek(buffer, cpu, ts);
1876         if (!event)
1877                 return NULL;
1878
1879         cpu_buffer = buffer->buffers[cpu];
1880         rb_advance_reader(cpu_buffer);
1881
1882         return event;
1883 }
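/*
 * Illustrative sketch, not part of the original file: draining one CPU
 * buffer with consuming reads.  ring_buffer_consume() returns NULL once
 * the reader catches up with the writer, so the loop ends when the
 * per-cpu buffer is empty.  The function name is hypothetical, and
 * ring_buffer_event_data() is assumed to be the public accessor wrapping
 * rb_event_data() for the event payload.
 */
static void __maybe_unused rb_example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
                void *data = ring_buffer_event_data(event);
                unsigned len = ring_buffer_event_length(event);

                /* process data/len here; ts is the normalized timestamp */
                (void)data;
                (void)len;
        }
}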
1884
1885 /**
1886  * ring_buffer_read_start - start a non consuming read of the buffer
1887  * @buffer: The ring buffer to read from
1888  * @cpu: The cpu buffer to iterate over
1889  *
1890  * This starts up an iteration through the buffer. It also disables
1891  * the recording to the buffer until the reading is finished.
1892  * This prevents the reading from being corrupted. This is not
1893  * a consuming read, so a producer is not expected.
1894  *
1895  * Must be paired with ring_buffer_read_finish.
1896  */
1897 struct ring_buffer_iter *
1898 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1899 {
1900         struct ring_buffer_per_cpu *cpu_buffer;
1901         struct ring_buffer_iter *iter;
1902         unsigned long flags;
1903
1904         if (!cpu_isset(cpu, buffer->cpumask))
1905                 return NULL;
1906
1907         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1908         if (!iter)
1909                 return NULL;
1910
1911         cpu_buffer = buffer->buffers[cpu];
1912
1913         iter->cpu_buffer = cpu_buffer;
1914
1915         atomic_inc(&cpu_buffer->record_disabled);
1916         synchronize_sched();
1917
1918         spin_lock_irqsave(&cpu_buffer->lock, flags);
1919         ring_buffer_iter_reset(iter);
1920         spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1921
1922         return iter;
1923 }
1924
1925 /**
1926  * ring_buffer_read_finish - finish reading the iterator of the buffer
1927  * @iter: The iterator retrieved by ring_buffer_read_start
1928  *
1929  * This re-enables the recording to the buffer, and frees the
1930  * iterator.
1931  */
1932 void
1933 ring_buffer_read_finish(struct ring_buffer_iter *iter)
1934 {
1935         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1936
1937         atomic_dec(&cpu_buffer->record_disabled);
1938         kfree(iter);
1939 }
1940
1941 /**
1942  * ring_buffer_read - read the next item in the ring buffer by the iterator
1943  * @iter: The ring buffer iterator
1944  * @ts: The time stamp of the event read.
1945  *
1946  * This reads the next event in the ring buffer and increments the iterator.
1947  */
1948 struct ring_buffer_event *
1949 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1950 {
1951         struct ring_buffer_event *event;
1952
1953         event = ring_buffer_iter_peek(iter, ts);
1954         if (!event)
1955                 return NULL;
1956
1957         rb_advance_iter(iter);
1958
1959         return event;
1960 }
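/*
 * Illustrative sketch, not part of the original file: the non-consuming
 * iterator protocol.  ring_buffer_read_start() disables recording on the
 * CPU buffer, ring_buffer_read() walks the events, and the pass must be
 * closed with ring_buffer_read_finish() to re-enable recording and free
 * the iterator.  ring_buffer_iter_peek() could be used instead of (or
 * before) ring_buffer_read() to look at an event without advancing.
 * The function name is made up for this example.
 */
static void __maybe_unused rb_example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_iter *iter;
        struct ring_buffer_event *event;
        u64 ts;

        iter = ring_buffer_read_start(buffer, cpu);
        if (!iter)
                return;

        /* recording on this cpu buffer is disabled until read_finish */
        while ((event = ring_buffer_read(iter, &ts))) {
                /* inspect the event; nothing is consumed from the buffer */
                ;
        }

        ring_buffer_read_finish(iter);
}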
1961
1962 /**
1963  * ring_buffer_size - return the size of the ring buffer (in bytes)
1964  * @buffer: The ring buffer.
1965  */
1966 unsigned long ring_buffer_size(struct ring_buffer *buffer)
1967 {
1968         return BUF_PAGE_SIZE * buffer->pages;
1969 }
1970
1971 static void
1972 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1973 {
1974         cpu_buffer->head_page
1975                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1976         local_set(&cpu_buffer->head_page->write, 0);
1977         local_set(&cpu_buffer->head_page->commit, 0);
1978
1979         cpu_buffer->head_page->read = 0;
1980
1981         cpu_buffer->tail_page = cpu_buffer->head_page;
1982         cpu_buffer->commit_page = cpu_buffer->head_page;
1983
1984         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1985         local_set(&cpu_buffer->reader_page->write, 0);
1986         local_set(&cpu_buffer->reader_page->commit, 0);
1987         cpu_buffer->reader_page->read = 0;
1988
1989         cpu_buffer->overrun = 0;
1990         cpu_buffer->entries = 0;
1991 }
1992
1993 /**
1994  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
1995  * @buffer: The ring buffer to reset a per cpu buffer of
1996  * @cpu: The CPU buffer to be reset
1997  */
1998 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
1999 {
2000         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2001         unsigned long flags;
2002
2003         if (!cpu_isset(cpu, buffer->cpumask))
2004                 return;
2005
2006         spin_lock_irqsave(&cpu_buffer->lock, flags);
2007
2008         rb_reset_cpu(cpu_buffer);
2009
2010         spin_unlock_irqrestore(&cpu_buffer->lock, flags);
2011 }
2012
2013 /**
2014  * ring_buffer_reset - reset a ring buffer
2015  * @buffer: The ring buffer to reset all cpu buffers
2016  */
2017 void ring_buffer_reset(struct ring_buffer *buffer)
2018 {
2019         int cpu;
2020
2021         for_each_buffer_cpu(buffer, cpu)
2022                 ring_buffer_reset_cpu(buffer, cpu);
2023 }
2024
2025 /**
2026  * ring_buffer_empty - is the ring buffer empty?
2027  * @buffer: The ring buffer to test
2028  */
2029 int ring_buffer_empty(struct ring_buffer *buffer)
2030 {
2031         struct ring_buffer_per_cpu *cpu_buffer;
2032         int cpu;
2033
2034         /* yes this is racy, but if you don't like the race, lock the buffer */
2035         for_each_buffer_cpu(buffer, cpu) {
2036                 cpu_buffer = buffer->buffers[cpu];
2037                 if (!rb_per_cpu_empty(cpu_buffer))
2038                         return 0;
2039         }
2040         return 1;
2041 }
2042
2043 /**
2044  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2045  * @buffer: The ring buffer
2046  * @cpu: The CPU buffer to test
2047  */
2048 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2049 {
2050         struct ring_buffer_per_cpu *cpu_buffer;
2051
2052         if (!cpu_isset(cpu, buffer->cpumask))
2053                 return 1;
2054
2055         cpu_buffer = buffer->buffers[cpu];
2056         return rb_per_cpu_empty(cpu_buffer);
2057 }
2058
2059 /**
2060  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2061  * @buffer_a: One buffer to swap with
2062  * @buffer_b: The other buffer to swap with
2063  *
2064  * This function is useful for tracers that want to take a "snapshot"
2065  * of a CPU buffer and have another backup buffer lying around.
2066  * It is expected that the tracer handles the cpu buffer not being
2067  * used at the moment.
2068  */
2069 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2070                          struct ring_buffer *buffer_b, int cpu)
2071 {
2072         struct ring_buffer_per_cpu *cpu_buffer_a;
2073         struct ring_buffer_per_cpu *cpu_buffer_b;
2074
2075         if (!cpu_isset(cpu, buffer_a->cpumask) ||
2076             !cpu_isset(cpu, buffer_b->cpumask))
2077                 return -EINVAL;
2078
2079         /* At least make sure the two buffers are somewhat the same */
2080         if (buffer_a->size != buffer_b->size ||
2081             buffer_a->pages != buffer_b->pages)
2082                 return -EINVAL;
2083
2084         cpu_buffer_a = buffer_a->buffers[cpu];
2085         cpu_buffer_b = buffer_b->buffers[cpu];
2086
2087         /*
2088          * We can't do a synchronize_sched here because this
2089          * function can be called in atomic context.
2090          * Normally this will be called from the same CPU as cpu.
2091          * If not, it is up to the caller to protect this.
2092          */
2093         atomic_inc(&cpu_buffer_a->record_disabled);
2094         atomic_inc(&cpu_buffer_b->record_disabled);
2095
2096         buffer_a->buffers[cpu] = cpu_buffer_b;
2097         buffer_b->buffers[cpu] = cpu_buffer_a;
2098
2099         cpu_buffer_b->buffer = buffer_a;
2100         cpu_buffer_a->buffer = buffer_b;
2101
2102         atomic_dec(&cpu_buffer_a->record_disabled);
2103         atomic_dec(&cpu_buffer_b->record_disabled);
2104
2105         return 0;
2106 }
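/*
 * Illustrative sketch, not part of the original file: taking a "snapshot"
 * of one CPU's buffer by swapping it with a spare ring buffer of the same
 * size, as the comment above describes.  The spare buffer and the function
 * name are hypothetical; the caller must ensure the swapped CPU buffer is
 * not being written to at the moment.
 */
static int __maybe_unused rb_example_snapshot_cpu(struct ring_buffer *live,
                                                  struct ring_buffer *spare,
                                                  int cpu)
{
        int ret;

        ret = ring_buffer_swap_cpu(live, spare, cpu);
        if (ret)
                return ret; /* -EINVAL: size mismatch or cpu not present */

        /* 'spare' now holds the events recorded on 'live' for this cpu */
        return 0;
}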
2107
2108 static ssize_t
2109 rb_simple_read(struct file *filp, char __user *ubuf,
2110                size_t cnt, loff_t *ppos)
2111 {
2112         int *p = filp->private_data;
2113         char buf[64];
2114         int r;
2115
2116         /* !ring_buffers_off == tracing_on */
2117         r = sprintf(buf, "%d\n", !*p);
2118
2119         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2120 }
2121
2122 static ssize_t
2123 rb_simple_write(struct file *filp, const char __user *ubuf,
2124                 size_t cnt, loff_t *ppos)
2125 {
2126         int *p = filp->private_data;
2127         char buf[64];
2128         long val;
2129         int ret;
2130
2131         if (cnt >= sizeof(buf))
2132                 return -EINVAL;
2133
2134         if (copy_from_user(&buf, ubuf, cnt))
2135                 return -EFAULT;
2136
2137         buf[cnt] = 0;
2138
2139         ret = strict_strtoul(buf, 10, &val);
2140         if (ret < 0)
2141                 return ret;
2142
2143         /* !ring_buffers_off == tracing_on */
2144         *p = !val;
2145
2146         (*ppos)++;
2147
2148         return cnt;
2149 }
2150
2151 static struct file_operations rb_simple_fops = {
2152         .open           = tracing_open_generic,
2153         .read           = rb_simple_read,
2154         .write          = rb_simple_write,
2155 };
2156
2157
2158 static __init int rb_init_debugfs(void)
2159 {
2160         struct dentry *d_tracer;
2161         struct dentry *entry;
2162
2163         d_tracer = tracing_init_dentry();
2164
2165         entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2166                                     &ring_buffers_off, &rb_simple_fops);
2167         if (!entry)
2168                 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2169
2170         return 0;
2171 }
2172
2173 fs_initcall(rb_init_debugfs);
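/*
 * Usage sketch, not part of the original file: the "tracing_on" file
 * created above is the global on/off switch for ring buffer recording.
 * Assuming debugfs is mounted at /sys/kernel/debug and tracing_init_dentry()
 * returns the "tracing" directory there, something like:
 *
 *      echo 0 > /sys/kernel/debug/tracing/tracing_on   (stop all recording)
 *      echo 1 > /sys/kernel/debug/tracing/tracing_on   (resume recording)
 *      cat /sys/kernel/debug/tracing/tracing_on        (read current state)
 *
 * rb_simple_write() stores !val in ring_buffers_off, so reading "1" means
 * recording is enabled, matching the "!ring_buffers_off == tracing_on"
 * comments above.
 */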