4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6 #include <linux/ring_buffer.h>
7 #include <linux/spinlock.h>
8 #include <linux/debugfs.h>
9 #include <linux/uaccess.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
12 #include <linux/mutex.h>
13 #include <linux/sched.h> /* used for sched_clock() (for now) */
14 #include <linux/init.h>
15 #include <linux/hash.h>
16 #include <linux/list.h>
21 /* Global flag to disable all recording to ring buffers */
22 static int ring_buffers_off __read_mostly;
25 * tracing_on - enable all tracing buffers
27 * This function enables all tracing buffers that may have been
28 * disabled with tracing_off.
36 * tracing_off - turn off all tracing buffers
38 * This function stops all tracing buffers from recording data.
39 * It does not disable any overhead the tracers themselves may
40 * be causing. This function simply causes all recording to
41 * the ring buffers to fail.
43 void tracing_off(void)
48 /* Up this if you want to test the TIME_EXTENTS and normalization */
52 u64 ring_buffer_time_stamp(int cpu)
54 /* shift to debug/test normalization and TIME_EXTENTS */
55 return sched_clock() << DEBUG_SHIFT;
58 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
60 /* Just a crude test of the normalize function and deltas */
64 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
65 #define RB_ALIGNMENT_SHIFT 2
66 #define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
67 #define RB_MAX_SMALL_DATA 28
70 RB_LEN_TIME_EXTEND = 8,
71 RB_LEN_TIME_STAMP = 16,
74 /* inline for ring buffer fast paths */
75 static inline unsigned
76 rb_event_length(struct ring_buffer_event *event)
80 switch (event->type) {
81 case RINGBUF_TYPE_PADDING:
85 case RINGBUF_TYPE_TIME_EXTEND:
86 return RB_LEN_TIME_EXTEND;
88 case RINGBUF_TYPE_TIME_STAMP:
89 return RB_LEN_TIME_STAMP;
91 case RINGBUF_TYPE_DATA:
93 length = event->len << RB_ALIGNMENT_SHIFT;
95 length = event->array[0];
96 return length + RB_EVNT_HDR_SIZE;
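/*
 * Worked example (editor's note, not part of the original file): a small
 * DATA event encodes its size in the len field in units of RB_ALIGNMENT,
 * so len == 3 means 3 << RB_ALIGNMENT_SHIFT == 12 bytes of payload and
 * rb_event_length() returns 12 + RB_EVNT_HDR_SIZE.  A payload larger than
 * RB_MAX_SMALL_DATA is written with len == 0 and its size kept in
 * array[0], so the total comes from array[0] + RB_EVNT_HDR_SIZE instead.
 */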
105 * ring_buffer_event_length - return the length of the event
106 * @event: the event to get the length of
108 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
110 return rb_event_length(event);
113 /* inline for ring buffer fast paths */
115 rb_event_data(struct ring_buffer_event *event)
117 BUG_ON(event->type != RINGBUF_TYPE_DATA);
118 /* If length is in len field, then array[0] has the data */
120 return (void *)&event->array[0];
121 /* Otherwise length is in array[0] and array[1] has the data */
122 return (void *)&event->array[1];
126 * ring_buffer_event_data - return the data of the event
127 * @event: the event to get the data from
129 void *ring_buffer_event_data(struct ring_buffer_event *event)
131 return rb_event_data(event);
134 #define for_each_buffer_cpu(buffer, cpu) \
135 for_each_cpu_mask(cpu, buffer->cpumask)
138 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
139 #define TS_DELTA_TEST (~TS_MASK)
142 * This hack stolen from mm/slob.c.
143 * We can store per page timing information in the page frame of the page.
144 * Thanks to Peter Zijlstra for suggesting this idea.
147 u64 time_stamp; /* page time stamp */
148 local_t write; /* index for next write */
149 local_t commit; /* write committed index */
150 unsigned read; /* index for next read */
151 struct list_head list; /* list of free pages */
152 void *page; /* Actual data page */
156 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
159 static inline void free_buffer_page(struct buffer_page *bpage)
162 free_page((unsigned long)bpage->page);
167 * We need to fit the time_stamp delta into 27 bits.
169 static inline int test_time_stamp(u64 delta)
171 if (delta & TS_DELTA_TEST)
176 #define BUF_PAGE_SIZE PAGE_SIZE
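/*
 * Worked example (editor's note; assumes the elided TS_SHIFT definition
 * is 27, matching the "27 bits" comment above): with a nanosecond clock a
 * delta of 2^27 ns (~134 ms) or more has bits set in TS_DELTA_TEST, so
 * test_time_stamp() returns nonzero and the writer must insert a
 * TIME_EXTEND event rather than squeeze the delta into the event header.
 */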
179 * head_page == tail_page && head == tail then buffer is empty.
181 struct ring_buffer_per_cpu {
183 struct ring_buffer *buffer;
185 struct lock_class_key lock_key;
186 struct list_head pages;
187 struct buffer_page *head_page; /* read from head */
188 struct buffer_page *tail_page; /* write to tail */
189 struct buffer_page *commit_page; /* committed pages */
190 struct buffer_page *reader_page;
191 unsigned long overrun;
192 unsigned long entries;
195 atomic_t record_disabled;
204 atomic_t record_disabled;
208 struct ring_buffer_per_cpu **buffers;
211 struct ring_buffer_iter {
212 struct ring_buffer_per_cpu *cpu_buffer;
214 struct buffer_page *head_page;
218 #define RB_WARN_ON(buffer, cond) \
220 if (unlikely(cond)) { \
221 atomic_inc(&buffer->record_disabled); \
226 #define RB_WARN_ON_RET(buffer, cond) \
228 if (unlikely(cond)) { \
229 atomic_inc(&buffer->record_disabled); \
235 #define RB_WARN_ON_ONCE(buffer, cond) \
238 if (unlikely(cond) && !once) { \
240 atomic_inc(&buffer->record_disabled); \
246 * check_pages - integrity check of buffer pages
247 * @cpu_buffer: CPU buffer with pages to test
249 * As a safety measure we check to make sure the data pages have not
252 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
254 struct list_head *head = &cpu_buffer->pages;
255 struct buffer_page *page, *tmp;
257 RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
258 RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
260 list_for_each_entry_safe(page, tmp, head, list) {
261 RB_WARN_ON_RET(cpu_buffer,
262 page->list.next->prev != &page->list);
263 RB_WARN_ON_RET(cpu_buffer,
264 page->list.prev->next != &page->list);
270 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
273 struct list_head *head = &cpu_buffer->pages;
274 struct buffer_page *page, *tmp;
279 for (i = 0; i < nr_pages; i++) {
280 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
281 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
284 list_add(&page->list, &pages);
286 addr = __get_free_page(GFP_KERNEL);
289 page->page = (void *)addr;
292 list_splice(&pages, head);
294 rb_check_pages(cpu_buffer);
299 list_for_each_entry_safe(page, tmp, &pages, list) {
300 list_del_init(&page->list);
301 free_buffer_page(page);
306 static struct ring_buffer_per_cpu *
307 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
309 struct ring_buffer_per_cpu *cpu_buffer;
310 struct buffer_page *page;
314 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
315 GFP_KERNEL, cpu_to_node(cpu));
319 cpu_buffer->cpu = cpu;
320 cpu_buffer->buffer = buffer;
321 spin_lock_init(&cpu_buffer->lock);
322 INIT_LIST_HEAD(&cpu_buffer->pages);
324 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
325 GFP_KERNEL, cpu_to_node(cpu));
327 goto fail_free_buffer;
329 cpu_buffer->reader_page = page;
330 addr = __get_free_page(GFP_KERNEL);
332 goto fail_free_reader;
333 page->page = (void *)addr;
335 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
337 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
339 goto fail_free_reader;
341 cpu_buffer->head_page
342 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
343 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
348 free_buffer_page(cpu_buffer->reader_page);
355 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
357 struct list_head *head = &cpu_buffer->pages;
358 struct buffer_page *page, *tmp;
360 list_del_init(&cpu_buffer->reader_page->list);
361 free_buffer_page(cpu_buffer->reader_page);
363 list_for_each_entry_safe(page, tmp, head, list) {
364 list_del_init(&page->list);
365 free_buffer_page(page);
371 * Causes compile errors if the struct buffer_page gets bigger
372 * than the struct page.
374 extern int ring_buffer_page_too_big(void);
377 * ring_buffer_alloc - allocate a new ring_buffer
378 * @size: the size in bytes that is needed.
379 * @flags: attributes to set for the ring buffer.
381 * Currently the only flag that is available is the RB_FL_OVERWRITE
382 * flag. This flag means that the buffer will overwrite old data
383 * when the buffer wraps. If this flag is not set, the buffer will
384 * drop data when the tail hits the head.
386 struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
388 struct ring_buffer *buffer;
392 /* Paranoid! Optimizes out when all is well */
393 if (sizeof(struct buffer_page) > sizeof(struct page))
394 ring_buffer_page_too_big();
397 /* keep it in its own cache line */
398 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
403 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
404 buffer->flags = flags;
406 /* need at least two pages */
407 if (buffer->pages == 1)
410 buffer->cpumask = cpu_possible_map;
411 buffer->cpus = nr_cpu_ids;
413 bsize = sizeof(void *) * nr_cpu_ids;
414 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
416 if (!buffer->buffers)
417 goto fail_free_buffer;
419 for_each_buffer_cpu(buffer, cpu) {
420 buffer->buffers[cpu] =
421 rb_allocate_cpu_buffer(buffer, cpu);
422 if (!buffer->buffers[cpu])
423 goto fail_free_buffers;
426 mutex_init(&buffer->mutex);
431 for_each_buffer_cpu(buffer, cpu) {
432 if (buffer->buffers[cpu])
433 rb_free_cpu_buffer(buffer->buffers[cpu]);
435 kfree(buffer->buffers);
443 * ring_buffer_free - free a ring buffer.
444 * @buffer: the buffer to free.
447 ring_buffer_free(struct ring_buffer *buffer)
451 for_each_buffer_cpu(buffer, cpu)
452 rb_free_cpu_buffer(buffer->buffers[cpu]);
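/*
 * Usage sketch (editor's illustration, not part of the original file);
 * the 1 MB size and the overwrite flag are arbitrary choices:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */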
457 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
460 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
462 struct buffer_page *page;
466 atomic_inc(&cpu_buffer->record_disabled);
469 for (i = 0; i < nr_pages; i++) {
470 BUG_ON(list_empty(&cpu_buffer->pages));
471 p = cpu_buffer->pages.next;
472 page = list_entry(p, struct buffer_page, list);
473 list_del_init(&page->list);
474 free_buffer_page(page);
476 BUG_ON(list_empty(&cpu_buffer->pages));
478 rb_reset_cpu(cpu_buffer);
480 rb_check_pages(cpu_buffer);
482 atomic_dec(&cpu_buffer->record_disabled);
487 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
488 struct list_head *pages, unsigned nr_pages)
490 struct buffer_page *page;
494 atomic_inc(&cpu_buffer->record_disabled);
497 for (i = 0; i < nr_pages; i++) {
498 BUG_ON(list_empty(pages));
500 page = list_entry(p, struct buffer_page, list);
501 list_del_init(&page->list);
502 list_add_tail(&page->list, &cpu_buffer->pages);
504 rb_reset_cpu(cpu_buffer);
506 rb_check_pages(cpu_buffer);
508 atomic_dec(&cpu_buffer->record_disabled);
512 * ring_buffer_resize - resize the ring buffer
513 * @buffer: the buffer to resize.
514 * @size: the new size.
516 * The tracer is responsible for making sure that the buffer is
517 * not being used while changing the size.
518 * Note: We may be able to change the above requirement by using
519 * RCU synchronizations.
521 * Minimum size is 2 * BUF_PAGE_SIZE.
523 * Returns -1 on failure.
525 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
527 struct ring_buffer_per_cpu *cpu_buffer;
528 unsigned nr_pages, rm_pages, new_pages;
529 struct buffer_page *page, *tmp;
530 unsigned long buffer_size;
535 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
536 size *= BUF_PAGE_SIZE;
537 buffer_size = buffer->pages * BUF_PAGE_SIZE;
539 /* we need a minimum of two pages */
540 if (size < BUF_PAGE_SIZE * 2)
541 size = BUF_PAGE_SIZE * 2;
543 if (size == buffer_size)
546 mutex_lock(&buffer->mutex);
548 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
550 if (size < buffer_size) {
552 /* easy case, just free pages */
553 BUG_ON(nr_pages >= buffer->pages);
555 rm_pages = buffer->pages - nr_pages;
557 for_each_buffer_cpu(buffer, cpu) {
558 cpu_buffer = buffer->buffers[cpu];
559 rb_remove_pages(cpu_buffer, rm_pages);
565 * This is a bit more difficult. We only want to add pages
566 * when we can allocate enough for all CPUs. We do this
567 * by allocating all the pages and storing them on a local
568 * linked list. If we succeed in our allocation, then we
569 * add these pages to the cpu_buffers. Otherwise we just free
570 * them all and return -ENOMEM;
572 BUG_ON(nr_pages <= buffer->pages);
573 new_pages = nr_pages - buffer->pages;
575 for_each_buffer_cpu(buffer, cpu) {
576 for (i = 0; i < new_pages; i++) {
577 page = kzalloc_node(ALIGN(sizeof(*page),
579 GFP_KERNEL, cpu_to_node(cpu));
582 list_add(&page->list, &pages);
583 addr = __get_free_page(GFP_KERNEL);
586 page->page = (void *)addr;
590 for_each_buffer_cpu(buffer, cpu) {
591 cpu_buffer = buffer->buffers[cpu];
592 rb_insert_pages(cpu_buffer, &pages, new_pages);
595 BUG_ON(!list_empty(&pages));
598 buffer->pages = nr_pages;
599 mutex_unlock(&buffer->mutex);
604 list_for_each_entry_safe(page, tmp, &pages, list) {
605 list_del_init(&page->list);
606 free_buffer_page(page);
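/*
 * Usage sketch (editor's illustration): resizing an existing buffer. The
 * request is rounded to whole pages and clamped to at least two pages;
 * a negative return means the resize did not take effect.
 *
 *	if (ring_buffer_resize(rb, new_size_in_bytes) < 0)
 *		pr_warning("ring buffer resize failed\n");
 */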
611 static inline int rb_null_event(struct ring_buffer_event *event)
613 return event->type == RINGBUF_TYPE_PADDING;
616 static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
618 return page->page + index;
621 static inline struct ring_buffer_event *
622 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
624 return __rb_page_index(cpu_buffer->reader_page,
625 cpu_buffer->reader_page->read);
628 static inline struct ring_buffer_event *
629 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
631 return __rb_page_index(cpu_buffer->head_page,
632 cpu_buffer->head_page->read);
635 static inline struct ring_buffer_event *
636 rb_iter_head_event(struct ring_buffer_iter *iter)
638 return __rb_page_index(iter->head_page, iter->head);
641 static inline unsigned rb_page_write(struct buffer_page *bpage)
643 return local_read(&bpage->write);
646 static inline unsigned rb_page_commit(struct buffer_page *bpage)
648 return local_read(&bpage->commit);
651 /* Size is determined by what has been committed */
652 static inline unsigned rb_page_size(struct buffer_page *bpage)
654 return rb_page_commit(bpage);
657 static inline unsigned
658 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
660 return rb_page_commit(cpu_buffer->commit_page);
663 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
665 return rb_page_commit(cpu_buffer->head_page);
669 * When the tail hits the head and the buffer is in overwrite mode,
670 * the head jumps to the next page and all content on the previous
671 * page is discarded. But before doing so, we update the overrun
672 * variable of the buffer.
674 static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
676 struct ring_buffer_event *event;
679 for (head = 0; head < rb_head_size(cpu_buffer);
680 head += rb_event_length(event)) {
682 event = __rb_page_index(cpu_buffer->head_page, head);
683 BUG_ON(rb_null_event(event));
684 /* Only count data entries */
685 if (event->type != RINGBUF_TYPE_DATA)
687 cpu_buffer->overrun++;
688 cpu_buffer->entries--;
692 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
693 struct buffer_page **page)
695 struct list_head *p = (*page)->list.next;
697 if (p == &cpu_buffer->pages)
700 *page = list_entry(p, struct buffer_page, list);
703 static inline unsigned
704 rb_event_index(struct ring_buffer_event *event)
706 unsigned long addr = (unsigned long)event;
708 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
712 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
713 struct ring_buffer_event *event)
715 unsigned long addr = (unsigned long)event;
718 index = rb_event_index(event);
721 return cpu_buffer->commit_page->page == (void *)addr &&
722 rb_commit_index(cpu_buffer) == index;
726 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
727 struct ring_buffer_event *event)
729 unsigned long addr = (unsigned long)event;
732 index = rb_event_index(event);
735 while (cpu_buffer->commit_page->page != (void *)addr) {
736 RB_WARN_ON(cpu_buffer,
737 cpu_buffer->commit_page == cpu_buffer->tail_page);
738 cpu_buffer->commit_page->commit =
739 cpu_buffer->commit_page->write;
740 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
741 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
744 /* Now set the commit to the event's index */
745 local_set(&cpu_buffer->commit_page->commit, index);
749 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
752 * We only race with interrupts and NMIs on this CPU.
753 * If we own the commit event, then we can commit
754 * all others that interrupted us, since the interruptions
755 * are in stack format (they finish before they come
756 * back to us). This allows us to do a simple loop to
757 * assign the commit to the tail.
759 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
760 cpu_buffer->commit_page->commit =
761 cpu_buffer->commit_page->write;
762 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
763 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
764 /* add barrier to keep gcc from optimizing too much */
767 while (rb_commit_index(cpu_buffer) !=
768 rb_page_write(cpu_buffer->commit_page)) {
769 cpu_buffer->commit_page->commit =
770 cpu_buffer->commit_page->write;
775 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
777 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
778 cpu_buffer->reader_page->read = 0;
781 static inline void rb_inc_iter(struct ring_buffer_iter *iter)
783 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
786 * The iterator could be on the reader page (it starts there).
787 * But the head could have moved, since the reader was
788 * found. Check for this case and assign the iterator
789 * to the head page instead of next.
791 if (iter->head_page == cpu_buffer->reader_page)
792 iter->head_page = cpu_buffer->head_page;
794 rb_inc_page(cpu_buffer, &iter->head_page);
796 iter->read_stamp = iter->head_page->time_stamp;
801 * ring_buffer_update_event - update event type and data
802 * @event: the event to update
803 * @type: the type of event
804 * @length: the size of the event field in the ring buffer
806 * Update the type and data fields of the event. The length
807 * is the actual size that is written to the ring buffer,
808 * and with this, we can determine what to place into the
812 rb_update_event(struct ring_buffer_event *event,
813 unsigned type, unsigned length)
819 case RINGBUF_TYPE_PADDING:
822 case RINGBUF_TYPE_TIME_EXTEND:
824 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
825 >> RB_ALIGNMENT_SHIFT;
828 case RINGBUF_TYPE_TIME_STAMP:
830 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
831 >> RB_ALIGNMENT_SHIFT;
834 case RINGBUF_TYPE_DATA:
835 length -= RB_EVNT_HDR_SIZE;
836 if (length > RB_MAX_SMALL_DATA) {
838 event->array[0] = length;
841 (length + (RB_ALIGNMENT-1))
842 >> RB_ALIGNMENT_SHIFT;
849 static inline unsigned rb_calculate_event_length(unsigned length)
851 struct ring_buffer_event event; /* Used only for sizeof array */
853 /* zero length can cause confusion */
857 if (length > RB_MAX_SMALL_DATA)
858 length += sizeof(event.array[0]);
860 length += RB_EVNT_HDR_SIZE;
861 length = ALIGN(length, RB_ALIGNMENT);
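/*
 * Worked example (editor's note, assuming the 4-byte event header from
 * linux/ring_buffer.h): a 10-byte payload uses the small encoding, so
 * 10 + RB_EVNT_HDR_SIZE = 14 is rounded up to 16 bytes of buffer space.
 * A 40-byte payload exceeds RB_MAX_SMALL_DATA, so an extra array[0] word
 * is counted first: 40 + 4 + RB_EVNT_HDR_SIZE = 48 bytes, already aligned.
 */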
866 static struct ring_buffer_event *
867 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
868 unsigned type, unsigned long length, u64 *ts)
870 struct buffer_page *tail_page, *head_page, *reader_page;
871 unsigned long tail, write;
872 struct ring_buffer *buffer = cpu_buffer->buffer;
873 struct ring_buffer_event *event;
876 tail_page = cpu_buffer->tail_page;
877 write = local_add_return(length, &tail_page->write);
878 tail = write - length;
880 /* See if we shot past the end of this buffer page */
881 if (write > BUF_PAGE_SIZE) {
882 struct buffer_page *next_page = tail_page;
884 spin_lock_irqsave(&cpu_buffer->lock, flags);
886 rb_inc_page(cpu_buffer, &next_page);
888 head_page = cpu_buffer->head_page;
889 reader_page = cpu_buffer->reader_page;
891 /* we grabbed the lock before incrementing */
892 RB_WARN_ON(cpu_buffer, next_page == reader_page);
895 * If for some reason, we had an interrupt storm that made
896 * it all the way around the buffer, bail, and warn
899 if (unlikely(next_page == cpu_buffer->commit_page)) {
904 if (next_page == head_page) {
905 if (!(buffer->flags & RB_FL_OVERWRITE)) {
907 if (tail <= BUF_PAGE_SIZE)
908 local_set(&tail_page->write, tail);
912 /* tail_page has not moved yet? */
913 if (tail_page == cpu_buffer->tail_page) {
914 /* count overflows */
915 rb_update_overflow(cpu_buffer);
917 rb_inc_page(cpu_buffer, &head_page);
918 cpu_buffer->head_page = head_page;
919 cpu_buffer->head_page->read = 0;
924 * If the tail page is still the same as what we think
925 * it is, then it is up to us to update the tail
928 if (tail_page == cpu_buffer->tail_page) {
929 local_set(&next_page->write, 0);
930 local_set(&next_page->commit, 0);
931 cpu_buffer->tail_page = next_page;
933 /* reread the time stamp */
934 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
935 cpu_buffer->tail_page->time_stamp = *ts;
939 * The actual tail page has moved forward.
941 if (tail < BUF_PAGE_SIZE) {
942 /* Mark the rest of the page with padding */
943 event = __rb_page_index(tail_page, tail);
944 event->type = RINGBUF_TYPE_PADDING;
947 if (tail <= BUF_PAGE_SIZE)
948 /* Set the write back to the previous setting */
949 local_set(&tail_page->write, tail);
952 * If this was a commit entry that failed,
955 if (tail_page == cpu_buffer->commit_page &&
956 tail == rb_commit_index(cpu_buffer)) {
957 rb_set_commit_to_write(cpu_buffer);
960 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
962 /* fail and let the caller try again */
963 return ERR_PTR(-EAGAIN);
966 /* We reserved something on the buffer */
968 BUG_ON(write > BUF_PAGE_SIZE);
970 event = __rb_page_index(tail_page, tail);
971 rb_update_event(event, type, length);
974 * If this is a commit and the tail is zero, then update
975 * this page's time stamp.
977 if (!tail && rb_is_commit(cpu_buffer, event))
978 cpu_buffer->commit_page->time_stamp = *ts;
983 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
988 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
991 struct ring_buffer_event *event;
995 if (unlikely(*delta > (1ULL << 59) && !once++)) {
996 printk(KERN_WARNING "Delta way too big! %llu"
997 " ts=%llu write stamp = %llu\n",
998 (unsigned long long)*delta,
999 (unsigned long long)*ts,
1000 (unsigned long long)cpu_buffer->write_stamp);
1005 * The delta is too big, we need to add a
1008 event = __rb_reserve_next(cpu_buffer,
1009 RINGBUF_TYPE_TIME_EXTEND,
1015 if (PTR_ERR(event) == -EAGAIN)
1018 /* Only a committed time event can update the write stamp */
1019 if (rb_is_commit(cpu_buffer, event)) {
1021 * If this is the first on the page, then we need to
1022 * update the page itself, and just put in a zero.
1024 if (rb_event_index(event)) {
1025 event->time_delta = *delta & TS_MASK;
1026 event->array[0] = *delta >> TS_SHIFT;
1028 cpu_buffer->commit_page->time_stamp = *ts;
1029 event->time_delta = 0;
1030 event->array[0] = 0;
1032 cpu_buffer->write_stamp = *ts;
1033 /* let the caller know this was the commit */
1036 /* Darn, this is just wasted space */
1037 event->time_delta = 0;
1038 event->array[0] = 0;
1047 static struct ring_buffer_event *
1048 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1049 unsigned type, unsigned long length)
1051 struct ring_buffer_event *event;
1058 * We allow for interrupts to reenter here and do a trace.
1059 * If one does, it will cause this original code to loop
1060 * back here. Even with heavy interrupts happening, this
1061 * should only happen a few times in a row. If this happens
1062 * 1000 times in a row, there must be either an interrupt
1063 * storm or we have something buggy.
1066 if (unlikely(++nr_loops > 1000)) {
1067 RB_WARN_ON(cpu_buffer, 1);
1071 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1074 * Only the first commit can update the timestamp.
1075 * Yes there is a race here. If an interrupt comes in
1076 * just after the conditional and it traces too, then it
1077 * will also check the deltas. More than one timestamp may
1078 * also be made. But only the entry that did the actual
1079 * commit will be something other than zero.
1081 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1082 rb_page_write(cpu_buffer->tail_page) ==
1083 rb_commit_index(cpu_buffer)) {
1085 delta = ts - cpu_buffer->write_stamp;
1087 /* make sure this delta is calculated here */
1090 /* Did the write stamp get updated already? */
1091 if (unlikely(ts < cpu_buffer->write_stamp))
1094 if (test_time_stamp(delta)) {
1096 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1098 if (commit == -EBUSY)
1101 if (commit == -EAGAIN)
1104 RB_WARN_ON(cpu_buffer, commit < 0);
1107 /* Non commits have zero deltas */
1110 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1111 if (PTR_ERR(event) == -EAGAIN)
1115 if (unlikely(commit))
1117 * Ouch! We needed a timestamp and it was committed. But
1118 * we didn't get our event reserved.
1120 rb_set_commit_to_write(cpu_buffer);
1125 * If the timestamp was committed, make the commit our entry
1126 * now so that we will update it when needed.
1129 rb_set_commit_event(cpu_buffer, event);
1130 else if (!rb_is_commit(cpu_buffer, event))
1133 event->time_delta = delta;
1138 static DEFINE_PER_CPU(int, rb_need_resched);
1141 * ring_buffer_lock_reserve - reserve a part of the buffer
1142 * @buffer: the ring buffer to reserve from
1143 * @length: the length of the data to reserve (excluding event header)
1144 * @flags: a pointer to save the interrupt flags
1146 * Returns a reserved event on the ring buffer to copy directly to.
1147 * The user of this interface will need to get the body to write into
1148 * and can use the ring_buffer_event_data() interface.
1150 * The length is the length of the data needed, not the event length
1151 * which also includes the event header.
1153 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1154 * If NULL is returned, then nothing has been allocated or locked.
1156 struct ring_buffer_event *
1157 ring_buffer_lock_reserve(struct ring_buffer *buffer,
1158 unsigned long length,
1159 unsigned long *flags)
1161 struct ring_buffer_per_cpu *cpu_buffer;
1162 struct ring_buffer_event *event;
1165 if (ring_buffers_off)
1168 if (atomic_read(&buffer->record_disabled))
1171 /* If we are tracing schedule, we don't want to recurse */
1172 resched = need_resched();
1173 preempt_disable_notrace();
1175 cpu = raw_smp_processor_id();
1177 if (!cpu_isset(cpu, buffer->cpumask))
1180 cpu_buffer = buffer->buffers[cpu];
1182 if (atomic_read(&cpu_buffer->record_disabled))
1185 length = rb_calculate_event_length(length);
1186 if (length > BUF_PAGE_SIZE)
1189 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1194 * Need to store resched state on this cpu.
1195 * Only the first needs to.
1198 if (preempt_count() == 1)
1199 per_cpu(rb_need_resched, cpu) = resched;
1205 preempt_enable_notrace();
1207 preempt_enable_notrace();
1211 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1212 struct ring_buffer_event *event)
1214 cpu_buffer->entries++;
1216 /* Only process further if we own the commit */
1217 if (!rb_is_commit(cpu_buffer, event))
1220 cpu_buffer->write_stamp += event->time_delta;
1222 rb_set_commit_to_write(cpu_buffer);
1226 * ring_buffer_unlock_commit - commit a reserved event
1227 * @buffer: The buffer to commit to
1228 * @event: The event pointer to commit.
1229 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1231 * This commits the data to the ring buffer, and releases any locks held.
1233 * Must be paired with ring_buffer_lock_reserve.
1235 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1236 struct ring_buffer_event *event,
1237 unsigned long flags)
1239 struct ring_buffer_per_cpu *cpu_buffer;
1240 int cpu = raw_smp_processor_id();
1242 cpu_buffer = buffer->buffers[cpu];
1244 rb_commit(cpu_buffer, event);
1247 * Only the last preempt count needs to restore preemption.
1249 if (preempt_count() == 1) {
1250 if (per_cpu(rb_need_resched, cpu))
1251 preempt_enable_no_resched_notrace();
1253 preempt_enable_notrace();
1255 preempt_enable_no_resched_notrace();
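/*
 * Usage sketch (editor's illustration, not from the original file): a
 * typical producer pairs the two calls above.  The payload struct and
 * the rb pointer are hypothetical.
 *
 *	struct my_entry { int pid; };
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long irq_flags;
 *
 *	event = ring_buffer_lock_reserve(rb, sizeof(*entry), &irq_flags);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->pid = current->pid;
 *	ring_buffer_unlock_commit(rb, event, irq_flags);
 */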
1261 * ring_buffer_write - write data to the buffer without reserving
1262 * @buffer: The ring buffer to write to.
1263 * @length: The length of the data being written (excluding the event header)
1264 * @data: The data to write to the buffer.
1266 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1267 * one function. If you already have the data to write to the buffer, it
1268 * may be easier to simply call this function.
1270 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1271 * and not the length of the event which would hold the header.
1273 int ring_buffer_write(struct ring_buffer *buffer,
1274 unsigned long length,
1277 struct ring_buffer_per_cpu *cpu_buffer;
1278 struct ring_buffer_event *event;
1279 unsigned long event_length;
1284 if (ring_buffers_off)
1287 if (atomic_read(&buffer->record_disabled))
1290 resched = need_resched();
1291 preempt_disable_notrace();
1293 cpu = raw_smp_processor_id();
1295 if (!cpu_isset(cpu, buffer->cpumask))
1298 cpu_buffer = buffer->buffers[cpu];
1300 if (atomic_read(&cpu_buffer->record_disabled))
1303 event_length = rb_calculate_event_length(length);
1304 event = rb_reserve_next_event(cpu_buffer,
1305 RINGBUF_TYPE_DATA, event_length);
1309 body = rb_event_data(event);
1311 memcpy(body, data, length);
1313 rb_commit(cpu_buffer, event);
1318 preempt_enable_no_resched_notrace();
1320 preempt_enable_notrace();
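/*
 * Usage sketch (editor's illustration): when the data already exists in
 * memory, one ring_buffer_write() call replaces the reserve/commit pair.
 * "struct my_entry" and rb are hypothetical; a nonzero return means the
 * entry was dropped.
 *
 *	struct my_entry ev = { .pid = current->pid };
 *
 *	if (ring_buffer_write(rb, sizeof(ev), &ev))
 *		pr_warning("ring buffer entry dropped\n");
 */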
1325 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1327 struct buffer_page *reader = cpu_buffer->reader_page;
1328 struct buffer_page *head = cpu_buffer->head_page;
1329 struct buffer_page *commit = cpu_buffer->commit_page;
1331 return reader->read == rb_page_commit(reader) &&
1332 (commit == reader ||
1334 head->read == rb_page_commit(commit)));
1338 * ring_buffer_record_disable - stop all writes into the buffer
1339 * @buffer: The ring buffer to stop writes to.
1341 * This prevents all writes to the buffer. Any attempt to write
1342 * to the buffer after this will fail and return NULL.
1344 * The caller should call synchronize_sched() after this.
1346 void ring_buffer_record_disable(struct ring_buffer *buffer)
1348 atomic_inc(&buffer->record_disabled);
1352 * ring_buffer_record_enable - enable writes to the buffer
1353 * @buffer: The ring buffer to enable writes
1355 * Note, multiple disables will need the same number of enables
1356 * to truly enable the writing (much like preempt_disable).
1358 void ring_buffer_record_enable(struct ring_buffer *buffer)
1360 atomic_dec(&buffer->record_disabled);
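/*
 * Usage sketch (editor's note): disables nest like preempt_disable(), so
 * a reader wanting a stable view brackets its work and waits for writers
 * already inside the buffer to finish:
 *
 *	ring_buffer_record_disable(rb);
 *	synchronize_sched();
 *	... read or reset the buffer ...
 *	ring_buffer_record_enable(rb);
 */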
1364 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1365 * @buffer: The ring buffer to stop writes to.
1366 * @cpu: The CPU buffer to stop
1368 * This prevents all writes to the buffer. Any attempt to write
1369 * to the buffer after this will fail and return NULL.
1371 * The caller should call synchronize_sched() after this.
1373 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1375 struct ring_buffer_per_cpu *cpu_buffer;
1377 if (!cpu_isset(cpu, buffer->cpumask))
1380 cpu_buffer = buffer->buffers[cpu];
1381 atomic_inc(&cpu_buffer->record_disabled);
1385 * ring_buffer_record_enable_cpu - enable writes to the buffer
1386 * @buffer: The ring buffer to enable writes
1387 * @cpu: The CPU to enable.
1389 * Note, multiple disables will need the same number of enables
1390 * to truly enable the writing (much like preempt_disable).
1392 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1394 struct ring_buffer_per_cpu *cpu_buffer;
1396 if (!cpu_isset(cpu, buffer->cpumask))
1399 cpu_buffer = buffer->buffers[cpu];
1400 atomic_dec(&cpu_buffer->record_disabled);
1404 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1405 * @buffer: The ring buffer
1406 * @cpu: The per CPU buffer to get the entries from.
1408 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1410 struct ring_buffer_per_cpu *cpu_buffer;
1412 if (!cpu_isset(cpu, buffer->cpumask))
1415 cpu_buffer = buffer->buffers[cpu];
1416 return cpu_buffer->entries;
1420 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1421 * @buffer: The ring buffer
1422 * @cpu: The per CPU buffer to get the number of overruns from
1424 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1426 struct ring_buffer_per_cpu *cpu_buffer;
1428 if (!cpu_isset(cpu, buffer->cpumask))
1431 cpu_buffer = buffer->buffers[cpu];
1432 return cpu_buffer->overrun;
1436 * ring_buffer_entries - get the number of entries in a buffer
1437 * @buffer: The ring buffer
1439 * Returns the total number of entries in the ring buffer
1442 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1444 struct ring_buffer_per_cpu *cpu_buffer;
1445 unsigned long entries = 0;
1448 /* if you care about this being correct, lock the buffer */
1449 for_each_buffer_cpu(buffer, cpu) {
1450 cpu_buffer = buffer->buffers[cpu];
1451 entries += cpu_buffer->entries;
1458 * ring_buffer_overruns - get the number of overruns in the buffer
1459 * @buffer: The ring buffer
1461 * Returns the total number of overruns in the ring buffer
1464 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1466 struct ring_buffer_per_cpu *cpu_buffer;
1467 unsigned long overruns = 0;
1470 /* if you care about this being correct, lock the buffer */
1471 for_each_buffer_cpu(buffer, cpu) {
1472 cpu_buffer = buffer->buffers[cpu];
1473 overruns += cpu_buffer->overrun;
1480 * ring_buffer_iter_reset - reset an iterator
1481 * @iter: The iterator to reset
1483 * Resets the iterator, so that it will start from the beginning
1486 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1488 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1490 /* Iterator usage is expected to have record disabled */
1491 if (list_empty(&cpu_buffer->reader_page->list)) {
1492 iter->head_page = cpu_buffer->head_page;
1493 iter->head = cpu_buffer->head_page->read;
1495 iter->head_page = cpu_buffer->reader_page;
1496 iter->head = cpu_buffer->reader_page->read;
1499 iter->read_stamp = cpu_buffer->read_stamp;
1501 iter->read_stamp = iter->head_page->time_stamp;
1505 * ring_buffer_iter_empty - check if an iterator has no more to read
1506 * @iter: The iterator to check
1508 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1510 struct ring_buffer_per_cpu *cpu_buffer;
1512 cpu_buffer = iter->cpu_buffer;
1514 return iter->head_page == cpu_buffer->commit_page &&
1515 iter->head == rb_commit_index(cpu_buffer);
1519 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1520 struct ring_buffer_event *event)
1524 switch (event->type) {
1525 case RINGBUF_TYPE_PADDING:
1528 case RINGBUF_TYPE_TIME_EXTEND:
1529 delta = event->array[0];
1531 delta += event->time_delta;
1532 cpu_buffer->read_stamp += delta;
1535 case RINGBUF_TYPE_TIME_STAMP:
1536 /* FIXME: not implemented */
1539 case RINGBUF_TYPE_DATA:
1540 cpu_buffer->read_stamp += event->time_delta;
1550 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1551 struct ring_buffer_event *event)
1555 switch (event->type) {
1556 case RINGBUF_TYPE_PADDING:
1559 case RINGBUF_TYPE_TIME_EXTEND:
1560 delta = event->array[0];
1562 delta += event->time_delta;
1563 iter->read_stamp += delta;
1566 case RINGBUF_TYPE_TIME_STAMP:
1567 /* FIXME: not implemented */
1570 case RINGBUF_TYPE_DATA:
1571 iter->read_stamp += event->time_delta;
1580 static struct buffer_page *
1581 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1583 struct buffer_page *reader = NULL;
1584 unsigned long flags;
1587 spin_lock_irqsave(&cpu_buffer->lock, flags);
1591 * This should normally only loop twice. But because the
1592 * start of the reader inserts an empty page, it causes
1593 * a case where we will loop three times. There should be no
1594 * reason to loop four times (that I know of).
1596 if (unlikely(++nr_loops > 3)) {
1597 RB_WARN_ON(cpu_buffer, 1);
1602 reader = cpu_buffer->reader_page;
1604 /* If there's more to read, return this page */
1605 if (cpu_buffer->reader_page->read < rb_page_size(reader))
1608 /* Never should we have an index greater than the size */
1609 RB_WARN_ON(cpu_buffer,
1610 cpu_buffer->reader_page->read > rb_page_size(reader));
1612 /* check if we caught up to the tail */
1614 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1618 * Splice the empty reader page into the list around the head.
1619 * Reset the reader page to size zero.
1622 reader = cpu_buffer->head_page;
1623 cpu_buffer->reader_page->list.next = reader->list.next;
1624 cpu_buffer->reader_page->list.prev = reader->list.prev;
1626 local_set(&cpu_buffer->reader_page->write, 0);
1627 local_set(&cpu_buffer->reader_page->commit, 0);
1629 /* Make the reader page now replace the head */
1630 reader->list.prev->next = &cpu_buffer->reader_page->list;
1631 reader->list.next->prev = &cpu_buffer->reader_page->list;
1634 * If the tail is on the reader, then we must set the head
1635 * to the inserted page, otherwise we set it one before.
1637 cpu_buffer->head_page = cpu_buffer->reader_page;
1639 if (cpu_buffer->commit_page != reader)
1640 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1642 /* Finally update the reader page to the new head */
1643 cpu_buffer->reader_page = reader;
1644 rb_reset_reader_page(cpu_buffer);
1649 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1654 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1656 struct ring_buffer_event *event;
1657 struct buffer_page *reader;
1660 reader = rb_get_reader_page(cpu_buffer);
1662 /* This function should not be called when buffer is empty */
1665 event = rb_reader_event(cpu_buffer);
1667 if (event->type == RINGBUF_TYPE_DATA)
1668 cpu_buffer->entries--;
1670 rb_update_read_stamp(cpu_buffer, event);
1672 length = rb_event_length(event);
1673 cpu_buffer->reader_page->read += length;
1676 static void rb_advance_iter(struct ring_buffer_iter *iter)
1678 struct ring_buffer *buffer;
1679 struct ring_buffer_per_cpu *cpu_buffer;
1680 struct ring_buffer_event *event;
1683 cpu_buffer = iter->cpu_buffer;
1684 buffer = cpu_buffer->buffer;
1687 * Check if we are at the end of the buffer.
1689 if (iter->head >= rb_page_size(iter->head_page)) {
1690 BUG_ON(iter->head_page == cpu_buffer->commit_page);
1695 event = rb_iter_head_event(iter);
1697 length = rb_event_length(event);
1700 * This should not be called to advance the header if we are
1701 * at the tail of the buffer.
1703 BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
1704 (iter->head + length > rb_commit_index(cpu_buffer)));
1706 rb_update_iter_read_stamp(iter, event);
1708 iter->head += length;
1710 /* check for end of page padding */
1711 if ((iter->head >= rb_page_size(iter->head_page)) &&
1712 (iter->head_page != cpu_buffer->commit_page))
1713 rb_advance_iter(iter);
1717 * ring_buffer_peek - peek at the next event to be read
1718 * @buffer: The ring buffer to read
1719 * @cpu: The cpu to peek at
1720 * @ts: The timestamp counter of this event.
1722 * This will return the event that will be read next, but does
1723 * not consume the data.
1725 struct ring_buffer_event *
1726 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1728 struct ring_buffer_per_cpu *cpu_buffer;
1729 struct ring_buffer_event *event;
1730 struct buffer_page *reader;
1733 if (!cpu_isset(cpu, buffer->cpumask))
1736 cpu_buffer = buffer->buffers[cpu];
1740 * We repeat when a timestamp is encountered. It is possible
1741 * to get multiple timestamps from an interrupt entering just
1742 * as one timestamp is about to be written. The max times
1743 * that this can happen is the number of nested interrupts we
1744 * can have. Nesting 10 deep of interrupts is clearly
1747 if (unlikely(++nr_loops > 10)) {
1748 RB_WARN_ON(cpu_buffer, 1);
1752 reader = rb_get_reader_page(cpu_buffer);
1756 event = rb_reader_event(cpu_buffer);
1758 switch (event->type) {
1759 case RINGBUF_TYPE_PADDING:
1760 RB_WARN_ON(cpu_buffer, 1);
1761 rb_advance_reader(cpu_buffer);
1764 case RINGBUF_TYPE_TIME_EXTEND:
1765 /* Internal data, OK to advance */
1766 rb_advance_reader(cpu_buffer);
1769 case RINGBUF_TYPE_TIME_STAMP:
1770 /* FIXME: not implemented */
1771 rb_advance_reader(cpu_buffer);
1774 case RINGBUF_TYPE_DATA:
1776 *ts = cpu_buffer->read_stamp + event->time_delta;
1777 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1789 * ring_buffer_iter_peek - peek at the next event to be read
1790 * @iter: The ring buffer iterator
1791 * @ts: The timestamp counter of this event.
1793 * This will return the event that will be read next, but does
1794 * not increment the iterator.
1796 struct ring_buffer_event *
1797 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1799 struct ring_buffer *buffer;
1800 struct ring_buffer_per_cpu *cpu_buffer;
1801 struct ring_buffer_event *event;
1804 if (ring_buffer_iter_empty(iter))
1807 cpu_buffer = iter->cpu_buffer;
1808 buffer = cpu_buffer->buffer;
1812 * We repeat when a timestamp is encountered. It is possible
1813 * to get multiple timestamps from an interrupt entering just
1814 * as one timestamp is about to be written. The max times
1815 * that this can happen is the number of nested interrupts we
1816 * can have. Nesting 10 deep of interrupts is clearly
1819 if (unlikely(++nr_loops > 10)) {
1820 RB_WARN_ON(cpu_buffer, 1);
1824 if (rb_per_cpu_empty(cpu_buffer))
1827 event = rb_iter_head_event(iter);
1829 switch (event->type) {
1830 case RINGBUF_TYPE_PADDING:
1834 case RINGBUF_TYPE_TIME_EXTEND:
1835 /* Internal data, OK to advance */
1836 rb_advance_iter(iter);
1839 case RINGBUF_TYPE_TIME_STAMP:
1840 /* FIXME: not implemented */
1841 rb_advance_iter(iter);
1844 case RINGBUF_TYPE_DATA:
1846 *ts = iter->read_stamp + event->time_delta;
1847 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1859 * ring_buffer_consume - return an event and consume it
1860 * @buffer: The ring buffer to get the next event from
1862 * Returns the next event in the ring buffer, and that event is consumed.
1863 * Meaning, that sequential reads will keep returning a different event,
1864 * and eventually empty the ring buffer if the producer is slower.
1866 struct ring_buffer_event *
1867 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1869 struct ring_buffer_per_cpu *cpu_buffer;
1870 struct ring_buffer_event *event;
1872 if (!cpu_isset(cpu, buffer->cpumask))
1875 event = ring_buffer_peek(buffer, cpu, ts);
1879 cpu_buffer = buffer->buffers[cpu];
1880 rb_advance_reader(cpu_buffer);
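/*
 * Usage sketch (editor's illustration): draining one CPU's entries with
 * the consuming read.  "struct my_entry" is hypothetical.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(rb, cpu, &ts))) {
 *		struct my_entry *entry = ring_buffer_event_data(event);
 *		pr_info("pid=%d ts=%llu\n", entry->pid,
 *			(unsigned long long)ts);
 *	}
 */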
1886 * ring_buffer_read_start - start a non consuming read of the buffer
1887 * @buffer: The ring buffer to read from
1888 * @cpu: The cpu buffer to iterate over
1890 * This starts up an iteration through the buffer. It also disables
1891 * the recording to the buffer until the reading is finished.
1892 * This prevents the reading from being corrupted. This is not
1893 * a consuming read, so a producer is not expected.
1895 * Must be paired with ring_buffer_read_finish.
1897 struct ring_buffer_iter *
1898 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1900 struct ring_buffer_per_cpu *cpu_buffer;
1901 struct ring_buffer_iter *iter;
1902 unsigned long flags;
1904 if (!cpu_isset(cpu, buffer->cpumask))
1907 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1911 cpu_buffer = buffer->buffers[cpu];
1913 iter->cpu_buffer = cpu_buffer;
1915 atomic_inc(&cpu_buffer->record_disabled);
1916 synchronize_sched();
1918 spin_lock_irqsave(&cpu_buffer->lock, flags);
1919 ring_buffer_iter_reset(iter);
1920 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1926 * ring_buffer_read_finish - finish reading the iterator of the buffer
1927 * @iter: The iterator retrieved by ring_buffer_read_start
1929 * This re-enables the recording to the buffer, and frees the
1933 ring_buffer_read_finish(struct ring_buffer_iter *iter)
1935 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1937 atomic_dec(&cpu_buffer->record_disabled);
1942 * ring_buffer_read - read the next item in the ring buffer by the iterator
1943 * @iter: The ring buffer iterator
1944 * @ts: The time stamp of the event read.
1946 * This reads the next event in the ring buffer and increments the iterator.
1948 struct ring_buffer_event *
1949 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1951 struct ring_buffer_event *event;
1953 event = ring_buffer_iter_peek(iter, ts);
1957 rb_advance_iter(iter);
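/*
 * Usage sketch (editor's illustration): a non-consuming walk of one CPU
 * buffer.  Writers stay disabled for the whole pass; process() is a
 * placeholder.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(rb, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */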
1963 * ring_buffer_size - return the size of the ring buffer (in bytes)
1964 * @buffer: The ring buffer.
1966 unsigned long ring_buffer_size(struct ring_buffer *buffer)
1968 return BUF_PAGE_SIZE * buffer->pages;
1972 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1974 cpu_buffer->head_page
1975 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1976 local_set(&cpu_buffer->head_page->write, 0);
1977 local_set(&cpu_buffer->head_page->commit, 0);
1979 cpu_buffer->head_page->read = 0;
1981 cpu_buffer->tail_page = cpu_buffer->head_page;
1982 cpu_buffer->commit_page = cpu_buffer->head_page;
1984 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1985 local_set(&cpu_buffer->reader_page->write, 0);
1986 local_set(&cpu_buffer->reader_page->commit, 0);
1987 cpu_buffer->reader_page->read = 0;
1989 cpu_buffer->overrun = 0;
1990 cpu_buffer->entries = 0;
1994 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
1995 * @buffer: The ring buffer to reset a per cpu buffer of
1996 * @cpu: The CPU buffer to be reset
1998 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2000 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2001 unsigned long flags;
2003 if (!cpu_isset(cpu, buffer->cpumask))
2006 spin_lock_irqsave(&cpu_buffer->lock, flags);
2008 rb_reset_cpu(cpu_buffer);
2010 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
2014 * ring_buffer_reset - reset a ring buffer
2015 * @buffer: The ring buffer to reset all cpu buffers
2017 void ring_buffer_reset(struct ring_buffer *buffer)
2021 for_each_buffer_cpu(buffer, cpu)
2022 ring_buffer_reset_cpu(buffer, cpu);
2026 * ring_buffer_empty - is the ring buffer empty?
2027 * @buffer: The ring buffer to test
2029 int ring_buffer_empty(struct ring_buffer *buffer)
2031 struct ring_buffer_per_cpu *cpu_buffer;
2034 /* yes this is racy, but if you don't like the race, lock the buffer */
2035 for_each_buffer_cpu(buffer, cpu) {
2036 cpu_buffer = buffer->buffers[cpu];
2037 if (!rb_per_cpu_empty(cpu_buffer))
2044 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2045 * @buffer: The ring buffer
2046 * @cpu: The CPU buffer to test
2048 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2050 struct ring_buffer_per_cpu *cpu_buffer;
2052 if (!cpu_isset(cpu, buffer->cpumask))
2055 cpu_buffer = buffer->buffers[cpu];
2056 return rb_per_cpu_empty(cpu_buffer);
2060 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2061 * @buffer_a: One buffer to swap with
2062 * @buffer_b: The other buffer to swap with
2064 * This function is useful for tracers that want to take a "snapshot"
2065 * of a CPU buffer and has another backup buffer lying around.
2066 * It is expected that the tracer handles the cpu buffer not being
2067 * used at the moment.
2069 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2070 struct ring_buffer *buffer_b, int cpu)
2072 struct ring_buffer_per_cpu *cpu_buffer_a;
2073 struct ring_buffer_per_cpu *cpu_buffer_b;
2075 if (!cpu_isset(cpu, buffer_a->cpumask) ||
2076 !cpu_isset(cpu, buffer_b->cpumask))
2079 /* At least make sure the two buffers are somewhat the same */
2080 if (buffer_a->size != buffer_b->size ||
2081 buffer_a->pages != buffer_b->pages)
2084 cpu_buffer_a = buffer_a->buffers[cpu];
2085 cpu_buffer_b = buffer_b->buffers[cpu];
2088 * We can't do a synchronize_sched here because this
2089 * function can be called in atomic context.
2090 * Normally this will be called from the same CPU as cpu.
2091 * If not it's up to the caller to protect this.
2093 atomic_inc(&cpu_buffer_a->record_disabled);
2094 atomic_inc(&cpu_buffer_b->record_disabled);
2096 buffer_a->buffers[cpu] = cpu_buffer_b;
2097 buffer_b->buffers[cpu] = cpu_buffer_a;
2099 cpu_buffer_b->buffer = buffer_a;
2100 cpu_buffer_a->buffer = buffer_b;
2102 atomic_dec(&cpu_buffer_a->record_disabled);
2103 atomic_dec(&cpu_buffer_b->record_disabled);
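/*
 * Usage sketch (editor's note, assuming the usual zero return on success;
 * the return statement is elided above): a tracer keeping a spare buffer
 * can snapshot a CPU by swapping in the spare and reading it at leisure.
 * read_snapshot() and the buffer names are placeholders.
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snap_buffer, cpu) == 0)
 *		read_snapshot(snap_buffer, cpu);
 */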
2109 rb_simple_read(struct file *filp, char __user *ubuf,
2110 size_t cnt, loff_t *ppos)
2112 int *p = filp->private_data;
2116 /* !ring_buffers_off == tracing_on */
2117 r = sprintf(buf, "%d\n", !*p);
2119 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2123 rb_simple_write(struct file *filp, const char __user *ubuf,
2124 size_t cnt, loff_t *ppos)
2126 int *p = filp->private_data;
2131 if (cnt >= sizeof(buf))
2134 if (copy_from_user(&buf, ubuf, cnt))
2139 ret = strict_strtoul(buf, 10, &val);
2143 /* !ring_buffers_off == tracing_on */
2151 static struct file_operations rb_simple_fops = {
2152 .open = tracing_open_generic,
2153 .read = rb_simple_read,
2154 .write = rb_simple_write,
2158 static __init int rb_init_debugfs(void)
2160 struct dentry *d_tracer;
2161 struct dentry *entry;
2163 d_tracer = tracing_init_dentry();
2165 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2166 &ring_buffers_off, &rb_simple_fops);
2168 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2173 fs_initcall(rb_init_debugfs);