1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Generic ring buffer
4  *
5  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6  */
7 #include <linux/trace_recursion.h>
8 #include <linux/trace_events.h>
9 #include <linux/ring_buffer.h>
10 #include <linux/trace_clock.h>
11 #include <linux/sched/clock.h>
12 #include <linux/trace_seq.h>
13 #include <linux/spinlock.h>
14 #include <linux/irq_work.h>
15 #include <linux/security.h>
16 #include <linux/uaccess.h>
17 #include <linux/hardirq.h>
18 #include <linux/kthread.h>      /* for self test */
19 #include <linux/module.h>
20 #include <linux/percpu.h>
21 #include <linux/mutex.h>
22 #include <linux/delay.h>
23 #include <linux/slab.h>
24 #include <linux/init.h>
25 #include <linux/hash.h>
26 #include <linux/list.h>
27 #include <linux/cpu.h>
28 #include <linux/oom.h>
29
30 #include <asm/local.h>
31
32 /*
33  * The "absolute" timestamp in the buffer is only 59 bits.
34  * If a clock has the 5 MSBs set, it needs to be saved and
35  * reinserted.
36  */
37 #define TS_MSB          (0xf8ULL << 56)
38 #define ABS_TS_MASK     (~TS_MSB)
39
40 static void update_pages_handler(struct work_struct *work);
41
42 /*
43  * The ring buffer header is special. We must manually keep it up to date.
44  */
45 int ring_buffer_print_entry_header(struct trace_seq *s)
46 {
47         trace_seq_puts(s, "# compressed entry header\n");
48         trace_seq_puts(s, "\ttype_len    :    5 bits\n");
49         trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
50         trace_seq_puts(s, "\tarray       :   32 bits\n");
51         trace_seq_putc(s, '\n');
52         trace_seq_printf(s, "\tpadding     : type == %d\n",
53                          RINGBUF_TYPE_PADDING);
54         trace_seq_printf(s, "\ttime_extend : type == %d\n",
55                          RINGBUF_TYPE_TIME_EXTEND);
56         trace_seq_printf(s, "\ttime_stamp : type == %d\n",
57                          RINGBUF_TYPE_TIME_STAMP);
58         trace_seq_printf(s, "\tdata max type_len  == %d\n",
59                          RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
60
61         return !trace_seq_has_overflowed(s);
62 }
63
64 /*
65  * The ring buffer is made up of a list of pages. A separate list of pages is
66  * allocated for each CPU. A writer may only write to a buffer that is
67  * associated with the CPU it is currently executing on.  A reader may read
68  * from any per cpu buffer.
69  *
70  * The reader is special. For each per cpu buffer, the reader has its own
71  * reader page. When a reader has read the entire reader page, this reader
72  * page is swapped with another page in the ring buffer.
73  *
74  * Now, as long as the writer is off the reader page, the reader can do
75  * whatever it wants with that page. The writer will never write to that page
76  * again (as long as it is out of the ring buffer).
77  *
78  * Here's some silly ASCII art.
79  *
80  *   +------+
81  *   |reader|          RING BUFFER
82  *   |page  |
83  *   +------+        +---+   +---+   +---+
84  *                   |   |-->|   |-->|   |
85  *                   +---+   +---+   +---+
86  *                     ^               |
87  *                     |               |
88  *                     +---------------+
89  *
90  *
91  *   +------+
92  *   |reader|          RING BUFFER
93  *   |page  |------------------v
94  *   +------+        +---+   +---+   +---+
95  *                   |   |-->|   |-->|   |
96  *                   +---+   +---+   +---+
97  *                     ^               |
98  *                     |               |
99  *                     +---------------+
100  *
101  *
102  *   +------+
103  *   |reader|          RING BUFFER
104  *   |page  |------------------v
105  *   +------+        +---+   +---+   +---+
106  *      ^            |   |-->|   |-->|   |
107  *      |            +---+   +---+   +---+
108  *      |                              |
109  *      |                              |
110  *      +------------------------------+
111  *
112  *
113  *   +------+
114  *   |buffer|          RING BUFFER
115  *   |page  |------------------v
116  *   +------+        +---+   +---+   +---+
117  *      ^            |   |   |   |-->|   |
118  *      |   New      +---+   +---+   +---+
119  *      |  Reader------^               |
120  *      |   page                       |
121  *      +------------------------------+
122  *
123  *
124  * After we make this swap, the reader can hand this page off to the splice
125  * code and be done with it. It can even allocate a new page if it needs to
126  * and swap that into the ring buffer.
127  *
128  * We will be using cmpxchg soon to make all this lockless.
129  *
130  */
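
/*
 * A minimal sketch of the swap described above, assuming a simple locked
 * implementation (the real code further down does this locklessly with
 * cmpxchg; see rb_get_reader_page()). The helper name is made up for
 * illustration only:
 *
 *	static struct buffer_page *
 *	swap_reader_page_sketch(struct ring_buffer_per_cpu *cpu_buffer)
 *	{
 *		struct buffer_page *reader = cpu_buffer->reader_page;
 *		struct buffer_page *head = cpu_buffer->head_page;
 *
 *		// Point the old reader page at the head page's neighbors
 *		reader->list.next = head->list.next;
 *		reader->list.prev = head->list.prev;
 *
 *		// Splice the old reader page into the ring where head was
 *		head->list.prev->next = &reader->list;
 *		head->list.next->prev = &reader->list;
 *
 *		// The old head page leaves the ring and becomes the new
 *		// reader page; the page after it becomes the new head
 *		cpu_buffer->reader_page = head;
 *		cpu_buffer->head_page = list_entry(reader->list.next,
 *						   struct buffer_page, list);
 *		return head;
 *	}
 */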
131
132 /* Used for individual buffers (after the counter) */
133 #define RB_BUFFER_OFF           (1 << 20)
134
135 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
136
137 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
138 #define RB_ALIGNMENT            4U
139 #define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
140 #define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */
141
142 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
143 # define RB_FORCE_8BYTE_ALIGNMENT       0
144 # define RB_ARCH_ALIGNMENT              RB_ALIGNMENT
145 #else
146 # define RB_FORCE_8BYTE_ALIGNMENT       1
147 # define RB_ARCH_ALIGNMENT              8U
148 #endif
149
150 #define RB_ALIGN_DATA           __aligned(RB_ARCH_ALIGNMENT)
151
152 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
153 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
154
155 enum {
156         RB_LEN_TIME_EXTEND = 8,
157         RB_LEN_TIME_STAMP =  8,
158 };
159
160 #define skip_time_extend(event) \
161         ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
162
163 #define extended_time(event) \
164         (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
165
166 static inline int rb_null_event(struct ring_buffer_event *event)
167 {
168         return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
169 }
170
171 static void rb_event_set_padding(struct ring_buffer_event *event)
172 {
173         /* padding has a NULL time_delta */
174         event->type_len = RINGBUF_TYPE_PADDING;
175         event->time_delta = 0;
176 }
177
178 static unsigned
179 rb_event_data_length(struct ring_buffer_event *event)
180 {
181         unsigned length;
182
183         if (event->type_len)
184                 length = event->type_len * RB_ALIGNMENT;
185         else
186                 length = event->array[0];
187         return length + RB_EVNT_HDR_SIZE;
188 }
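
/*
 * Illustrative examples of the two data encodings handled above (the
 * values are made up):
 *
 *	// 12 byte payload: small enough to encode in type_len directly,
 *	// the data starts at array[0]
 *	event->type_len = 3;		// 3 * RB_ALIGNMENT = 12 bytes
 *	// rb_event_data_length() == 3 * RB_ALIGNMENT + RB_EVNT_HDR_SIZE
 *
 *	// large payload: type_len is 0, array[0] holds the length of
 *	// everything after the 4 byte header, the data starts at array[1]
 *	event->type_len = 0;
 *	event->array[0] = 204;		// 4 bytes of array[0] + 200 of data
 *	// rb_event_data_length() == 204 + RB_EVNT_HDR_SIZE
 */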
189
190 /*
191  * Return the length of the given event. Will return
192  * the length of the time extend if the event is a
193  * time extend.
194  */
195 static inline unsigned
196 rb_event_length(struct ring_buffer_event *event)
197 {
198         switch (event->type_len) {
199         case RINGBUF_TYPE_PADDING:
200                 if (rb_null_event(event))
201                         /* undefined */
202                         return -1;
203                 return  event->array[0] + RB_EVNT_HDR_SIZE;
204
205         case RINGBUF_TYPE_TIME_EXTEND:
206                 return RB_LEN_TIME_EXTEND;
207
208         case RINGBUF_TYPE_TIME_STAMP:
209                 return RB_LEN_TIME_STAMP;
210
211         case RINGBUF_TYPE_DATA:
212                 return rb_event_data_length(event);
213         default:
214                 WARN_ON_ONCE(1);
215         }
216         /* not hit */
217         return 0;
218 }
219
220 /*
221  * Return total length of time extend and data,
222  *   or just the event length for all other events.
223  */
224 static inline unsigned
225 rb_event_ts_length(struct ring_buffer_event *event)
226 {
227         unsigned len = 0;
228
229         if (extended_time(event)) {
230                 /* time extends include the data event after it */
231                 len = RB_LEN_TIME_EXTEND;
232                 event = skip_time_extend(event);
233         }
234         return len + rb_event_length(event);
235 }
236
237 /**
238  * ring_buffer_event_length - return the length of the event
239  * @event: the event to get the length of
240  *
241  * Returns the size of the data load of a data event.
242  * If the event is something other than a data event, it
243  * returns the size of the event itself. With the exception
244  * of a TIME EXTEND, where it still returns the size of the
245  * data load of the data event after it.
246  */
247 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
248 {
249         unsigned length;
250
251         if (extended_time(event))
252                 event = skip_time_extend(event);
253
254         length = rb_event_length(event);
255         if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
256                 return length;
257         length -= RB_EVNT_HDR_SIZE;
258         if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
259                 length -= sizeof(event->array[0]);
260         return length;
261 }
262 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
263
264 /* inline for ring buffer fast paths */
265 static __always_inline void *
266 rb_event_data(struct ring_buffer_event *event)
267 {
268         if (extended_time(event))
269                 event = skip_time_extend(event);
270         WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
271         /* If length is in len field, then array[0] has the data */
272         if (event->type_len)
273                 return (void *)&event->array[0];
274         /* Otherwise length is in array[0] and array[1] has the data */
275         return (void *)&event->array[1];
276 }
277
278 /**
279  * ring_buffer_event_data - return the data of the event
280  * @event: the event to get the data from
281  */
282 void *ring_buffer_event_data(struct ring_buffer_event *event)
283 {
284         return rb_event_data(event);
285 }
286 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
287
288 #define for_each_buffer_cpu(buffer, cpu)                \
289         for_each_cpu(cpu, buffer->cpumask)
290
291 #define for_each_online_buffer_cpu(buffer, cpu)         \
292         for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
293
294 #define TS_SHIFT        27
295 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
296 #define TS_DELTA_TEST   (~TS_MASK)
297
298 static u64 rb_event_time_stamp(struct ring_buffer_event *event)
299 {
300         u64 ts;
301
302         ts = event->array[0];
303         ts <<= TS_SHIFT;
304         ts += event->time_delta;
305
306         return ts;
307 }
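
/*
 * Illustrative packing for a time extend/stamp event (made-up value):
 * the low TS_SHIFT (27) bits of the value go into time_delta and the
 * rest into array[0]; rb_event_time_stamp() above reverses this.
 *
 *	u64 val = 0x12345678;
 *	event->time_delta = val & TS_MASK;
 *	event->array[0]   = val >> TS_SHIFT;
 */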
308
309 /* Flag when events were overwritten */
310 #define RB_MISSED_EVENTS        (1 << 31)
311 /* Missed count stored at end */
312 #define RB_MISSED_STORED        (1 << 30)
313
314 struct buffer_data_page {
315         u64              time_stamp;    /* page time stamp */
316         local_t          commit;        /* write committed index */
317         unsigned char    data[] RB_ALIGN_DATA;  /* data of buffer page */
318 };
319
320 /*
321  * Note, the buffer_page list must be first. The buffer pages
322  * are allocated in cache lines, which means that each buffer
323  * page will be at the beginning of a cache line, and thus
324  * the least significant bits will be zero. We use this to
325  * add flags in the list struct pointers, to make the ring buffer
326  * lockless.
327  */
328 struct buffer_page {
329         struct list_head list;          /* list of buffer pages */
330         local_t          write;         /* index for next write */
331         unsigned         read;          /* index for next read */
332         local_t          entries;       /* entries on this page */
333         unsigned long    real_end;      /* real end of data */
334         struct buffer_data_page *page;  /* Actual data page */
335 };
336
337 /*
338  * The buffer page counters, write and entries, must be reset
339  * atomically when crossing page boundaries. To synchronize this
340  * update, two counters are inserted into the number. One is
341  * the actual counter for the write position or count on the page.
342  *
343  * The other is a counter of updaters. Before an update happens
344  * the update partition of the counter is incremented. This will
345  * allow the updater to update the counter atomically.
346  *
347  * The counter is 20 bits, and the state data is 12.
348  */
349 #define RB_WRITE_MASK           0xfffff
350 #define RB_WRITE_INTCNT         (1 << 20)
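
/*
 * Illustrative example of the split: a page that has been through two
 * tail-page updates while holding a write index of 64 would have
 *
 *	local_read(&bpage->write) == (2 * RB_WRITE_INTCNT) | 64
 *
 * and the real write index is recovered with
 *
 *	local_read(&bpage->write) & RB_WRITE_MASK
 */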
351
352 static void rb_init_page(struct buffer_data_page *bpage)
353 {
354         local_set(&bpage->commit, 0);
355 }
356
357 /*
358  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
359  * this issue out.
360  */
361 static void free_buffer_page(struct buffer_page *bpage)
362 {
363         free_page((unsigned long)bpage->page);
364         kfree(bpage);
365 }
366
367 /*
368  * We need to fit the time_stamp delta into 27 bits.
369  */
370 static inline int test_time_stamp(u64 delta)
371 {
372         if (delta & TS_DELTA_TEST)
373                 return 1;
374         return 0;
375 }
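
/*
 * Worked example: with a nanosecond clock, the largest delta that still
 * fits is (1 << 27) - 1 = 134217727 ns (~134 ms); any larger delta sets
 * a bit covered by TS_DELTA_TEST and forces a time extend event.
 */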
376
377 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
378
379 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
380 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
381
382 int ring_buffer_print_page_header(struct trace_seq *s)
383 {
384         struct buffer_data_page field;
385
386         trace_seq_printf(s, "\tfield: u64 timestamp;\t"
387                          "offset:0;\tsize:%u;\tsigned:%u;\n",
388                          (unsigned int)sizeof(field.time_stamp),
389                          (unsigned int)is_signed_type(u64));
390
391         trace_seq_printf(s, "\tfield: local_t commit;\t"
392                          "offset:%u;\tsize:%u;\tsigned:%u;\n",
393                          (unsigned int)offsetof(typeof(field), commit),
394                          (unsigned int)sizeof(field.commit),
395                          (unsigned int)is_signed_type(long));
396
397         trace_seq_printf(s, "\tfield: int overwrite;\t"
398                          "offset:%u;\tsize:%u;\tsigned:%u;\n",
399                          (unsigned int)offsetof(typeof(field), commit),
400                          1,
401                          (unsigned int)is_signed_type(long));
402
403         trace_seq_printf(s, "\tfield: char data;\t"
404                          "offset:%u;\tsize:%u;\tsigned:%u;\n",
405                          (unsigned int)offsetof(typeof(field), data),
406                          (unsigned int)BUF_PAGE_SIZE,
407                          (unsigned int)is_signed_type(char));
408
409         return !trace_seq_has_overflowed(s);
410 }
411
412 struct rb_irq_work {
413         struct irq_work                 work;
414         wait_queue_head_t               waiters;
415         wait_queue_head_t               full_waiters;
416         long                            wait_index;
417         bool                            waiters_pending;
418         bool                            full_waiters_pending;
419         bool                            wakeup_full;
420 };
421
422 /*
423  * Structure to hold event state and handle nested events.
424  */
425 struct rb_event_info {
426         u64                     ts;
427         u64                     delta;
428         u64                     before;
429         u64                     after;
430         unsigned long           length;
431         struct buffer_page      *tail_page;
432         int                     add_timestamp;
433 };
434
435 /*
436  * Used for the add_timestamp
437  *  NONE
438  *  EXTEND - wants a time extend
439  *  ABSOLUTE - the buffer requests all events to have absolute time stamps
440  *  FORCE - force a full time stamp.
441  */
442 enum {
443         RB_ADD_STAMP_NONE               = 0,
444         RB_ADD_STAMP_EXTEND             = BIT(1),
445         RB_ADD_STAMP_ABSOLUTE           = BIT(2),
446         RB_ADD_STAMP_FORCE              = BIT(3)
447 };
448 /*
449  * Used for which event context the event is in.
450  *  TRANSITION = 0
451  *  NMI     = 1
452  *  IRQ     = 2
453  *  SOFTIRQ = 3
454  *  NORMAL  = 4
455  *
456  * See trace_recursive_lock() comment below for more details.
457  */
458 enum {
459         RB_CTX_TRANSITION,
460         RB_CTX_NMI,
461         RB_CTX_IRQ,
462         RB_CTX_SOFTIRQ,
463         RB_CTX_NORMAL,
464         RB_CTX_MAX
465 };
466
467 #if BITS_PER_LONG == 32
468 #define RB_TIME_32
469 #endif
470
471 /* To test on 64 bit machines */
472 //#define RB_TIME_32
473
474 #ifdef RB_TIME_32
475
476 struct rb_time_struct {
477         local_t         cnt;
478         local_t         top;
479         local_t         bottom;
480         local_t         msb;
481 };
482 #else
483 #include <asm/local64.h>
484 struct rb_time_struct {
485         local64_t       time;
486 };
487 #endif
488 typedef struct rb_time_struct rb_time_t;
489
490 #define MAX_NEST        5
491
492 /*
493  * head_page == tail_page && head == tail then buffer is empty.
494  */
495 struct ring_buffer_per_cpu {
496         int                             cpu;
497         atomic_t                        record_disabled;
498         atomic_t                        resize_disabled;
499         struct trace_buffer     *buffer;
500         raw_spinlock_t                  reader_lock;    /* serialize readers */
501         arch_spinlock_t                 lock;
502         struct lock_class_key           lock_key;
503         struct buffer_data_page         *free_page;
504         unsigned long                   nr_pages;
505         unsigned int                    current_context;
506         struct list_head                *pages;
507         struct buffer_page              *head_page;     /* read from head */
508         struct buffer_page              *tail_page;     /* write to tail */
509         struct buffer_page              *commit_page;   /* committed pages */
510         struct buffer_page              *reader_page;
511         unsigned long                   lost_events;
512         unsigned long                   last_overrun;
513         unsigned long                   nest;
514         local_t                         entries_bytes;
515         local_t                         entries;
516         local_t                         overrun;
517         local_t                         commit_overrun;
518         local_t                         dropped_events;
519         local_t                         committing;
520         local_t                         commits;
521         local_t                         pages_touched;
522         local_t                         pages_lost;
523         local_t                         pages_read;
524         long                            last_pages_touch;
525         size_t                          shortest_full;
526         unsigned long                   read;
527         unsigned long                   read_bytes;
528         rb_time_t                       write_stamp;
529         rb_time_t                       before_stamp;
530         u64                             event_stamp[MAX_NEST];
531         u64                             read_stamp;
532         /* ring buffer pages to update, > 0 to add, < 0 to remove */
533         long                            nr_pages_to_update;
534         struct list_head                new_pages; /* new pages to add */
535         struct work_struct              update_pages_work;
536         struct completion               update_done;
537
538         struct rb_irq_work              irq_work;
539 };
540
541 struct trace_buffer {
542         unsigned                        flags;
543         int                             cpus;
544         atomic_t                        record_disabled;
545         cpumask_var_t                   cpumask;
546
547         struct lock_class_key           *reader_lock_key;
548
549         struct mutex                    mutex;
550
551         struct ring_buffer_per_cpu      **buffers;
552
553         struct hlist_node               node;
554         u64                             (*clock)(void);
555
556         struct rb_irq_work              irq_work;
557         bool                            time_stamp_abs;
558 };
559
560 struct ring_buffer_iter {
561         struct ring_buffer_per_cpu      *cpu_buffer;
562         unsigned long                   head;
563         unsigned long                   next_event;
564         struct buffer_page              *head_page;
565         struct buffer_page              *cache_reader_page;
566         unsigned long                   cache_read;
567         u64                             read_stamp;
568         u64                             page_stamp;
569         struct ring_buffer_event        *event;
570         int                             missed_events;
571 };
572
573 #ifdef RB_TIME_32
574
575 /*
576  * On 32 bit machines, local64_t is very expensive. As the ring
577  * buffer doesn't need all the features of a true 64 bit atomic,
578  * on 32 bit, it uses these functions (64 still uses local64_t).
579  *
580  * For the ring buffer, 64 bit required operations for the time is
581  * the following:
582  *
583  *  - Reads may fail if they interrupt a modification of the time stamp.
584  *      A read will succeed if it did not interrupt another write, even if
585  *      the read itself is interrupted by a write.
586  *      It returns whether it was successful or not.
587  *
588  *  - Writes always succeed and will overwrite other writes and writes
589  *      that were done by events interrupting the current write.
590  *
591  *  - A write followed by a read of the same time stamp will always succeed,
592  *      but may not contain the same value.
593  *
594  *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
595  *      Other than that, it acts like a normal cmpxchg.
596  *
597  * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
598  *  (bottom being the least significant 30 bits of the 60 bit time stamp).
599  *
600  * The two most significant bits of each half hold a 2 bit counter (0-3).
601  * Each update will increment this counter by one.
602  * When reading the top and bottom, if the two counter bits match then the
603  *  top and bottom together make a valid 60 bit number.
604  */
605 #define RB_TIME_SHIFT   30
606 #define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
607 #define RB_TIME_MSB_SHIFT        60
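
/*
 * Illustrative split of a time stamp (made-up value): the low 30 bits go
 * into bottom, the next 30 into top, the remainder into msb, and the same
 * 2 bit cnt is OR'd into bits 30-31 of top and bottom by rb_time_val_cnt()
 * below.
 *
 *	u64 val = 0x123456789abcdefULL;
 *	bottom = val & RB_TIME_VAL_MASK;
 *	top    = (val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK;
 *	msb    = val >> RB_TIME_MSB_SHIFT;
 */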
608
609 static inline int rb_time_cnt(unsigned long val)
610 {
611         return (val >> RB_TIME_SHIFT) & 3;
612 }
613
614 static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
615 {
616         u64 val;
617
618         val = top & RB_TIME_VAL_MASK;
619         val <<= RB_TIME_SHIFT;
620         val |= bottom & RB_TIME_VAL_MASK;
621
622         return val;
623 }
624
625 static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
626 {
627         unsigned long top, bottom, msb;
628         unsigned long c;
629
630         /*
631          * If the read is interrupted by a write, then the cnt will
632          * be different. Loop until both top and bottom have been read
633          * without interruption.
634          */
635         do {
636                 c = local_read(&t->cnt);
637                 top = local_read(&t->top);
638                 bottom = local_read(&t->bottom);
639                 msb = local_read(&t->msb);
640         } while (c != local_read(&t->cnt));
641
642         *cnt = rb_time_cnt(top);
643
644         /* If top and bottom counts don't match, this interrupted a write */
645         if (*cnt != rb_time_cnt(bottom))
646                 return false;
647
648         /* The shift to msb will lose its cnt bits */
649         *ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
650         return true;
651 }
652
653 static bool rb_time_read(rb_time_t *t, u64 *ret)
654 {
655         unsigned long cnt;
656
657         return __rb_time_read(t, ret, &cnt);
658 }
659
660 static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
661 {
662         return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
663 }
664
665 static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
666                                  unsigned long *msb)
667 {
668         *top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
669         *bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
670         *msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
671 }
672
673 static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
674 {
675         val = rb_time_val_cnt(val, cnt);
676         local_set(t, val);
677 }
678
679 static void rb_time_set(rb_time_t *t, u64 val)
680 {
681         unsigned long cnt, top, bottom, msb;
682
683         rb_time_split(val, &top, &bottom, &msb);
684
685         /* Writes always succeed with a valid number even if it gets interrupted. */
686         do {
687                 cnt = local_inc_return(&t->cnt);
688                 rb_time_val_set(&t->top, top, cnt);
689                 rb_time_val_set(&t->bottom, bottom, cnt);
690                 rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
691         } while (cnt != local_read(&t->cnt));
692 }
693
694 static inline bool
695 rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
696 {
697         unsigned long ret;
698
699         ret = local_cmpxchg(l, expect, set);
700         return ret == expect;
701 }
702
703 static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
704 {
705         unsigned long cnt, top, bottom, msb;
706         unsigned long cnt2, top2, bottom2, msb2;
707         u64 val;
708
709         /* The cmpxchg always fails if it interrupted an update */
710          if (!__rb_time_read(t, &val, &cnt2))
711                  return false;
712
713          if (val != expect)
714                  return false;
715
716          cnt = local_read(&t->cnt);
717          if ((cnt & 3) != cnt2)
718                  return false;
719
720          cnt2 = cnt + 1;
721
722          rb_time_split(val, &top, &bottom, &msb);
723          top = rb_time_val_cnt(top, cnt);
724          bottom = rb_time_val_cnt(bottom, cnt);
725
726          rb_time_split(set, &top2, &bottom2, &msb2);
727          top2 = rb_time_val_cnt(top2, cnt2);
728          bottom2 = rb_time_val_cnt(bottom2, cnt2);
729
730         if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
731                 return false;
732         if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
733                 return false;
734         if (!rb_time_read_cmpxchg(&t->top, top, top2))
735                 return false;
736         if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
737                 return false;
738         return true;
739 }
740
741 #else /* 64 bits */
742
743 /* local64_t always succeeds */
744
745 static inline bool rb_time_read(rb_time_t *t, u64 *ret)
746 {
747         *ret = local64_read(&t->time);
748         return true;
749 }
750 static void rb_time_set(rb_time_t *t, u64 val)
751 {
752         local64_set(&t->time, val);
753 }
754
755 static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
756 {
757         u64 val;
758         val = local64_cmpxchg(&t->time, expect, set);
759         return val == expect;
760 }
761 #endif
762
763 /*
764  * Enable this to make sure that the event passed to
765  * ring_buffer_event_time_stamp() is not committed and also
766  * is on the buffer that it passed in.
767  */
768 //#define RB_VERIFY_EVENT
769 #ifdef RB_VERIFY_EVENT
770 static struct list_head *rb_list_head(struct list_head *list);
771 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
772                          void *event)
773 {
774         struct buffer_page *page = cpu_buffer->commit_page;
775         struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
776         struct list_head *next;
777         long commit, write;
778         unsigned long addr = (unsigned long)event;
779         bool done = false;
780         int stop = 0;
781
782         /* Make sure the event exists and is not committed yet */
783         do {
784                 if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
785                         done = true;
786                 commit = local_read(&page->page->commit);
787                 write = local_read(&page->write);
788                 if (addr >= (unsigned long)&page->page->data[commit] &&
789                     addr < (unsigned long)&page->page->data[write])
790                         return;
791
792                 next = rb_list_head(page->list.next);
793                 page = list_entry(next, struct buffer_page, list);
794         } while (!done);
795         WARN_ON_ONCE(1);
796 }
797 #else
798 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
799                          void *event)
800 {
801 }
802 #endif
803
804 /*
805  * The absolute time stamp drops the 5 MSBs and some clocks may
806  * require them. The rb_fix_abs_ts() will take a previous full
807  * time stamp, and add the 5 MSB of that time stamp on to the
808  * saved absolute time stamp. Then they are compared in case of
809  * the unlikely event that the latest time stamp incremented
810  * the 5 MSB.
811  */
812 static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
813 {
814         if (save_ts & TS_MSB) {
815                 abs |= save_ts & TS_MSB;
816                 /* Check for overflow */
817                 if (unlikely(abs < save_ts))
818                         abs += 1ULL << 59;
819         }
820         return abs;
821 }
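
/*
 * Worked example with made-up values: if the last full time stamp was
 * save_ts = 0x0800000000001000 (bit 59 set) and the event saved
 * abs = 0x2000 with its 5 MSBs dropped, then
 *
 *	abs |= save_ts & TS_MSB;	// abs == 0x0800000000002000
 *
 * and since the result is not below save_ts, no overflow correction
 * (the "abs += 1ULL << 59" above) is applied.
 */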
822
823 static inline u64 rb_time_stamp(struct trace_buffer *buffer);
824
825 /**
826  * ring_buffer_event_time_stamp - return the event's current time stamp
827  * @buffer: The buffer that the event is on
828  * @event: the event to get the time stamp of
829  *
830  * Note, this must be called after @event is reserved, and before it is
831  * committed to the ring buffer. And must be called from the same
832  * context where the event was reserved (normal, softirq, irq, etc).
833  *
834  * Returns the time stamp associated with the current event.
835  * If the event has an extended time stamp, then that is used as
836  * the time stamp to return.
837  * In the highly unlikely case that the event was nested more than
838  * the max nesting, then the write_stamp of the buffer is returned,
 839  * otherwise the current time is returned; really, neither of the
 840  * last two cases should ever happen.
841  */
842 u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
843                                  struct ring_buffer_event *event)
844 {
845         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
846         unsigned int nest;
847         u64 ts;
848
849         /* If the event includes an absolute time, then just use that */
850         if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
851                 ts = rb_event_time_stamp(event);
852                 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
853         }
854
855         nest = local_read(&cpu_buffer->committing);
856         verify_event(cpu_buffer, event);
857         if (WARN_ON_ONCE(!nest))
858                 goto fail;
859
860         /* Read the current saved nesting level time stamp */
861         if (likely(--nest < MAX_NEST))
862                 return cpu_buffer->event_stamp[nest];
863
864         /* Shouldn't happen, warn if it does */
865         WARN_ONCE(1, "nest (%d) greater than max", nest);
866
867  fail:
868         /* Can only fail on 32 bit */
869         if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
870                 /* Screw it, just read the current time */
871                 ts = rb_time_stamp(cpu_buffer->buffer);
872
873         return ts;
874 }
875
876 /**
877  * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
878  * @buffer: The ring_buffer to get the number of pages from
879  * @cpu: The cpu of the ring_buffer to get the number of pages from
880  *
881  * Returns the number of pages used by a per_cpu buffer of the ring buffer.
882  */
883 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
884 {
885         return buffer->buffers[cpu]->nr_pages;
886 }
887
888 /**
889  * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
890  * @buffer: The ring_buffer to get the number of pages from
891  * @cpu: The cpu of the ring_buffer to get the number of pages from
892  *
893  * Returns the number of pages that have content in the ring buffer.
894  */
895 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
896 {
897         size_t read;
898         size_t lost;
899         size_t cnt;
900
901         read = local_read(&buffer->buffers[cpu]->pages_read);
902         lost = local_read(&buffer->buffers[cpu]->pages_lost);
903         cnt = local_read(&buffer->buffers[cpu]->pages_touched);
904
905         if (WARN_ON_ONCE(cnt < lost))
906                 return 0;
907
908         cnt -= lost;
909
910         /* The reader can read an empty page, but not more than that */
911         if (cnt < read) {
912                 WARN_ON_ONCE(read > cnt + 1);
913                 return 0;
914         }
915
916         return cnt - read;
917 }
918
919 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
920 {
921         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
922         size_t nr_pages;
923         size_t dirty;
924
925         nr_pages = cpu_buffer->nr_pages;
926         if (!nr_pages || !full)
927                 return true;
928
929         dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
930
931         return (dirty * 100) > (full * nr_pages);
932 }
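
/*
 * Worked example: with nr_pages = 16 and full = 50 (wake at 50%),
 * full_hit() becomes true once a 9th page carries unread data:
 *
 *	dirty * 100 > full * nr_pages
 *	    8 * 100 >   50 * 16		// 800 > 800 is false, keep waiting
 *	    9 * 100 >   50 * 16		// 900 > 800 is true, wake up
 */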
933
934 /*
935  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
936  *
937  * Schedules a delayed work to wake up any task that is blocked on the
938  * ring buffer waiters queue.
939  */
940 static void rb_wake_up_waiters(struct irq_work *work)
941 {
942         struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
943
944         wake_up_all(&rbwork->waiters);
945         if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
946                 rbwork->wakeup_full = false;
947                 rbwork->full_waiters_pending = false;
948                 wake_up_all(&rbwork->full_waiters);
949         }
950 }
951
952 /**
953  * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 954  * @buffer: The ring buffer to wake waiters on
 955  * @cpu: The CPU buffer to wake waiters on, or RING_BUFFER_ALL_CPUS for all of them
 956  *
 957  * When a file that represents a ring buffer is closing, it is prudent to wake up any waiters that are on it.
958  */
959 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
960 {
961         struct ring_buffer_per_cpu *cpu_buffer;
962         struct rb_irq_work *rbwork;
963
964         if (!buffer)
965                 return;
966
967         if (cpu == RING_BUFFER_ALL_CPUS) {
968
969                 /* Wake up individual ones too. One level recursion */
970                 for_each_buffer_cpu(buffer, cpu)
971                         ring_buffer_wake_waiters(buffer, cpu);
972
973                 rbwork = &buffer->irq_work;
974         } else {
975                 if (WARN_ON_ONCE(!buffer->buffers))
976                         return;
977                 if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
978                         return;
979
980                 cpu_buffer = buffer->buffers[cpu];
981                 /* The CPU buffer may not have been initialized yet */
982                 if (!cpu_buffer)
983                         return;
984                 rbwork = &cpu_buffer->irq_work;
985         }
986
987         rbwork->wait_index++;
988         /* make sure the waiters see the new index */
989         smp_wmb();
990
991         rb_wake_up_waiters(&rbwork->work);
992 }
993
994 /**
995  * ring_buffer_wait - wait for input to the ring buffer
996  * @buffer: buffer to wait on
997  * @cpu: the cpu buffer to wait on
998  * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
999  *
1000  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1001  * as data is added to any of the @buffer's cpu buffers. Otherwise
1002  * it will wait for data to be added to a specific cpu buffer.
1003  */
1004 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
1005 {
1006         struct ring_buffer_per_cpu *cpu_buffer;
1007         DEFINE_WAIT(wait);
1008         struct rb_irq_work *work;
1009         long wait_index;
1010         int ret = 0;
1011
1012         /*
1013          * Depending on what the caller is waiting for, either any
1014          * data in any cpu buffer, or a specific buffer, put the
1015          * caller on the appropriate wait queue.
1016          */
1017         if (cpu == RING_BUFFER_ALL_CPUS) {
1018                 work = &buffer->irq_work;
1019                 /* Full only makes sense on per cpu reads */
1020                 full = 0;
1021         } else {
1022                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1023                         return -ENODEV;
1024                 cpu_buffer = buffer->buffers[cpu];
1025                 work = &cpu_buffer->irq_work;
1026         }
1027
1028         wait_index = READ_ONCE(work->wait_index);
1029
1030         while (true) {
1031                 if (full)
1032                         prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
1033                 else
1034                         prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
1035
1036                 /*
1037                  * The events can happen in critical sections where
1038                  * checking a work queue can cause deadlocks.
1039                  * After adding a task to the queue, this flag is set
1040                  * only to notify events to try to wake up the queue
1041                  * using irq_work.
1042                  *
1043                  * We don't clear it even if the buffer is no longer
1044                  * empty. The flag only causes the next event to run
1045                  * irq_work to do the work queue wake up. The worse
1046                  * irq_work to do the work queue wake up. The worst
1047                  * an event will cause an irq_work to try to wake up
1048                  * an empty queue.
1049                  *
1050                  * There's no reason to protect this flag either, as
1051                  * the work queue and irq_work logic will do the necessary
1052                  * synchronization for the wake ups. The only thing
1053                  * that is necessary is that the wake up happens after
1054                  * a task has been queued. It's OK for spurious wake ups.
1055                  */
1056                 if (full)
1057                         work->full_waiters_pending = true;
1058                 else
1059                         work->waiters_pending = true;
1060
1061                 if (signal_pending(current)) {
1062                         ret = -EINTR;
1063                         break;
1064                 }
1065
1066                 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
1067                         break;
1068
1069                 if (cpu != RING_BUFFER_ALL_CPUS &&
1070                     !ring_buffer_empty_cpu(buffer, cpu)) {
1071                         unsigned long flags;
1072                         bool pagebusy;
1073                         bool done;
1074
1075                         if (!full)
1076                                 break;
1077
1078                         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1079                         pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
1080                         done = !pagebusy && full_hit(buffer, cpu, full);
1081
1082                         if (!cpu_buffer->shortest_full ||
1083                             cpu_buffer->shortest_full > full)
1084                                 cpu_buffer->shortest_full = full;
1085                         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1086                         if (done)
1087                                 break;
1088                 }
1089
1090                 schedule();
1091
1092                 /* Make sure to see the new wait index */
1093                 smp_rmb();
1094                 if (wait_index != work->wait_index)
1095                         break;
1096         }
1097
1098         if (full)
1099                 finish_wait(&work->full_waiters, &wait);
1100         else
1101                 finish_wait(&work->waiters, &wait);
1102
1103         return ret;
1104 }
1105
1106 /**
1107  * ring_buffer_poll_wait - poll on buffer input
1108  * @buffer: buffer to wait on
1109  * @cpu: the cpu buffer to wait on
1110  * @filp: the file descriptor
1111  * @poll_table: The poll descriptor
1112  * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
1113  *
1114  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1115  * as data is added to any of the @buffer's cpu buffers. Otherwise
1116  * it will wait for data to be added to a specific cpu buffer.
1117  *
1118  * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
1119  * zero otherwise.
1120  */
1121 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
1122                           struct file *filp, poll_table *poll_table, int full)
1123 {
1124         struct ring_buffer_per_cpu *cpu_buffer;
1125         struct rb_irq_work *work;
1126
1127         if (cpu == RING_BUFFER_ALL_CPUS) {
1128                 work = &buffer->irq_work;
1129                 full = 0;
1130         } else {
1131                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1132                         return -EINVAL;
1133
1134                 cpu_buffer = buffer->buffers[cpu];
1135                 work = &cpu_buffer->irq_work;
1136         }
1137
1138         if (full) {
1139                 poll_wait(filp, &work->full_waiters, poll_table);
1140                 work->full_waiters_pending = true;
1141         } else {
1142                 poll_wait(filp, &work->waiters, poll_table);
1143                 work->waiters_pending = true;
1144         }
1145
1146         /*
1147          * There's a tight race between setting the waiters_pending and
1148          * checking if the ring buffer is empty.  Once the waiters_pending bit
1149          * is set, the next event will wake the task up, but we can get stuck
1150          * if there's only a single event in.
1151          *
1152          * FIXME: Ideally, we need a memory barrier on the writer side as well,
1153          * but adding a memory barrier to all events will cause too much of a
1154          * performance hit in the fast path.  We only need a memory barrier when
1155          * the buffer goes from empty to having content.  But as this race is
1156          * extremely small, and it's not a problem if another event comes in, we
1157          * will fix it later.
1158          */
1159         smp_mb();
1160
1161         if (full)
1162                 return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
1163
1164         if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
1165             (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
1166                 return EPOLLIN | EPOLLRDNORM;
1167         return 0;
1168 }
1169
1170 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
1171 #define RB_WARN_ON(b, cond)                                             \
1172         ({                                                              \
1173                 int _____ret = unlikely(cond);                          \
1174                 if (_____ret) {                                         \
1175                         if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1176                                 struct ring_buffer_per_cpu *__b =       \
1177                                         (void *)b;                      \
1178                                 atomic_inc(&__b->buffer->record_disabled); \
1179                         } else                                          \
1180                                 atomic_inc(&b->record_disabled);        \
1181                         WARN_ON(1);                                     \
1182                 }                                                       \
1183                 _____ret;                                               \
1184         })
1185
1186 /* Up this if you want to test the TIME_EXTENTS and normalization */
1187 #define DEBUG_SHIFT 0
1188
1189 static inline u64 rb_time_stamp(struct trace_buffer *buffer)
1190 {
1191         u64 ts;
1192
1193         /* Skip retpolines :-( */
1194         if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
1195                 ts = trace_clock_local();
1196         else
1197                 ts = buffer->clock();
1198
1199         /* shift to debug/test normalization and TIME_EXTENTS */
1200         return ts << DEBUG_SHIFT;
1201 }
1202
1203 u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
1204 {
1205         u64 time;
1206
1207         preempt_disable_notrace();
1208         time = rb_time_stamp(buffer);
1209         preempt_enable_notrace();
1210
1211         return time;
1212 }
1213 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
1214
1215 void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
1216                                       int cpu, u64 *ts)
1217 {
1218         /* Just stupid testing the normalize function and deltas */
1219         *ts >>= DEBUG_SHIFT;
1220 }
1221 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
1222
1223 /*
1224  * Making the ring buffer lockless makes things tricky.
1225  * Although writes only happen on the CPU that they are on,
1226  * and they only need to worry about interrupts. Reads can
1227  * happen on any CPU.
1228  *
1229  * The reader page is always off the ring buffer, but when the
1230  * reader finishes with a page, it needs to swap its page with
1231  * a new one from the buffer. The reader needs to take from
1232  * the head (writes go to the tail). But if a writer is in overwrite
1233  * mode and wraps, it must push the head page forward.
1234  *
1235  * Here lies the problem.
1236  *
1237  * The reader must be careful to replace only the head page, and
1238  * not another one. As described at the top of the file in the
1239  * ASCII art, the reader sets its old page to point to the next
1240  * page after head. It then sets the page after head to point to
1241  * the old reader page. But if the writer moves the head page
1242  * during this operation, the reader could end up with the tail.
1243  *
1244  * We use cmpxchg to help prevent this race. We also do something
1245  * special with the page before head. We set the LSB to 1.
1246  *
1247  * When the writer must push the page forward, it will clear the
1248  * bit that points to the head page, move the head, and then set
1249  * the bit that points to the new head page.
1250  *
1251  * We also don't want an interrupt coming in and moving the head
1252  * page on another writer. Thus we use the second LSB to catch
1253  * that too. Thus:
1254  *
1255  * head->list->prev->next        bit 1          bit 0
1256  *                              -------        -------
1257  * Normal page                     0              0
1258  * Points to head page             0              1
1259  * New head page                   1              0
1260  *
1261  * Note we can not trust the prev pointer of the head page, because:
1262  *
1263  * +----+       +-----+        +-----+
1264  * |    |------>|  T  |---X--->|  N  |
1265  * |    |<------|     |        |     |
1266  * +----+       +-----+        +-----+
1267  *   ^                           ^ |
1268  *   |          +-----+          | |
1269  *   +----------|  R  |----------+ |
1270  *              |     |<-----------+
1271  *              +-----+
1272  *
1273  * Key:  ---X-->  HEAD flag set in pointer
1274  *         T      Tail page
1275  *         R      Reader page
1276  *         N      Next page
1277  *
1278  * (see __rb_reserve_next() to see where this happens)
1279  *
1280  *  What the above shows is that the reader just swapped out
1281  *  the reader page with a page in the buffer, but before it
1282  *  could make the new header point back to the new page added
1283  *  it was preempted by a writer. The writer moved forward onto
1284  *  the new page added by the reader and is about to move forward
1285  *  again.
1286  *
1287  *  You can see, it is legitimate for the previous pointer of
1288  *  the head (or any page) not to point back to itself. But only
1289  *  temporarily.
1290  */
1291
1292 #define RB_PAGE_NORMAL          0UL
1293 #define RB_PAGE_HEAD            1UL
1294 #define RB_PAGE_UPDATE          2UL
1295
1296
1297 #define RB_FLAG_MASK            3UL
1298
1299 /* PAGE_MOVED is not part of the mask */
1300 #define RB_PAGE_MOVED           4UL
1301
1302 /*
1303  * rb_list_head - strip the flag bits from a list pointer
1304  */
1305 static struct list_head *rb_list_head(struct list_head *list)
1306 {
1307         unsigned long val = (unsigned long)list;
1308
1309         return (struct list_head *)(val & ~RB_FLAG_MASK);
1310 }
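
/*
 * Illustrative example (made-up address): if the page before head has
 *
 *	next == (struct list_head *)0xffff888012345001
 *
 * then rb_list_head(next) yields 0xffff888012345000 (the real pointer)
 * and (unsigned long)next & RB_FLAG_MASK == RB_PAGE_HEAD, i.e. the low
 * bits carry the flag described in the table above.
 */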
1311
1312 /*
1313  * rb_is_head_page - test if the given page is the head page
1314  *
1315  * Because the reader may move the head_page pointer, we can
1316  * not trust what the head page is (it may be pointing to
1317  * the reader page). But if the next page is a header page,
1318  * its flags will be non zero.
1319  */
1320 static inline int
1321 rb_is_head_page(struct buffer_page *page, struct list_head *list)
1322 {
1323         unsigned long val;
1324
1325         val = (unsigned long)list->next;
1326
1327         if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1328                 return RB_PAGE_MOVED;
1329
1330         return val & RB_FLAG_MASK;
1331 }
1332
1333 /*
1334  * rb_is_reader_page
1335  *
1336  * The unique thing about the reader page is that, if the
1337  * writer is ever on it, the previous pointer never points
1338  * back to the reader page.
1339  */
1340 static bool rb_is_reader_page(struct buffer_page *page)
1341 {
1342         struct list_head *list = page->list.prev;
1343
1344         return rb_list_head(list->next) != &page->list;
1345 }
1346
1347 /*
1348  * rb_set_list_to_head - set a list_head to be pointing to head.
1349  */
1350 static void rb_set_list_to_head(struct list_head *list)
1351 {
1352         unsigned long *ptr;
1353
1354         ptr = (unsigned long *)&list->next;
1355         *ptr |= RB_PAGE_HEAD;
1356         *ptr &= ~RB_PAGE_UPDATE;
1357 }
1358
1359 /*
1360  * rb_head_page_activate - sets up head page
1361  */
1362 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1363 {
1364         struct buffer_page *head;
1365
1366         head = cpu_buffer->head_page;
1367         if (!head)
1368                 return;
1369
1370         /*
1371          * Set the previous list pointer to have the HEAD flag.
1372          */
1373         rb_set_list_to_head(head->list.prev);
1374 }
1375
1376 static void rb_list_head_clear(struct list_head *list)
1377 {
1378         unsigned long *ptr = (unsigned long *)&list->next;
1379
1380         *ptr &= ~RB_FLAG_MASK;
1381 }
1382
1383 /*
1384  * rb_head_page_deactivate - clears head page ptr (for free list)
1385  */
1386 static void
1387 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1388 {
1389         struct list_head *hd;
1390
1391         /* Go through the whole list and clear any pointers found. */
1392         rb_list_head_clear(cpu_buffer->pages);
1393
1394         list_for_each(hd, cpu_buffer->pages)
1395                 rb_list_head_clear(hd);
1396 }
1397
1398 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1399                             struct buffer_page *head,
1400                             struct buffer_page *prev,
1401                             int old_flag, int new_flag)
1402 {
1403         struct list_head *list;
1404         unsigned long val = (unsigned long)&head->list;
1405         unsigned long ret;
1406
1407         list = &prev->list;
1408
1409         val &= ~RB_FLAG_MASK;
1410
1411         ret = cmpxchg((unsigned long *)&list->next,
1412                       val | old_flag, val | new_flag);
1413
1414         /* check if the reader took the page */
1415         if ((ret & ~RB_FLAG_MASK) != val)
1416                 return RB_PAGE_MOVED;
1417
1418         return ret & RB_FLAG_MASK;
1419 }
1420
1421 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1422                                    struct buffer_page *head,
1423                                    struct buffer_page *prev,
1424                                    int old_flag)
1425 {
1426         return rb_head_page_set(cpu_buffer, head, prev,
1427                                 old_flag, RB_PAGE_UPDATE);
1428 }
1429
1430 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1431                                  struct buffer_page *head,
1432                                  struct buffer_page *prev,
1433                                  int old_flag)
1434 {
1435         return rb_head_page_set(cpu_buffer, head, prev,
1436                                 old_flag, RB_PAGE_HEAD);
1437 }
1438
1439 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1440                                    struct buffer_page *head,
1441                                    struct buffer_page *prev,
1442                                    int old_flag)
1443 {
1444         return rb_head_page_set(cpu_buffer, head, prev,
1445                                 old_flag, RB_PAGE_NORMAL);
1446 }
1447
1448 static inline void rb_inc_page(struct buffer_page **bpage)
1449 {
1450         struct list_head *p = rb_list_head((*bpage)->list.next);
1451
1452         *bpage = list_entry(p, struct buffer_page, list);
1453 }
1454
1455 static struct buffer_page *
1456 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1457 {
1458         struct buffer_page *head;
1459         struct buffer_page *page;
1460         struct list_head *list;
1461         int i;
1462
1463         if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1464                 return NULL;
1465
1466         /* sanity check */
1467         list = cpu_buffer->pages;
1468         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1469                 return NULL;
1470
1471         page = head = cpu_buffer->head_page;
1472         /*
1473          * It is possible that the writer moves the header behind
1474          * where we started, and we miss in one loop.
1475          * A second loop should grab the header, but we'll do
1476          * three loops just because I'm paranoid.
1477          */
1478         for (i = 0; i < 3; i++) {
1479                 do {
1480                         if (rb_is_head_page(page, page->list.prev)) {
1481                                 cpu_buffer->head_page = page;
1482                                 return page;
1483                         }
1484                         rb_inc_page(&page);
1485                 } while (page != head);
1486         }
1487
1488         RB_WARN_ON(cpu_buffer, 1);
1489
1490         return NULL;
1491 }
1492
1493 static int rb_head_page_replace(struct buffer_page *old,
1494                                 struct buffer_page *new)
1495 {
1496         unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1497         unsigned long val;
1498         unsigned long ret;
1499
1500         val = *ptr & ~RB_FLAG_MASK;
1501         val |= RB_PAGE_HEAD;
1502
1503         ret = cmpxchg(ptr, val, (unsigned long)&new->list);
1504
1505         return ret == val;
1506 }
1507
1508 /*
1509  * rb_tail_page_update - move the tail page forward
1510  */
1511 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1512                                struct buffer_page *tail_page,
1513                                struct buffer_page *next_page)
1514 {
1515         unsigned long old_entries;
1516         unsigned long old_write;
1517
1518         /*
1519          * The tail page now needs to be moved forward.
1520          *
1521          * We need to reset the tail page, but we must not risk
1522          * erasing data brought in by interrupts that have moved
1523          * the tail page and are currently on it.
1524          *
1525          * We add a counter to the write field to denote this.
1526          */
1527         old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1528         old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1529
1530         local_inc(&cpu_buffer->pages_touched);
1531         /*
1532          * Just make sure we have seen our old_write and synchronize
1533          * with any interrupts that come in.
1534          */
1535         barrier();
1536
1537         /*
1538          * If the tail page is still the same as what we think
1539          * it is, then it is up to us to update the tail
1540          * pointer.
1541          */
1542         if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1543                 /* Zero the write counter */
1544                 unsigned long val = old_write & ~RB_WRITE_MASK;
1545                 unsigned long eval = old_entries & ~RB_WRITE_MASK;
1546
1547                 /*
1548                  * This will only succeed if an interrupt did
1549                  * not come in and change it; in that case, we
1550                  * do not want to modify it.
1551                  *
1552                  * We add (void) to let the compiler know that we do not care
1553                  * about the return value of these functions. We use the
1554                  * cmpxchg to only update if an interrupt did not already
1555                  * do it for us. If the cmpxchg fails, we don't care.
1556                  */
1557                 (void)local_cmpxchg(&next_page->write, old_write, val);
1558                 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
1559
1560                 /*
1561                  * No need to worry about races with clearing out the commit.
1562                  * It can only increment when a commit takes place. But that
1563                  * only happens in the outermost nested commit.
1564                  */
1565                 local_set(&next_page->page->commit, 0);
1566
1567                 /* Again, either we update tail_page or an interrupt does */
1568                 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1569         }
1570 }
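/*
 * Illustrative sketch only: ->write and ->entries pack two values into one
 * word.  The low bits (RB_WRITE_MASK) hold the real count, while the high
 * bits act as a counter that rb_tail_page_update() bumps by RB_WRITE_INTCNT
 * so nested writers can be detected.  The helper below uses a made-up name
 * and only shows how the two halves would be separated.
 */
static inline unsigned long rb_example_write_value(unsigned long write,
						   unsigned long *intcnt)
{
	*intcnt = write & ~RB_WRITE_MASK;	/* nested-update counter */
	return write & RB_WRITE_MASK;		/* actual write offset/count */
}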
1571
1572 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1573                           struct buffer_page *bpage)
1574 {
1575         unsigned long val = (unsigned long)bpage;
1576
1577         if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1578                 return 1;
1579
1580         return 0;
1581 }
1582
1583 /**
1584  * rb_check_pages - integrity check of buffer pages
1585  * @cpu_buffer: CPU buffer with pages to test
1586  *
1587  * As a safety measure we check to make sure the data pages have not
1588  * been corrupted.
1589  */
1590 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1591 {
1592         struct list_head *head = rb_list_head(cpu_buffer->pages);
1593         struct list_head *tmp;
1594
1595         if (RB_WARN_ON(cpu_buffer,
1596                         rb_list_head(rb_list_head(head->next)->prev) != head))
1597                 return -1;
1598
1599         if (RB_WARN_ON(cpu_buffer,
1600                         rb_list_head(rb_list_head(head->prev)->next) != head))
1601                 return -1;
1602
1603         for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
1604                 if (RB_WARN_ON(cpu_buffer,
1605                                 rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
1606                         return -1;
1607
1608                 if (RB_WARN_ON(cpu_buffer,
1609                                 rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
1610                         return -1;
1611         }
1612
1613         return 0;
1614 }
1615
1616 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1617                 long nr_pages, struct list_head *pages)
1618 {
1619         struct buffer_page *bpage, *tmp;
1620         bool user_thread = current->mm != NULL;
1621         gfp_t mflags;
1622         long i;
1623
1624         /*
1625          * Check if the available memory is there first.
1626          * Note, si_mem_available() only gives us a rough estimate of available
1627          * memory. It may not be accurate. But we don't care; we just want
1628          * to prevent doing any allocation when it is obvious that it is
1629          * not going to succeed.
1630          */
1631         i = si_mem_available();
1632         if (i < nr_pages)
1633                 return -ENOMEM;
1634
1635         /*
1636          * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1637          * gracefully without invoking oom-killer and the system is not
1638          * destabilized.
1639          */
1640         mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1641
1642         /*
1643          * If a user thread allocates too much, si_mem_available()
1644          * may report that there is enough memory even though there is not.
1645          * Make sure the OOM killer kills this thread. This can happen
1646          * even with RETRY_MAYFAIL because another task may be doing
1647          * an allocation after this task has taken all memory.
1648          * This is the task the OOM killer needs to take out during this
1649          * loop, even if it was triggered by an allocation somewhere else.
1650          */
1651         if (user_thread)
1652                 set_current_oom_origin();
1653         for (i = 0; i < nr_pages; i++) {
1654                 struct page *page;
1655
1656                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1657                                     mflags, cpu_to_node(cpu_buffer->cpu));
1658                 if (!bpage)
1659                         goto free_pages;
1660
1661                 rb_check_bpage(cpu_buffer, bpage);
1662
1663                 list_add(&bpage->list, pages);
1664
1665                 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
1666                 if (!page)
1667                         goto free_pages;
1668                 bpage->page = page_address(page);
1669                 rb_init_page(bpage->page);
1670
1671                 if (user_thread && fatal_signal_pending(current))
1672                         goto free_pages;
1673         }
1674         if (user_thread)
1675                 clear_current_oom_origin();
1676
1677         return 0;
1678
1679 free_pages:
1680         list_for_each_entry_safe(bpage, tmp, pages, list) {
1681                 list_del_init(&bpage->list);
1682                 free_buffer_page(bpage);
1683         }
1684         if (user_thread)
1685                 clear_current_oom_origin();
1686
1687         return -ENOMEM;
1688 }
1689
1690 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1691                              unsigned long nr_pages)
1692 {
1693         LIST_HEAD(pages);
1694
1695         WARN_ON(!nr_pages);
1696
1697         if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
1698                 return -ENOMEM;
1699
1700         /*
1701          * The ring buffer page list is a circular list that does not
1702          * start and end with a list head. All page list items point to
1703          * other pages.
1704          */
1705         cpu_buffer->pages = pages.next;
1706         list_del(&pages);
1707
1708         cpu_buffer->nr_pages = nr_pages;
1709
1710         rb_check_pages(cpu_buffer);
1711
1712         return 0;
1713 }
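/*
 * Illustrative sketch only: because the page list is fully circular with no
 * dedicated list_head, every buffer page can be visited by starting at any
 * page and calling rb_inc_page() until the walk comes back around.  The
 * function name is made up and assumes the buffer is quiescent (no writer
 * rearranging pages while we walk).
 */
static inline unsigned long rb_example_count_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *start = cpu_buffer->head_page;
	struct buffer_page *page = start;
	unsigned long count = 0;

	do {
		count++;
		rb_inc_page(&page);
	} while (page != start);

	return count;	/* should equal cpu_buffer->nr_pages */
}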
1714
1715 static struct ring_buffer_per_cpu *
1716 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
1717 {
1718         struct ring_buffer_per_cpu *cpu_buffer;
1719         struct buffer_page *bpage;
1720         struct page *page;
1721         int ret;
1722
1723         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1724                                   GFP_KERNEL, cpu_to_node(cpu));
1725         if (!cpu_buffer)
1726                 return NULL;
1727
1728         cpu_buffer->cpu = cpu;
1729         cpu_buffer->buffer = buffer;
1730         raw_spin_lock_init(&cpu_buffer->reader_lock);
1731         lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1732         cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1733         INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1734         init_completion(&cpu_buffer->update_done);
1735         init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1736         init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1737         init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1738
1739         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1740                             GFP_KERNEL, cpu_to_node(cpu));
1741         if (!bpage)
1742                 goto fail_free_buffer;
1743
1744         rb_check_bpage(cpu_buffer, bpage);
1745
1746         cpu_buffer->reader_page = bpage;
1747         page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1748         if (!page)
1749                 goto fail_free_reader;
1750         bpage->page = page_address(page);
1751         rb_init_page(bpage->page);
1752
1753         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1754         INIT_LIST_HEAD(&cpu_buffer->new_pages);
1755
1756         ret = rb_allocate_pages(cpu_buffer, nr_pages);
1757         if (ret < 0)
1758                 goto fail_free_reader;
1759
1760         cpu_buffer->head_page
1761                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1762         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1763
1764         rb_head_page_activate(cpu_buffer);
1765
1766         return cpu_buffer;
1767
1768  fail_free_reader:
1769         free_buffer_page(cpu_buffer->reader_page);
1770
1771  fail_free_buffer:
1772         kfree(cpu_buffer);
1773         return NULL;
1774 }
1775
1776 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1777 {
1778         struct list_head *head = cpu_buffer->pages;
1779         struct buffer_page *bpage, *tmp;
1780
1781         free_buffer_page(cpu_buffer->reader_page);
1782
1783         if (head) {
1784                 rb_head_page_deactivate(cpu_buffer);
1785
1786                 list_for_each_entry_safe(bpage, tmp, head, list) {
1787                         list_del_init(&bpage->list);
1788                         free_buffer_page(bpage);
1789                 }
1790                 bpage = list_entry(head, struct buffer_page, list);
1791                 free_buffer_page(bpage);
1792         }
1793
1794         kfree(cpu_buffer);
1795 }
1796
1797 /**
1798  * __ring_buffer_alloc - allocate a new ring_buffer
1799  * @size: the size in bytes per cpu that is needed.
1800  * @flags: attributes to set for the ring buffer.
1801  * @key: ring buffer reader_lock_key.
1802  *
1803  * Currently the only flag that is available is the RB_FL_OVERWRITE
1804  * flag. This flag means that the buffer will overwrite old data
1805  * when the buffer wraps. If this flag is not set, the buffer will
1806  * drop data when the tail hits the head.
1807  */
1808 struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1809                                         struct lock_class_key *key)
1810 {
1811         struct trace_buffer *buffer;
1812         long nr_pages;
1813         int bsize;
1814         int cpu;
1815         int ret;
1816
1817         /* keep it in its own cache line */
1818         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1819                          GFP_KERNEL);
1820         if (!buffer)
1821                 return NULL;
1822
1823         if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1824                 goto fail_free_buffer;
1825
1826         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1827         buffer->flags = flags;
1828         buffer->clock = trace_clock_local;
1829         buffer->reader_lock_key = key;
1830
1831         init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1832         init_waitqueue_head(&buffer->irq_work.waiters);
1833
1834         /* need at least two pages */
1835         if (nr_pages < 2)
1836                 nr_pages = 2;
1837
1838         buffer->cpus = nr_cpu_ids;
1839
1840         bsize = sizeof(void *) * nr_cpu_ids;
1841         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1842                                   GFP_KERNEL);
1843         if (!buffer->buffers)
1844                 goto fail_free_cpumask;
1845
1846         cpu = raw_smp_processor_id();
1847         cpumask_set_cpu(cpu, buffer->cpumask);
1848         buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1849         if (!buffer->buffers[cpu])
1850                 goto fail_free_buffers;
1851
1852         ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1853         if (ret < 0)
1854                 goto fail_free_buffers;
1855
1856         mutex_init(&buffer->mutex);
1857
1858         return buffer;
1859
1860  fail_free_buffers:
1861         for_each_buffer_cpu(buffer, cpu) {
1862                 if (buffer->buffers[cpu])
1863                         rb_free_cpu_buffer(buffer->buffers[cpu]);
1864         }
1865         kfree(buffer->buffers);
1866
1867  fail_free_cpumask:
1868         free_cpumask_var(buffer->cpumask);
1869
1870  fail_free_buffer:
1871         kfree(buffer);
1872         return NULL;
1873 }
1874 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
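/*
 * Illustrative usage sketch only: callers normally go through the
 * ring_buffer_alloc() wrapper macro from <linux/ring_buffer.h>, which
 * supplies the lock_class_key for them, and pair it with ring_buffer_free():
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);
 *
 * The size is in bytes per CPU and is rounded up to full buffer pages, with
 * a minimum of two pages as enforced above.
 */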
1875
1876 /**
1877  * ring_buffer_free - free a ring buffer.
1878  * @buffer: the buffer to free.
1879  */
1880 void
1881 ring_buffer_free(struct trace_buffer *buffer)
1882 {
1883         int cpu;
1884
1885         cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1886
1887         for_each_buffer_cpu(buffer, cpu)
1888                 rb_free_cpu_buffer(buffer->buffers[cpu]);
1889
1890         kfree(buffer->buffers);
1891         free_cpumask_var(buffer->cpumask);
1892
1893         kfree(buffer);
1894 }
1895 EXPORT_SYMBOL_GPL(ring_buffer_free);
1896
1897 void ring_buffer_set_clock(struct trace_buffer *buffer,
1898                            u64 (*clock)(void))
1899 {
1900         buffer->clock = clock;
1901 }
1902
1903 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
1904 {
1905         buffer->time_stamp_abs = abs;
1906 }
1907
1908 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
1909 {
1910         return buffer->time_stamp_abs;
1911 }
1912
1913 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1914
1915 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1916 {
1917         return local_read(&bpage->entries) & RB_WRITE_MASK;
1918 }
1919
1920 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1921 {
1922         return local_read(&bpage->write) & RB_WRITE_MASK;
1923 }
1924
1925 static int
1926 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1927 {
1928         struct list_head *tail_page, *to_remove, *next_page;
1929         struct buffer_page *to_remove_page, *tmp_iter_page;
1930         struct buffer_page *last_page, *first_page;
1931         unsigned long nr_removed;
1932         unsigned long head_bit;
1933         int page_entries;
1934
1935         head_bit = 0;
1936
1937         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1938         atomic_inc(&cpu_buffer->record_disabled);
1939         /*
1940          * We don't race with the readers since we have acquired the reader
1941          * lock. We also don't race with writers after disabling recording.
1942          * This makes it easy to figure out the first and the last page to be
1943          * removed from the list. We unlink all the pages in between including
1944          * the first and last pages. This is done in a busy loop so that we
1945          * lose the least number of traces.
1946          * The pages are freed after we restart recording and unlock readers.
1947          */
1948         tail_page = &cpu_buffer->tail_page->list;
1949
1950         /*
1951          * The tail page might be on the reader page; in that case we remove
1952          * the next page from the ring buffer.
1953          */
1954         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1955                 tail_page = rb_list_head(tail_page->next);
1956         to_remove = tail_page;
1957
1958         /* start of pages to remove */
1959         first_page = list_entry(rb_list_head(to_remove->next),
1960                                 struct buffer_page, list);
1961
1962         for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1963                 to_remove = rb_list_head(to_remove)->next;
1964                 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1965         }
1966
1967         next_page = rb_list_head(to_remove)->next;
1968
1969         /*
1970          * Now we remove all pages between tail_page and next_page.
1971          * Make sure that we have head_bit value preserved for the
1972          * next page
1973          */
1974         tail_page->next = (struct list_head *)((unsigned long)next_page |
1975                                                 head_bit);
1976         next_page = rb_list_head(next_page);
1977         next_page->prev = tail_page;
1978
1979         /* make sure pages points to a valid page in the ring buffer */
1980         cpu_buffer->pages = next_page;
1981
1982         /* update head page */
1983         if (head_bit)
1984                 cpu_buffer->head_page = list_entry(next_page,
1985                                                 struct buffer_page, list);
1986
1987         /*
1988          * change read pointer to make sure any read iterators reset
1989          * themselves
1990          */
1991         cpu_buffer->read = 0;
1992
1993         /* pages are removed, resume tracing and then free the pages */
1994         atomic_dec(&cpu_buffer->record_disabled);
1995         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1996
1997         RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1998
1999         /* last buffer page to remove */
2000         last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
2001                                 list);
2002         tmp_iter_page = first_page;
2003
2004         do {
2005                 cond_resched();
2006
2007                 to_remove_page = tmp_iter_page;
2008                 rb_inc_page(&tmp_iter_page);
2009
2010                 /* update the counters */
2011                 page_entries = rb_page_entries(to_remove_page);
2012                 if (page_entries) {
2013                         /*
2014                          * If something was added to this page, it was full
2015                          * since it is not the tail page. So we deduct the
2016                          * bytes consumed in ring buffer from here.
2017                          * Increment overrun to account for the lost events.
2018                          */
2019                         local_add(page_entries, &cpu_buffer->overrun);
2020                         local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2021                         local_inc(&cpu_buffer->pages_lost);
2022                 }
2023
2024                 /*
2025                  * We have already removed references to this list item, just
2026                  * free up the buffer_page and its page
2027                  */
2028                 free_buffer_page(to_remove_page);
2029                 nr_removed--;
2030
2031         } while (to_remove_page != last_page);
2032
2033         RB_WARN_ON(cpu_buffer, nr_removed);
2034
2035         return nr_removed == 0;
2036 }
2037
2038 static int
2039 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
2040 {
2041         struct list_head *pages = &cpu_buffer->new_pages;
2042         int retries, success;
2043         unsigned long flags;
2044
2045         /* Can be called at early boot up, where interrupts must not be enabled */
2046         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2047         /*
2048          * We are holding the reader lock, so the reader page won't be swapped
2049          * in the ring buffer. Now we are racing with the writer trying to
2050          * move head page and the tail page.
2051          * We are going to adapt the reader page update process where:
2052          * 1. We first splice the start and end of list of new pages between
2053          *    the head page and its previous page.
2054          * 2. We cmpxchg the prev_page->next to point from head page to the
2055          *    start of new pages list.
2056          * 3. Finally, we update the head->prev to the end of new list.
2057          *
2058          * We will try this process 10 times, to make sure that we don't keep
2059          * spinning.
2060          */
2061         retries = 10;
2062         success = 0;
2063         while (retries--) {
2064                 struct list_head *head_page, *prev_page, *r;
2065                 struct list_head *last_page, *first_page;
2066                 struct list_head *head_page_with_bit;
2067
2068                 head_page = &rb_set_head_page(cpu_buffer)->list;
2069                 if (!head_page)
2070                         break;
2071                 prev_page = head_page->prev;
2072
2073                 first_page = pages->next;
2074                 last_page  = pages->prev;
2075
2076                 head_page_with_bit = (struct list_head *)
2077                                      ((unsigned long)head_page | RB_PAGE_HEAD);
2078
2079                 last_page->next = head_page_with_bit;
2080                 first_page->prev = prev_page;
2081
2082                 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
2083
2084                 if (r == head_page_with_bit) {
2085                         /*
2086                          * yay, we replaced the page pointer with our new list;
2087                          * now we just have to update the head page's prev
2088                          * pointer to point to the end of the list
2089                          */
2090                         head_page->prev = last_page;
2091                         success = 1;
2092                         break;
2093                 }
2094         }
2095
2096         if (success)
2097                 INIT_LIST_HEAD(pages);
2098         /*
2099          * If we weren't successful in adding the new pages, warn and stop
2100          * tracing.
2101          */
2102         RB_WARN_ON(cpu_buffer, !success);
2103         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2104
2105         /* free pages if they weren't inserted */
2106         if (!success) {
2107                 struct buffer_page *bpage, *tmp;
2108                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2109                                          list) {
2110                         list_del_init(&bpage->list);
2111                         free_buffer_page(bpage);
2112                 }
2113         }
2114         return success;
2115 }
2116
2117 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
2118 {
2119         int success;
2120
2121         if (cpu_buffer->nr_pages_to_update > 0)
2122                 success = rb_insert_pages(cpu_buffer);
2123         else
2124                 success = rb_remove_pages(cpu_buffer,
2125                                         -cpu_buffer->nr_pages_to_update);
2126
2127         if (success)
2128                 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
2129 }
2130
2131 static void update_pages_handler(struct work_struct *work)
2132 {
2133         struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
2134                         struct ring_buffer_per_cpu, update_pages_work);
2135         rb_update_pages(cpu_buffer);
2136         complete(&cpu_buffer->update_done);
2137 }
2138
2139 /**
2140  * ring_buffer_resize - resize the ring buffer
2141  * @buffer: the buffer to resize.
2142  * @size: the new size.
2143  * @cpu_id: the cpu buffer to resize
2144  *
2145  * Minimum size is 2 * BUF_PAGE_SIZE.
2146  *
2147  * Returns 0 on success and < 0 on failure.
2148  */
2149 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
2150                         int cpu_id)
2151 {
2152         struct ring_buffer_per_cpu *cpu_buffer;
2153         unsigned long nr_pages;
2154         int cpu, err;
2155
2156         /*
2157          * Always succeed at resizing a non-existent buffer:
2158          */
2159         if (!buffer)
2160                 return 0;
2161
2162         /* Make sure the requested buffer exists */
2163         if (cpu_id != RING_BUFFER_ALL_CPUS &&
2164             !cpumask_test_cpu(cpu_id, buffer->cpumask))
2165                 return 0;
2166
2167         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
2168
2169         /* we need a minimum of two pages */
2170         if (nr_pages < 2)
2171                 nr_pages = 2;
2172
2173         /* prevent another thread from changing buffer sizes */
2174         mutex_lock(&buffer->mutex);
2175
2176
2177         if (cpu_id == RING_BUFFER_ALL_CPUS) {
2178                 /*
2179                  * Don't succeed if resizing is disabled, as a reader might be
2180                  * manipulating the ring buffer and is expecting a sane state while
2181                  * this is true.
2182                  */
2183                 for_each_buffer_cpu(buffer, cpu) {
2184                         cpu_buffer = buffer->buffers[cpu];
2185                         if (atomic_read(&cpu_buffer->resize_disabled)) {
2186                                 err = -EBUSY;
2187                                 goto out_err_unlock;
2188                         }
2189                 }
2190
2191                 /* calculate the pages to update */
2192                 for_each_buffer_cpu(buffer, cpu) {
2193                         cpu_buffer = buffer->buffers[cpu];
2194
2195                         cpu_buffer->nr_pages_to_update = nr_pages -
2196                                                         cpu_buffer->nr_pages;
2197                         /*
2198                          * nothing more to do when removing pages, or when there is no update
2199                          */
2200                         if (cpu_buffer->nr_pages_to_update <= 0)
2201                                 continue;
2202                         /*
2203                          * to add pages, make sure all new pages can be
2204                          * allocated without receiving ENOMEM
2205                          */
2206                         INIT_LIST_HEAD(&cpu_buffer->new_pages);
2207                         if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2208                                                 &cpu_buffer->new_pages)) {
2209                                 /* not enough memory for new pages */
2210                                 err = -ENOMEM;
2211                                 goto out_err;
2212                         }
2213                 }
2214
2215                 cpus_read_lock();
2216                 /*
2217                  * Fire off all the required work handlers
2218                  * We can't schedule on offline CPUs, but it's not necessary
2219                  * since we can change their buffer sizes without any race.
2220                  */
2221                 for_each_buffer_cpu(buffer, cpu) {
2222                         cpu_buffer = buffer->buffers[cpu];
2223                         if (!cpu_buffer->nr_pages_to_update)
2224                                 continue;
2225
2226                         /* Can't run something on an offline CPU. */
2227                         if (!cpu_online(cpu)) {
2228                                 rb_update_pages(cpu_buffer);
2229                                 cpu_buffer->nr_pages_to_update = 0;
2230                         } else {
2231                                 /* Run directly if possible. */
2232                                 migrate_disable();
2233                                 if (cpu != smp_processor_id()) {
2234                                         migrate_enable();
2235                                         schedule_work_on(cpu,
2236                                                          &cpu_buffer->update_pages_work);
2237                                 } else {
2238                                         update_pages_handler(&cpu_buffer->update_pages_work);
2239                                         migrate_enable();
2240                                 }
2241                         }
2242                 }
2243
2244                 /* wait for all the updates to complete */
2245                 for_each_buffer_cpu(buffer, cpu) {
2246                         cpu_buffer = buffer->buffers[cpu];
2247                         if (!cpu_buffer->nr_pages_to_update)
2248                                 continue;
2249
2250                         if (cpu_online(cpu))
2251                                 wait_for_completion(&cpu_buffer->update_done);
2252                         cpu_buffer->nr_pages_to_update = 0;
2253                 }
2254
2255                 cpus_read_unlock();
2256         } else {
2257                 cpu_buffer = buffer->buffers[cpu_id];
2258
2259                 if (nr_pages == cpu_buffer->nr_pages)
2260                         goto out;
2261
2262                 /*
2263                  * Don't succeed if resizing is disabled, as a reader might be
2264                  * manipulating the ring buffer and is expecting a sane state while
2265                  * this is true.
2266                  */
2267                 if (atomic_read(&cpu_buffer->resize_disabled)) {
2268                         err = -EBUSY;
2269                         goto out_err_unlock;
2270                 }
2271
2272                 cpu_buffer->nr_pages_to_update = nr_pages -
2273                                                 cpu_buffer->nr_pages;
2274
2275                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
2276                 if (cpu_buffer->nr_pages_to_update > 0 &&
2277                         __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2278                                             &cpu_buffer->new_pages)) {
2279                         err = -ENOMEM;
2280                         goto out_err;
2281                 }
2282
2283                 cpus_read_lock();
2284
2285                 /* Can't run something on an offline CPU. */
2286                 if (!cpu_online(cpu_id))
2287                         rb_update_pages(cpu_buffer);
2288                 else {
2289                         /* Run directly if possible. */
2290                         migrate_disable();
2291                         if (cpu_id == smp_processor_id()) {
2292                                 rb_update_pages(cpu_buffer);
2293                                 migrate_enable();
2294                         } else {
2295                                 migrate_enable();
2296                                 schedule_work_on(cpu_id,
2297                                                  &cpu_buffer->update_pages_work);
2298                                 wait_for_completion(&cpu_buffer->update_done);
2299                         }
2300                 }
2301
2302                 cpu_buffer->nr_pages_to_update = 0;
2303                 cpus_read_unlock();
2304         }
2305
2306  out:
2307         /*
2308          * The ring buffer resize can happen with the ring buffer
2309          * enabled, so that the update disturbs the tracing as little
2310          * as possible. But if the buffer is disabled, we do not need
2311          * to worry about that, and we can take the time to verify
2312          * that the buffer is not corrupt.
2313          */
2314         if (atomic_read(&buffer->record_disabled)) {
2315                 atomic_inc(&buffer->record_disabled);
2316                 /*
2317                  * Even though the buffer was disabled, we must make sure
2318                  * that it is truly disabled before calling rb_check_pages.
2319                  * There could have been a race between checking
2320                  * record_disable and incrementing it.
2321                  */
2322                 synchronize_rcu();
2323                 for_each_buffer_cpu(buffer, cpu) {
2324                         cpu_buffer = buffer->buffers[cpu];
2325                         rb_check_pages(cpu_buffer);
2326                 }
2327                 atomic_dec(&buffer->record_disabled);
2328         }
2329
2330         mutex_unlock(&buffer->mutex);
2331         return 0;
2332
2333  out_err:
2334         for_each_buffer_cpu(buffer, cpu) {
2335                 struct buffer_page *bpage, *tmp;
2336
2337                 cpu_buffer = buffer->buffers[cpu];
2338                 cpu_buffer->nr_pages_to_update = 0;
2339
2340                 if (list_empty(&cpu_buffer->new_pages))
2341                         continue;
2342
2343                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2344                                         list) {
2345                         list_del_init(&bpage->list);
2346                         free_buffer_page(bpage);
2347                 }
2348         }
2349  out_err_unlock:
2350         mutex_unlock(&buffer->mutex);
2351         return err;
2352 }
2353 EXPORT_SYMBOL_GPL(ring_buffer_resize);
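/*
 * Illustrative usage sketch only: resize every per-CPU buffer to 8 MiB and
 * then shrink just CPU 0 back to the two-page minimum.  The byte count is
 * rounded up to whole buffer pages, as described above.
 *
 *	ret = ring_buffer_resize(buf, 8 << 20, RING_BUFFER_ALL_CPUS);
 *	if (!ret)
 *		ret = ring_buffer_resize(buf, 2 * BUF_PAGE_SIZE, 0);
 */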
2354
2355 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
2356 {
2357         mutex_lock(&buffer->mutex);
2358         if (val)
2359                 buffer->flags |= RB_FL_OVERWRITE;
2360         else
2361                 buffer->flags &= ~RB_FL_OVERWRITE;
2362         mutex_unlock(&buffer->mutex);
2363 }
2364 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
2365
2366 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
2367 {
2368         return bpage->page->data + index;
2369 }
2370
2371 static __always_inline struct ring_buffer_event *
2372 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
2373 {
2374         return __rb_page_index(cpu_buffer->reader_page,
2375                                cpu_buffer->reader_page->read);
2376 }
2377
2378 static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
2379 {
2380         return local_read(&bpage->page->commit);
2381 }
2382
2383 static struct ring_buffer_event *
2384 rb_iter_head_event(struct ring_buffer_iter *iter)
2385 {
2386         struct ring_buffer_event *event;
2387         struct buffer_page *iter_head_page = iter->head_page;
2388         unsigned long commit;
2389         unsigned length;
2390
2391         if (iter->head != iter->next_event)
2392                 return iter->event;
2393
2394         /*
2395          * When the writer goes across pages, it issues a cmpxchg which
2396          * is a mb(), which will synchronize with the rmb here.
2397          * (see rb_tail_page_update() and __rb_reserve_next())
2398          */
2399         commit = rb_page_commit(iter_head_page);
2400         smp_rmb();
2401         event = __rb_page_index(iter_head_page, iter->head);
2402         length = rb_event_length(event);
2403
2404         /*
2405          * READ_ONCE() doesn't work on functions and we don't want the
2406          * compiler doing any crazy optimizations with length.
2407          */
2408         barrier();
2409
2410         if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
2411                 /* Writer corrupted the read? */
2412                 goto reset;
2413
2414         memcpy(iter->event, event, length);
2415         /*
2416          * If the page stamp is still the same after this rmb() then the
2417          * event was safely copied without the writer entering the page.
2418          */
2419         smp_rmb();
2420
2421         /* Make sure the page didn't change since we read this */
2422         if (iter->page_stamp != iter_head_page->page->time_stamp ||
2423             commit > rb_page_commit(iter_head_page))
2424                 goto reset;
2425
2426         iter->next_event = iter->head + length;
2427         return iter->event;
2428  reset:
2429         /* Reset to the beginning */
2430         iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2431         iter->head = 0;
2432         iter->next_event = 0;
2433         iter->missed_events = 1;
2434         return NULL;
2435 }
2436
2437 /* Size is determined by what has been committed */
2438 static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
2439 {
2440         return rb_page_commit(bpage);
2441 }
2442
2443 static __always_inline unsigned
2444 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2445 {
2446         return rb_page_commit(cpu_buffer->commit_page);
2447 }
2448
2449 static __always_inline unsigned
2450 rb_event_index(struct ring_buffer_event *event)
2451 {
2452         unsigned long addr = (unsigned long)event;
2453
2454         return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
2455 }
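/*
 * Worked example (illustrative): on a typical 64-bit build BUF_PAGE_HDR_SIZE
 * is 16, so an event whose address falls at offset 0x50 within its page has
 * rb_event_index() == 0x40, i.e. the offset within the page's data area
 * rather than within the raw page.
 */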
2456
2457 static void rb_inc_iter(struct ring_buffer_iter *iter)
2458 {
2459         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2460
2461         /*
2462          * The iterator could be on the reader page (it starts there).
2463          * But the head could have moved, since the reader was
2464          * found. Check for this case and assign the iterator
2465          * to the head page instead of next.
2466          */
2467         if (iter->head_page == cpu_buffer->reader_page)
2468                 iter->head_page = rb_set_head_page(cpu_buffer);
2469         else
2470                 rb_inc_page(&iter->head_page);
2471
2472         iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2473         iter->head = 0;
2474         iter->next_event = 0;
2475 }
2476
2477 /*
2478  * rb_handle_head_page - writer hit the head page
2479  *
2480  * Returns: +1 to retry page
2481  *           0 to continue
2482  *          -1 on error
2483  */
2484 static int
2485 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2486                     struct buffer_page *tail_page,
2487                     struct buffer_page *next_page)
2488 {
2489         struct buffer_page *new_head;
2490         int entries;
2491         int type;
2492         int ret;
2493
2494         entries = rb_page_entries(next_page);
2495
2496         /*
2497          * The hard part is here. We need to move the head
2498          * forward, and protect against both readers on
2499          * other CPUs and writers coming in via interrupts.
2500          */
2501         type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2502                                        RB_PAGE_HEAD);
2503
2504         /*
2505          * type can be one of four:
2506          *  NORMAL - an interrupt already moved it for us
2507          *  HEAD   - we are the first to get here.
2508          *  UPDATE - we are the interrupt interrupting
2509          *           a current move.
2510          *  MOVED  - a reader on another CPU moved the next
2511          *           pointer to its reader page. Give up
2512          *           and try again.
2513          */
2514
2515         switch (type) {
2516         case RB_PAGE_HEAD:
2517                 /*
2518                  * We changed the head to UPDATE, thus
2519                  * it is our responsibility to update
2520                  * the counters.
2521                  */
2522                 local_add(entries, &cpu_buffer->overrun);
2523                 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2524                 local_inc(&cpu_buffer->pages_lost);
2525
2526                 /*
2527                  * The entries will be zeroed out when we move the
2528                  * tail page.
2529                  */
2530
2531                 /* still more to do */
2532                 break;
2533
2534         case RB_PAGE_UPDATE:
2535                 /*
2536                  * This is an interrupt that interrupted the
2537                  * previous update. Still more to do.
2538                  */
2539                 break;
2540         case RB_PAGE_NORMAL:
2541                 /*
2542                  * An interrupt came in before the update
2543                  * and processed this for us.
2544                  * Nothing left to do.
2545                  */
2546                 return 1;
2547         case RB_PAGE_MOVED:
2548                 /*
2549                  * The reader is on another CPU and just did
2550                  * a swap with our next_page.
2551                  * Try again.
2552                  */
2553                 return 1;
2554         default:
2555                 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2556                 return -1;
2557         }
2558
2559         /*
2560          * Now that we are here, the old head pointer is
2561          * set to UPDATE. This will keep the reader from
2562          * swapping the head page with the reader page.
2563          * The reader (on another CPU) will spin till
2564          * we are finished.
2565          *
2566          * We just need to protect against interrupts
2567          * doing the job. We will set the next pointer
2568          * to HEAD. After that, we set the old pointer
2569          * to NORMAL, but only if it was HEAD before.
2570          * Otherwise we are an interrupt, and only
2571          * want the outermost commit to reset it.
2572          */
2573         new_head = next_page;
2574         rb_inc_page(&new_head);
2575
2576         ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2577                                     RB_PAGE_NORMAL);
2578
2579         /*
2580          * Valid returns are:
2581          *  HEAD   - an interrupt came in and already set it.
2582          *  NORMAL - One of two things:
2583          *            1) We really set it.
2584          *            2) A bunch of interrupts came in and moved
2585          *               the page forward again.
2586          */
2587         switch (ret) {
2588         case RB_PAGE_HEAD:
2589         case RB_PAGE_NORMAL:
2590                 /* OK */
2591                 break;
2592         default:
2593                 RB_WARN_ON(cpu_buffer, 1);
2594                 return -1;
2595         }
2596
2597         /*
2598          * It is possible that an interrupt came in,
2599          * set the head up, then more interrupts came in
2600          * and moved it again. When we get back here,
2601          * the page would have been set to NORMAL but we
2602          * just set it back to HEAD.
2603          *
2604          * How do you detect this? Well, if that happened
2605          * the tail page would have moved.
2606          */
2607         if (ret == RB_PAGE_NORMAL) {
2608                 struct buffer_page *buffer_tail_page;
2609
2610                 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2611                 /*
2612                  * If the tail had moved past next, then we need
2613                  * to reset the pointer.
2614                  */
2615                 if (buffer_tail_page != tail_page &&
2616                     buffer_tail_page != next_page)
2617                         rb_head_page_set_normal(cpu_buffer, new_head,
2618                                                 next_page,
2619                                                 RB_PAGE_HEAD);
2620         }
2621
2622         /*
2623          * If this was the outer most commit (the one that
2624          * changed the original pointer from HEAD to UPDATE),
2625          * then it is up to us to reset it to NORMAL.
2626          */
2627         if (type == RB_PAGE_HEAD) {
2628                 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2629                                               tail_page,
2630                                               RB_PAGE_UPDATE);
2631                 if (RB_WARN_ON(cpu_buffer,
2632                                ret != RB_PAGE_UPDATE))
2633                         return -1;
2634         }
2635
2636         return 0;
2637 }
2638
2639 static inline void
2640 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2641               unsigned long tail, struct rb_event_info *info)
2642 {
2643         struct buffer_page *tail_page = info->tail_page;
2644         struct ring_buffer_event *event;
2645         unsigned long length = info->length;
2646
2647         /*
2648          * Only the event that crossed the page boundary
2649          * must fill the old tail_page with padding.
2650          */
2651         if (tail >= BUF_PAGE_SIZE) {
2652                 /*
2653                  * If the page was filled, then we still need
2654                  * to update the real_end. Reset it to zero
2655                  * and the reader will ignore it.
2656                  */
2657                 if (tail == BUF_PAGE_SIZE)
2658                         tail_page->real_end = 0;
2659
2660                 local_sub(length, &tail_page->write);
2661                 return;
2662         }
2663
2664         event = __rb_page_index(tail_page, tail);
2665
2666         /* account for padding bytes */
2667         local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2668
2669         /*
2670          * Save the original length to the meta data.
2671          * This will be used by the reader to add lost event
2672          * counter.
2673          */
2674         tail_page->real_end = tail;
2675
2676         /*
2677          * If this event is bigger than the minimum size, then
2678          * we need to be careful that we don't subtract the
2679          * write counter enough to allow another writer to slip
2680          * in on this page.
2681          * We put in a discarded commit instead, to make sure
2682          * that this space is not used again.
2683          *
2684          * If we are less than the minimum size, we don't need to
2685          * worry about it.
2686          */
2687         if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2688                 /* No room for any events */
2689
2690                 /* Mark the rest of the page with padding */
2691                 rb_event_set_padding(event);
2692
2693                 /* Make sure the padding is visible before the write update */
2694                 smp_wmb();
2695
2696                 /* Set the write back to the previous setting */
2697                 local_sub(length, &tail_page->write);
2698                 return;
2699         }
2700
2701         /* Put in a discarded event */
2702         event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2703         event->type_len = RINGBUF_TYPE_PADDING;
2704         /* time delta must be non zero */
2705         event->time_delta = 1;
2706
2707         /* Make sure the padding is visible before the tail_page->write update */
2708         smp_wmb();
2709
2710         /* Set write to end of buffer */
2711         length = (tail + length) - BUF_PAGE_SIZE;
2712         local_sub(length, &tail_page->write);
2713 }
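/*
 * Worked example (illustrative, assuming 4 KiB pages so BUF_PAGE_SIZE is
 * 4080): if an event would start at tail == 4060, the 20 bytes left on the
 * page are turned into a padding event whose array[0] is
 * 20 - RB_EVNT_HDR_SIZE = 16, so a reader skips straight past the unusable
 * end of the page.
 */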
2714
2715 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2716
2717 /*
2718  * This is the slow path, force gcc not to inline it.
2719  */
2720 static noinline struct ring_buffer_event *
2721 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2722              unsigned long tail, struct rb_event_info *info)
2723 {
2724         struct buffer_page *tail_page = info->tail_page;
2725         struct buffer_page *commit_page = cpu_buffer->commit_page;
2726         struct trace_buffer *buffer = cpu_buffer->buffer;
2727         struct buffer_page *next_page;
2728         int ret;
2729
2730         next_page = tail_page;
2731
2732         rb_inc_page(&next_page);
2733
2734         /*
2735          * If for some reason, we had an interrupt storm that made
2736          * it all the way around the buffer, bail, and warn
2737          * about it.
2738          */
2739         if (unlikely(next_page == commit_page)) {
2740                 local_inc(&cpu_buffer->commit_overrun);
2741                 goto out_reset;
2742         }
2743
2744         /*
2745          * This is where the fun begins!
2746          *
2747          * We are fighting against races between a reader that
2748          * could be on another CPU trying to swap its reader
2749          * page with the buffer head.
2750          *
2751          * We are also fighting against interrupts coming in and
2752          * moving the head or tail on us as well.
2753          *
2754          * If the next page is the head page then we have filled
2755          * the buffer, unless the commit page is still on the
2756          * reader page.
2757          */
2758         if (rb_is_head_page(next_page, &tail_page->list)) {
2759
2760                 /*
2761                  * If the commit is not on the reader page, then
2762                  * move the header page.
2763                  */
2764                 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2765                         /*
2766                          * If we are not in overwrite mode,
2767                          * this is easy, just stop here.
2768                          */
2769                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
2770                                 local_inc(&cpu_buffer->dropped_events);
2771                                 goto out_reset;
2772                         }
2773
2774                         ret = rb_handle_head_page(cpu_buffer,
2775                                                   tail_page,
2776                                                   next_page);
2777                         if (ret < 0)
2778                                 goto out_reset;
2779                         if (ret)
2780                                 goto out_again;
2781                 } else {
2782                         /*
2783                          * We need to be careful here too. The
2784                          * commit page could still be on the reader
2785                          * page. We could have a small buffer, and
2786                          * have filled up the buffer with events
2787                          * from interrupts and such, and wrapped.
2788                          *
2789                          * Note, if the tail page is also on the
2790                          * reader_page, we let it move out.
2791                          */
2792                         if (unlikely((cpu_buffer->commit_page !=
2793                                       cpu_buffer->tail_page) &&
2794                                      (cpu_buffer->commit_page ==
2795                                       cpu_buffer->reader_page))) {
2796                                 local_inc(&cpu_buffer->commit_overrun);
2797                                 goto out_reset;
2798                         }
2799                 }
2800         }
2801
2802         rb_tail_page_update(cpu_buffer, tail_page, next_page);
2803
2804  out_again:
2805
2806         rb_reset_tail(cpu_buffer, tail, info);
2807
2808         /* Commit what we have for now. */
2809         rb_end_commit(cpu_buffer);
2810         /* rb_end_commit() decs committing */
2811         local_inc(&cpu_buffer->committing);
2812
2813         /* fail and let the caller try again */
2814         return ERR_PTR(-EAGAIN);
2815
2816  out_reset:
2817         /* reset write */
2818         rb_reset_tail(cpu_buffer, tail, info);
2819
2820         return NULL;
2821 }
2822
2823 /* Slow path */
2824 static struct ring_buffer_event *
2825 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
2826 {
2827         if (abs)
2828                 event->type_len = RINGBUF_TYPE_TIME_STAMP;
2829         else
2830                 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2831
2832         /* Not the first event on the page, or not delta? */
2833         if (abs || rb_event_index(event)) {
2834                 event->time_delta = delta & TS_MASK;
2835                 event->array[0] = delta >> TS_SHIFT;
2836         } else {
2837                 /* nope, just zero it */
2838                 event->time_delta = 0;
2839                 event->array[0] = 0;
2840         }
2841
2842         return skip_time_extend(event);
2843 }
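/*
 * Worked example (illustrative): with TS_SHIFT == 27, a delta of 0x12345678
 * does not fit in the 27-bit time_delta field.  rb_add_time_stamp() stores
 * the low bits, 0x2345678, in event->time_delta and the remaining high
 * bits, 0x2, in event->array[0], to be recombined by the reader.
 */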
2844
2845 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2846 static inline bool sched_clock_stable(void)
2847 {
2848         return true;
2849 }
2850 #endif
2851
2852 static void
2853 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2854                    struct rb_event_info *info)
2855 {
2856         u64 write_stamp;
2857
2858         WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
2859                   (unsigned long long)info->delta,
2860                   (unsigned long long)info->ts,
2861                   (unsigned long long)info->before,
2862                   (unsigned long long)info->after,
2863                   (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0),
2864                   sched_clock_stable() ? "" :
2865                   "If you just came from a suspend/resume,\n"
2866                   "please switch to the trace global clock:\n"
2867                   "  echo global > /sys/kernel/tracing/trace_clock\n"
2868                   "or add trace_clock=global to the kernel command line\n");
2869 }
2870
2871 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2872                                       struct ring_buffer_event **event,
2873                                       struct rb_event_info *info,
2874                                       u64 *delta,
2875                                       unsigned int *length)
2876 {
2877         bool abs = info->add_timestamp &
2878                 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2879
2880         if (unlikely(info->delta > (1ULL << 59))) {
2881                 /*
2882                  * Some timers can use more than 59 bits, and when a timestamp
2883                  * is added to the buffer, it will lose those bits.
2884                  */
2885                 if (abs && (info->ts & TS_MSB)) {
2886                         info->delta &= ABS_TS_MASK;
2887
2888                 /* did the clock go backwards? */
2889                 } else if (info->before == info->after && info->before > info->ts) {
2890                         /* not interrupted */
2891                         static int once;
2892
2893                         /*
2894                          * This is possible with a recalibration of the TSC.
2895                          * Do not produce a call stack, but just report it.
2896                          */
2897                         if (!once) {
2898                                 once++;
2899                                 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2900                                         info->before, info->ts);
2901                         }
2902                 } else
2903                         rb_check_timestamp(cpu_buffer, info);
2904                 if (!abs)
2905                         info->delta = 0;
2906         }
2907         *event = rb_add_time_stamp(*event, info->delta, abs);
2908         *length -= RB_LEN_TIME_EXTEND;
2909         *delta = 0;
2910 }
2911
2912 /**
2913  * rb_update_event - update event type and data
2914  * @cpu_buffer: The per cpu buffer of the @event
2915  * @event: the event to update
2916  * @info: The info to update the @event with (contains length and delta)
2917  *
2918  * Update the type and data fields of the @event. The length
2919  * is the actual size that is written to the ring buffer,
2920  * and with this, we can determine what to place into the
2921  * data field.
2922  */
2923 static void
2924 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2925                 struct ring_buffer_event *event,
2926                 struct rb_event_info *info)
2927 {
2928         unsigned length = info->length;
2929         u64 delta = info->delta;
2930         unsigned int nest = local_read(&cpu_buffer->committing) - 1;
2931
2932         if (!WARN_ON_ONCE(nest >= MAX_NEST))
2933                 cpu_buffer->event_stamp[nest] = info->ts;
2934
2935         /*
2936          * If we need to add a timestamp, then we
2937          * add it to the start of the reserved space.
2938          */
2939         if (unlikely(info->add_timestamp))
2940                 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
2941
2942         event->time_delta = delta;
2943         length -= RB_EVNT_HDR_SIZE;
2944         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2945                 event->type_len = 0;
2946                 event->array[0] = length;
2947         } else
2948                 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2949 }
2950
2951 static unsigned rb_calculate_event_length(unsigned length)
2952 {
2953         struct ring_buffer_event event; /* Used only for sizeof array */
2954
2955         /* zero length can cause confusion */
2956         if (!length)
2957                 length++;
2958
2959         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2960                 length += sizeof(event.array[0]);
2961
2962         length += RB_EVNT_HDR_SIZE;
2963         length = ALIGN(length, RB_ARCH_ALIGNMENT);
2964
2965         /*
2966          * In case the time delta is larger than the 27 bits for it
2967          * in the header, we need to add a timestamp. If another
2968          * event comes in when trying to discard this one to increase
2969          * the length, then the timestamp will be added in the allocated
2970          * space of this event. If length is bigger than the size needed
2971          * for the TIME_EXTEND, then padding has to be used. The event's
2972          * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2973          * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2974          * As length is a multiple of 4, we only need to worry if it
2975          * is 12 (RB_LEN_TIME_EXTEND + 4).
2976          */
2977         if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2978                 length += RB_ALIGNMENT;
2979
2980         return length;
2981 }
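/*
 * Worked example of the calculation above (illustrative only, assuming a
 * common configuration where RB_EVNT_HDR_SIZE == 4, RB_ALIGNMENT == 4,
 * RB_ARCH_ALIGNMENT == 4, RB_LEN_TIME_EXTEND == 8 and
 * RB_FORCE_8BYTE_ALIGNMENT is not set):
 *
 *	requested 3 bytes:  3 + 4 =  7, ALIGN(7, 4)  =  8  (kept)
 *	requested 8 bytes:  8 + 4 = 12, ALIGN(12, 4) = 12  (bumped to 16)
 *	requested 9 bytes:  9 + 4 = 13, ALIGN(13, 4) = 16  (kept)
 *
 * Only the 12 byte result is rounded up, as it is the one size that could
 * not hold a TIME_EXTEND (8 bytes) plus the minimum 8 bytes of padding.
 */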
2982
2983 static u64 rb_time_delta(struct ring_buffer_event *event)
2984 {
2985         switch (event->type_len) {
2986         case RINGBUF_TYPE_PADDING:
2987                 return 0;
2988
2989         case RINGBUF_TYPE_TIME_EXTEND:
2990                 return rb_event_time_stamp(event);
2991
2992         case RINGBUF_TYPE_TIME_STAMP:
2993                 return 0;
2994
2995         case RINGBUF_TYPE_DATA:
2996                 return event->time_delta;
2997         default:
2998                 return 0;
2999         }
3000 }
3001
3002 static inline int
3003 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
3004                   struct ring_buffer_event *event)
3005 {
3006         unsigned long new_index, old_index;
3007         struct buffer_page *bpage;
3008         unsigned long index;
3009         unsigned long addr;
3010         u64 write_stamp;
3011         u64 delta;
3012
3013         new_index = rb_event_index(event);
3014         old_index = new_index + rb_event_ts_length(event);
3015         addr = (unsigned long)event;
3016         addr &= PAGE_MASK;
3017
3018         bpage = READ_ONCE(cpu_buffer->tail_page);
3019
3020         delta = rb_time_delta(event);
3021
3022         if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
3023                 return 0;
3024
3025         /* Make sure the write stamp is read before testing the location */
3026         barrier();
3027
3028         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
3029                 unsigned long write_mask =
3030                         local_read(&bpage->write) & ~RB_WRITE_MASK;
3031                 unsigned long event_length = rb_event_length(event);
3032
3033                 /* Something came in, can't discard */
3034                 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
3035                                        write_stamp, write_stamp - delta))
3036                         return 0;
3037
3038                 /*
3039                  * It's possible that the event time delta is zero
3040                  * (has the same time stamp as the previous event)
3041                  * in which case write_stamp and before_stamp could
3042                  * be the same. In such a case, force before_stamp
3043                  * to be different than write_stamp. It doesn't
3044                  * matter what it is, as long as it's different.
3045                  */
3046                 if (!delta)
3047                         rb_time_set(&cpu_buffer->before_stamp, 0);
3048
3049                 /*
3050                  * If an event were to come in now, it would see that the
3051                  * write_stamp and the before_stamp are different, and assume
3052                  * that this event just added itself before updating
3053                  * the write stamp. The interrupting event will fix the
3054                  * write stamp for us, and use the before stamp as its delta.
3055                  */
3056
3057                 /*
3058                  * This is on the tail page. It is possible that
3059                  * a write could come in and move the tail page
3060                  * and write to the next page. That is fine
3061                  * because we just shorten what is on this page.
3062                  */
3063                 old_index += write_mask;
3064                 new_index += write_mask;
3065                 index = local_cmpxchg(&bpage->write, old_index, new_index);
3066                 if (index == old_index) {
3067                         /* update counters */
3068                         local_sub(event_length, &cpu_buffer->entries_bytes);
3069                         return 1;
3070                 }
3071         }
3072
3073         /* could not discard */
3074         return 0;
3075 }
3076
3077 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
3078 {
3079         local_inc(&cpu_buffer->committing);
3080         local_inc(&cpu_buffer->commits);
3081 }
3082
3083 static __always_inline void
3084 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
3085 {
3086         unsigned long max_count;
3087
3088         /*
3089          * We only race with interrupts and NMIs on this CPU.
3090          * If we own the commit event, then we can commit
3091          * all others that interrupted us, since the interruptions
3092          * are in stack format (they finish before they come
3093          * back to us). This allows us to do a simple loop to
3094          * assign the commit to the tail.
3095          */
3096  again:
3097         max_count = cpu_buffer->nr_pages * 100;
3098
3099         while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
3100                 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
3101                         return;
3102                 if (RB_WARN_ON(cpu_buffer,
3103                                rb_is_reader_page(cpu_buffer->tail_page)))
3104                         return;
3105                 local_set(&cpu_buffer->commit_page->page->commit,
3106                           rb_page_write(cpu_buffer->commit_page));
3107                 rb_inc_page(&cpu_buffer->commit_page);
3108                 /* add barrier to keep gcc from optimizing too much */
3109                 barrier();
3110         }
3111         while (rb_commit_index(cpu_buffer) !=
3112                rb_page_write(cpu_buffer->commit_page)) {
3113
3114                 local_set(&cpu_buffer->commit_page->page->commit,
3115                           rb_page_write(cpu_buffer->commit_page));
3116                 RB_WARN_ON(cpu_buffer,
3117                            local_read(&cpu_buffer->commit_page->page->commit) &
3118                            ~RB_WRITE_MASK);
3119                 barrier();
3120         }
3121
3122         /* again, keep gcc from optimizing */
3123         barrier();
3124
3125         /*
3126          * If an interrupt came in just after the first while loop
3127          * and pushed the tail page forward, we will be left with
3128          * a dangling commit that will never go forward.
3129          */
3130         if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
3131                 goto again;
3132 }
3133
3134 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
3135 {
3136         unsigned long commits;
3137
3138         if (RB_WARN_ON(cpu_buffer,
3139                        !local_read(&cpu_buffer->committing)))
3140                 return;
3141
3142  again:
3143         commits = local_read(&cpu_buffer->commits);
3144         /* synchronize with interrupts */
3145         barrier();
3146         if (local_read(&cpu_buffer->committing) == 1)
3147                 rb_set_commit_to_write(cpu_buffer);
3148
3149         local_dec(&cpu_buffer->committing);
3150
3151         /* synchronize with interrupts */
3152         barrier();
3153
3154         /*
3155          * Need to account for interrupts coming in between the
3156          * updating of the commit page and the clearing of the
3157          * committing counter.
3158          */
3159         if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3160             !local_read(&cpu_buffer->committing)) {
3161                 local_inc(&cpu_buffer->committing);
3162                 goto again;
3163         }
3164 }
3165
3166 static inline void rb_event_discard(struct ring_buffer_event *event)
3167 {
3168         if (extended_time(event))
3169                 event = skip_time_extend(event);
3170
3171         /* array[0] holds the actual length for the discarded event */
3172         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3173         event->type_len = RINGBUF_TYPE_PADDING;
3174         /* time delta must be non zero */
3175         if (!event->time_delta)
3176                 event->time_delta = 1;
3177 }
3178
3179 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
3180 {
3181         local_inc(&cpu_buffer->entries);
3182         rb_end_commit(cpu_buffer);
3183 }
3184
3185 static __always_inline void
3186 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3187 {
3188         if (buffer->irq_work.waiters_pending) {
3189                 buffer->irq_work.waiters_pending = false;
3190                 /* irq_work_queue() supplies its own memory barriers */
3191                 irq_work_queue(&buffer->irq_work.work);
3192         }
3193
3194         if (cpu_buffer->irq_work.waiters_pending) {
3195                 cpu_buffer->irq_work.waiters_pending = false;
3196                 /* irq_work_queue() supplies its own memory barriers */
3197                 irq_work_queue(&cpu_buffer->irq_work.work);
3198         }
3199
3200         if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3201                 return;
3202
3203         if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3204                 return;
3205
3206         if (!cpu_buffer->irq_work.full_waiters_pending)
3207                 return;
3208
3209         cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3210
3211         if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
3212                 return;
3213
3214         cpu_buffer->irq_work.wakeup_full = true;
3215         cpu_buffer->irq_work.full_waiters_pending = false;
3216         /* irq_work_queue() supplies its own memory barriers */
3217         irq_work_queue(&cpu_buffer->irq_work.work);
3218 }
3219
3220 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3221 # define do_ring_buffer_record_recursion()      \
3222         do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3223 #else
3224 # define do_ring_buffer_record_recursion() do { } while (0)
3225 #endif
3226
3227 /*
3228  * The lock and unlock are done within a preempt disable section.
3229  * The current_context per_cpu variable can only be modified
3230  * by the current task between lock and unlock. But it can
3231  * be modified more than once via an interrupt. To pass this
3232  * information from the lock to the unlock without having to
3233  * access the 'in_interrupt()' functions again (which do show
3234  * a bit of overhead in something as critical as function tracing),
3235  * we use a bitmask trick.
3236  *
3237  *  bit 1 =  NMI context
3238  *  bit 2 =  IRQ context
3239  *  bit 3 =  SoftIRQ context
3240  *  bit 4 =  normal context.
3241  *
3242  * This works because this is the order of contexts that can
3243  * preempt other contexts. A SoftIRQ never preempts an IRQ
3244  * context.
3245  *
3246  * When the context is determined, the corresponding bit is
3247  * checked and set (if it was set, then a recursion of that context
3248  * happened).
3249  *
3250  * On unlock, we need to clear this bit. To do so, just subtract
3251  * 1 from the current_context and AND it to itself.
3252  *
3253  * (binary)
3254  *  101 - 1 = 100
3255  *  101 & 100 = 100 (clearing bit zero)
3256  *
3257  *  1010 - 1 = 1001
3258  *  1010 & 1001 = 1000 (clearing bit 1)
3259  *
3260  * The least significant bit can be cleared this way, and it
3261  * just so happens that it is the same bit corresponding to
3262  * the current context.
3263  *
3264  * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3265  * is set when a recursion is detected at the current context, and if
3266  * the TRANSITION bit is already set, it will fail the recursion.
3267  * This is needed because there's a lag between the changing of
3268  * interrupt context and updating the preempt count. In this case,
3269  * a false positive will be found. To handle this, one extra recursion
3270  * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3271  * bit is already set, then it is considered a recursion and the function
3272  * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3273  *
3274  * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3275  * to be cleared, even if it wasn't the context that set it. That is,
3276  * if an interrupt comes in while NORMAL bit is set and the ring buffer
3277  * is called before preempt_count() is updated, since the check will
3278  * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3279  * NMI then comes in, it will set the NMI bit, but when the NMI code
3280  * does the trace_recursive_unlock() it will clear the TRANSITION bit
3281  * and leave the NMI bit set. But this is fine, because the interrupt
3282  * code that set the TRANSITION bit will then clear the NMI bit when it
3283  * calls trace_recursive_unlock(). If another NMI comes in, it will
3284  * set the TRANSITION bit and continue.
3285  *
3286  * Note: The TRANSITION bit only handles a single transition between contexts.
3287  */
3288
3289 static __always_inline int
3290 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3291 {
3292         unsigned int val = cpu_buffer->current_context;
3293         int bit = interrupt_context_level();
3294
3295         bit = RB_CTX_NORMAL - bit;
3296
3297         if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
3298                 /*
3299                  * It is possible that this was called by transitioning
3300                  * between interrupt context, and preempt_count() has not
3301                  * been updated yet. In this case, use the TRANSITION bit.
3302                  */
3303                 bit = RB_CTX_TRANSITION;
3304                 if (val & (1 << (bit + cpu_buffer->nest))) {
3305                         do_ring_buffer_record_recursion();
3306                         return 1;
3307                 }
3308         }
3309
3310         val |= (1 << (bit + cpu_buffer->nest));
3311         cpu_buffer->current_context = val;
3312
3313         return 0;
3314 }
3315
3316 static __always_inline void
3317 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3318 {
3319         cpu_buffer->current_context &=
3320                 cpu_buffer->current_context - (1 << cpu_buffer->nest);
3321 }
3322
3323 /* The recursive locking above uses 5 bits */
3324 #define NESTED_BITS 5
3325
3326 /**
3327  * ring_buffer_nest_start - Allow tracing while nested
3328  * @buffer: The ring buffer to modify
3329  *
3330  * The ring buffer has a safety mechanism to prevent recursion.
3331  * But there may be a case where a trace needs to be done while
3332  * tracing something else. In this case, calling this function
3333  * allows another ring_buffer_lock_reserve() to nest within a currently
3334  * active ring_buffer_lock_reserve().
3335  *
3336  * Call this function before calling another ring_buffer_lock_reserve() and
3337  * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
3338  */
3339 void ring_buffer_nest_start(struct trace_buffer *buffer)
3340 {
3341         struct ring_buffer_per_cpu *cpu_buffer;
3342         int cpu;
3343
3344         /* Enabled by ring_buffer_nest_end() */
3345         preempt_disable_notrace();
3346         cpu = raw_smp_processor_id();
3347         cpu_buffer = buffer->buffers[cpu];
3348         /* This is the shift value for the above recursive locking */
3349         cpu_buffer->nest += NESTED_BITS;
3350 }
3351
3352 /**
3353  * ring_buffer_nest_end - End the nested tracing started by ring_buffer_nest_start()
3354  * @buffer: The ring buffer to modify
3355  *
3356  * Must be called after ring_buffer_nest_start() and after the
3357  * ring_buffer_unlock_commit().
3358  */
3359 void ring_buffer_nest_end(struct trace_buffer *buffer)
3360 {
3361         struct ring_buffer_per_cpu *cpu_buffer;
3362         int cpu;
3363
3364         /* disabled by ring_buffer_nest_start() */
3365         cpu = raw_smp_processor_id();
3366         cpu_buffer = buffer->buffers[cpu];
3367         /* This is the shift value for the above recursive locking */
3368         cpu_buffer->nest -= NESTED_BITS;
3369         preempt_enable_notrace();
3370 }
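/*
 * Illustrative sketch of the nesting protocol above (not an existing kernel
 * function; the u32 payloads are hypothetical and error handling is trimmed):
 *
 *	struct ring_buffer_event *outer, *nested;
 *
 *	outer = ring_buffer_lock_reserve(buffer, sizeof(u32));
 *
 *	ring_buffer_nest_start(buffer);
 *	nested = ring_buffer_lock_reserve(buffer, sizeof(u32));
 *	if (nested) {
 *		*(u32 *)ring_buffer_event_data(nested) = 1;
 *		ring_buffer_unlock_commit(buffer);
 *	}
 *	ring_buffer_nest_end(buffer);
 *
 *	if (outer) {
 *		*(u32 *)ring_buffer_event_data(outer) = 0;
 *		ring_buffer_unlock_commit(buffer);
 *	}
 */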
3371
3372 /**
3373  * ring_buffer_unlock_commit - commit a reserved event
3374  * @buffer: The buffer to commit to
3375  * @event: The event pointer to commit.
3376  *
3377  * This commits the data to the ring buffer, and releases any locks held.
3378  *
3379  * Must be paired with ring_buffer_lock_reserve.
3380  */
3381 int ring_buffer_unlock_commit(struct trace_buffer *buffer)
3382 {
3383         struct ring_buffer_per_cpu *cpu_buffer;
3384         int cpu = raw_smp_processor_id();
3385
3386         cpu_buffer = buffer->buffers[cpu];
3387
3388         rb_commit(cpu_buffer);
3389
3390         rb_wakeups(buffer, cpu_buffer);
3391
3392         trace_recursive_unlock(cpu_buffer);
3393
3394         preempt_enable_notrace();
3395
3396         return 0;
3397 }
3398 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
3399
3400 /* Special value to validate all deltas on a page. */
3401 #define CHECK_FULL_PAGE         1L
3402
3403 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
3404 static void dump_buffer_page(struct buffer_data_page *bpage,
3405                              struct rb_event_info *info,
3406                              unsigned long tail)
3407 {
3408         struct ring_buffer_event *event;
3409         u64 ts, delta;
3410         int e;
3411
3412         ts = bpage->time_stamp;
3413         pr_warn("  [%lld] PAGE TIME STAMP\n", ts);
3414
3415         for (e = 0; e < tail; e += rb_event_length(event)) {
3416
3417                 event = (struct ring_buffer_event *)(bpage->data + e);
3418
3419                 switch (event->type_len) {
3420
3421                 case RINGBUF_TYPE_TIME_EXTEND:
3422                         delta = rb_event_time_stamp(event);
3423                         ts += delta;
3424                         pr_warn("  [%lld] delta:%lld TIME EXTEND\n", ts, delta);
3425                         break;
3426
3427                 case RINGBUF_TYPE_TIME_STAMP:
3428                         delta = rb_event_time_stamp(event);
3429                         ts = rb_fix_abs_ts(delta, ts);
3430                         pr_warn("  [%lld] absolute:%lld TIME STAMP\n", ts, delta);
3431                         break;
3432
3433                 case RINGBUF_TYPE_PADDING:
3434                         ts += event->time_delta;
3435                         pr_warn("  [%lld] delta:%d PADDING\n", ts, event->time_delta);
3436                         break;
3437
3438                 case RINGBUF_TYPE_DATA:
3439                         ts += event->time_delta;
3440                         pr_warn("  [%lld] delta:%d\n", ts, event->time_delta);
3441                         break;
3442
3443                 default:
3444                         break;
3445                 }
3446         }
3447 }
3448
3449 static DEFINE_PER_CPU(atomic_t, checking);
3450 static atomic_t ts_dump;
3451
3452 /*
3453  * Check if the current event time stamp matches the deltas on
3454  * the buffer page.
3455  */
3456 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3457                          struct rb_event_info *info,
3458                          unsigned long tail)
3459 {
3460         struct ring_buffer_event *event;
3461         struct buffer_data_page *bpage;
3462         u64 ts, delta;
3463         bool full = false;
3464         int e;
3465
3466         bpage = info->tail_page->page;
3467
3468         if (tail == CHECK_FULL_PAGE) {
3469                 full = true;
3470                 tail = local_read(&bpage->commit);
3471         } else if (info->add_timestamp &
3472                    (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
3473                 /* Ignore events with absolute time stamps */
3474                 return;
3475         }
3476
3477         /*
3478          * Do not check the first event (skip possible extends too).
3479          * Also do not check if previous events have not been committed.
3480          */
3481         if (tail <= 8 || tail > local_read(&bpage->commit))
3482                 return;
3483
3484         /*
3485          * If this interrupted another event's check on this CPU, skip this one.
3486          */
3487         if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
3488                 goto out;
3489
3490         ts = bpage->time_stamp;
3491
3492         for (e = 0; e < tail; e += rb_event_length(event)) {
3493
3494                 event = (struct ring_buffer_event *)(bpage->data + e);
3495
3496                 switch (event->type_len) {
3497
3498                 case RINGBUF_TYPE_TIME_EXTEND:
3499                         delta = rb_event_time_stamp(event);
3500                         ts += delta;
3501                         break;
3502
3503                 case RINGBUF_TYPE_TIME_STAMP:
3504                         delta = rb_event_time_stamp(event);
3505                         ts = rb_fix_abs_ts(delta, ts);
3506                         break;
3507
3508                 case RINGBUF_TYPE_PADDING:
3509                         if (event->time_delta == 1)
3510                                 break;
3511                         fallthrough;
3512                 case RINGBUF_TYPE_DATA:
3513                         ts += event->time_delta;
3514                         break;
3515
3516                 default:
3517                         RB_WARN_ON(cpu_buffer, 1);
3518                 }
3519         }
3520         if ((full && ts > info->ts) ||
3521             (!full && ts + info->delta != info->ts)) {
3522                 /* If another report is happening, ignore this one */
3523                 if (atomic_inc_return(&ts_dump) != 1) {
3524                         atomic_dec(&ts_dump);
3525                         goto out;
3526                 }
3527                 atomic_inc(&cpu_buffer->record_disabled);
3528                 /* There are some cases during boot up where this can happen */
3529                 WARN_ON_ONCE(system_state != SYSTEM_BOOTING);
3530                 pr_warn("[CPU: %d] TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n",
3531                         cpu_buffer->cpu,
3532                         ts + info->delta, info->ts, info->delta,
3533                         info->before, info->after,
3534                         full ? " (full)" : "");
3535                 dump_buffer_page(bpage, info, tail);
3536                 atomic_dec(&ts_dump);
3537                 /* Do not re-enable checking */
3538                 return;
3539         }
3540 out:
3541         atomic_dec(this_cpu_ptr(&checking));
3542 }
3543 #else
3544 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3545                          struct rb_event_info *info,
3546                          unsigned long tail)
3547 {
3548 }
3549 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
3550
3551 static struct ring_buffer_event *
3552 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
3553                   struct rb_event_info *info)
3554 {
3555         struct ring_buffer_event *event;
3556         struct buffer_page *tail_page;
3557         unsigned long tail, write, w;
3558         bool a_ok;
3559         bool b_ok;
3560
3561         /* Don't let the compiler play games with cpu_buffer->tail_page */
3562         tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
3563
3564  /*A*/  w = local_read(&tail_page->write) & RB_WRITE_MASK;
3565         barrier();
3566         b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3567         a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3568         barrier();
3569         info->ts = rb_time_stamp(cpu_buffer->buffer);
3570
3571         if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
3572                 info->delta = info->ts;
3573         } else {
3574                 /*
3575                  * If interrupting an event time update, we may need an
3576                  * absolute timestamp.
3577                  * Don't bother if this is the start of a new page (w == 0).
3578                  */
3579                 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
3580                         info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3581                         info->length += RB_LEN_TIME_EXTEND;
3582                 } else {
3583                         info->delta = info->ts - info->after;
3584                         if (unlikely(test_time_stamp(info->delta))) {
3585                                 info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3586                                 info->length += RB_LEN_TIME_EXTEND;
3587                         }
3588                 }
3589         }
3590
3591  /*B*/  rb_time_set(&cpu_buffer->before_stamp, info->ts);
3592
3593  /*C*/  write = local_add_return(info->length, &tail_page->write);
3594
3595         /* set write to only the index of the write */
3596         write &= RB_WRITE_MASK;
3597
3598         tail = write - info->length;
3599
3600         /* See if we shot past the end of this buffer page */
3601         if (unlikely(write > BUF_PAGE_SIZE)) {
3602                 /* before and after may now be different, fix it up */
3603                 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3604                 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3605                 if (a_ok && b_ok && info->before != info->after)
3606                         (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
3607                                               info->before, info->after);
3608                 if (a_ok && b_ok)
3609                         check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
3610                 return rb_move_tail(cpu_buffer, tail, info);
3611         }
3612
3613         if (likely(tail == w)) {
3614                 u64 save_before;
3615                 bool s_ok;
3616
3617                 /* Nothing interrupted us between A and C */
3618  /*D*/          rb_time_set(&cpu_buffer->write_stamp, info->ts);
3619                 barrier();
3620  /*E*/          s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
3621                 RB_WARN_ON(cpu_buffer, !s_ok);
3622                 if (likely(!(info->add_timestamp &
3623                              (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3624                         /* This did not interrupt any time update */
3625                         info->delta = info->ts - info->after;
3626                 else
3627                         /* Just use full timestamp for interrupting event */
3628                         info->delta = info->ts;
3629                 barrier();
3630                 check_buffer(cpu_buffer, info, tail);
3631                 if (unlikely(info->ts != save_before)) {
3632                         /* SLOW PATH - Interrupted between C and E */
3633
3634                         a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3635                         RB_WARN_ON(cpu_buffer, !a_ok);
3636
3637                         /* Write stamp must only go forward */
3638                         if (save_before > info->after) {
3639                                 /*
3640                                  * We do not care about the result, only that
3641                                  * it gets updated atomically.
3642                                  */
3643                                 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
3644                                                       info->after, save_before);
3645                         }
3646                 }
3647         } else {
3648                 u64 ts;
3649                 /* SLOW PATH - Interrupted between A and C */
3650                 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3651                 /* Was interrupted before here, write_stamp must be valid */
3652                 RB_WARN_ON(cpu_buffer, !a_ok);
3653                 ts = rb_time_stamp(cpu_buffer->buffer);
3654                 barrier();
3655  /*E*/          if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3656                     info->after < ts &&
3657                     rb_time_cmpxchg(&cpu_buffer->write_stamp,
3658                                     info->after, ts)) {
3659                         /* Nothing came after this event between C and E */
3660                         info->delta = ts - info->after;
3661                 } else {
3662                         /*
3663                          * Interrupted between C and E:
3664                          * Lost the previous event's time stamp. Just set the
3665                          * delta to zero, and this will be the same time as
3666                          * the event this event interrupted. And the events that
3667                          * came after this will still be correct (as they would
3668                          * have built their delta on the previous event).
3669                          */
3670                         info->delta = 0;
3671                 }
3672                 info->ts = ts;
3673                 info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
3674         }
3675
3676         /*
3677          * If this is the first commit on the page, then it has the same
3678          * timestamp as the page itself.
3679          */
3680         if (unlikely(!tail && !(info->add_timestamp &
3681                                 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3682                 info->delta = 0;
3683
3684         /* We reserved something on the buffer */
3685
3686         event = __rb_page_index(tail_page, tail);
3687         rb_update_event(cpu_buffer, event, info);
3688
3689         local_inc(&tail_page->entries);
3690
3691         /*
3692          * If this is the first commit on the page, then update
3693          * its timestamp.
3694          */
3695         if (unlikely(!tail))
3696                 tail_page->page->time_stamp = info->ts;
3697
3698         /* account for these added bytes */
3699         local_add(info->length, &cpu_buffer->entries_bytes);
3700
3701         return event;
3702 }
3703
3704 static __always_inline struct ring_buffer_event *
3705 rb_reserve_next_event(struct trace_buffer *buffer,
3706                       struct ring_buffer_per_cpu *cpu_buffer,
3707                       unsigned long length)
3708 {
3709         struct ring_buffer_event *event;
3710         struct rb_event_info info;
3711         int nr_loops = 0;
3712         int add_ts_default;
3713
3714         rb_start_commit(cpu_buffer);
3715         /* The commit page can not change after this */
3716
3717 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3718         /*
3719          * Due to the ability to swap a cpu buffer between ring buffers,
3720          * it is possible it was swapped before we committed.
3721          * (committing stops a swap). We check for it here and
3722          * if it happened, we have to fail the write.
3723          */
3724         barrier();
3725         if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
3726                 local_dec(&cpu_buffer->committing);
3727                 local_dec(&cpu_buffer->commits);
3728                 return NULL;
3729         }
3730 #endif
3731
3732         info.length = rb_calculate_event_length(length);
3733
3734         if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3735                 add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3736                 info.length += RB_LEN_TIME_EXTEND;
3737         } else {
3738                 add_ts_default = RB_ADD_STAMP_NONE;
3739         }
3740
3741  again:
3742         info.add_timestamp = add_ts_default;
3743         info.delta = 0;
3744
3745         /*
3746          * We allow for interrupts to reenter here and do a trace.
3747          * If one does, it will cause this original code to loop
3748          * back here. Even with heavy interrupts happening, this
3749          * should only happen a few times in a row. If this happens
3750          * 1000 times in a row, there must be either an interrupt
3751          * storm or we have something buggy.
3752          * Bail!
3753          */
3754         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
3755                 goto out_fail;
3756
3757         event = __rb_reserve_next(cpu_buffer, &info);
3758
3759         if (unlikely(PTR_ERR(event) == -EAGAIN)) {
3760                 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
3761                         info.length -= RB_LEN_TIME_EXTEND;
3762                 goto again;
3763         }
3764
3765         if (likely(event))
3766                 return event;
3767  out_fail:
3768         rb_end_commit(cpu_buffer);
3769         return NULL;
3770 }
3771
3772 /**
3773  * ring_buffer_lock_reserve - reserve a part of the buffer
3774  * @buffer: the ring buffer to reserve from
3775  * @length: the length of the data to reserve (excluding event header)
3776  *
3777  * Returns a reserved event on the ring buffer to copy directly to.
3778  * The user of this interface will need to get the body to write into
3779  * and can use the ring_buffer_event_data() interface.
3780  *
3781  * The length is the length of the data needed, not the event length
3782  * which also includes the event header.
3783  *
3784  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
3785  * If NULL is returned, then nothing has been allocated or locked.
3786  */
3787 struct ring_buffer_event *
3788 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
3789 {
3790         struct ring_buffer_per_cpu *cpu_buffer;
3791         struct ring_buffer_event *event;
3792         int cpu;
3793
3794         /* If we are tracing schedule, we don't want to recurse */
3795         preempt_disable_notrace();
3796
3797         if (unlikely(atomic_read(&buffer->record_disabled)))
3798                 goto out;
3799
3800         cpu = raw_smp_processor_id();
3801
3802         if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
3803                 goto out;
3804
3805         cpu_buffer = buffer->buffers[cpu];
3806
3807         if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
3808                 goto out;
3809
3810         if (unlikely(length > BUF_MAX_DATA_SIZE))
3811                 goto out;
3812
3813         if (unlikely(trace_recursive_lock(cpu_buffer)))
3814                 goto out;
3815
3816         event = rb_reserve_next_event(buffer, cpu_buffer, length);
3817         if (!event)
3818                 goto out_unlock;
3819
3820         return event;
3821
3822  out_unlock:
3823         trace_recursive_unlock(cpu_buffer);
3824  out:
3825         preempt_enable_notrace();
3826         return NULL;
3827 }
3828 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
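/*
 * A minimal sketch of the reserve/commit pairing described above
 * (illustrative only; "my_value" is a hypothetical payload and error
 * handling is reduced to checking the reservation):
 *
 *	struct ring_buffer_event *event;
 *	u64 *body;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
 *	if (!event)
 *		return -EBUSY;
 *	body = ring_buffer_event_data(event);
 *	*body = my_value;
 *	ring_buffer_unlock_commit(buffer);
 */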
3829
3830 /*
3831  * Decrement the entries to the page that an event is on.
3832  * The event does not even need to exist, only the pointer
3833  * to the page it is on. This may only be called before the commit
3834  * takes place.
3835  */
3836 static inline void
3837 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3838                    struct ring_buffer_event *event)
3839 {
3840         unsigned long addr = (unsigned long)event;
3841         struct buffer_page *bpage = cpu_buffer->commit_page;
3842         struct buffer_page *start;
3843
3844         addr &= PAGE_MASK;
3845
3846         /* Do the likely case first */
3847         if (likely(bpage->page == (void *)addr)) {
3848                 local_dec(&bpage->entries);
3849                 return;
3850         }
3851
3852         /*
3853          * Because the commit page may be on the reader page, we
3854          * start with the next page and check for the end of the loop there.
3855          */
3856         rb_inc_page(&bpage);
3857         start = bpage;
3858         do {
3859                 if (bpage->page == (void *)addr) {
3860                         local_dec(&bpage->entries);
3861                         return;
3862                 }
3863                 rb_inc_page(&bpage);
3864         } while (bpage != start);
3865
3866         /* commit not part of this buffer?? */
3867         RB_WARN_ON(cpu_buffer, 1);
3868 }
3869
3870 /**
3871  * ring_buffer_discard_commit - discard an event that has not been committed
3872  * @buffer: the ring buffer
3873  * @event: non committed event to discard
3874  *
3875  * Sometimes an event that is in the ring buffer needs to be ignored.
3876  * This function lets the user discard an event in the ring buffer
3877  * and then that event will not be read later.
3878  *
3879  * This function only works if it is called before the item has been
3880  * committed. It will try to free the event from the ring buffer
3881  * if another event has not been added behind it.
3882  *
3883  * If another event has been added behind it, it will set the event
3884  * up as discarded, and perform the commit.
3885  *
3886  * If this function is called, do not call ring_buffer_unlock_commit on
3887  * the event.
3888  */
3889 void ring_buffer_discard_commit(struct trace_buffer *buffer,
3890                                 struct ring_buffer_event *event)
3891 {
3892         struct ring_buffer_per_cpu *cpu_buffer;
3893         int cpu;
3894
3895         /* The event is discarded regardless */
3896         rb_event_discard(event);
3897
3898         cpu = smp_processor_id();
3899         cpu_buffer = buffer->buffers[cpu];
3900
3901         /*
3902          * This must only be called if the event has not been
3903          * committed yet. Thus we can assume that preemption
3904          * is still disabled.
3905          */
3906         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3907
3908         rb_decrement_entry(cpu_buffer, event);
3909         if (rb_try_to_discard(cpu_buffer, event))
3910                 goto out;
3911
3912  out:
3913         rb_end_commit(cpu_buffer);
3914
3915         trace_recursive_unlock(cpu_buffer);
3916
3917         preempt_enable_notrace();
3918
3919 }
3920 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
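/*
 * Illustrative sketch of discarding a reservation (not code that exists
 * elsewhere; "keep_event()" and the u32 payload are hypothetical):
 *
 *	struct ring_buffer_event *event;
 *	u32 *body;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
 *	if (!event)
 *		return;
 *	body = ring_buffer_event_data(event);
 *	*body = value;
 *	if (keep_event(value))
 *		ring_buffer_unlock_commit(buffer);
 *	else
 *		ring_buffer_discard_commit(buffer, event);
 *
 * Exactly one of the two calls is made: a discarded event must never also be
 * passed to ring_buffer_unlock_commit().
 */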
3921
3922 /**
3923  * ring_buffer_write - write data to the buffer without reserving
3924  * @buffer: The ring buffer to write to.
3925  * @length: The length of the data being written (excluding the event header)
3926  * @data: The data to write to the buffer.
3927  *
3928  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3929  * one function. If you already have the data to write to the buffer, it
3930  * may be easier to simply call this function.
3931  *
3932  * Note, like ring_buffer_lock_reserve, the length is the length of the data
3933  * and not the length of the event which would hold the header.
3934  */
3935 int ring_buffer_write(struct trace_buffer *buffer,
3936                       unsigned long length,
3937                       void *data)
3938 {
3939         struct ring_buffer_per_cpu *cpu_buffer;
3940         struct ring_buffer_event *event;
3941         void *body;
3942         int ret = -EBUSY;
3943         int cpu;
3944
3945         preempt_disable_notrace();
3946
3947         if (atomic_read(&buffer->record_disabled))
3948                 goto out;
3949
3950         cpu = raw_smp_processor_id();
3951
3952         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3953                 goto out;
3954
3955         cpu_buffer = buffer->buffers[cpu];
3956
3957         if (atomic_read(&cpu_buffer->record_disabled))
3958                 goto out;
3959
3960         if (length > BUF_MAX_DATA_SIZE)
3961                 goto out;
3962
3963         if (unlikely(trace_recursive_lock(cpu_buffer)))
3964                 goto out;
3965
3966         event = rb_reserve_next_event(buffer, cpu_buffer, length);
3967         if (!event)
3968                 goto out_unlock;
3969
3970         body = rb_event_data(event);
3971
3972         memcpy(body, data, length);
3973
3974         rb_commit(cpu_buffer);
3975
3976         rb_wakeups(buffer, cpu_buffer);
3977
3978         ret = 0;
3979
3980  out_unlock:
3981         trace_recursive_unlock(cpu_buffer);
3982
3983  out:
3984         preempt_enable_notrace();
3985
3986         return ret;
3987 }
3988 EXPORT_SYMBOL_GPL(ring_buffer_write);
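/*
 * The one-shot interface above combines reserve and commit. A minimal sketch
 * (illustrative; "struct my_record" is hypothetical):
 *
 *	struct my_record rec = { .cpu = raw_smp_processor_id(), .val = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(rec), &rec))
 *		pr_debug("ring buffer write failed\n");
 */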
3989
3990 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3991 {
3992         struct buffer_page *reader = cpu_buffer->reader_page;
3993         struct buffer_page *head = rb_set_head_page(cpu_buffer);
3994         struct buffer_page *commit = cpu_buffer->commit_page;
3995
3996         /* In case of error, head will be NULL */
3997         if (unlikely(!head))
3998                 return true;
3999
4000         /* Reader should exhaust content in reader page */
4001         if (reader->read != rb_page_commit(reader))
4002                 return false;
4003
4004         /*
4005          * If writers are committing on the reader page, knowing all
4006          * committed content has been read, the ring buffer is empty.
4007          */
4008         if (commit == reader)
4009                 return true;
4010
4011         /*
4012          * If writers are committing on a page other than reader page
4013          * and head page, there should always be content to read.
4014          */
4015         if (commit != head)
4016                 return false;
4017
4018         /*
4019          * Writers are committing on the head page; we just need
4020          * to check whether there is committed data, and the reader will
4021          * swap the reader page with the head page when it needs to read data.
4022          */
4023         return rb_page_commit(commit) == 0;
4024 }
4025
4026 /**
4027  * ring_buffer_record_disable - stop all writes into the buffer
4028  * @buffer: The ring buffer to stop writes to.
4029  *
4030  * This prevents all writes to the buffer. Any attempt to write
4031  * to the buffer after this will fail and return NULL.
4032  *
4033  * The caller should call synchronize_rcu() after this.
4034  */
4035 void ring_buffer_record_disable(struct trace_buffer *buffer)
4036 {
4037         atomic_inc(&buffer->record_disabled);
4038 }
4039 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
4040
4041 /**
4042  * ring_buffer_record_enable - enable writes to the buffer
4043  * @buffer: The ring buffer to enable writes
4044  *
4045  * Note, multiple disables will need the same number of enables
4046  * to truly enable the writing (much like preempt_disable).
4047  */
4048 void ring_buffer_record_enable(struct trace_buffer *buffer)
4049 {
4050         atomic_dec(&buffer->record_disabled);
4051 }
4052 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
4053
4054 /**
4055  * ring_buffer_record_off - stop all writes into the buffer
4056  * @buffer: The ring buffer to stop writes to.
4057  *
4058  * This prevents all writes to the buffer. Any attempt to write
4059  * to the buffer after this will fail and return NULL.
4060  *
4061  * This is different than ring_buffer_record_disable() as
4062  * it works like an on/off switch, whereas the disable() version
4063  * must be paired with an enable().
4064  */
4065 void ring_buffer_record_off(struct trace_buffer *buffer)
4066 {
4067         unsigned int rd;
4068         unsigned int new_rd;
4069
4070         do {
4071                 rd = atomic_read(&buffer->record_disabled);
4072                 new_rd = rd | RB_BUFFER_OFF;
4073         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
4074 }
4075 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
4076
4077 /**
4078  * ring_buffer_record_on - restart writes into the buffer
4079  * @buffer: The ring buffer to start writes to.
4080  *
4081  * This enables all writes to the buffer that was disabled by
4082  * ring_buffer_record_off().
4083  *
4084  * This is different than ring_buffer_record_enable() as
4085  * it works like an on/off switch, whereas the enable() version
4086  * must be paired with a disable().
4087  */
4088 void ring_buffer_record_on(struct trace_buffer *buffer)
4089 {
4090         unsigned int rd;
4091         unsigned int new_rd;
4092
4093         do {
4094                 rd = atomic_read(&buffer->record_disabled);
4095                 new_rd = rd & ~RB_BUFFER_OFF;
4096         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
4097 }
4098 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
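/*
 * Sketch contrasting the two recording controls (illustrative only):
 *
 *	Counting interface - every disable needs a matching enable:
 *
 *		ring_buffer_record_disable(buffer);
 *		synchronize_rcu();
 *		... safely touch the buffer ...
 *		ring_buffer_record_enable(buffer);
 *
 *	Switch interface - RB_BUFFER_OFF is a single flag, not a count:
 *
 *		ring_buffer_record_off(buffer);
 *		ring_buffer_record_off(buffer);
 *		ring_buffer_record_on(buffer);	(flag is cleared by one on)
 */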
4099
4100 /**
4101  * ring_buffer_record_is_on - return true if the ring buffer can write
4102  * @buffer: The ring buffer to see if write is enabled
4103  *
4104  * Returns true if the ring buffer is in a state that it accepts writes.
4105  */
4106 bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4107 {
4108         return !atomic_read(&buffer->record_disabled);
4109 }
4110
4111 /**
4112  * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4113  * @buffer: The ring buffer to see if write is set enabled
4114  *
4115  * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4116  * Note that this does NOT mean it is in a writable state.
4117  *
4118  * It may return true when the ring buffer has been disabled by
4119  * ring_buffer_record_disable(), as that is a temporary disabling of
4120  * the ring buffer.
4121  */
4122 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4123 {
4124         return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4125 }
4126
4127 /**
4128  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4129  * @buffer: The ring buffer to stop writes to.
4130  * @cpu: The CPU buffer to stop
4131  *
4132  * This prevents all writes to the buffer. Any attempt to write
4133  * to the buffer after this will fail and return NULL.
4134  *
4135  * The caller should call synchronize_rcu() after this.
4136  */
4137 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4138 {
4139         struct ring_buffer_per_cpu *cpu_buffer;
4140
4141         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4142                 return;
4143
4144         cpu_buffer = buffer->buffers[cpu];
4145         atomic_inc(&cpu_buffer->record_disabled);
4146 }
4147 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4148
4149 /**
4150  * ring_buffer_record_enable_cpu - enable writes to the buffer
4151  * @buffer: The ring buffer to enable writes
4152  * @cpu: The CPU to enable.
4153  *
4154  * Note, multiple disables will need the same number of enables
4155  * to truly enable the writing (much like preempt_disable).
4156  */
4157 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
4158 {
4159         struct ring_buffer_per_cpu *cpu_buffer;
4160
4161         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4162                 return;
4163
4164         cpu_buffer = buffer->buffers[cpu];
4165         atomic_dec(&cpu_buffer->record_disabled);
4166 }
4167 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
4168
4169 /*
4170  * The total entries in the ring buffer is the running counter
4171  * of entries entered into the ring buffer, minus the sum of
4172  * the entries read from the ring buffer and the number of
4173  * entries that were overwritten.
4174  */
4175 static inline unsigned long
4176 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4177 {
4178         return local_read(&cpu_buffer->entries) -
4179                 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4180 }
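/*
 * For example (illustrative numbers): with 1000 entries written, 200 of them
 * overwritten and 300 already read, 1000 - (200 + 300) = 500 entries remain
 * to be read.
 */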
4181
4182 /**
4183  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4184  * @buffer: The ring buffer
4185  * @cpu: The per CPU buffer to read from.
4186  */
4187 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4188 {
4189         unsigned long flags;
4190         struct ring_buffer_per_cpu *cpu_buffer;
4191         struct buffer_page *bpage;
4192         u64 ret = 0;
4193
4194         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4195                 return 0;
4196
4197         cpu_buffer = buffer->buffers[cpu];
4198         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4199         /*
4200          * if the tail is on reader_page, oldest time stamp is on the reader
4201          * page
4202          */
4203         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4204                 bpage = cpu_buffer->reader_page;
4205         else
4206                 bpage = rb_set_head_page(cpu_buffer);
4207         if (bpage)
4208                 ret = bpage->page->time_stamp;
4209         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4210
4211         return ret;
4212 }
4213 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4214
4215 /**
4216  * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
4217  * @buffer: The ring buffer
4218  * @cpu: The per CPU buffer to read from.
4219  */
4220 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4221 {
4222         struct ring_buffer_per_cpu *cpu_buffer;
4223         unsigned long ret;
4224
4225         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4226                 return 0;
4227
4228         cpu_buffer = buffer->buffers[cpu];
4229         ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4230
4231         return ret;
4232 }
4233 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4234
4235 /**
4236  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4237  * @buffer: The ring buffer
4238  * @cpu: The per CPU buffer to get the entries from.
4239  */
4240 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
4241 {
4242         struct ring_buffer_per_cpu *cpu_buffer;
4243
4244         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4245                 return 0;
4246
4247         cpu_buffer = buffer->buffers[cpu];
4248
4249         return rb_num_of_entries(cpu_buffer);
4250 }
4251 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
4252
4253 /**
4254  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4255  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
4256  * @buffer: The ring buffer
4257  * @cpu: The per CPU buffer to get the number of overruns from
4258  */
4259 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
4260 {
4261         struct ring_buffer_per_cpu *cpu_buffer;
4262         unsigned long ret;
4263
4264         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4265                 return 0;
4266
4267         cpu_buffer = buffer->buffers[cpu];
4268         ret = local_read(&cpu_buffer->overrun);
4269
4270         return ret;
4271 }
4272 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
4273
4274 /**
4275  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4276  * commits failing due to the buffer wrapping around while there are uncommitted
4277  * events, such as during an interrupt storm.
4278  * @buffer: The ring buffer
4279  * @cpu: The per CPU buffer to get the number of overruns from
4280  */
4281 unsigned long
4282 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
4283 {
4284         struct ring_buffer_per_cpu *cpu_buffer;
4285         unsigned long ret;
4286
4287         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4288                 return 0;
4289
4290         cpu_buffer = buffer->buffers[cpu];
4291         ret = local_read(&cpu_buffer->commit_overrun);
4292
4293         return ret;
4294 }
4295 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
4296
4297 /**
4298  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4299  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4300  * @buffer: The ring buffer
4301  * @cpu: The per CPU buffer to get the number of overruns from
4302  */
4303 unsigned long
4304 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
4305 {
4306         struct ring_buffer_per_cpu *cpu_buffer;
4307         unsigned long ret;
4308
4309         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4310                 return 0;
4311
4312         cpu_buffer = buffer->buffers[cpu];
4313         ret = local_read(&cpu_buffer->dropped_events);
4314
4315         return ret;
4316 }
4317 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
4318
4319 /**
4320  * ring_buffer_read_events_cpu - get the number of events successfully read
4321  * @buffer: The ring buffer
4322  * @cpu: The per CPU buffer to get the number of events read
4323  */
4324 unsigned long
4325 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
4326 {
4327         struct ring_buffer_per_cpu *cpu_buffer;
4328
4329         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4330                 return 0;
4331
4332         cpu_buffer = buffer->buffers[cpu];
4333         return cpu_buffer->read;
4334 }
4335 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
4336
4337 /**
4338  * ring_buffer_entries - get the number of entries in a buffer
4339  * @buffer: The ring buffer
4340  *
4341  * Returns the total number of entries in the ring buffer
4342  * (all CPU entries)
4343  */
4344 unsigned long ring_buffer_entries(struct trace_buffer *buffer)
4345 {
4346         struct ring_buffer_per_cpu *cpu_buffer;
4347         unsigned long entries = 0;
4348         int cpu;
4349
4350         /* if you care about this being correct, lock the buffer */
4351         for_each_buffer_cpu(buffer, cpu) {
4352                 cpu_buffer = buffer->buffers[cpu];
4353                 entries += rb_num_of_entries(cpu_buffer);
4354         }
4355
4356         return entries;
4357 }
4358 EXPORT_SYMBOL_GPL(ring_buffer_entries);
4359
4360 /**
4361  * ring_buffer_overruns - get the number of overruns in buffer
4362  * @buffer: The ring buffer
4363  *
4364  * Returns the total number of overruns in the ring buffer
4365  * (all CPU entries)
4366  */
4367 unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
4368 {
4369         struct ring_buffer_per_cpu *cpu_buffer;
4370         unsigned long overruns = 0;
4371         int cpu;
4372
4373         /* if you care about this being correct, lock the buffer */
4374         for_each_buffer_cpu(buffer, cpu) {
4375                 cpu_buffer = buffer->buffers[cpu];
4376                 overruns += local_read(&cpu_buffer->overrun);
4377         }
4378
4379         return overruns;
4380 }
4381 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
4382
4383 static void rb_iter_reset(struct ring_buffer_iter *iter)
4384 {
4385         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4386
4387         /* Iterator usage is expected to have record disabled */
4388         iter->head_page = cpu_buffer->reader_page;
4389         iter->head = cpu_buffer->reader_page->read;
4390         iter->next_event = iter->head;
4391
4392         iter->cache_reader_page = iter->head_page;
4393         iter->cache_read = cpu_buffer->read;
4394
4395         if (iter->head) {
4396                 iter->read_stamp = cpu_buffer->read_stamp;
4397                 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
4398         } else {
4399                 iter->read_stamp = iter->head_page->page->time_stamp;
4400                 iter->page_stamp = iter->read_stamp;
4401         }
4402 }
4403
4404 /**
4405  * ring_buffer_iter_reset - reset an iterator
4406  * @iter: The iterator to reset
4407  *
4408  * Resets the iterator, so that it will start from the beginning
4409  * again.
4410  */
4411 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
4412 {
4413         struct ring_buffer_per_cpu *cpu_buffer;
4414         unsigned long flags;
4415
4416         if (!iter)
4417                 return;
4418
4419         cpu_buffer = iter->cpu_buffer;
4420
4421         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4422         rb_iter_reset(iter);
4423         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4424 }
4425 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
4426
4427 /**
4428  * ring_buffer_iter_empty - check if an iterator has no more to read
4429  * @iter: The iterator to check
4430  */
4431 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4432 {
4433         struct ring_buffer_per_cpu *cpu_buffer;
4434         struct buffer_page *reader;
4435         struct buffer_page *head_page;
4436         struct buffer_page *commit_page;
4437         struct buffer_page *curr_commit_page;
4438         unsigned commit;
4439         u64 curr_commit_ts;
4440         u64 commit_ts;
4441
4442         cpu_buffer = iter->cpu_buffer;
4443         reader = cpu_buffer->reader_page;
4444         head_page = cpu_buffer->head_page;
4445         commit_page = cpu_buffer->commit_page;
4446         commit_ts = commit_page->page->time_stamp;
4447
4448         /*
4449          * When the writer goes across pages, it issues a cmpxchg which
4450          * is a mb(), which will synchronize with the rmb here.
4451          * (see rb_tail_page_update())
4452          */
4453         smp_rmb();
4454         commit = rb_page_commit(commit_page);
4455         /* We want to make sure that the commit page doesn't change */
4456         smp_rmb();
4457
4458         /* Make sure commit page didn't change */
4459         curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4460         curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4461
4462         /* If the commit page changed, then there's more data */
4463         if (curr_commit_page != commit_page ||
4464             curr_commit_ts != commit_ts)
4465                 return 0;
4466
4467         /* Still racy, as it may return a false positive, but that's OK */
4468         return ((iter->head_page == commit_page && iter->head >= commit) ||
4469                 (iter->head_page == reader && commit_page == head_page &&
4470                  head_page->read == commit &&
4471                  iter->head == rb_page_commit(cpu_buffer->reader_page)));
4472 }
4473 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
4474
4475 static void
4476 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4477                      struct ring_buffer_event *event)
4478 {
4479         u64 delta;
4480
4481         switch (event->type_len) {
4482         case RINGBUF_TYPE_PADDING:
4483                 return;
4484
4485         case RINGBUF_TYPE_TIME_EXTEND:
4486                 delta = rb_event_time_stamp(event);
4487                 cpu_buffer->read_stamp += delta;
4488                 return;
4489
4490         case RINGBUF_TYPE_TIME_STAMP:
4491                 delta = rb_event_time_stamp(event);
4492                 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
4493                 cpu_buffer->read_stamp = delta;
4494                 return;
4495
4496         case RINGBUF_TYPE_DATA:
4497                 cpu_buffer->read_stamp += event->time_delta;
4498                 return;
4499
4500         default:
4501                 RB_WARN_ON(cpu_buffer, 1);
4502         }
4503         return;
4504 }
4505
4506 static void
4507 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4508                           struct ring_buffer_event *event)
4509 {
4510         u64 delta;
4511
4512         switch (event->type_len) {
4513         case RINGBUF_TYPE_PADDING:
4514                 return;
4515
4516         case RINGBUF_TYPE_TIME_EXTEND:
4517                 delta = rb_event_time_stamp(event);
4518                 iter->read_stamp += delta;
4519                 return;
4520
4521         case RINGBUF_TYPE_TIME_STAMP:
4522                 delta = rb_event_time_stamp(event);
4523                 delta = rb_fix_abs_ts(delta, iter->read_stamp);
4524                 iter->read_stamp = delta;
4525                 return;
4526
4527         case RINGBUF_TYPE_DATA:
4528                 iter->read_stamp += event->time_delta;
4529                 return;
4530
4531         default:
4532                 RB_WARN_ON(iter->cpu_buffer, 1);
4533         }
4534         return;
4535 }
4536
4537 static struct buffer_page *
4538 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
4539 {
4540         struct buffer_page *reader = NULL;
4541         unsigned long overwrite;
4542         unsigned long flags;
4543         int nr_loops = 0;
4544         int ret;
4545
4546         local_irq_save(flags);
4547         arch_spin_lock(&cpu_buffer->lock);
4548
4549  again:
4550         /*
4551          * This should normally only loop twice. But because the
4552          * start of the reader inserts an empty page, it causes
4553          * a case where we will loop three times. There should be no
4554          * reason to loop four times (that I know of).
4555          */
4556         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4557                 reader = NULL;
4558                 goto out;
4559         }
4560
4561         reader = cpu_buffer->reader_page;
4562
4563         /* If there's more to read, return this page */
4564         if (cpu_buffer->reader_page->read < rb_page_size(reader))
4565                 goto out;
4566
4567         /* Never should we have an index greater than the size */
4568         if (RB_WARN_ON(cpu_buffer,
4569                        cpu_buffer->reader_page->read > rb_page_size(reader)))
4570                 goto out;
4571
4572         /* check if we caught up to the tail */
4573         reader = NULL;
4574         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4575                 goto out;
4576
4577         /* Don't bother swapping if the ring buffer is empty */
4578         if (rb_num_of_entries(cpu_buffer) == 0)
4579                 goto out;
4580
4581         /*
4582          * Reset the reader page to size zero.
4583          */
4584         local_set(&cpu_buffer->reader_page->write, 0);
4585         local_set(&cpu_buffer->reader_page->entries, 0);
4586         local_set(&cpu_buffer->reader_page->page->commit, 0);
4587         cpu_buffer->reader_page->real_end = 0;
4588
4589  spin:
4590         /*
4591          * Splice the empty reader page into the list around the head.
4592          */
4593         reader = rb_set_head_page(cpu_buffer);
4594         if (!reader)
4595                 goto out;
4596         cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4597         cpu_buffer->reader_page->list.prev = reader->list.prev;
4598
4599         /*
4600          * cpu_buffer->pages just needs to point to the buffer, it
4601          *  has no specific buffer page to point to. Let's move it out
4602          *  of our way so we don't accidentally swap it.
4603          */
4604         cpu_buffer->pages = reader->list.prev;
4605
4606         /* The reader page will be pointing to the new head */
4607         rb_set_list_to_head(&cpu_buffer->reader_page->list);
4608
4609         /*
4610          * We want to make sure we read the overruns after we set up our
4611          * pointers to the next object. The writer side does a
4612          * cmpxchg to cross pages which acts as the mb on the writer
4613          * side. Note, the reader will constantly fail the swap
4614          * while the writer is updating the pointers, so this
4615          * guarantees that the overwrite recorded here is the one we
4616          * want to compare with the last_overrun.
4617          */
4618         smp_mb();
4619         overwrite = local_read(&(cpu_buffer->overrun));
4620
4621         /*
4622          * Here's the tricky part.
4623          *
4624          * We need to move the pointer past the header page.
4625          * But we can only do that if a writer is not currently
4626          * moving it. The page before the header page has the
4627          * flag bit '1' set if it is pointing to the page we want.
4628          * But if the writer is in the process of moving it,
4629          * then it will be '2', or '0' if it has already been moved.
4630          */
4631
4632         ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4633
4634         /*
4635          * If we did not convert it, then we must try again.
4636          */
4637         if (!ret)
4638                 goto spin;
4639
4640         /*
4641          * Yay! We succeeded in replacing the page.
4642          *
4643          * Now make the new head point back to the reader page.
4644          */
4645         rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
4646         rb_inc_page(&cpu_buffer->head_page);
4647
4648         local_inc(&cpu_buffer->pages_read);
4649
4650         /* Finally update the reader page to the new head */
4651         cpu_buffer->reader_page = reader;
4652         cpu_buffer->reader_page->read = 0;
4653
4654         if (overwrite != cpu_buffer->last_overrun) {
4655                 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4656                 cpu_buffer->last_overrun = overwrite;
4657         }
4658
4659         goto again;
4660
4661  out:
4662         /* Update the read_stamp on the first event */
4663         if (reader && reader->read == 0)
4664                 cpu_buffer->read_stamp = reader->page->time_stamp;
4665
4666         arch_spin_unlock(&cpu_buffer->lock);
4667         local_irq_restore(flags);
4668
4669         /*
4670          * The writer has preemption disabled, so wait for it. But not forever;
4671          * although, 1 second is pretty much "forever".
4672          */
4673 #define USECS_WAIT      1000000
4674         for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
4675                 /* If the write is past the end of page, a writer is still updating it */
4676                 if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
4677                         break;
4678
4679                 udelay(1);
4680
4681                 /* Get the latest version of the reader write value */
4682                 smp_rmb();
4683         }
4684
4685         /* The writer is not moving forward? Something is wrong */
4686         if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
4687                 reader = NULL;
4688
4689         /*
4690          * Make sure we see any padding after the write update
4691          * (see rb_reset_tail())
4692          */
4693         smp_rmb();
4694
4695
4696         return reader;
4697 }
4698
4699 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4700 {
4701         struct ring_buffer_event *event;
4702         struct buffer_page *reader;
4703         unsigned length;
4704
4705         reader = rb_get_reader_page(cpu_buffer);
4706
4707         /* This function should not be called when buffer is empty */
4708         if (RB_WARN_ON(cpu_buffer, !reader))
4709                 return;
4710
4711         event = rb_reader_event(cpu_buffer);
4712
4713         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
4714                 cpu_buffer->read++;
4715
4716         rb_update_read_stamp(cpu_buffer, event);
4717
4718         length = rb_event_length(event);
4719         cpu_buffer->reader_page->read += length;
4720 }
4721
4722 static void rb_advance_iter(struct ring_buffer_iter *iter)
4723 {
4724         struct ring_buffer_per_cpu *cpu_buffer;
4725
4726         cpu_buffer = iter->cpu_buffer;
4727
4728         /* If head == next_event then we need to jump to the next event */
4729         if (iter->head == iter->next_event) {
4730                 /* If the event gets overwritten again, there's nothing to do */
4731                 if (rb_iter_head_event(iter) == NULL)
4732                         return;
4733         }
4734
4735         iter->head = iter->next_event;
4736
4737         /*
4738          * Check if we are at the end of the buffer.
4739          */
4740         if (iter->next_event >= rb_page_size(iter->head_page)) {
4741                 /* discarded commits can make the page empty */
4742                 if (iter->head_page == cpu_buffer->commit_page)
4743                         return;
4744                 rb_inc_iter(iter);
4745                 return;
4746         }
4747
4748         rb_update_iter_read_stamp(iter, iter->event);
4749 }
4750
4751 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4752 {
4753         return cpu_buffer->lost_events;
4754 }
4755
4756 static struct ring_buffer_event *
4757 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4758                unsigned long *lost_events)
4759 {
4760         struct ring_buffer_event *event;
4761         struct buffer_page *reader;
4762         int nr_loops = 0;
4763
4764         if (ts)
4765                 *ts = 0;
4766  again:
4767         /*
4768          * We repeat when a time extend is encountered.
4769          * Since the time extend is always attached to a data event,
4770          * we should never loop more than once.
4771          * (We never hit the following condition more than twice).
4772          */
4773         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4774                 return NULL;
4775
4776         reader = rb_get_reader_page(cpu_buffer);
4777         if (!reader)
4778                 return NULL;
4779
4780         event = rb_reader_event(cpu_buffer);
4781
4782         switch (event->type_len) {
4783         case RINGBUF_TYPE_PADDING:
4784                 if (rb_null_event(event))
4785                         RB_WARN_ON(cpu_buffer, 1);
4786                 /*
4787                  * Because the writer could be discarding every
4788                  * event it creates (which would probably be bad)
4789                  * if we were to go back to "again" then we may never
4790                  * catch up, and will trigger the warn on, or lock
4791                  * the box. Return the padding, and we will release
4792                  * the current locks, and try again.
4793                  */
4794                 return event;
4795
4796         case RINGBUF_TYPE_TIME_EXTEND:
4797                 /* Internal data, OK to advance */
4798                 rb_advance_reader(cpu_buffer);
4799                 goto again;
4800
4801         case RINGBUF_TYPE_TIME_STAMP:
4802                 if (ts) {
4803                         *ts = rb_event_time_stamp(event);
4804                         *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
4805                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4806                                                          cpu_buffer->cpu, ts);
4807                 }
4808                 /* Internal data, OK to advance */
4809                 rb_advance_reader(cpu_buffer);
4810                 goto again;
4811
4812         case RINGBUF_TYPE_DATA:
4813                 if (ts && !(*ts)) {
4814                         *ts = cpu_buffer->read_stamp + event->time_delta;
4815                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4816                                                          cpu_buffer->cpu, ts);
4817                 }
4818                 if (lost_events)
4819                         *lost_events = rb_lost_events(cpu_buffer);
4820                 return event;
4821
4822         default:
4823                 RB_WARN_ON(cpu_buffer, 1);
4824         }
4825
4826         return NULL;
4827 }
4828 EXPORT_SYMBOL_GPL(ring_buffer_peek);
4829
4830 static struct ring_buffer_event *
4831 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4832 {
4833         struct trace_buffer *buffer;
4834         struct ring_buffer_per_cpu *cpu_buffer;
4835         struct ring_buffer_event *event;
4836         int nr_loops = 0;
4837
4838         if (ts)
4839                 *ts = 0;
4840
4841         cpu_buffer = iter->cpu_buffer;
4842         buffer = cpu_buffer->buffer;
4843
4844         /*
4845          * Check if someone performed a consuming read to
4846          * the buffer. A consuming read invalidates the iterator
4847          * and we need to reset the iterator in this case.
4848          */
4849         if (unlikely(iter->cache_read != cpu_buffer->read ||
4850                      iter->cache_reader_page != cpu_buffer->reader_page))
4851                 rb_iter_reset(iter);
4852
4853  again:
4854         if (ring_buffer_iter_empty(iter))
4855                 return NULL;
4856
4857         /*
4858          * As the writer can mess with what the iterator is trying
4859          * to read, just give up if we fail to get an event after
4860          * three tries. The iterator is not as reliable when reading
4861          * the ring buffer with an active write as the consumer is.
4862          * Do not warn if three failures are reached.
4863          */
4864         if (++nr_loops > 3)
4865                 return NULL;
4866
4867         if (rb_per_cpu_empty(cpu_buffer))
4868                 return NULL;
4869
4870         if (iter->head >= rb_page_size(iter->head_page)) {
4871                 rb_inc_iter(iter);
4872                 goto again;
4873         }
4874
4875         event = rb_iter_head_event(iter);
4876         if (!event)
4877                 goto again;
4878
4879         switch (event->type_len) {
4880         case RINGBUF_TYPE_PADDING:
4881                 if (rb_null_event(event)) {
4882                         rb_inc_iter(iter);
4883                         goto again;
4884                 }
4885                 rb_advance_iter(iter);
4886                 return event;
4887
4888         case RINGBUF_TYPE_TIME_EXTEND:
4889                 /* Internal data, OK to advance */
4890                 rb_advance_iter(iter);
4891                 goto again;
4892
4893         case RINGBUF_TYPE_TIME_STAMP:
4894                 if (ts) {
4895                         *ts = rb_event_time_stamp(event);
4896                         *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
4897                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4898                                                          cpu_buffer->cpu, ts);
4899                 }
4900                 /* Internal data, OK to advance */
4901                 rb_advance_iter(iter);
4902                 goto again;
4903
4904         case RINGBUF_TYPE_DATA:
4905                 if (ts && !(*ts)) {
4906                         *ts = iter->read_stamp + event->time_delta;
4907                         ring_buffer_normalize_time_stamp(buffer,
4908                                                          cpu_buffer->cpu, ts);
4909                 }
4910                 return event;
4911
4912         default:
4913                 RB_WARN_ON(cpu_buffer, 1);
4914         }
4915
4916         return NULL;
4917 }
4918 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4919
4920 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4921 {
4922         if (likely(!in_nmi())) {
4923                 raw_spin_lock(&cpu_buffer->reader_lock);
4924                 return true;
4925         }
4926
4927         /*
4928          * If an NMI die dumps out the content of the ring buffer
4929          * trylock must be used to prevent a deadlock if the NMI
4930          * preempted a task that holds the ring buffer locks. If
4931          * we get the lock then all is fine, if not, then continue
4932          * to do the read, but this can corrupt the ring buffer,
4933          * so it must be permanently disabled from future writes.
4934          * Reading from NMI is a one-shot deal.
4935          */
4936         if (raw_spin_trylock(&cpu_buffer->reader_lock))
4937                 return true;
4938
4939         /* Continue without locking, but disable the ring buffer */
4940         atomic_inc(&cpu_buffer->record_disabled);
4941         return false;
4942 }
4943
4944 static inline void
4945 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4946 {
4947         if (likely(locked))
4948                 raw_spin_unlock(&cpu_buffer->reader_lock);
4949         return;
4950 }
4951
4952 /**
4953  * ring_buffer_peek - peek at the next event to be read
4954  * @buffer: The ring buffer to read
4955  * @cpu: The cpu to peek at
4956  * @ts: The timestamp counter of this event.
4957  * @lost_events: a variable to store if events were lost (may be NULL)
4958  *
4959  * This will return the event that will be read next, but does
4960  * not consume the data.
4961  */
4962 struct ring_buffer_event *
4963 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4964                  unsigned long *lost_events)
4965 {
4966         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4967         struct ring_buffer_event *event;
4968         unsigned long flags;
4969         bool dolock;
4970
4971         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4972                 return NULL;
4973
4974  again:
4975         local_irq_save(flags);
4976         dolock = rb_reader_lock(cpu_buffer);
4977         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4978         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4979                 rb_advance_reader(cpu_buffer);
4980         rb_reader_unlock(cpu_buffer, dolock);
4981         local_irq_restore(flags);
4982
4983         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4984                 goto again;
4985
4986         return event;
4987 }
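
/*
 * A minimal sketch of a non-consuming peek followed by an optional consume,
 * assuming a hypothetical buffer pointer "buf" and filter want_event():
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	event = ring_buffer_peek(buf, cpu, &ts, &lost);
 *	if (event && want_event(ring_buffer_event_data(event)))
 *		event = ring_buffer_consume(buf, cpu, &ts, &lost);
 */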
4988
4989 /**
 * ring_buffer_iter_dropped - report if there are dropped events
4990  * @iter: The ring buffer iterator
4991  *
4992  * Returns true if there were dropped events since the last peek.
4993  */
4994 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
4995 {
4996         bool ret = iter->missed_events != 0;
4997
4998         iter->missed_events = 0;
4999         return ret;
5000 }
5001 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
5002
5003 /**
5004  * ring_buffer_iter_peek - peek at the next event to be read
5005  * @iter: The ring buffer iterator
5006  * @ts: The timestamp counter of this event.
5007  *
5008  * This will return the event that will be read next, but does
5009  * not increment the iterator.
5010  */
5011 struct ring_buffer_event *
5012 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
5013 {
5014         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5015         struct ring_buffer_event *event;
5016         unsigned long flags;
5017
5018  again:
5019         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5020         event = rb_iter_peek(iter, ts);
5021         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5022
5023         if (event && event->type_len == RINGBUF_TYPE_PADDING)
5024                 goto again;
5025
5026         return event;
5027 }
5028
5029 /**
5030  * ring_buffer_consume - return an event and consume it
5031  * @buffer: The ring buffer to get the next event from
5032  * @cpu: the cpu to read the buffer from
5033  * @ts: a variable to store the timestamp (may be NULL)
5034  * @lost_events: a variable to store if events were lost (may be NULL)
5035  *
5036  * Returns the next event in the ring buffer, and that event is consumed.
5037  * Meaning that sequential reads will keep returning a different event,
5038  * and eventually empty the ring buffer if the producer is slower.
5039  */
5040 struct ring_buffer_event *
5041 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
5042                     unsigned long *lost_events)
5043 {
5044         struct ring_buffer_per_cpu *cpu_buffer;
5045         struct ring_buffer_event *event = NULL;
5046         unsigned long flags;
5047         bool dolock;
5048
5049  again:
5050         /* might be called in atomic */
5051         preempt_disable();
5052
5053         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5054                 goto out;
5055
5056         cpu_buffer = buffer->buffers[cpu];
5057         local_irq_save(flags);
5058         dolock = rb_reader_lock(cpu_buffer);
5059
5060         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5061         if (event) {
5062                 cpu_buffer->lost_events = 0;
5063                 rb_advance_reader(cpu_buffer);
5064         }
5065
5066         rb_reader_unlock(cpu_buffer, dolock);
5067         local_irq_restore(flags);
5068
5069  out:
5070         preempt_enable();
5071
5072         if (event && event->type_len == RINGBUF_TYPE_PADDING)
5073                 goto again;
5074
5075         return event;
5076 }
5077 EXPORT_SYMBOL_GPL(ring_buffer_consume);
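
/*
 * A minimal sketch of a consuming-read loop that drains one CPU buffer,
 * assuming a hypothetical buffer pointer "buf" and callback process_item():
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buf, cpu, &ts, &lost))) {
 *		if (lost)
 *			pr_warn("lost %lu events\n", lost);
 *		process_item(ring_buffer_event_data(event),
 *			     ring_buffer_event_length(event), ts);
 *	}
 */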
5078
5079 /**
5080  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5081  * @buffer: The ring buffer to read from
5082  * @cpu: The cpu buffer to iterate over
5083  * @flags: gfp flags to use for memory allocation
5084  *
5085  * This performs the initial preparations necessary to iterate
5086  * through the buffer.  Memory is allocated, buffer recording
5087  * is disabled, and the iterator pointer is returned to the caller.
5088  *
5089  * Disabling buffer recording prevents the reading from being
5090  * corrupted. This is not a consuming read, so a producer is not
5091  * expected.
5092  *
5093  * After a sequence of ring_buffer_read_prepare calls, the user is
5094  * expected to make at least one call to ring_buffer_read_prepare_sync.
5095  * Afterwards, ring_buffer_read_start is invoked to get things going
5096  * for real.
5097  *
5098  * This overall must be paired with ring_buffer_read_finish.
5099  */
5100 struct ring_buffer_iter *
5101 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
5102 {
5103         struct ring_buffer_per_cpu *cpu_buffer;
5104         struct ring_buffer_iter *iter;
5105
5106         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5107                 return NULL;
5108
5109         iter = kzalloc(sizeof(*iter), flags);
5110         if (!iter)
5111                 return NULL;
5112
5113         iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
5114         if (!iter->event) {
5115                 kfree(iter);
5116                 return NULL;
5117         }
5118
5119         cpu_buffer = buffer->buffers[cpu];
5120
5121         iter->cpu_buffer = cpu_buffer;
5122
5123         atomic_inc(&cpu_buffer->resize_disabled);
5124
5125         return iter;
5126 }
5127 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
5128
5129 /**
5130  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5131  *
5132  * All previously invoked ring_buffer_read_prepare calls to prepare
5133  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
5134  * calls on those iterators are allowed.
5135  */
5136 void
5137 ring_buffer_read_prepare_sync(void)
5138 {
5139         synchronize_rcu();
5140 }
5141 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5142
5143 /**
5144  * ring_buffer_read_start - start a non consuming read of the buffer
5145  * @iter: The iterator returned by ring_buffer_read_prepare
5146  *
5147  * This finalizes the startup of an iteration through the buffer.
5148  * The iterator comes from a call to ring_buffer_read_prepare and
5149  * an intervening ring_buffer_read_prepare_sync must have been
5150  * performed.
5151  *
5152  * Must be paired with ring_buffer_read_finish.
5153  */
5154 void
5155 ring_buffer_read_start(struct ring_buffer_iter *iter)
5156 {
5157         struct ring_buffer_per_cpu *cpu_buffer;
5158         unsigned long flags;
5159
5160         if (!iter)
5161                 return;
5162
5163         cpu_buffer = iter->cpu_buffer;
5164
5165         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5166         arch_spin_lock(&cpu_buffer->lock);
5167         rb_iter_reset(iter);
5168         arch_spin_unlock(&cpu_buffer->lock);
5169         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5170 }
5171 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5172
5173 /**
5174  * ring_buffer_read_finish - finish reading the iterator of the buffer
5175  * @iter: The iterator retrieved by ring_buffer_read_prepare
5176  *
5177  * This re-enables the recording to the buffer, and frees the
5178  * iterator.
5179  */
5180 void
5181 ring_buffer_read_finish(struct ring_buffer_iter *iter)
5182 {
5183         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5184         unsigned long flags;
5185
5186         /*
5187          * Ring buffer is disabled from recording, here's a good place
5188          * to check the integrity of the ring buffer.
5189          * Must prevent readers from trying to read, as the check
5190          * clears the HEAD page and readers require it.
5191          */
5192         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5193         rb_check_pages(cpu_buffer);
5194         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5195
5196         atomic_dec(&cpu_buffer->resize_disabled);
5197         kfree(iter->event);
5198         kfree(iter);
5199 }
5200 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
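
/*
 * A minimal sketch of the full non-consuming read protocol described above,
 * for a single CPU, assuming a hypothetical buffer pointer "buf" and
 * callback show_event():
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buf, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while (!ring_buffer_iter_empty(iter)) {
 *		event = ring_buffer_iter_peek(iter, &ts);
 *		if (!event)
 *			break;
 *		show_event(ring_buffer_event_data(event), ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *
 *	ring_buffer_read_finish(iter);
 */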
5201
5202 /**
5203  * ring_buffer_iter_advance - advance the iterator to the next location
5204  * @iter: The ring buffer iterator
5205  *
5206  * Move the location of the iterator such that the next read will
5207  * be the next location of the iterator.
5208  */
5209 void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5210 {
5211         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5212         unsigned long flags;
5213
5214         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5215
5216         rb_advance_iter(iter);
5217
5218         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5219 }
5220 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
5221
5222 /**
5223  * ring_buffer_size - return the size of the ring buffer (in bytes)
5224  * @buffer: The ring buffer.
5225  * @cpu: The CPU to get ring buffer size from.
5226  */
5227 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5228 {
5229         /*
5230          * Earlier, this method returned
5231          *      BUF_PAGE_SIZE * buffer->nr_pages
5232          * Since the nr_pages field is now removed, we have converted this to
5233          * return the per cpu buffer value.
5234          */
5235         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5236                 return 0;
5237
5238         return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
5239 }
5240 EXPORT_SYMBOL_GPL(ring_buffer_size);
5241
5242 static void
5243 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
5244 {
5245         rb_head_page_deactivate(cpu_buffer);
5246
5247         cpu_buffer->head_page
5248                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
5249         local_set(&cpu_buffer->head_page->write, 0);
5250         local_set(&cpu_buffer->head_page->entries, 0);
5251         local_set(&cpu_buffer->head_page->page->commit, 0);
5252
5253         cpu_buffer->head_page->read = 0;
5254
5255         cpu_buffer->tail_page = cpu_buffer->head_page;
5256         cpu_buffer->commit_page = cpu_buffer->head_page;
5257
5258         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
5259         INIT_LIST_HEAD(&cpu_buffer->new_pages);
5260         local_set(&cpu_buffer->reader_page->write, 0);
5261         local_set(&cpu_buffer->reader_page->entries, 0);
5262         local_set(&cpu_buffer->reader_page->page->commit, 0);
5263         cpu_buffer->reader_page->read = 0;
5264
5265         local_set(&cpu_buffer->entries_bytes, 0);
5266         local_set(&cpu_buffer->overrun, 0);
5267         local_set(&cpu_buffer->commit_overrun, 0);
5268         local_set(&cpu_buffer->dropped_events, 0);
5269         local_set(&cpu_buffer->entries, 0);
5270         local_set(&cpu_buffer->committing, 0);
5271         local_set(&cpu_buffer->commits, 0);
5272         local_set(&cpu_buffer->pages_touched, 0);
5273         local_set(&cpu_buffer->pages_lost, 0);
5274         local_set(&cpu_buffer->pages_read, 0);
5275         cpu_buffer->last_pages_touch = 0;
5276         cpu_buffer->shortest_full = 0;
5277         cpu_buffer->read = 0;
5278         cpu_buffer->read_bytes = 0;
5279
5280         rb_time_set(&cpu_buffer->write_stamp, 0);
5281         rb_time_set(&cpu_buffer->before_stamp, 0);
5282
5283         memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
5284
5285         cpu_buffer->lost_events = 0;
5286         cpu_buffer->last_overrun = 0;
5287
5288         rb_head_page_activate(cpu_buffer);
5289 }
5290
5291 /* Must have disabled the cpu buffer then done a synchronize_rcu */
5292 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
5293 {
5294         unsigned long flags;
5295
5296         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5297
5298         if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
5299                 goto out;
5300
5301         arch_spin_lock(&cpu_buffer->lock);
5302
5303         rb_reset_cpu(cpu_buffer);
5304
5305         arch_spin_unlock(&cpu_buffer->lock);
5306
5307  out:
5308         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5309 }
5310
5311 /**
5312  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5313  * @buffer: The ring buffer to reset a per cpu buffer of
5314  * @cpu: The CPU buffer to be reset
5315  */
5316 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
5317 {
5318         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5319
5320         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5321                 return;
5322
5323         /* prevent another thread from changing buffer sizes */
5324         mutex_lock(&buffer->mutex);
5325
5326         atomic_inc(&cpu_buffer->resize_disabled);
5327         atomic_inc(&cpu_buffer->record_disabled);
5328
5329         /* Make sure all commits have finished */
5330         synchronize_rcu();
5331
5332         reset_disabled_cpu_buffer(cpu_buffer);
5333
5334         atomic_dec(&cpu_buffer->record_disabled);
5335         atomic_dec(&cpu_buffer->resize_disabled);
5336
5337         mutex_unlock(&buffer->mutex);
5338 }
5339 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
5340
5341 /**
5342  * ring_buffer_reset_online_cpus - reset all online per CPU ring buffers
5343  * @buffer: The ring buffer to reset a per cpu buffer of
5345  */
5346 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5347 {
5348         struct ring_buffer_per_cpu *cpu_buffer;
5349         int cpu;
5350
5351         /* prevent another thread from changing buffer sizes */
5352         mutex_lock(&buffer->mutex);
5353
5354         for_each_online_buffer_cpu(buffer, cpu) {
5355                 cpu_buffer = buffer->buffers[cpu];
5356
5357                 atomic_inc(&cpu_buffer->resize_disabled);
5358                 atomic_inc(&cpu_buffer->record_disabled);
5359         }
5360
5361         /* Make sure all commits have finished */
5362         synchronize_rcu();
5363
5364         for_each_online_buffer_cpu(buffer, cpu) {
5365                 cpu_buffer = buffer->buffers[cpu];
5366
5367                 reset_disabled_cpu_buffer(cpu_buffer);
5368
5369                 atomic_dec(&cpu_buffer->record_disabled);
5370                 atomic_dec(&cpu_buffer->resize_disabled);
5371         }
5372
5373         mutex_unlock(&buffer->mutex);
5374 }
5375
5376 /**
5377  * ring_buffer_reset - reset a ring buffer
5378  * @buffer: The ring buffer to reset all cpu buffers
5379  */
5380 void ring_buffer_reset(struct trace_buffer *buffer)
5381 {
5382         struct ring_buffer_per_cpu *cpu_buffer;
5383         int cpu;
5384
5385         /* prevent another thread from changing buffer sizes */
5386         mutex_lock(&buffer->mutex);
5387
5388         for_each_buffer_cpu(buffer, cpu) {
5389                 cpu_buffer = buffer->buffers[cpu];
5390
5391                 atomic_inc(&cpu_buffer->resize_disabled);
5392                 atomic_inc(&cpu_buffer->record_disabled);
5393         }
5394
5395         /* Make sure all commits have finished */
5396         synchronize_rcu();
5397
5398         for_each_buffer_cpu(buffer, cpu) {
5399                 cpu_buffer = buffer->buffers[cpu];
5400
5401                 reset_disabled_cpu_buffer(cpu_buffer);
5402
5403                 atomic_dec(&cpu_buffer->record_disabled);
5404                 atomic_dec(&cpu_buffer->resize_disabled);
5405         }
5406
5407         mutex_unlock(&buffer->mutex);
5408 }
5409 EXPORT_SYMBOL_GPL(ring_buffer_reset);
5410
5411 /**
5412  * ring_buffer_empty - is the ring buffer empty?
5413  * @buffer: The ring buffer to test
5414  */
5415 bool ring_buffer_empty(struct trace_buffer *buffer)
5416 {
5417         struct ring_buffer_per_cpu *cpu_buffer;
5418         unsigned long flags;
5419         bool dolock;
5420         int cpu;
5421         int ret;
5422
5423         /* yes this is racy, but if you don't like the race, lock the buffer */
5424         for_each_buffer_cpu(buffer, cpu) {
5425                 cpu_buffer = buffer->buffers[cpu];
5426                 local_irq_save(flags);
5427                 dolock = rb_reader_lock(cpu_buffer);
5428                 ret = rb_per_cpu_empty(cpu_buffer);
5429                 rb_reader_unlock(cpu_buffer, dolock);
5430                 local_irq_restore(flags);
5431
5432                 if (!ret)
5433                         return false;
5434         }
5435
5436         return true;
5437 }
5438 EXPORT_SYMBOL_GPL(ring_buffer_empty);
5439
5440 /**
5441  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5442  * @buffer: The ring buffer
5443  * @cpu: The CPU buffer to test
5444  */
5445 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
5446 {
5447         struct ring_buffer_per_cpu *cpu_buffer;
5448         unsigned long flags;
5449         bool dolock;
5450         int ret;
5451
5452         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5453                 return true;
5454
5455         cpu_buffer = buffer->buffers[cpu];
5456         local_irq_save(flags);
5457         dolock = rb_reader_lock(cpu_buffer);
5458         ret = rb_per_cpu_empty(cpu_buffer);
5459         rb_reader_unlock(cpu_buffer, dolock);
5460         local_irq_restore(flags);
5461
5462         return ret;
5463 }
5464 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
5465
5466 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
5467 /**
5468  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5469  * @buffer_a: One buffer to swap with
5470  * @buffer_b: The other buffer to swap with
5471  * @cpu: the CPU of the buffers to swap
5472  *
5473  * This function is useful for tracers that want to take a "snapshot"
5474  * of a CPU buffer and have another backup buffer lying around.
5475  * It is expected that the tracer handles the cpu buffer not being
5476  * used at the moment.
5477  */
5478 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5479                          struct trace_buffer *buffer_b, int cpu)
5480 {
5481         struct ring_buffer_per_cpu *cpu_buffer_a;
5482         struct ring_buffer_per_cpu *cpu_buffer_b;
5483         int ret = -EINVAL;
5484
5485         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5486             !cpumask_test_cpu(cpu, buffer_b->cpumask))
5487                 goto out;
5488
5489         cpu_buffer_a = buffer_a->buffers[cpu];
5490         cpu_buffer_b = buffer_b->buffers[cpu];
5491
5492         /* At least make sure the two buffers are somewhat the same */
5493         if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
5494                 goto out;
5495
5496         ret = -EAGAIN;
5497
5498         if (atomic_read(&buffer_a->record_disabled))
5499                 goto out;
5500
5501         if (atomic_read(&buffer_b->record_disabled))
5502                 goto out;
5503
5504         if (atomic_read(&cpu_buffer_a->record_disabled))
5505                 goto out;
5506
5507         if (atomic_read(&cpu_buffer_b->record_disabled))
5508                 goto out;
5509
5510         /*
5511          * We can't do a synchronize_rcu here because this
5512          * function can be called in atomic context.
5513          * Normally this will be called from the same CPU as cpu.
5514          * If not it's up to the caller to protect this.
5515          */
5516         atomic_inc(&cpu_buffer_a->record_disabled);
5517         atomic_inc(&cpu_buffer_b->record_disabled);
5518
5519         ret = -EBUSY;
5520         if (local_read(&cpu_buffer_a->committing))
5521                 goto out_dec;
5522         if (local_read(&cpu_buffer_b->committing))
5523                 goto out_dec;
5524
5525         buffer_a->buffers[cpu] = cpu_buffer_b;
5526         buffer_b->buffers[cpu] = cpu_buffer_a;
5527
5528         cpu_buffer_b->buffer = buffer_a;
5529         cpu_buffer_a->buffer = buffer_b;
5530
5531         ret = 0;
5532
5533 out_dec:
5534         atomic_dec(&cpu_buffer_a->record_disabled);
5535         atomic_dec(&cpu_buffer_b->record_disabled);
5536 out:
5537         return ret;
5538 }
5539 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
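
/*
 * A minimal sketch of taking a "snapshot" with the swap above, assuming
 * hypothetical main_buffer and snapshot_buffer pointers that were allocated
 * with the same number of pages:
 *
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(main_buffer, snapshot_buffer, cpu);
 *	if (err)
 *		return err;
 *
 * On success the per-CPU buffers are exchanged: snapshot_buffer now holds
 * the events recorded on that CPU, while main_buffer keeps recording into
 * the spare pages.
 */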
5540 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
5541
5542 /**
5543  * ring_buffer_alloc_read_page - allocate a page to read from buffer
5544  * @buffer: the buffer to allocate for.
5545  * @cpu: the cpu buffer to allocate.
5546  *
5547  * This function is used in conjunction with ring_buffer_read_page.
5548  * When reading a full page from the ring buffer, these functions
5549  * can be used to speed up the process. The calling function should
5550  * allocate a few pages first with this function. Then when it
5551  * needs to get pages from the ring buffer, it passes the result
5552  * of this function into ring_buffer_read_page, which will swap
5553  * the page that was allocated, with the read page of the buffer.
5554  *
5555  * Returns:
5556  *  The page allocated, or ERR_PTR
5557  */
5558 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
5559 {
5560         struct ring_buffer_per_cpu *cpu_buffer;
5561         struct buffer_data_page *bpage = NULL;
5562         unsigned long flags;
5563         struct page *page;
5564
5565         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5566                 return ERR_PTR(-ENODEV);
5567
5568         cpu_buffer = buffer->buffers[cpu];
5569         local_irq_save(flags);
5570         arch_spin_lock(&cpu_buffer->lock);
5571
5572         if (cpu_buffer->free_page) {
5573                 bpage = cpu_buffer->free_page;
5574                 cpu_buffer->free_page = NULL;
5575         }
5576
5577         arch_spin_unlock(&cpu_buffer->lock);
5578         local_irq_restore(flags);
5579
5580         if (bpage)
5581                 goto out;
5582
5583         page = alloc_pages_node(cpu_to_node(cpu),
5584                                 GFP_KERNEL | __GFP_NORETRY, 0);
5585         if (!page)
5586                 return ERR_PTR(-ENOMEM);
5587
5588         bpage = page_address(page);
5589
5590  out:
5591         rb_init_page(bpage);
5592
5593         return bpage;
5594 }
5595 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
5596
5597 /**
5598  * ring_buffer_free_read_page - free an allocated read page
5599  * @buffer: the buffer the page was allocated for
5600  * @cpu: the cpu buffer the page came from
5601  * @data: the page to free
5602  *
5603  * Free a page allocated from ring_buffer_alloc_read_page.
5604  */
5605 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
5606 {
5607         struct ring_buffer_per_cpu *cpu_buffer;
5608         struct buffer_data_page *bpage = data;
5609         struct page *page = virt_to_page(bpage);
5610         unsigned long flags;
5611
5612         if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
5613                 return;
5614
5615         cpu_buffer = buffer->buffers[cpu];
5616
5617         /* If the page is still in use someplace else, we can't reuse it */
5618         if (page_ref_count(page) > 1)
5619                 goto out;
5620
5621         local_irq_save(flags);
5622         arch_spin_lock(&cpu_buffer->lock);
5623
5624         if (!cpu_buffer->free_page) {
5625                 cpu_buffer->free_page = bpage;
5626                 bpage = NULL;
5627         }
5628
5629         arch_spin_unlock(&cpu_buffer->lock);
5630         local_irq_restore(flags);
5631
5632  out:
5633         free_page((unsigned long)bpage);
5634 }
5635 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
5636
5637 /**
5638  * ring_buffer_read_page - extract a page from the ring buffer
5639  * @buffer: buffer to extract from
5640  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
5641  * @len: amount to extract
5642  * @cpu: the cpu of the buffer to extract
5643  * @full: should the extraction only happen when the page is full.
5644  *
5645  * This function will pull out a page from the ring buffer and consume it.
5646  * @data_page must be the address of the variable that was returned
5647  * from ring_buffer_alloc_read_page. This is because the page might be used
5648  * to swap with a page in the ring buffer.
5649  *
5650  * for example:
5651  *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
5652  *      if (IS_ERR(rpage))
5653  *              return PTR_ERR(rpage);
5654  *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
5655  *      if (ret >= 0)
5656  *              process_page(rpage, ret);
5657  *
5658  * When @full is set, the function will not return any data unless
5659  * the writer is off the reader page.
5660  *
5661  * Note: it is up to the calling functions to handle sleeps and wakeups.
5662  *  The ring buffer can be used anywhere in the kernel and can not
5663  *  blindly call wake_up. The layer that uses the ring buffer must be
5664  *  responsible for that.
5665  *
5666  * Returns:
5667  *  >=0 if data has been transferred, returns the offset of consumed data.
5668  *  <0 if no data has been transferred.
5669  */
5670 int ring_buffer_read_page(struct trace_buffer *buffer,
5671                           void **data_page, size_t len, int cpu, int full)
5672 {
5673         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5674         struct ring_buffer_event *event;
5675         struct buffer_data_page *bpage;
5676         struct buffer_page *reader;
5677         unsigned long missed_events;
5678         unsigned long flags;
5679         unsigned int commit;
5680         unsigned int read;
5681         u64 save_timestamp;
5682         int ret = -1;
5683
5684         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5685                 goto out;
5686
5687         /*
5688          * If len is not big enough to hold the page header, then
5689          * we can not copy anything.
5690          */
5691         if (len <= BUF_PAGE_HDR_SIZE)
5692                 goto out;
5693
5694         len -= BUF_PAGE_HDR_SIZE;
5695
5696         if (!data_page)
5697                 goto out;
5698
5699         bpage = *data_page;
5700         if (!bpage)
5701                 goto out;
5702
5703         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5704
5705         reader = rb_get_reader_page(cpu_buffer);
5706         if (!reader)
5707                 goto out_unlock;
5708
5709         event = rb_reader_event(cpu_buffer);
5710
5711         read = reader->read;
5712         commit = rb_page_commit(reader);
5713
5714         /* Check if any events were dropped */
5715         missed_events = cpu_buffer->lost_events;
5716
5717         /*
5718          * If this page has been partially read or
5719          * if len is not big enough to read the rest of the page or
5720          * a writer is still on the page, then
5721          * we must copy the data from the page to the buffer.
5722          * Otherwise, we can simply swap the page with the one passed in.
5723          */
5724         if (read || (len < (commit - read)) ||
5725             cpu_buffer->reader_page == cpu_buffer->commit_page) {
5726                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
5727                 unsigned int rpos = read;
5728                 unsigned int pos = 0;
5729                 unsigned int size;
5730
5731                 /*
5732                  * If a full page is expected, this can still be returned
5733                  * if there's been a previous partial read and the
5734                  * rest of the page can be read and the commit page is off
5735                  * the reader page.
5736                  */
5737                 if (full &&
5738                     (!read || (len < (commit - read)) ||
5739                      cpu_buffer->reader_page == cpu_buffer->commit_page))
5740                         goto out_unlock;
5741
5742                 if (len > (commit - read))
5743                         len = (commit - read);
5744
5745                 /* Always keep the time extend and data together */
5746                 size = rb_event_ts_length(event);
5747
5748                 if (len < size)
5749                         goto out_unlock;
5750
5751                 /* save the current timestamp, since the user will need it */
5752                 save_timestamp = cpu_buffer->read_stamp;
5753
5754                 /* Need to copy one event at a time */
5755                 do {
5756                         /* We need the size of one event, because
5757                          * rb_advance_reader only advances by one event,
5758                          * whereas rb_event_ts_length may include the size of
5759                          * one or two events.
5760                          * We have already ensured there's enough space if this
5761                          * is a time extend. */
5762                         size = rb_event_length(event);
5763                         memcpy(bpage->data + pos, rpage->data + rpos, size);
5764
5765                         len -= size;
5766
5767                         rb_advance_reader(cpu_buffer);
5768                         rpos = reader->read;
5769                         pos += size;
5770
5771                         if (rpos >= commit)
5772                                 break;
5773
5774                         event = rb_reader_event(cpu_buffer);
5775                         /* Always keep the time extend and data together */
5776                         size = rb_event_ts_length(event);
5777                 } while (len >= size);
5778
5779                 /* update bpage */
5780                 local_set(&bpage->commit, pos);
5781                 bpage->time_stamp = save_timestamp;
5782
5783                 /* we copied everything to the beginning */
5784                 read = 0;
5785         } else {
5786                 /* update the entry counter */
5787                 cpu_buffer->read += rb_page_entries(reader);
5788                 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
5789
5790                 /* swap the pages */
5791                 rb_init_page(bpage);
5792                 bpage = reader->page;
5793                 reader->page = *data_page;
5794                 local_set(&reader->write, 0);
5795                 local_set(&reader->entries, 0);
5796                 reader->read = 0;
5797                 *data_page = bpage;
5798
5799                 /*
5800                  * Use the real_end for the data size,
5801                  * This gives us a chance to store the lost events
5802                  * on the page.
5803                  */
5804                 if (reader->real_end)
5805                         local_set(&bpage->commit, reader->real_end);
5806         }
5807         ret = read;
5808
5809         cpu_buffer->lost_events = 0;
5810
5811         commit = local_read(&bpage->commit);
5812         /*
5813          * Set a flag in the commit field if we lost events
5814          */
5815         if (missed_events) {
5816                 /* If there is room at the end of the page to save the
5817                  * missed events, then record it there.
5818                  */
5819                 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
5820                         memcpy(&bpage->data[commit], &missed_events,
5821                                sizeof(missed_events));
5822                         local_add(RB_MISSED_STORED, &bpage->commit);
5823                         commit += sizeof(missed_events);
5824                 }
5825                 local_add(RB_MISSED_EVENTS, &bpage->commit);
5826         }
5827
5828         /*
5829          * This page may be off to user land. Zero it out here.
5830          */
5831         if (commit < BUF_PAGE_SIZE)
5832                 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
5833
5834  out_unlock:
5835         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5836
5837  out:
5838         return ret;
5839 }
5840 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
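
/*
 * A minimal sketch of the full allocate/read/free cycle for page-sized
 * reads, extending the example in the comment above; "buf" and
 * consume_page() are hypothetical:
 *
 *	void *rpage;
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(buf, cpu);
 *	if (IS_ERR(rpage))
 *		return PTR_ERR(rpage);
 *
 *	ret = ring_buffer_read_page(buf, &rpage, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		consume_page(rpage, ret);
 *
 *	ring_buffer_free_read_page(buf, cpu, rpage);
 */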
5841
5842 /*
5843  * We only allocate new buffers, never free them if the CPU goes down.
5844  * If we were to free the buffer, then the user would lose any trace that was in
5845  * the buffer.
5846  */
5847 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
5848 {
5849         struct trace_buffer *buffer;
5850         long nr_pages_same;
5851         int cpu_i;
5852         unsigned long nr_pages;
5853
5854         buffer = container_of(node, struct trace_buffer, node);
5855         if (cpumask_test_cpu(cpu, buffer->cpumask))
5856                 return 0;
5857
5858         nr_pages = 0;
5859         nr_pages_same = 1;
5860         /* check if all cpu sizes are same */
5861         for_each_buffer_cpu(buffer, cpu_i) {
5862                 /* fill in the size from first enabled cpu */
5863                 if (nr_pages == 0)
5864                         nr_pages = buffer->buffers[cpu_i]->nr_pages;
5865                 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
5866                         nr_pages_same = 0;
5867                         break;
5868                 }
5869         }
5870         /* allocate minimum pages, user can later expand it */
5871         if (!nr_pages_same)
5872                 nr_pages = 2;
5873         buffer->buffers[cpu] =
5874                 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
5875         if (!buffer->buffers[cpu]) {
5876                 WARN(1, "failed to allocate ring buffer on CPU %u\n",
5877                      cpu);
5878                 return -ENOMEM;
5879         }
5880         smp_wmb();
5881         cpumask_set_cpu(cpu, buffer->cpumask);
5882         return 0;
5883 }
5884
5885 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
5886 /*
5887  * This is a basic integrity check of the ring buffer.
5888  * Late in the boot cycle this test will run when configured in.
5889  * It will kick off a thread per CPU that will go into a loop
5890  * writing to the per cpu ring buffer various sizes of data.
5891  * Some of the data will be large items, some small.
5892  *
5893  * Another thread is created that goes into a spin, sending out
5894  * IPIs to the other CPUs to also write into the ring buffer.
5895  * This is to test the nesting ability of the buffer.
5896  *
5897  * Basic stats are recorded and reported. If something in the
5898  * ring buffer should happen that's not expected, a big warning
5899  * is displayed and all ring buffers are disabled.
5900  */
5901 static struct task_struct *rb_threads[NR_CPUS] __initdata;
5902
5903 struct rb_test_data {
5904         struct trace_buffer *buffer;
5905         unsigned long           events;
5906         unsigned long           bytes_written;
5907         unsigned long           bytes_alloc;
5908         unsigned long           bytes_dropped;
5909         unsigned long           events_nested;
5910         unsigned long           bytes_written_nested;
5911         unsigned long           bytes_alloc_nested;
5912         unsigned long           bytes_dropped_nested;
5913         int                     min_size_nested;
5914         int                     max_size_nested;
5915         int                     max_size;
5916         int                     min_size;
5917         int                     cpu;
5918         int                     cnt;
5919 };
5920
5921 static struct rb_test_data rb_data[NR_CPUS] __initdata;
5922
5923 /* 1 meg per cpu */
5924 #define RB_TEST_BUFFER_SIZE     1048576
5925
5926 static char rb_string[] __initdata =
5927         "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
5928         "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
5929         "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
5930
5931 static bool rb_test_started __initdata; /* true once the buffer is enabled for writers */
5932
5933 struct rb_item {
5934         int size;
5935         char str[];
5936 };
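     /*
      * Each test event's payload is one rb_item: the length of the string data
      * followed by that many bytes copied from rb_string[].  Illustrative layout
      * of what rb_write_something() stores in the event data:
      *
      *	| int size | size bytes of rb_string |
      */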
5937
5938 static __init int rb_write_something(struct rb_test_data *data, bool nested)
5939 {
5940         struct ring_buffer_event *event;
5941         struct rb_item *item;
5942         bool started;
5943         int event_len;
5944         int size;
5945         int len;
5946         int cnt;
5947
5948         /* Have nested writes different than what is written */
5949         cnt = data->cnt + (nested ? 27 : 0);
5950
5951         /* Multiply cnt by ~e, to make some unique increment */
5952         size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
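             /*
              * Worked example of the line above: 68/25 ~= 2.72 ~= e, so with
              * integer math successive cnt values grow size by 2 or 3 bytes
              * (cnt = 1, 2, 3, 4 -> size = 2, 5, 8, 10, ...), wrapping at the
              * end of rb_string.  The odd multiplier simply keeps consecutive
              * events from all having the same size.
              */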
5953
5954         len = size + sizeof(struct rb_item);
5955
5956         started = rb_test_started;
5957         /* read rb_test_started before checking buffer enabled */
5958         smp_rmb();
5959
5960         event = ring_buffer_lock_reserve(data->buffer, len);
5961         if (!event) {
5962                 /* Ignore dropped events before test starts. */
5963                 if (started) {
5964                         if (nested)
5965                                 data->bytes_dropped_nested += len;
5966                         else
5967                                 data->bytes_dropped += len;
5968                 }
5969                 return len;
5970         }
5971
5972         event_len = ring_buffer_event_length(event);
5973
5974         if (RB_WARN_ON(data->buffer, event_len < len))
5975                 goto out;
5976
5977         item = ring_buffer_event_data(event);
5978         item->size = size;
5979         memcpy(item->str, rb_string, size);
5980
5981         if (nested) {
5982                 data->bytes_alloc_nested += event_len;
5983                 data->bytes_written_nested += len;
5984                 data->events_nested++;
5985                 if (!data->min_size_nested || len < data->min_size_nested)
5986                         data->min_size_nested = len;
5987                 if (len > data->max_size_nested)
5988                         data->max_size_nested = len;
5989         } else {
5990                 data->bytes_alloc += event_len;
5991                 data->bytes_written += len;
5992                 data->events++;
5993                 if (!data->min_size || len < data->min_size)
5994                         data->min_size = len;
5995                 if (len > data->max_size)
5996                         data->max_size = len;
5997         }
5998
5999  out:
6000         ring_buffer_unlock_commit(data->buffer);
6001
6002         return 0;
6003 }
6004
6005 static __init int rb_test(void *arg)
6006 {
6007         struct rb_test_data *data = arg;
6008
6009         while (!kthread_should_stop()) {
6010                 rb_write_something(data, false);
6011                 data->cnt++;
6012
6013                 set_current_state(TASK_INTERRUPTIBLE);
6014                 /* Now sleep for a min of 100-300us (varies with cnt) up to a max of 1ms */
6015                 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
6016         }
6017
6018         return 0;
6019 }
6020
6021 static __init void rb_ipi(void *ignore)
6022 {
6023         struct rb_test_data *data;
6024         int cpu = smp_processor_id();
6025
6026         data = &rb_data[cpu];
6027         rb_write_something(data, true);
6028 }
6029
6030 static __init int rb_hammer_test(void *arg)
6031 {
6032         while (!kthread_should_stop()) {
6033
6034                 /* Send an IPI to all cpus to write data! */
6035                 smp_call_function(rb_ipi, NULL, 1);
6036                 /* No sleep, but on non-preempt kernels, let others run */
6037                 schedule();
6038         }
6039
6040         return 0;
6041 }
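     /*
      * Note on the hammer: smp_call_function() runs rb_ipi() on every other
      * online CPU from IPI (interrupt) context, so those writes can land in the
      * middle of that CPU's own rb_test() write.  This is what produces the
      * "nested" counts tracked separately in struct rb_test_data.
      */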
6042
6043 static __init int test_ringbuffer(void)
6044 {
6045         struct task_struct *rb_hammer;
6046         struct trace_buffer *buffer;
6047         int cpu;
6048         int ret = 0;
6049
6050         if (security_locked_down(LOCKDOWN_TRACEFS)) {
6051                 pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
6052                 return 0;
6053         }
6054
6055         pr_info("Running ring buffer tests...\n");
6056
6057         buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
6058         if (WARN_ON(!buffer))
6059                 return 0;
6060
6061         /* Disable buffer so that threads can't write to it yet */
6062         ring_buffer_record_off(buffer);
6063
6064         for_each_online_cpu(cpu) {
6065                 rb_data[cpu].buffer = buffer;
6066                 rb_data[cpu].cpu = cpu;
6067                 rb_data[cpu].cnt = cpu;
6068                 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
6069                                                      cpu, "rbtester/%u");
6070                 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
6071                         pr_cont("FAILED\n");
6072                         ret = PTR_ERR(rb_threads[cpu]);
6073                         goto out_free;
6074                 }
6075         }
6076
6077         /* Now create the rb hammer! */
6078         rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
6079         if (WARN_ON(IS_ERR(rb_hammer))) {
6080                 pr_cont("FAILED\n");
6081                 ret = PTR_ERR(rb_hammer);
6082                 goto out_free;
6083         }
6084
6085         ring_buffer_record_on(buffer);
6086         /*
6087          * Make the buffer's enabled state visible before setting rb_test_started.
6088          * Yes, there's a small race window where events could be
6089          * dropped and the thread won't catch it. But when a ring
6090          * buffer gets enabled, there will always be some kind of
6091          * delay before other CPUs see it. Thus, we don't care about
6092          * those dropped events. We care about events dropped after
6093          * the threads see that the buffer is active.
6094          */
6095         smp_wmb();
6096         rb_test_started = true;
6097
6098         set_current_state(TASK_INTERRUPTIBLE);
6099         /* Just run for 10 seconds */
6100         schedule_timeout(10 * HZ);
6101
6102         kthread_stop(rb_hammer);
6103
6104  out_free:
6105         for_each_online_cpu(cpu) {
6106                 if (!rb_threads[cpu] || IS_ERR(rb_threads[cpu]))
6107                         break;
6108                 kthread_stop(rb_threads[cpu]);
6109         }
6110         if (ret) {
6111                 ring_buffer_free(buffer);
6112                 return ret;
6113         }
6114
6115         /* Report! */
6116         pr_info("finished\n");
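             /*
              * Consume everything that was written and cross-check it against the
              * writers' own accounting: every payload must still match rb_string,
              * read + lost events must equal the events written, and, when nothing
              * was lost, the consumed lengths/sizes must equal the allocated and
              * written byte counts reported above.
              */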
6117         for_each_online_cpu(cpu) {
6118                 struct ring_buffer_event *event;
6119                 struct rb_test_data *data = &rb_data[cpu];
6120                 struct rb_item *item;
6121                 unsigned long total_events;
6122                 unsigned long total_dropped;
6123                 unsigned long total_written;
6124                 unsigned long total_alloc;
6125                 unsigned long total_read = 0;
6126                 unsigned long total_size = 0;
6127                 unsigned long total_len = 0;
6128                 unsigned long total_lost = 0;
6129                 unsigned long lost;
6130                 int big_event_size;
6131                 int small_event_size;
6132
6133                 ret = -1;
6134
6135                 total_events = data->events + data->events_nested;
6136                 total_written = data->bytes_written + data->bytes_written_nested;
6137                 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
6138                 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
6139
6140                 big_event_size = data->max_size + data->max_size_nested;
6141                 small_event_size = data->min_size + data->min_size_nested;
6142
6143                 pr_info("CPU %d:\n", cpu);
6144                 pr_info("              events:    %ld\n", total_events);
6145                 pr_info("       dropped bytes:    %ld\n", total_dropped);
6146                 pr_info("       alloced bytes:    %ld\n", total_alloc);
6147                 pr_info("       written bytes:    %ld\n", total_written);
6148                 pr_info("       biggest event:    %d\n", big_event_size);
6149                 pr_info("      smallest event:    %d\n", small_event_size);
6150
6151                 if (RB_WARN_ON(buffer, total_dropped))
6152                         break;
6153
6154                 ret = 0;
6155
6156                 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
6157                         total_lost += lost;
6158                         item = ring_buffer_event_data(event);
6159                         total_len += ring_buffer_event_length(event);
6160                         total_size += item->size + sizeof(struct rb_item);
6161                         if (memcmp(&item->str[0], rb_string, item->size) != 0) {
6162                                 pr_info("FAILED!\n");
6163                                 pr_info("buffer had: %.*s\n", item->size, item->str);
6164                                 pr_info("expected:   %.*s\n", item->size, rb_string);
6165                                 RB_WARN_ON(buffer, 1);
6166                                 ret = -1;
6167                                 break;
6168                         }
6169                         total_read++;
6170                 }
6171                 if (ret)
6172                         break;
6173
6174                 ret = -1;
6175
6176                 pr_info("         read events:   %ld\n", total_read);
6177                 pr_info("         lost events:   %ld\n", total_lost);
6178                 pr_info("        total events:   %ld\n", total_lost + total_read);
6179                 pr_info("  recorded len bytes:   %ld\n", total_len);
6180                 pr_info(" recorded size bytes:   %ld\n", total_size);
6181                 if (total_lost) {
6182                         pr_info(" With dropped events, record len and size may not match\n"
6183                                 " alloced and written from above\n");
6184                 } else {
6185                         if (RB_WARN_ON(buffer, total_len != total_alloc ||
6186                                        total_size != total_written))
6187                                 break;
6188                 }
6189                 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
6190                         break;
6191
6192                 ret = 0;
6193         }
6194         if (!ret)
6195                 pr_info("Ring buffer PASSED!\n");
6196
6197         ring_buffer_free(buffer);
6198         return 0;
6199 }
6200
6201 late_initcall(test_ringbuffer);
6202 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */