tools/perf/util/ordered-events.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

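/*
 * Rough usage sketch of this API (all names below are functions defined in
 * this file; the deliver callback is whatever the caller wants run for each
 * event once it comes back out in timestamp order):
 *
 *   ordered_events__init(&oe, deliver);
 *   ...
 *   ordered_events__queue(&oe, event, timestamp, file_offset);
 *   ...
 *   ordered_events__flush(&oe, OE_FLUSH__ROUND);
 *   ...
 *   ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *   ordered_events__free(&oe);
 */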
#define pr_N(n, fmt, ...) \
        eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

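/*
 * Link @new into the time sorted 'events' list. The search starts from the
 * most recently queued event (oe->last) and walks forwards or backwards,
 * since consecutive events usually carry nearby timestamps.
 */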
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
        struct ordered_event *last = oe->last;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        ++oe->nr_events;
        oe->last = new;

        pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

        if (!last) {
                list_add(&new->list, &oe->events);
                oe->max_timestamp = timestamp;
                return;
        }

        /*
         * last event might point to some random place in the list as it's
         * the last queued event. We expect that the new event is close to
         * this.
         */
        if (last->timestamp <= timestamp) {
                while (last->timestamp <= timestamp) {
                        p = last->list.next;
                        if (p == &oe->events) {
                                list_add_tail(&new->list, &oe->events);
                                oe->max_timestamp = timestamp;
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add_tail(&new->list, &last->list);
        } else {
                while (last->timestamp > timestamp) {
                        p = last->list.prev;
                        if (p == &oe->events) {
                                list_add(&new->list, &oe->events);
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add(&new->list, &last->list);
        }
}

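/*
 * When 'copy_on_queue' is set, dup_event() keeps a private copy of the perf
 * event data (accounted against 'cur_alloc_size'); otherwise the original
 * pointer is queued directly and free_dup_event() is a no-op for it.
 */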
static union perf_event *__dup_event(struct ordered_events *oe,
                                     union perf_event *event)
{
        union perf_event *new_event = NULL;

        if (oe->cur_alloc_size < oe->max_alloc_size) {
                new_event = memdup(event, event->header.size);
                if (new_event)
                        oe->cur_alloc_size += event->header.size;
        }

        return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
                                   union perf_event *event)
{
        return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
        if (event) {
                oe->cur_alloc_size -= event->header.size;
                free(event);
        }
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
        if (oe->copy_on_queue)
                __free_dup_event(oe, event);
}

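/*
 * MAX_SAMPLE_BUFFER is the number of ordered_event slots sized so that one
 * allocation buffer holds roughly 64K worth of them; the comment inside
 * alloc_event() below describes how the 'cache', the current buffer and the
 * 'to_free' list cooperate.
 */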
#define MAX_SAMPLE_BUFFER       (64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
                                         union perf_event *event)
{
        struct list_head *cache = &oe->cache;
        struct ordered_event *new = NULL;
        union perf_event *new_event;
        size_t size;

        new_event = dup_event(oe, event);
        if (!new_event)
                return NULL;

        /*
         * We maintain the following scheme of buffers for ordered
         * event allocation:
         *
         *   to_free list -> buffer1 (64K)
         *                   buffer2 (64K)
         *                   ...
         *
         * Each buffer keeps an array of ordered event objects:
         *    buffer -> event[0]
         *              event[1]
         *              ...
         *
         * Each allocated ordered event is linked to one of the
         * following lists:
         *   - time ordered list 'events'
         *   - list of currently removed events 'cache'
         *
         * Allocation of the ordered event uses the following order
         * to get the memory:
         *   - use recently removed object from 'cache' list
         *   - use available object in current allocation buffer
         *   - allocate new buffer if the current buffer is full
         *
         * Removing an ordered event object moves it from 'events'
         * to the 'cache' list.
         */
        size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

        if (!list_empty(cache)) {
                new = list_entry(cache->next, struct ordered_event, list);
                list_del(&new->list);
        } else if (oe->buffer) {
                new = &oe->buffer->event[oe->buffer_idx];
                if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
                        oe->buffer = NULL;
        } else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
                oe->buffer = malloc(size);
                if (!oe->buffer) {
                        free_dup_event(oe, new_event);
                        return NULL;
                }

                pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
                   oe->cur_alloc_size, size, oe->max_alloc_size);

                oe->cur_alloc_size += size;
                list_add(&oe->buffer->list, &oe->to_free);

                oe->buffer_idx = 1;
                new = &oe->buffer->event[0];
        } else {
                pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
                /* don't leak the event copy made by dup_event() */
                free_dup_event(oe, new_event);
                return NULL;
        }

        new->event = new_event;
        return new;
}

static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
                          union perf_event *event)
{
        struct ordered_event *new;

        new = alloc_event(oe, event);
        if (new) {
                new->timestamp = timestamp;
                queue_event(oe, new);
        }

        return new;
}

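/*
 * Return an ordered_event to the 'cache' list for reuse and release the
 * duplicated event data, if any.
 */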
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
        list_move(&event->list, &oe->cache);
        oe->nr_events--;
        free_dup_event(oe, event->event);
        event->event = NULL;
}

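/*
 * Queue one event for ordered delivery. Events without a usable timestamp
 * are rejected with -ETIME; if the allocation limit is hit, half of the
 * queue is flushed to make room before giving up with -ENOMEM.
 */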
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
                          u64 timestamp, u64 file_offset)
{
        struct ordered_event *oevent;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < oe->last_flush) {
                pr_oe_time(timestamp,      "out of order event\n");
                pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
                           oe->last_flush_type);

                oe->nr_unordered_events++;
        }

        oevent = ordered_events__new_event(oe, timestamp, event);
        if (!oevent) {
                ordered_events__flush(oe, OE_FLUSH__HALF);
                oevent = ordered_events__new_event(oe, timestamp, event);
        }

        if (!oevent)
                return -ENOMEM;

        oevent->file_offset = file_offset;
        return 0;
}

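/*
 * Deliver all queued events with a timestamp up to 'next_flush' and recycle
 * them via ordered_events__delete(). A progress bar is shown only for the
 * final flush, when the limit is ULLONG_MAX.
 */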
static int __ordered_events__flush(struct ordered_events *oe)
{
        struct list_head *head = &oe->events;
        struct ordered_event *tmp, *iter;
        u64 limit = oe->next_flush;
        u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
        bool show_progress = limit == ULLONG_MAX;
        struct ui_progress prog;
        int ret;

        if (!limit)
                return 0;

        if (show_progress)
                ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (session_done())
                        return 0;

                if (iter->timestamp > limit)
                        break;
                ret = oe->deliver(oe, iter);
                if (ret)
                        return ret;

                ordered_events__delete(oe, iter);
                oe->last_flush = iter->timestamp;

                if (show_progress)
                        ui_progress__update(&prog, 1);
        }

        if (list_empty(head))
                oe->last = NULL;
        else if (last_ts <= limit)
                oe->last = list_entry(head->prev, struct ordered_event, list);

        if (show_progress)
                ui_progress__finish();

        return 0;
}

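/*
 * Pick the flush limit according to 'how' and run __ordered_events__flush():
 * FINAL drains everything, HALF flushes up to the midpoint between the first
 * and the last queued timestamps, ROUND reuses the current 'next_flush' and
 * then advances it to 'max_timestamp' for the next round.
 */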
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
        static const char * const str[] = {
                "NONE",
                "FINAL",
                "ROUND",
                "HALF ",
        };
        int err;

        if (oe->nr_events == 0)
                return 0;

        switch (how) {
        case OE_FLUSH__FINAL:
                oe->next_flush = ULLONG_MAX;
                break;

        case OE_FLUSH__HALF:
        {
                struct ordered_event *first, *last;
                struct list_head *head = &oe->events;

                first = list_entry(head->next, struct ordered_event, list);
                last = oe->last;

                /* Warn if we are called before any event got allocated. */
                if (WARN_ONCE(!last || list_empty(head), "empty queue"))
                        return 0;

                oe->next_flush  = first->timestamp;
                oe->next_flush += (last->timestamp - first->timestamp) / 2;
                break;
        }

        case OE_FLUSH__ROUND:
        case OE_FLUSH__NONE:
        default:
                break;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->max_timestamp, "max_timestamp\n");

        err = __ordered_events__flush(oe);

        if (!err) {
                if (how == OE_FLUSH__ROUND)
                        oe->next_flush = oe->max_timestamp;

                oe->last_flush_type = how;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->last_flush, "last_flush\n");

        return err;
}

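/*
 * Initialize an ordered_events instance: empty lists, no effective allocation
 * limit (max_alloc_size of (u64)-1) and the caller supplied deliver callback.
 */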
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
        INIT_LIST_HEAD(&oe->events);
        INIT_LIST_HEAD(&oe->cache);
        INIT_LIST_HEAD(&oe->to_free);
        oe->max_alloc_size = (u64) -1;
        oe->cur_alloc_size = 0;
        oe->deliver        = deliver;
}

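/*
 * Free one allocation buffer; 'max' bounds how many event slots were handed
 * out of it, so only those may hold duplicated event data to release.
 */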
static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
                            unsigned int max, struct ordered_events *oe)
{
        if (oe->copy_on_queue) {
                unsigned int i;

                for (i = 0; i < max; i++)
                        __free_dup_event(oe, buffer->event[i].event);
        }

        free(buffer);
}

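/*
 * Release all allocation buffers on the 'to_free' list. The current buffer
 * may be only partially used, so it is freed first with 'buffer_idx' as the
 * bound; fully used buffers are freed with MAX_SAMPLE_BUFFER.
 */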
void ordered_events__free(struct ordered_events *oe)
{
        struct ordered_events_buffer *buffer, *tmp;

        if (list_empty(&oe->to_free))
                return;

        /*
         * Current buffer might not have all the events allocated
         * yet, we need to free only allocated ones. It can also be
         * NULL if the last buffer was completely filled up.
         */
        if (oe->buffer) {
                list_del(&oe->buffer->list);
                ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
        }

        /* ... and continue with the rest */
        list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
                list_del(&buffer->list);
                ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
        }
}

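/*
 * Drop all buffered state but keep the deliver callback, so the instance can
 * be reused from scratch.
 */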
void ordered_events__reinit(struct ordered_events *oe)
{
        ordered_events__deliver_t old_deliver = oe->deliver;

        ordered_events__free(oe);
        memset(oe, '\0', sizeof(*oe));
        ordered_events__init(oe, old_deliver);
}