tools/perf/util/session.c
1 #include <errno.h>
2 #include <inttypes.h>
3 #include <linux/kernel.h>
4 #include <traceevent/event-parse.h>
5 #include <api/fs/fs.h>
6
7 #include <byteswap.h>
8 #include <unistd.h>
9 #include <sys/types.h>
10 #include <sys/mman.h>
11
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "memswap.h"
15 #include "session.h"
16 #include "tool.h"
17 #include "sort.h"
18 #include "util.h"
19 #include "cpumap.h"
20 #include "perf_regs.h"
21 #include "asm/bug.h"
22 #include "auxtrace.h"
23 #include "thread.h"
24 #include "thread-stack.h"
25 #include "stat.h"
26
27 static int perf_session__deliver_event(struct perf_session *session,
28                                        union perf_event *event,
29                                        struct perf_sample *sample,
30                                        struct perf_tool *tool,
31                                        u64 file_offset);
32
33 static int perf_session__open(struct perf_session *session)
34 {
35         struct perf_data_file *file = session->file;
36
37         if (perf_session__read_header(session) < 0) {
38                 pr_err("incompatible file format (rerun with -v to learn more)\n");
39                 return -1;
40         }
41
42         if (perf_data_file__is_pipe(file))
43                 return 0;
44
45         if (perf_header__has_feat(&session->header, HEADER_STAT))
46                 return 0;
47
48         if (!perf_evlist__valid_sample_type(session->evlist)) {
49                 pr_err("non matching sample_type\n");
50                 return -1;
51         }
52
53         if (!perf_evlist__valid_sample_id_all(session->evlist)) {
54                 pr_err("non matching sample_id_all\n");
55                 return -1;
56         }
57
58         if (!perf_evlist__valid_read_format(session->evlist)) {
59                 pr_err("non matching read_format\n");
60                 return -1;
61         }
62
63         return 0;
64 }
65
66 void perf_session__set_id_hdr_size(struct perf_session *session)
67 {
68         u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
69
70         machines__set_id_hdr_size(&session->machines, id_hdr_size);
71 }
72
73 int perf_session__create_kernel_maps(struct perf_session *session)
74 {
75         int ret = machine__create_kernel_maps(&session->machines.host);
76
77         if (ret >= 0)
78                 ret = machines__create_guest_kernel_maps(&session->machines);
79         return ret;
80 }
81
82 static void perf_session__destroy_kernel_maps(struct perf_session *session)
83 {
84         machines__destroy_kernel_maps(&session->machines);
85 }
86
87 static bool perf_session__has_comm_exec(struct perf_session *session)
88 {
89         struct perf_evsel *evsel;
90
91         evlist__for_each_entry(session->evlist, evsel) {
92                 if (evsel->attr.comm_exec)
93                         return true;
94         }
95
96         return false;
97 }
98
99 static void perf_session__set_comm_exec(struct perf_session *session)
100 {
101         bool comm_exec = perf_session__has_comm_exec(session);
102
103         machines__set_comm_exec(&session->machines, comm_exec);
104 }
105
106 static int ordered_events__deliver_event(struct ordered_events *oe,
107                                          struct ordered_event *event)
108 {
109         struct perf_sample sample;
110         struct perf_session *session = container_of(oe, struct perf_session,
111                                                     ordered_events);
112         int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);
113
114         if (ret) {
115                 pr_err("Can't parse sample, err = %d\n", ret);
116                 return ret;
117         }
118
119         return perf_session__deliver_event(session, event->event, &sample,
120                                            session->tool, event->file_offset);
121 }
122
123 struct perf_session *perf_session__new(struct perf_data_file *file,
124                                        bool repipe, struct perf_tool *tool)
125 {
126         struct perf_session *session = zalloc(sizeof(*session));
127
128         if (!session)
129                 goto out;
130
131         session->repipe = repipe;
132         session->tool   = tool;
133         INIT_LIST_HEAD(&session->auxtrace_index);
134         machines__init(&session->machines);
135         ordered_events__init(&session->ordered_events, ordered_events__deliver_event);
136
137         if (file) {
138                 if (perf_data_file__open(file))
139                         goto out_delete;
140
141                 session->file = file;
142
143                 if (perf_data_file__is_read(file)) {
144                         if (perf_session__open(session) < 0)
145                                 goto out_close;
146
147                         /*
148                          * set session attributes that are present in perf.data
149                          * but not in pipe-mode.
150                          */
151                         if (!file->is_pipe) {
152                                 perf_session__set_id_hdr_size(session);
153                                 perf_session__set_comm_exec(session);
154                         }
155                 }
156         } else {
157                 session->machines.host.env = &perf_env;
158         }
159
160         if (!file || perf_data_file__is_write(file)) {
161                 /*
162                  * In O_RDONLY mode this will be performed when reading the
163                  * kernel MMAP event, in perf_event__process_mmap().
164                  */
165                 if (perf_session__create_kernel_maps(session) < 0)
166                         pr_warning("Cannot read kernel map\n");
167         }
168
169         /*
170          * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
171          * processed, so perf_evlist__sample_id_all is not meaningful here.
172          */
173         if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps &&
174             tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
175                 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
176                 tool->ordered_events = false;
177         }
178
179         return session;
180
181  out_close:
182         perf_data_file__close(file);
183  out_delete:
184         perf_session__delete(session);
185  out:
186         return NULL;
187 }
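/*
 * Example: the typical read-side session lifecycle, as a sketch. It
 * assumes the caller drives processing with perf_session__process_events()
 * (declared in session.h, not shown in this file) and the
 * PERF_DATA_MODE_READ open mode from data.h:
 *
 *	struct perf_data_file file = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_tool tool = { .ordered_events = true };
 *	struct perf_session *session;
 *
 *	session = perf_session__new(&file, false, &tool);
 *	if (session == NULL)
 *		return -1;
 *	perf_tool__fill_defaults(&tool);
 *	perf_session__process_events(session);
 *	perf_session__delete(session);
 */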
188
189 static void perf_session__delete_threads(struct perf_session *session)
190 {
191         machine__delete_threads(&session->machines.host);
192 }
193
194 void perf_session__delete(struct perf_session *session)
195 {
196         if (session == NULL)
197                 return;
198         auxtrace__free(session);
199         auxtrace_index__free(&session->auxtrace_index);
200         perf_session__destroy_kernel_maps(session);
201         perf_session__delete_threads(session);
202         perf_env__exit(&session->header.env);
203         machines__exit(&session->machines);
204         if (session->file)
205                 perf_data_file__close(session->file);
206         free(session);
207 }
208
209 static int process_event_synth_tracing_data_stub(struct perf_tool *tool
210                                                  __maybe_unused,
211                                                  union perf_event *event
212                                                  __maybe_unused,
213                                                  struct perf_session *session
214                                                 __maybe_unused)
215 {
216         dump_printf(": unhandled!\n");
217         return 0;
218 }
219
220 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
221                                          union perf_event *event __maybe_unused,
222                                          struct perf_evlist **pevlist
223                                          __maybe_unused)
224 {
225         dump_printf(": unhandled!\n");
226         return 0;
227 }
228
229 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
230                                                  union perf_event *event __maybe_unused,
231                                                  struct perf_evlist **pevlist
232                                                  __maybe_unused)
233 {
234         if (dump_trace)
235                 perf_event__fprintf_event_update(event, stdout);
236
237         dump_printf(": unhandled!\n");
238         return 0;
239 }
240
241 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
242                                      union perf_event *event __maybe_unused,
243                                      struct perf_sample *sample __maybe_unused,
244                                      struct perf_evsel *evsel __maybe_unused,
245                                      struct machine *machine __maybe_unused)
246 {
247         dump_printf(": unhandled!\n");
248         return 0;
249 }
250
251 static int process_event_stub(struct perf_tool *tool __maybe_unused,
252                               union perf_event *event __maybe_unused,
253                               struct perf_sample *sample __maybe_unused,
254                               struct machine *machine __maybe_unused)
255 {
256         dump_printf(": unhandled!\n");
257         return 0;
258 }
259
260 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
261                                        union perf_event *event __maybe_unused,
262                                        struct ordered_events *oe __maybe_unused)
263 {
264         dump_printf(": unhandled!\n");
265         return 0;
266 }
267
268 static int process_finished_round(struct perf_tool *tool,
269                                   union perf_event *event,
270                                   struct ordered_events *oe);
271
272 static int skipn(int fd, off_t n)
273 {
274         char buf[4096];
275         ssize_t ret;
276
277         while (n > 0) {
278                 ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
279                 if (ret <= 0)
280                         return ret;
281                 n -= ret;
282         }
283
284         return 0;
285 }
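/*
 * skipn() exists because pipe input cannot lseek(): AUX trace payloads
 * that a stub does not consume still have to be drained from the stream.
 * Note the EOF behaviour: read() returning 0 with n still positive makes
 * skipn() return 0 as well, so a truncated pipe is indistinguishable from
 * a full skip for callers that only check the return value.
 */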
286
287 static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
288                                        union perf_event *event,
289                                        struct perf_session *session
290                                        __maybe_unused)
291 {
292         dump_printf(": unhandled!\n");
293         if (perf_data_file__is_pipe(session->file))
294                 skipn(perf_data_file__fd(session->file), event->auxtrace.size);
295         return event->auxtrace.size;
296 }
297
298 static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
299                                   union perf_event *event __maybe_unused,
300                                   struct perf_session *session __maybe_unused)
301 {
302         dump_printf(": unhandled!\n");
303         return 0;
304 }
305
307 static
308 int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
309                                   union perf_event *event __maybe_unused,
310                                   struct perf_session *session __maybe_unused)
311 {
312         if (dump_trace)
313                 perf_event__fprintf_thread_map(event, stdout);
314
315         dump_printf(": unhandled!\n");
316         return 0;
317 }
318
319 static
320 int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
321                                union perf_event *event __maybe_unused,
322                                struct perf_session *session __maybe_unused)
323 {
324         if (dump_trace)
325                 perf_event__fprintf_cpu_map(event, stdout);
326
327         dump_printf(": unhandled!\n");
328         return 0;
329 }
330
331 static
332 int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
333                                    union perf_event *event __maybe_unused,
334                                    struct perf_session *session __maybe_unused)
335 {
336         if (dump_trace)
337                 perf_event__fprintf_stat_config(event, stdout);
338
339         dump_printf(": unhandled!\n");
340         return 0;
341 }
342
343 static int process_stat_stub(struct perf_tool *tool __maybe_unused,
344                              union perf_event *event __maybe_unused,
345                              struct perf_session *perf_session
346                              __maybe_unused)
347 {
348         if (dump_trace)
349                 perf_event__fprintf_stat(event, stdout);
350
351         dump_printf(": unhandled!\n");
352         return 0;
353 }
354
355 static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
356                                    union perf_event *event __maybe_unused,
357                                    struct perf_session *perf_session
358                                    __maybe_unused)
359 {
360         if (dump_trace)
361                 perf_event__fprintf_stat_round(event, stdout);
362
363         dump_printf(": unhandled!\n");
364         return 0;
365 }
366
367 void perf_tool__fill_defaults(struct perf_tool *tool)
368 {
369         if (tool->sample == NULL)
370                 tool->sample = process_event_sample_stub;
371         if (tool->mmap == NULL)
372                 tool->mmap = process_event_stub;
373         if (tool->mmap2 == NULL)
374                 tool->mmap2 = process_event_stub;
375         if (tool->comm == NULL)
376                 tool->comm = process_event_stub;
377         if (tool->fork == NULL)
378                 tool->fork = process_event_stub;
379         if (tool->exit == NULL)
380                 tool->exit = process_event_stub;
381         if (tool->lost == NULL)
382                 tool->lost = perf_event__process_lost;
383         if (tool->lost_samples == NULL)
384                 tool->lost_samples = perf_event__process_lost_samples;
385         if (tool->aux == NULL)
386                 tool->aux = perf_event__process_aux;
387         if (tool->itrace_start == NULL)
388                 tool->itrace_start = perf_event__process_itrace_start;
389         if (tool->context_switch == NULL)
390                 tool->context_switch = perf_event__process_switch;
391         if (tool->read == NULL)
392                 tool->read = process_event_sample_stub;
393         if (tool->throttle == NULL)
394                 tool->throttle = process_event_stub;
395         if (tool->unthrottle == NULL)
396                 tool->unthrottle = process_event_stub;
397         if (tool->attr == NULL)
398                 tool->attr = process_event_synth_attr_stub;
399         if (tool->event_update == NULL)
400                 tool->event_update = process_event_synth_event_update_stub;
401         if (tool->tracing_data == NULL)
402                 tool->tracing_data = process_event_synth_tracing_data_stub;
403         if (tool->build_id == NULL)
404                 tool->build_id = process_event_op2_stub;
405         if (tool->finished_round == NULL) {
406                 if (tool->ordered_events)
407                         tool->finished_round = process_finished_round;
408                 else
409                         tool->finished_round = process_finished_round_stub;
410         }
411         if (tool->id_index == NULL)
412                 tool->id_index = process_event_op2_stub;
413         if (tool->auxtrace_info == NULL)
414                 tool->auxtrace_info = process_event_op2_stub;
415         if (tool->auxtrace == NULL)
416                 tool->auxtrace = process_event_auxtrace_stub;
417         if (tool->auxtrace_error == NULL)
418                 tool->auxtrace_error = process_event_op2_stub;
419         if (tool->thread_map == NULL)
420                 tool->thread_map = process_event_thread_map_stub;
421         if (tool->cpu_map == NULL)
422                 tool->cpu_map = process_event_cpu_map_stub;
423         if (tool->stat_config == NULL)
424                 tool->stat_config = process_event_stat_config_stub;
425         if (tool->stat == NULL)
426                 tool->stat = process_stat_stub;
427         if (tool->stat_round == NULL)
428                 tool->stat_round = process_stat_round_stub;
429         if (tool->time_conv == NULL)
430                 tool->time_conv = process_event_op2_stub;
431         if (tool->feature == NULL)
432                 tool->feature = process_event_op2_stub;
433 }
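/*
 * With the stubs above in place, a tool only needs to set the callbacks
 * it cares about. A hypothetical sample-only tool (my_sample is not part
 * of this file) could look like:
 *
 *	static int my_sample(struct perf_tool *tool, union perf_event *event,
 *			     struct perf_sample *sample, struct perf_evsel *evsel,
 *			     struct machine *machine)
 *	{
 *		printf("ip %#" PRIx64 " pid %d\n", sample->ip, sample->pid);
 *		return 0;
 *	}
 *
 *	struct perf_tool tool = {
 *		.sample		= my_sample,
 *		.ordered_events	= true,
 *	};
 *
 * After perf_tool__fill_defaults(&tool), every other record type is
 * routed to the matching stub and merely logged as unhandled.
 */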
434
435 static void swap_sample_id_all(union perf_event *event, void *data)
436 {
437         void *end = (void *) event + event->header.size;
438         int size = end - data;
439
440         BUG_ON(size % sizeof(u64));
441         mem_bswap_64(data, size);
442 }
443
444 static void perf_event__all64_swap(union perf_event *event,
445                                    bool sample_id_all __maybe_unused)
446 {
447         struct perf_event_header *hdr = &event->header;
448         mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
449 }
450
451 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
452 {
453         event->comm.pid = bswap_32(event->comm.pid);
454         event->comm.tid = bswap_32(event->comm.tid);
455
456         if (sample_id_all) {
457                 void *data = &event->comm.comm;
458
459                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
460                 swap_sample_id_all(event, data);
461         }
462 }
463
464 static void perf_event__mmap_swap(union perf_event *event,
465                                   bool sample_id_all)
466 {
467         event->mmap.pid   = bswap_32(event->mmap.pid);
468         event->mmap.tid   = bswap_32(event->mmap.tid);
469         event->mmap.start = bswap_64(event->mmap.start);
470         event->mmap.len   = bswap_64(event->mmap.len);
471         event->mmap.pgoff = bswap_64(event->mmap.pgoff);
472
473         if (sample_id_all) {
474                 void *data = &event->mmap.filename;
475
476                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
477                 swap_sample_id_all(event, data);
478         }
479 }
480
481 static void perf_event__mmap2_swap(union perf_event *event,
482                                   bool sample_id_all)
483 {
484         event->mmap2.pid   = bswap_32(event->mmap2.pid);
485         event->mmap2.tid   = bswap_32(event->mmap2.tid);
486         event->mmap2.start = bswap_64(event->mmap2.start);
487         event->mmap2.len   = bswap_64(event->mmap2.len);
488         event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
489         event->mmap2.maj   = bswap_32(event->mmap2.maj);
490         event->mmap2.min   = bswap_32(event->mmap2.min);
491         event->mmap2.ino   = bswap_64(event->mmap2.ino);
492
493         if (sample_id_all) {
494                 void *data = &event->mmap2.filename;
495
496                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
497                 swap_sample_id_all(event, data);
498         }
499 }

500 static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
501 {
502         event->fork.pid  = bswap_32(event->fork.pid);
503         event->fork.tid  = bswap_32(event->fork.tid);
504         event->fork.ppid = bswap_32(event->fork.ppid);
505         event->fork.ptid = bswap_32(event->fork.ptid);
506         event->fork.time = bswap_64(event->fork.time);
507
508         if (sample_id_all)
509                 swap_sample_id_all(event, &event->fork + 1);
510 }
511
512 static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
513 {
514         event->read.pid          = bswap_32(event->read.pid);
515         event->read.tid          = bswap_32(event->read.tid);
516         event->read.value        = bswap_64(event->read.value);
517         event->read.time_enabled = bswap_64(event->read.time_enabled);
518         event->read.time_running = bswap_64(event->read.time_running);
519         event->read.id           = bswap_64(event->read.id);
520
521         if (sample_id_all)
522                 swap_sample_id_all(event, &event->read + 1);
523 }
524
525 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
526 {
527         event->aux.aux_offset = bswap_64(event->aux.aux_offset);
528         event->aux.aux_size   = bswap_64(event->aux.aux_size);
529         event->aux.flags      = bswap_64(event->aux.flags);
530
531         if (sample_id_all)
532                 swap_sample_id_all(event, &event->aux + 1);
533 }
534
535 static void perf_event__itrace_start_swap(union perf_event *event,
536                                           bool sample_id_all)
537 {
538         event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
539         event->itrace_start.tid  = bswap_32(event->itrace_start.tid);
540
541         if (sample_id_all)
542                 swap_sample_id_all(event, &event->itrace_start + 1);
543 }
544
545 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
546 {
547         if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
548                 event->context_switch.next_prev_pid =
549                                 bswap_32(event->context_switch.next_prev_pid);
550                 event->context_switch.next_prev_tid =
551                                 bswap_32(event->context_switch.next_prev_tid);
552         }
553
554         if (sample_id_all)
555                 swap_sample_id_all(event, &event->context_switch + 1);
556 }
557
558 static void perf_event__throttle_swap(union perf_event *event,
559                                       bool sample_id_all)
560 {
561         event->throttle.time      = bswap_64(event->throttle.time);
562         event->throttle.id        = bswap_64(event->throttle.id);
563         event->throttle.stream_id = bswap_64(event->throttle.stream_id);
564
565         if (sample_id_all)
566                 swap_sample_id_all(event, &event->throttle + 1);
567 }
568
569 static u8 revbyte(u8 b)
570 {
571         int rev = (b >> 4) | ((b & 0xf) << 4);
572         rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
573         rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
574         return (u8) rev;
575 }
576
577 /*
578  * XXX this is a hack in an attempt to carry the flags bitfield
579  * through the endian village. The ABI says:
580  *
581  * Bit-fields are allocated from right to left (least to most significant)
582  * on little-endian implementations and from left to right (most to least
583  * significant) on big-endian implementations.
584  *
585  * The above seems to be byte specific, so we need to reverse each
586  * byte of the bitfield. 'Internet' also says this might be implementation
587  * specific and we probably need a proper fix: carry the perf_event_attr
588  * bitfield flags in a separate FEAT_ section of the data file. Though this
589  * seems to work for now.
590  */
591 static void swap_bitfield(u8 *p, unsigned len)
592 {
593         unsigned i;
594
595         for (i = 0; i < len; i++) {
596                 *p = revbyte(*p);
597                 p++;
598         }
599 }
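/*
 * A worked example of revbyte(): reversing b = 0x0b (00001011) proceeds
 * in three halving stages:
 *
 *	nibble swap:	00001011 -> 10110000	(0xb0)
 *	pair swap:	10110000 -> 11100000	(0xe0)
 *	bit swap:	11100000 -> 11010000	(0xd0)
 *
 * so revbyte(0x0b) == 0xd0, the mirror image of the input byte. Applied
 * to each byte in turn, swap_bitfield() undoes the right-to-left vs
 * left-to-right allocation difference described above.
 */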
600
601 /* exported for swapping attributes in file header */
602 void perf_event__attr_swap(struct perf_event_attr *attr)
603 {
604         attr->type              = bswap_32(attr->type);
605         attr->size              = bswap_32(attr->size);
606
607 #define bswap_safe(f, n)                                        \
608         (attr->size > (offsetof(struct perf_event_attr, f) +    \
609                        sizeof(attr->f) * (n)))
610 #define bswap_field(f, sz)                      \
611 do {                                            \
612         if (bswap_safe(f, 0))                   \
613                 attr->f = bswap_##sz(attr->f);  \
614 } while(0)
615 #define bswap_field_16(f) bswap_field(f, 16)
616 #define bswap_field_32(f) bswap_field(f, 32)
617 #define bswap_field_64(f) bswap_field(f, 64)
618
619         bswap_field_64(config);
620         bswap_field_64(sample_period);
621         bswap_field_64(sample_type);
622         bswap_field_64(read_format);
623         bswap_field_32(wakeup_events);
624         bswap_field_32(bp_type);
625         bswap_field_64(bp_addr);
626         bswap_field_64(bp_len);
627         bswap_field_64(branch_sample_type);
628         bswap_field_64(sample_regs_user);
629         bswap_field_32(sample_stack_user);
630         bswap_field_32(aux_watermark);
631         bswap_field_16(sample_max_stack);
632
633         /*
634          * The fields after read_format are bitfields. Check read_format
635          * because we cannot use offsetof() on a bitfield.
636          */
637         if (bswap_safe(read_format, 1))
638                 swap_bitfield((u8 *) (&attr->read_format + 1),
639                               sizeof(u64));
640 #undef bswap_field_64
641 #undef bswap_field_32
642 #undef bswap_field
643 #undef bswap_safe
644 }
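/*
 * bswap_safe() is what keeps this compatible with older perf.data files:
 * attr->size is swapped first, and a field is only byte-swapped when the
 * recorded attr is large enough to actually contain it, so bytes past the
 * end of a smaller, older perf_event_attr are never touched.
 * bswap_safe(f, 0) checks field f itself; bswap_safe(read_format, 1)
 * checks the u64 immediately after read_format.
 */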
645
646 static void perf_event__hdr_attr_swap(union perf_event *event,
647                                       bool sample_id_all __maybe_unused)
648 {
649         size_t size;
650
651         perf_event__attr_swap(&event->attr.attr);
652
653         size = event->header.size;
654         size -= (void *)&event->attr.id - (void *)event;
655         mem_bswap_64(event->attr.id, size);
656 }
657
658 static void perf_event__event_update_swap(union perf_event *event,
659                                           bool sample_id_all __maybe_unused)
660 {
661         event->event_update.type = bswap_64(event->event_update.type);
662         event->event_update.id   = bswap_64(event->event_update.id);
663 }
664
665 static void perf_event__event_type_swap(union perf_event *event,
666                                         bool sample_id_all __maybe_unused)
667 {
668         event->event_type.event_type.event_id =
669                 bswap_64(event->event_type.event_type.event_id);
670 }
671
672 static void perf_event__tracing_data_swap(union perf_event *event,
673                                           bool sample_id_all __maybe_unused)
674 {
675         event->tracing_data.size = bswap_32(event->tracing_data.size);
676 }
677
678 static void perf_event__auxtrace_info_swap(union perf_event *event,
679                                            bool sample_id_all __maybe_unused)
680 {
681         size_t size;
682
683         event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
684
685         size = event->header.size;
686         size -= (void *)&event->auxtrace_info.priv - (void *)event;
687         mem_bswap_64(event->auxtrace_info.priv, size);
688 }
689
690 static void perf_event__auxtrace_swap(union perf_event *event,
691                                       bool sample_id_all __maybe_unused)
692 {
693         event->auxtrace.size      = bswap_64(event->auxtrace.size);
694         event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
695         event->auxtrace.reference = bswap_64(event->auxtrace.reference);
696         event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
697         event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
698         event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
699 }
700
701 static void perf_event__auxtrace_error_swap(union perf_event *event,
702                                             bool sample_id_all __maybe_unused)
703 {
704         event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
705         event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
706         event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
707         event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
708         event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
709         event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
710 }
711
712 static void perf_event__thread_map_swap(union perf_event *event,
713                                         bool sample_id_all __maybe_unused)
714 {
715         unsigned i;
716
717         event->thread_map.nr = bswap_64(event->thread_map.nr);
718
719         for (i = 0; i < event->thread_map.nr; i++)
720                 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
721 }
722
723 static void perf_event__cpu_map_swap(union perf_event *event,
724                                      bool sample_id_all __maybe_unused)
725 {
726         struct cpu_map_data *data = &event->cpu_map.data;
727         struct cpu_map_entries *cpus;
728         struct cpu_map_mask *mask;
729         unsigned i;
730
731         data->type = bswap_64(data->type);
732
733         switch (data->type) {
734         case PERF_CPU_MAP__CPUS:
735                 cpus = (struct cpu_map_entries *)data->data;
736
737                 cpus->nr = bswap_16(cpus->nr);
738
739                 for (i = 0; i < cpus->nr; i++)
740                         cpus->cpu[i] = bswap_16(cpus->cpu[i]);
741                 break;
742         case PERF_CPU_MAP__MASK:
743                 mask = (struct cpu_map_mask *) data->data;
744
745                 mask->nr = bswap_16(mask->nr);
746                 mask->long_size = bswap_16(mask->long_size);
747
748                 switch (mask->long_size) {
749                 case 4: mem_bswap_32(&mask->mask, mask->nr); break;
750                 case 8: mem_bswap_64(&mask->mask, mask->nr); break;
751                 default:
752                         pr_err("cpu_map swap: unsupported long size\n");
753                 }
                break;
754         default:
755                 break;
756         }
757 }
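/*
 * The two cpu_map encodings handled above, for a map of CPUs 0 and 2:
 *
 *	PERF_CPU_MAP__CPUS:	cpus->nr = 2, cpus->cpu[] = { 0, 2 }
 *	PERF_CPU_MAP__MASK:	mask->nr longs with bits 0 and 2 set
 *
 * The mask variant is chunked into longs of the recording machine's word
 * size, which is why mask->long_size must be read before choosing between
 * mem_bswap_32() and mem_bswap_64().
 */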
758
759 static void perf_event__stat_config_swap(union perf_event *event,
760                                          bool sample_id_all __maybe_unused)
761 {
762         u64 size;
763
764         size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
765         size += 1; /* nr item itself */
766         mem_bswap_64(&event->stat_config.nr, size);
767 }
768
769 static void perf_event__stat_swap(union perf_event *event,
770                                   bool sample_id_all __maybe_unused)
771 {
772         event->stat.id     = bswap_64(event->stat.id);
773         event->stat.thread = bswap_32(event->stat.thread);
774         event->stat.cpu    = bswap_32(event->stat.cpu);
775         event->stat.val    = bswap_64(event->stat.val);
776         event->stat.ena    = bswap_64(event->stat.ena);
777         event->stat.run    = bswap_64(event->stat.run);
778 }
779
780 static void perf_event__stat_round_swap(union perf_event *event,
781                                         bool sample_id_all __maybe_unused)
782 {
783         event->stat_round.type = bswap_64(event->stat_round.type);
784         event->stat_round.time = bswap_64(event->stat_round.time);
785 }
786
787 typedef void (*perf_event__swap_op)(union perf_event *event,
788                                     bool sample_id_all);
789
790 static perf_event__swap_op perf_event__swap_ops[] = {
791         [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
792         [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
793         [PERF_RECORD_COMM]                = perf_event__comm_swap,
794         [PERF_RECORD_FORK]                = perf_event__task_swap,
795         [PERF_RECORD_EXIT]                = perf_event__task_swap,
796         [PERF_RECORD_LOST]                = perf_event__all64_swap,
797         [PERF_RECORD_READ]                = perf_event__read_swap,
798         [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
799         [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
800         [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
801         [PERF_RECORD_AUX]                 = perf_event__aux_swap,
802         [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
803         [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
804         [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
805         [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
806         [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
807         [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
808         [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
809         [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
810         [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
811         [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
812         [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
813         [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
814         [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
815         [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
816         [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
817         [PERF_RECORD_STAT]                = perf_event__stat_swap,
818         [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
819         [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
820         [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
821         [PERF_RECORD_HEADER_MAX]          = NULL,
822 };
823
824 /*
825  * When perf record finishes a pass over every buffer, it records this
826  * pseudo event.
827  * We record the max timestamp t found in pass n.
828  * Assuming these timestamps are monotonic across cpus, we know that if
829  * a buffer still has events with timestamps below t, they will all be
830  * available and read in pass n + 1.
831  * Hence when we start to read pass n + 2, we can safely flush all
832  * events with timestamps below t.
833  *
834  *    ============ PASS n =================
835  *       CPU 0         |   CPU 1
836  *                     |
837  *    cnt1 timestamps  |   cnt2 timestamps
838  *          1          |         2
839  *          2          |         3
840  *          -          |         4  <--- max recorded
841  *
842  *    ============ PASS n + 1 ==============
843  *       CPU 0         |   CPU 1
844  *                     |
845  *    cnt1 timestamps  |   cnt2 timestamps
846  *          3          |         5
847  *          4          |         6
848  *          5          |         7 <---- max recorded
849  *
850  *      Flush all events below timestamp 4
851  *
852  *    ============ PASS n + 2 ==============
853  *       CPU 0         |   CPU 1
854  *                     |
855  *    cnt1 timestamps  |   cnt2 timestamps
856  *          6          |         8
857  *          7          |         9
858  *          -          |         10
859  *
860  *      Flush all events below timestamp 7
861  *      etc...
862  */
863 static int process_finished_round(struct perf_tool *tool __maybe_unused,
864                                   union perf_event *event __maybe_unused,
865                                   struct ordered_events *oe)
866 {
867         if (dump_trace)
868                 fprintf(stdout, "\n");
869         return ordered_events__flush(oe, OE_FLUSH__ROUND);
870 }
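/*
 * Sketch of how the round machinery is driven from the reader side: every
 * record between two PERF_RECORD_FINISHED_ROUND markers is queued with
 *
 *	perf_session__queue_event(session, event, &sample, file_offset);
 *
 * and each marker delivers everything older than the previous round's max
 * timestamp via ordered_events__flush(oe, OE_FLUSH__ROUND) above. At end
 * of input the remaining queue is drained with OE_FLUSH__FINAL (see
 * ordered-events.h), which applies no timestamp cutoff.
 */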
871
872 int perf_session__queue_event(struct perf_session *s, union perf_event *event,
873                               struct perf_sample *sample, u64 file_offset)
874 {
875         return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
876 }
877
878 static void callchain__lbr_callstack_printf(struct perf_sample *sample)
879 {
880         struct ip_callchain *callchain = sample->callchain;
881         struct branch_stack *lbr_stack = sample->branch_stack;
882         u64 kernel_callchain_nr = callchain->nr;
883         unsigned int i;
884
885         for (i = 0; i < kernel_callchain_nr; i++) {
886                 if (callchain->ips[i] == PERF_CONTEXT_USER)
887                         break;
888         }
889
890         if ((i != kernel_callchain_nr) && lbr_stack->nr) {
891                 u64 total_nr;
892                 /*
893                  * The LBR callstack only covers the user call chain;
894                  * i is the number of kernel call chain entries and
895                  * 1 accounts for PERF_CONTEXT_USER.
896                  *
897                  * The user call chain is stored in LBR registers,
898                  * which come in pairs: the caller is stored in the
899                  * "from" register, while the callee is stored in the
900                  * "to" register.
901                  * For example, given the call stack
902                  * "A"->"B"->"C"->"D",
903                  * the LBR registers will record
904                  * "C"->"D", "B"->"C", "A"->"B".
905                  * So only the first "to" register and all "from"
906                  * registers are needed to reconstruct the whole stack.
907                  */
908                 total_nr = i + 1 + lbr_stack->nr + 1;
909                 kernel_callchain_nr = i + 1;
910
911                 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
912
913                 for (i = 0; i < kernel_callchain_nr; i++)
914                         printf("..... %2d: %016" PRIx64 "\n",
915                                i, callchain->ips[i]);
916
917                 printf("..... %2d: %016" PRIx64 "\n",
918                        (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
919                 for (i = 0; i < lbr_stack->nr; i++)
920                         printf("..... %2d: %016" PRIx64 "\n",
921                                (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
922         }
923 }
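/*
 * A concrete instance of the rule in the comment above: for the call
 * stack "A"->"B"->"C"->"D" the LBR records, most recent branch first,
 *
 *	entries[0]: from C, to D
 *	entries[1]: from B, to C
 *	entries[2]: from A, to B
 *
 * so the user stack printed here is entries[0].to (D) followed by every
 * .from (C, B, A), giving total_nr = kernel entries + the
 * PERF_CONTEXT_USER marker + 1 "to" + lbr_stack->nr "from" lines.
 */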
924
925 static void callchain__printf(struct perf_evsel *evsel,
926                               struct perf_sample *sample)
927 {
928         unsigned int i;
929         struct ip_callchain *callchain = sample->callchain;
930
931         if (perf_evsel__has_branch_callstack(evsel))
932                 callchain__lbr_callstack_printf(sample);
933
934         printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
935
936         for (i = 0; i < callchain->nr; i++)
937                 printf("..... %2d: %016" PRIx64 "\n",
938                        i, callchain->ips[i]);
939 }
940
941 static void branch_stack__printf(struct perf_sample *sample)
942 {
943         uint64_t i;
944
945         printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
946
947         for (i = 0; i < sample->branch_stack->nr; i++) {
948                 struct branch_entry *e = &sample->branch_stack->entries[i];
949
950                 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
951                         i, e->from, e->to,
952                         (unsigned short)e->flags.cycles,
953                         e->flags.mispred ? "M" : " ",
954                         e->flags.predicted ? "P" : " ",
955                         e->flags.abort ? "A" : " ",
956                         e->flags.in_tx ? "T" : " ",
957                         (unsigned)e->flags.reserved);
958         }
959 }
960
961 static void regs_dump__printf(u64 mask, u64 *regs)
962 {
963         unsigned rid, i = 0;
964
965         for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
966                 u64 val = regs[i++];
967
968                 printf(".... %-5s 0x%" PRIx64 "\n",
969                        perf_reg_name(rid), val);
970         }
971 }
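/*
 * The sampled registers are packed: only registers whose bit is set in
 * the mask are present, in ascending bit order. On x86-64, for example,
 * mask 0x5 (bits 0 and 2, i.e. PERF_REG_X86_AX and PERF_REG_X86_CX)
 * means regs[0] holds AX and regs[1] holds CX, which is why the loop
 * above keeps a running index i alongside the register id rid.
 */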
972
973 static const char *regs_abi[] = {
974         [PERF_SAMPLE_REGS_ABI_NONE] = "none",
975         [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
976         [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
977 };
978
979 static inline const char *regs_dump_abi(struct regs_dump *d)
980 {
981         if (d->abi > PERF_SAMPLE_REGS_ABI_64)
982                 return "unknown";
983
984         return regs_abi[d->abi];
985 }
986
987 static void regs__printf(const char *type, struct regs_dump *regs)
988 {
989         u64 mask = regs->mask;
990
991         printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
992                type,
993                mask,
994                regs_dump_abi(regs));
995
996         regs_dump__printf(mask, regs->regs);
997 }
998
999 static void regs_user__printf(struct perf_sample *sample)
1000 {
1001         struct regs_dump *user_regs = &sample->user_regs;
1002
1003         if (user_regs->regs)
1004                 regs__printf("user", user_regs);
1005 }
1006
1007 static void regs_intr__printf(struct perf_sample *sample)
1008 {
1009         struct regs_dump *intr_regs = &sample->intr_regs;
1010
1011         if (intr_regs->regs)
1012                 regs__printf("intr", intr_regs);
1013 }
1014
1015 static void stack_user__printf(struct stack_dump *dump)
1016 {
1017         printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
1018                dump->size, dump->offset);
1019 }
1020
1021 static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
1022                                        union perf_event *event,
1023                                        struct perf_sample *sample)
1024 {
1025         u64 sample_type = __perf_evlist__combined_sample_type(evlist);
1026
1027         if (event->header.type != PERF_RECORD_SAMPLE &&
1028             !perf_evlist__sample_id_all(evlist)) {
1029                 fputs("-1 -1 ", stdout);
1030                 return;
1031         }
1032
1033         if ((sample_type & PERF_SAMPLE_CPU))
1034                 printf("%u ", sample->cpu);
1035
1036         if (sample_type & PERF_SAMPLE_TIME)
1037                 printf("%" PRIu64 " ", sample->time);
1038 }
1039
1040 static void sample_read__printf(struct perf_sample *sample, u64 read_format)
1041 {
1042         printf("... sample_read:\n");
1043
1044         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1045                 printf("...... time enabled %016" PRIx64 "\n",
1046                        sample->read.time_enabled);
1047
1048         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1049                 printf("...... time running %016" PRIx64 "\n",
1050                        sample->read.time_running);
1051
1052         if (read_format & PERF_FORMAT_GROUP) {
1053                 u64 i;
1054
1055                 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
1056
1057                 for (i = 0; i < sample->read.group.nr; i++) {
1058                         struct sample_read_value *value;
1059
1060                         value = &sample->read.group.values[i];
1061                         printf("..... id %016" PRIx64
1062                                ", value %016" PRIx64 "\n",
1063                                value->id, value->value);
1064                 }
1065         } else
1066                 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
1067                         sample->read.one.id, sample->read.one.value);
1068 }
1069
1070 static void dump_event(struct perf_evlist *evlist, union perf_event *event,
1071                        u64 file_offset, struct perf_sample *sample)
1072 {
1073         if (!dump_trace)
1074                 return;
1075
1076         printf("\n%#" PRIx64 " [%#x]: event: %d\n",
1077                file_offset, event->header.size, event->header.type);
1078
1079         trace_event(event);
1080
1081         if (sample)
1082                 perf_evlist__print_tstamp(evlist, event, sample);
1083
1084         printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
1085                event->header.size, perf_event__name(event->header.type));
1086 }
1087
1088 static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
1089                         struct perf_sample *sample)
1090 {
1091         u64 sample_type;
1092
1093         if (!dump_trace)
1094                 return;
1095
1096         printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
1097                event->header.misc, sample->pid, sample->tid, sample->ip,
1098                sample->period, sample->addr);
1099
1100         sample_type = evsel->attr.sample_type;
1101
1102         if (sample_type & PERF_SAMPLE_CALLCHAIN)
1103                 callchain__printf(evsel, sample);
1104
1105         if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
1106                 branch_stack__printf(sample);
1107
1108         if (sample_type & PERF_SAMPLE_REGS_USER)
1109                 regs_user__printf(sample);
1110
1111         if (sample_type & PERF_SAMPLE_REGS_INTR)
1112                 regs_intr__printf(sample);
1113
1114         if (sample_type & PERF_SAMPLE_STACK_USER)
1115                 stack_user__printf(&sample->user_stack);
1116
1117         if (sample_type & PERF_SAMPLE_WEIGHT)
1118                 printf("... weight: %" PRIu64 "\n", sample->weight);
1119
1120         if (sample_type & PERF_SAMPLE_DATA_SRC)
1121                 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
1122
1123         if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1124                 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
1125
1126         if (sample_type & PERF_SAMPLE_TRANSACTION)
1127                 printf("... transaction: %" PRIx64 "\n", sample->transaction);
1128
1129         if (sample_type & PERF_SAMPLE_READ)
1130                 sample_read__printf(sample, evsel->attr.read_format);
1131 }
1132
1133 static void dump_read(struct perf_evsel *evsel, union perf_event *event)
1134 {
1135         struct read_event *read_event = &event->read;
1136         u64 read_format;
1137
1138         if (!dump_trace)
1139                 return;
1140
1141         printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
1142                evsel ? perf_evsel__name(evsel) : "FAIL",
1143                event->read.value);
1144
        if (evsel == NULL)
                return;

1145         read_format = evsel->attr.read_format;
1146
1147         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1148                 printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);
1149
1150         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1151                 printf("... time running : %" PRIu64 "\n", read_event->time_running);
1152
1153         if (read_format & PERF_FORMAT_ID)
1154                 printf("... id           : %" PRIu64 "\n", read_event->id);
1155 }
1156
1157 static struct machine *machines__find_for_cpumode(struct machines *machines,
1158                                                union perf_event *event,
1159                                                struct perf_sample *sample)
1160 {
1161         struct machine *machine;
1162
1163         if (perf_guest &&
1164             ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1165              (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1166                 u32 pid;
1167
1168                 if (event->header.type == PERF_RECORD_MMAP
1169                     || event->header.type == PERF_RECORD_MMAP2)
1170                         pid = event->mmap.pid;
1171                 else
1172                         pid = sample->pid;
1173
1174                 machine = machines__find(machines, pid);
1175                 if (!machine)
1176                         machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
1177                 return machine;
1178         }
1179
1180         return &machines->host;
1181 }
1182
1183 static int deliver_sample_value(struct perf_evlist *evlist,
1184                                 struct perf_tool *tool,
1185                                 union perf_event *event,
1186                                 struct perf_sample *sample,
1187                                 struct sample_read_value *v,
1188                                 struct machine *machine)
1189 {
1190         struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
1191
1192         if (sid) {
1193                 sample->id     = v->id;
1194                 sample->period = v->value - sid->period;
1195                 sid->period    = v->value;
1196         }
1197
1198         if (!sid || sid->evsel == NULL) {
1199                 ++evlist->stats.nr_unknown_id;
1200                 return 0;
1201         }
1202
1203         return tool->sample(tool, event, sample, sid->evsel, machine);
1204 }
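/*
 * PERF_SAMPLE_READ reports raw counter values rather than periods, so
 * sid->period is used to difference successive readings. E.g. if an
 * event id first reads 1000 and later 1800, the two delivered samples
 * carry periods of 1000 (1000 - 0) and 800 (1800 - 1000); sid->period
 * always holds the last raw value seen for that id.
 */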
1205
1206 static int deliver_sample_group(struct perf_evlist *evlist,
1207                                 struct perf_tool *tool,
1208                                 union  perf_event *event,
1209                                 struct perf_sample *sample,
1210                                 struct machine *machine)
1211 {
1212         int ret = -EINVAL;
1213         u64 i;
1214
1215         for (i = 0; i < sample->read.group.nr; i++) {
1216                 ret = deliver_sample_value(evlist, tool, event, sample,
1217                                            &sample->read.group.values[i],
1218                                            machine);
1219                 if (ret)
1220                         break;
1221         }
1222
1223         return ret;
1224 }
1225
1226 static int
1227 perf_evlist__deliver_sample(struct perf_evlist *evlist,
1228                              struct perf_tool *tool,
1229                              union  perf_event *event,
1230                              struct perf_sample *sample,
1231                              struct perf_evsel *evsel,
1232                              struct machine *machine)
1233 {
1234         /* We know evsel != NULL. */
1235         u64 sample_type = evsel->attr.sample_type;
1236         u64 read_format = evsel->attr.read_format;
1237
1238         /* Standard sample delivery. */
1239         if (!(sample_type & PERF_SAMPLE_READ))
1240                 return tool->sample(tool, event, sample, evsel, machine);
1241
1242         /* For PERF_SAMPLE_READ we have either single or group mode. */
1243         if (read_format & PERF_FORMAT_GROUP)
1244                 return deliver_sample_group(evlist, tool, event, sample,
1245                                             machine);
1246         else
1247                 return deliver_sample_value(evlist, tool, event, sample,
1248                                             &sample->read.one, machine);
1249 }
1250
1251 static int machines__deliver_event(struct machines *machines,
1252                                    struct perf_evlist *evlist,
1253                                    union perf_event *event,
1254                                    struct perf_sample *sample,
1255                                    struct perf_tool *tool, u64 file_offset)
1256 {
1257         struct perf_evsel *evsel;
1258         struct machine *machine;
1259
1260         dump_event(evlist, event, file_offset, sample);
1261
1262         evsel = perf_evlist__id2evsel(evlist, sample->id);
1263
1264         machine = machines__find_for_cpumode(machines, event, sample);
1265
1266         switch (event->header.type) {
1267         case PERF_RECORD_SAMPLE:
1268                 if (evsel == NULL) {
1269                         ++evlist->stats.nr_unknown_id;
1270                         return 0;
1271                 }
1272                 dump_sample(evsel, event, sample);
1273                 if (machine == NULL) {
1274                         ++evlist->stats.nr_unprocessable_samples;
1275                         return 0;
1276                 }
1277                 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1278         case PERF_RECORD_MMAP:
1279                 return tool->mmap(tool, event, sample, machine);
1280         case PERF_RECORD_MMAP2:
1281                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1282                         ++evlist->stats.nr_proc_map_timeout;
1283                 return tool->mmap2(tool, event, sample, machine);
1284         case PERF_RECORD_COMM:
1285                 return tool->comm(tool, event, sample, machine);
1286         case PERF_RECORD_NAMESPACES:
1287                 return tool->namespaces(tool, event, sample, machine);
1288         case PERF_RECORD_FORK:
1289                 return tool->fork(tool, event, sample, machine);
1290         case PERF_RECORD_EXIT:
1291                 return tool->exit(tool, event, sample, machine);
1292         case PERF_RECORD_LOST:
1293                 if (tool->lost == perf_event__process_lost)
1294                         evlist->stats.total_lost += event->lost.lost;
1295                 return tool->lost(tool, event, sample, machine);
1296         case PERF_RECORD_LOST_SAMPLES:
1297                 if (tool->lost_samples == perf_event__process_lost_samples)
1298                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1299                 return tool->lost_samples(tool, event, sample, machine);
1300         case PERF_RECORD_READ:
1301                 dump_read(evsel, event);
1302                 return tool->read(tool, event, sample, evsel, machine);
1303         case PERF_RECORD_THROTTLE:
1304                 return tool->throttle(tool, event, sample, machine);
1305         case PERF_RECORD_UNTHROTTLE:
1306                 return tool->unthrottle(tool, event, sample, machine);
1307         case PERF_RECORD_AUX:
1308                 if (tool->aux == perf_event__process_aux) {
1309                         if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1310                                 evlist->stats.total_aux_lost += 1;
1311                         if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1312                                 evlist->stats.total_aux_partial += 1;
1313                 }
1314                 return tool->aux(tool, event, sample, machine);
1315         case PERF_RECORD_ITRACE_START:
1316                 return tool->itrace_start(tool, event, sample, machine);
1317         case PERF_RECORD_SWITCH:
1318         case PERF_RECORD_SWITCH_CPU_WIDE:
1319                 return tool->context_switch(tool, event, sample, machine);
1320         default:
1321                 ++evlist->stats.nr_unknown_events;
1322                 return -1;
1323         }
1324 }
1325
1326 static int perf_session__deliver_event(struct perf_session *session,
1327                                        union perf_event *event,
1328                                        struct perf_sample *sample,
1329                                        struct perf_tool *tool,
1330                                        u64 file_offset)
1331 {
1332         int ret;
1333
1334         ret = auxtrace__process_event(session, event, sample, tool);
1335         if (ret < 0)
1336                 return ret;
1337         if (ret > 0)
1338                 return 0;
1339
1340         return machines__deliver_event(&session->machines, session->evlist,
1341                                        event, sample, tool, file_offset);
1342 }
1343
1344 static s64 perf_session__process_user_event(struct perf_session *session,
1345                                             union perf_event *event,
1346                                             u64 file_offset)
1347 {
1348         struct ordered_events *oe = &session->ordered_events;
1349         struct perf_tool *tool = session->tool;
1350         int fd = perf_data_file__fd(session->file);
1351         int err;
1352
1353         dump_event(session->evlist, event, file_offset, NULL);
1354
1355         /* These events are processed right away */
1356         switch (event->header.type) {
1357         case PERF_RECORD_HEADER_ATTR:
1358                 err = tool->attr(tool, event, &session->evlist);
1359                 if (err == 0) {
1360                         perf_session__set_id_hdr_size(session);
1361                         perf_session__set_comm_exec(session);
1362                 }
1363                 return err;
1364         case PERF_RECORD_EVENT_UPDATE:
1365                 return tool->event_update(tool, event, &session->evlist);
1366         case PERF_RECORD_HEADER_EVENT_TYPE:
1367                 /*
1368                  * Deprecated, but we need to handle it for the sake
1369                  * of old data files created in pipe mode.
1370                  */
1371                 return 0;
1372         case PERF_RECORD_HEADER_TRACING_DATA:
1373                 /* setup for reading amidst mmap */
1374                 lseek(fd, file_offset, SEEK_SET);
1375                 return tool->tracing_data(tool, event, session);
1376         case PERF_RECORD_HEADER_BUILD_ID:
1377                 return tool->build_id(tool, event, session);
1378         case PERF_RECORD_FINISHED_ROUND:
1379                 return tool->finished_round(tool, event, oe);
1380         case PERF_RECORD_ID_INDEX:
1381                 return tool->id_index(tool, event, session);
1382         case PERF_RECORD_AUXTRACE_INFO:
1383                 return tool->auxtrace_info(tool, event, session);
1384         case PERF_RECORD_AUXTRACE:
1385                 /* setup for reading amidst mmap */
1386                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1387                 return tool->auxtrace(tool, event, session);
1388         case PERF_RECORD_AUXTRACE_ERROR:
1389                 perf_session__auxtrace_error_inc(session, event);
1390                 return tool->auxtrace_error(tool, event, session);
1391         case PERF_RECORD_THREAD_MAP:
1392                 return tool->thread_map(tool, event, session);
1393         case PERF_RECORD_CPU_MAP:
1394                 return tool->cpu_map(tool, event, session);
1395         case PERF_RECORD_STAT_CONFIG:
1396                 return tool->stat_config(tool, event, session);
1397         case PERF_RECORD_STAT:
1398                 return tool->stat(tool, event, session);
1399         case PERF_RECORD_STAT_ROUND:
1400                 return tool->stat_round(tool, event, session);
1401         case PERF_RECORD_TIME_CONV:
1402                 session->time_conv = event->time_conv;
1403                 return tool->time_conv(tool, event, session);
1404         case PERF_RECORD_HEADER_FEATURE:
1405                 return tool->feature(tool, event, session);
1406         default:
1407                 return -EINVAL;
1408         }
1409 }
1410
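/*
 * Deliver an event that was synthesized in-process. It never came
 * from the data file, so a file offset of 0 is passed and no byte
 * swapping is needed.
 */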
1411 int perf_session__deliver_synth_event(struct perf_session *session,
1412                                       union perf_event *event,
1413                                       struct perf_sample *sample)
1414 {
1415         struct perf_evlist *evlist = session->evlist;
1416         struct perf_tool *tool = session->tool;
1417
1418         events_stats__inc(&evlist->stats, event->header.type);
1419
1420         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1421                 return perf_session__process_user_event(session, event, 0);
1422
1423         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1424 }
1425
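/*
 * Byte-swap an event recorded on a machine of the opposite
 * endianness. The swap routine is looked up by event type;
 * sample_id_all says whether non-sample events carry a trailing
 * sample ID block that must be swapped as well.
 */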
1426 static void event_swap(union perf_event *event, bool sample_id_all)
1427 {
1428         perf_event__swap_op swap;
1429
1430         swap = perf_event__swap_ops[event->header.type];
1431         if (swap)
1432                 swap(event, sample_id_all);
1433 }
1434
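/*
 * Read a single event at an arbitrary file offset without disturbing
 * the normal processing loop, using the single mmap directly when
 * possible and falling back to lseek()+readn() otherwise. The caller
 * provides scratch space that must be large enough for the whole
 * event. A (hypothetical) caller sketch:
 *
 *	char buf[PERF_SAMPLE_MAX_SIZE];
 *	union perf_event *ev;
 *
 *	if (!perf_session__peek_event(session, offset, buf, sizeof(buf),
 *				      &ev, NULL))
 *		pr_debug("type %u\n", ev->header.type);
 */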
1435 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1436                              void *buf, size_t buf_sz,
1437                              union perf_event **event_ptr,
1438                              struct perf_sample *sample)
1439 {
1440         union perf_event *event;
1441         size_t hdr_sz, rest;
1442         int fd;
1443
1444         if (session->one_mmap && !session->header.needs_swap) {
1445                 event = file_offset - session->one_mmap_offset +
1446                         session->one_mmap_addr;
1447                 goto out_parse_sample;
1448         }
1449
1450         if (perf_data_file__is_pipe(session->file))
1451                 return -1;
1452
1453         fd = perf_data_file__fd(session->file);
1454         hdr_sz = sizeof(struct perf_event_header);
1455
1456         if (buf_sz < hdr_sz)
1457                 return -1;
1458
1459         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1460             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1461                 return -1;
1462
1463         event = (union perf_event *)buf;
1464
1465         if (session->header.needs_swap)
1466                 perf_event_header__bswap(&event->header);
1467
1468         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1469                 return -1;
1470
             buf += hdr_sz;	/* step past the header already read */
1471         rest = event->header.size - hdr_sz;
1472
1473         if (readn(fd, buf, rest) != (ssize_t)rest)
1474                 return -1;
1475
1476         if (session->header.needs_swap)
1477                 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1478
1479 out_parse_sample:
1480
1481         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1482             perf_evlist__parse_sample(session->evlist, event, sample))
1483                 return -1;
1484
1485         *event_ptr = event;
1486
1487         return 0;
1488 }
1489
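/*
 * Main per-event dispatch: swap if the file came from a machine of
 * the opposite endianness, bump the per-type stats, hand user events
 * straight to perf_session__process_user_event(), and parse the
 * sample for kernel events. If the tool asked for ordered delivery,
 * the event is queued by timestamp; -ETIME from the queue means the
 * event has no usable timestamp, so it is delivered directly instead
 * of being dropped.
 */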
1490 static s64 perf_session__process_event(struct perf_session *session,
1491                                        union perf_event *event, u64 file_offset)
1492 {
1493         struct perf_evlist *evlist = session->evlist;
1494         struct perf_tool *tool = session->tool;
1495         struct perf_sample sample;
1496         int ret;
1497
1498         if (session->header.needs_swap)
1499                 event_swap(event, perf_evlist__sample_id_all(evlist));
1500
1501         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1502                 return -EINVAL;
1503
1504         events_stats__inc(&evlist->stats, event->header.type);
1505
1506         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1507                 return perf_session__process_user_event(session, event, file_offset);
1508
1509         /*
1510          * For all kernel events we get the sample data
1511          */
1512         ret = perf_evlist__parse_sample(evlist, event, &sample);
1513         if (ret)
1514                 return ret;
1515
1516         if (tool->ordered_events) {
1517                 ret = perf_session__queue_event(session, event, &sample, file_offset);
1518                 if (ret != -ETIME)
1519                         return ret;
1520         }
1521
1522         return perf_session__deliver_event(session, event, &sample, tool,
1523                                            file_offset);
1524 }
1525
1526 void perf_event_header__bswap(struct perf_event_header *hdr)
1527 {
1528         hdr->type = bswap_32(hdr->type);
1529         hdr->misc = bswap_16(hdr->misc);
1530         hdr->size = bswap_16(hdr->size);
1531 }
1532
1533 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1534 {
1535         return machine__findnew_thread(&session->machines.host, -1, pid);
1536 }
1537
1538 int perf_session__register_idle_thread(struct perf_session *session)
1539 {
1540         struct thread *thread;
1541         int err = 0;
1542
1543         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1544         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1545                 pr_err("problem inserting idle task.\n");
1546                 err = -1;
1547         }
1548
1549         if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1550                 pr_err("problem setting namespaces of the idle task.\n");
1551                 err = -1;
1552         }
1553
1554         /* machine__findnew_thread() got the thread, so put it */
1555         thread__put(thread);
1556         return err;
1557 }
1558
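/*
 * Ring buffers written backward (attr.write_backward) produce events
 * whose timestamps legitimately go backward, so the out-of-order
 * warning is suppressed if any evsel was recorded that way.
 */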
1559 static void
1560 perf_session__warn_order(const struct perf_session *session)
1561 {
1562         const struct ordered_events *oe = &session->ordered_events;
1563         struct perf_evsel *evsel;
1564         bool should_warn = true;
1565
1566         evlist__for_each_entry(session->evlist, evsel) {
1567                 if (evsel->attr.write_backward)
1568                         should_warn = false;
1569         }
1570
1571         if (!should_warn)
1572                 return;
1573         if (oe->nr_unordered_events != 0)
1574                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1575 }
1576
1577 static void perf_session__warn_about_errors(const struct perf_session *session)
1578 {
1579         const struct events_stats *stats = &session->evlist->stats;
1580
1581         if (session->tool->lost == perf_event__process_lost &&
1582             stats->nr_events[PERF_RECORD_LOST] != 0) {
1583                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1584                             "Check IO/CPU overload!\n\n",
1585                             stats->nr_events[0],
1586                             stats->nr_events[PERF_RECORD_LOST]);
1587         }
1588
1589         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1590                 double drop_rate;
1591
1592                 drop_rate = (double)stats->total_lost_samples /
1593                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1594                 if (drop_rate > 0.05) {
1595                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% of them!\n\n",
1596                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1597                                     drop_rate * 100.0);
1598                 }
1599         }
1600
1601         if (session->tool->aux == perf_event__process_aux &&
1602             stats->total_aux_lost != 0) {
1603                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1604                             stats->total_aux_lost,
1605                             stats->nr_events[PERF_RECORD_AUX]);
1606         }
1607
1608         if (session->tool->aux == perf_event__process_aux &&
1609             stats->total_aux_partial != 0) {
1610                 bool vmm_exclusive = false;
1611
1612                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1613                                        &vmm_exclusive);
1614
1615                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1616                             "Are you running a KVM guest in the background?%s\n\n",
1617                             stats->total_aux_partial,
1618                             stats->nr_events[PERF_RECORD_AUX],
1619                             vmm_exclusive ?
1620                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1621                             "will reduce the gaps to only guest's timeslices." :
1622                             "");
1623         }
1624
1625         if (stats->nr_unknown_events != 0) {
1626                 ui__warning("Found %u unknown events!\n\n"
1627                             "Is this an older tool processing a perf.data "
1628                             "file generated by a more recent tool?\n\n"
1629                             "If that is not the case, consider "
1630                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1631                             stats->nr_unknown_events);
1632         }
1633
1634         if (stats->nr_unknown_id != 0) {
1635                 ui__warning("%u samples with id not present in the header\n",
1636                             stats->nr_unknown_id);
1637         }
1638
1639         if (stats->nr_invalid_chains != 0) {
1640                 ui__warning("Found invalid callchains!\n\n"
1641                             "%u out of %u events were discarded for this reason.\n\n"
1642                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1643                             stats->nr_invalid_chains,
1644                             stats->nr_events[PERF_RECORD_SAMPLE]);
1645         }
1646
1647         if (stats->nr_unprocessable_samples != 0) {
1648                 ui__warning("%u unprocessable samples recorded.\n"
1649                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1650                             stats->nr_unprocessable_samples);
1651         }
1652
1653         perf_session__warn_order(session);
1654
1655         events_stats__auxtrace_error_warn(stats);
1656
1657         if (stats->nr_proc_map_timeout != 0) {
1658                 ui__warning("%d map information files for pre-existing threads were\n"
1659                             "not processed. If there are samples for their addresses,\n"
1660                             "they will not be resolved. You can find out which threads\n"
1661                             "these are by running with -v and redirecting the output\n"
1662                             "to a file.\n"
1663                             "Is the time limit to process the proc maps too short?\n"
1664                             "Increase it with --proc-map-timeout.\n",
1665                             stats->nr_proc_map_timeout);
1666         }
1667 }
1668
1669 static int perf_session__flush_thread_stack(struct thread *thread,
1670                                             void *p __maybe_unused)
1671 {
1672         return thread_stack__flush(thread);
1673 }
1674
1675 static int perf_session__flush_thread_stacks(struct perf_session *session)
1676 {
1677         return machines__for_each_thread(&session->machines,
1678                                          perf_session__flush_thread_stack,
1679                                          NULL);
1680 }
1681
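/*
 * Set asynchronously, typically from a signal handler in the perf
 * builtins, to ask the processing loops below to stop after the
 * current event; tested via the session_done() accessor.
 */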
1682 volatile int session_done;
1683
1684 static int __perf_session__process_pipe_events(struct perf_session *session)
1685 {
1686         struct ordered_events *oe = &session->ordered_events;
1687         struct perf_tool *tool = session->tool;
1688         int fd = perf_data_file__fd(session->file);
1689         union perf_event *event;
1690         uint32_t size, cur_size = 0;
1691         void *buf = NULL;
1692         s64 skip = 0;
1693         u64 head;
1694         ssize_t err;
1695         void *p;
1696
1697         perf_tool__fill_defaults(tool);
1698
1699         head = 0;
1700         cur_size = sizeof(union perf_event);
1701
1702         buf = malloc(cur_size);
1703         if (!buf)
1704                 return -errno;
1705         ordered_events__set_copy_on_queue(oe, true);
1706 more:
1707         event = buf;
1708         err = readn(fd, event, sizeof(struct perf_event_header));
1709         if (err <= 0) {
1710                 if (err == 0)
1711                         goto done;
1712
1713                 pr_err("failed to read event header\n");
1714                 goto out_err;
1715         }
1716
1717         if (session->header.needs_swap)
1718                 perf_event_header__bswap(&event->header);
1719
1720         size = event->header.size;
1721         if (size < sizeof(struct perf_event_header)) {
1722                 pr_err("bad event header size\n");
1723                 goto out_err;
1724         }
1725
1726         if (size > cur_size) {
1727                 void *new = realloc(buf, size);
1728                 if (!new) {
1729                         pr_err("failed to allocate memory to read event\n");
1730                         goto out_err;
1731                 }
1732                 buf = new;
1733                 cur_size = size;
1734                 event = buf;
1735         }
1736         p = event;
1737         p += sizeof(struct perf_event_header);
1738
1739         if (size - sizeof(struct perf_event_header)) {
1740                 err = readn(fd, p, size - sizeof(struct perf_event_header));
1741                 if (err <= 0) {
1742                         if (err == 0) {
1743                                 pr_err("unexpected end of event stream\n");
1744                                 goto done;
1745                         }
1746
1747                         pr_err("failed to read event data\n");
1748                         goto out_err;
1749                 }
1750         }
1751
1752         if ((skip = perf_session__process_event(session, event, head)) < 0) {
1753                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1754                        head, event->header.size, event->header.type);
1755                 err = -EINVAL;
1756                 goto out_err;
1757         }
1758
1759         head += size;
1760
1761         if (skip > 0)
1762                 head += skip;
1763
1764         if (!session_done())
1765                 goto more;
1766 done:
1767         /* do the final flush for ordered samples */
1768         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1769         if (err)
1770                 goto out_err;
1771         err = auxtrace__flush_events(session, tool);
1772         if (err)
1773                 goto out_err;
1774         err = perf_session__flush_thread_stacks(session);
1775 out_err:
1776         free(buf);
1777         perf_session__warn_about_errors(session);
1778         ordered_events__free(&session->ordered_events);
1779         auxtrace__free_events(session);
1780         return err;
1781 }
1782
1783 static union perf_event *
1784 fetch_mmaped_event(struct perf_session *session,
1785                    u64 head, size_t mmap_size, char *buf)
1786 {
1787         union perf_event *event;
1788
1789         /*
1790          * Ensure we have enough space remaining to read
1791          * the size of the event in the headers.
1792          */
1793         if (head + sizeof(event->header) > mmap_size)
1794                 return NULL;
1795
1796         event = (union perf_event *)(buf + head);
1797
1798         if (session->header.needs_swap)
1799                 perf_event_header__bswap(&event->header);
1800
1801         if (head + event->header.size > mmap_size) {
1802                 /* We're not fetching the event so swap back again */
1803                 if (session->header.needs_swap)
1804                         perf_event_header__bswap(&event->header);
1805                 return NULL;
1806         }
1807
1808         return event;
1809 }
1810
1811 /*
1812  * On 64bit we can mmap the data file in one go. No need for tiny mmap
1813  * slices. On 32bit we use 32MB.
1814  */
1815 #if BITS_PER_LONG == 64
1816 #define MMAP_SIZE ULLONG_MAX
1817 #define NUM_MMAPS 1
1818 #else
1819 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1820 #define NUM_MMAPS 128
1821 #endif
1822
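/*
 * A worked example of the 32-bit windowing below (numbers are only
 * illustrative): with 32MB windows, once "head" walks past an event
 * that ends beyond the current window, fetch_mmaped_event() returns
 * NULL, the old window is unmapped, and file_offset is advanced by
 * "head" rounded down to a page boundary, e.g. head = 0x2000123 gives
 * file_offset += 0x2000000 and head = 0x123, so the next mmap starts
 * page-aligned with the partial event near its beginning.
 */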
1823 static int __perf_session__process_events(struct perf_session *session,
1824                                           u64 data_offset, u64 data_size,
1825                                           u64 file_size)
1826 {
1827         struct ordered_events *oe = &session->ordered_events;
1828         struct perf_tool *tool = session->tool;
1829         int fd = perf_data_file__fd(session->file);
1830         u64 head, page_offset, file_offset, file_pos, size;
1831         int err, mmap_prot, mmap_flags, map_idx = 0;
1832         size_t  mmap_size;
1833         char *buf, *mmaps[NUM_MMAPS];
1834         union perf_event *event;
1835         struct ui_progress prog;
1836         s64 skip;
1837
1838         perf_tool__fill_defaults(tool);
1839
1840         page_offset = page_size * (data_offset / page_size);
1841         file_offset = page_offset;
1842         head = data_offset - page_offset;
1843
1844         if (data_size == 0)
1845                 goto out;
1846
1847         if (data_offset + data_size < file_size)
1848                 file_size = data_offset + data_size;
1849
1850         ui_progress__init(&prog, file_size, "Processing events...");
1851
1852         mmap_size = MMAP_SIZE;
1853         if (mmap_size > file_size) {
1854                 mmap_size = file_size;
1855                 session->one_mmap = true;
1856         }
1857
1858         memset(mmaps, 0, sizeof(mmaps));
1859
1860         mmap_prot  = PROT_READ;
1861         mmap_flags = MAP_SHARED;
1862
1863         if (session->header.needs_swap) {
1864                 mmap_prot  |= PROT_WRITE;
1865                 mmap_flags = MAP_PRIVATE;
1866         }
1867 remap:
1868         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
1869                    file_offset);
1870         if (buf == MAP_FAILED) {
1871                 pr_err("failed to mmap file\n");
1872                 err = -errno;
1873                 goto out_err;
1874         }
1875         mmaps[map_idx] = buf;
1876         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1877         file_pos = file_offset + head;
1878         if (session->one_mmap) {
1879                 session->one_mmap_addr = buf;
1880                 session->one_mmap_offset = file_offset;
1881         }
1882
1883 more:
1884         event = fetch_mmaped_event(session, head, mmap_size, buf);
1885         if (!event) {
1886                 if (mmaps[map_idx]) {
1887                         munmap(mmaps[map_idx], mmap_size);
1888                         mmaps[map_idx] = NULL;
1889                 }
1890
1891                 page_offset = page_size * (head / page_size);
1892                 file_offset += page_offset;
1893                 head -= page_offset;
1894                 goto remap;
1895         }
1896
1897         size = event->header.size;
1898
1899         if (size < sizeof(struct perf_event_header) ||
1900             (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1901                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1902                        file_offset + head, event->header.size,
1903                        event->header.type);
1904                 err = -EINVAL;
1905                 goto out_err;
1906         }
1907
1908         if (skip)
1909                 size += skip;
1910
1911         head += size;
1912         file_pos += size;
1913
1914         ui_progress__update(&prog, size);
1915
1916         if (session_done())
1917                 goto out;
1918
1919         if (file_pos < file_size)
1920                 goto more;
1921
1922 out:
1923         /* do the final flush for ordered samples */
1924         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1925         if (err)
1926                 goto out_err;
1927         err = auxtrace__flush_events(session, tool);
1928         if (err)
1929                 goto out_err;
1930         err = perf_session__flush_thread_stacks(session);
1931 out_err:
1932         ui_progress__finish();
1933         perf_session__warn_about_errors(session);
1934         /*
1935          * We may be switching perf.data output; make ordered_events
1936          * reusable.
1937          */
1938         ordered_events__reinit(&session->ordered_events);
1939         auxtrace__free_events(session);
1940         session->one_mmap = false;
1941         return err;
1942 }
1943
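/*
 * Top-level entry point used by the builtins after creating a
 * session. A (hypothetical) caller sketch:
 *
 *	struct perf_session *session = perf_session__new(&file, false,
 *							 &tool);
 *
 *	if (session)
 *		err = perf_session__process_events(session);
 *
 * Pipe input is processed with a growing read buffer, regular files
 * via the mmap loop above.
 */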
1944 int perf_session__process_events(struct perf_session *session)
1945 {
1946         u64 size = perf_data_file__size(session->file);
1947         int err;
1948
1949         if (perf_session__register_idle_thread(session) < 0)
1950                 return -ENOMEM;
1951
1952         if (!perf_data_file__is_pipe(session->file))
1953                 err = __perf_session__process_events(session,
1954                                                      session->header.data_offset,
1955                                                      session->header.data_size, size);
1956         else
1957                 err = __perf_session__process_pipe_events(session);
1958
1959         return err;
1960 }
1961
1962 bool perf_session__has_traces(struct perf_session *session, const char *msg)
1963 {
1964         struct perf_evsel *evsel;
1965
1966         evlist__for_each_entry(session->evlist, evsel) {
1967                 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
1968                         return true;
1969         }
1970
1971         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
1972         return false;
1973 }
1974
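/*
 * Record a reference symbol (e.g. "_text") and the address kallsyms
 * reported for it in every map type's kmap, so that kernel maps can
 * later be relocated if the addresses in the perf.data file do not
 * match the running kernel. The name is truncated at the first ']',
 * if any.
 */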
1975 int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1976                                      const char *symbol_name, u64 addr)
1977 {
1978         char *bracket;
1979         int i;
1980         struct ref_reloc_sym *ref;
1981
1982         ref = zalloc(sizeof(struct ref_reloc_sym));
1983         if (ref == NULL)
1984                 return -ENOMEM;
1985
1986         ref->name = strdup(symbol_name);
1987         if (ref->name == NULL) {
1988                 free(ref);
1989                 return -ENOMEM;
1990         }
1991
1992         bracket = strchr(ref->name, ']');
1993         if (bracket)
1994                 *bracket = '\0';
1995
1996         ref->addr = addr;
1997
1998         for (i = 0; i < MAP__NR_TYPES; ++i) {
1999                 struct kmap *kmap = map__kmap(maps[i]);
2000
2001                 if (!kmap)
2002                         continue;
2003                 kmap->ref_reloc_sym = ref;
2004         }
2005
2006         return 0;
2007 }
2008
2009 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2010 {
2011         return machines__fprintf_dsos(&session->machines, fp);
2012 }
2013
2014 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2015                                           bool (skip)(struct dso *dso, int parm), int parm)
2016 {
2017         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2018 }
2019
2020 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2021 {
2022         size_t ret;
2023         const char *msg = "";
2024
2025         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2026                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2027
2028         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2029
2030         ret += events_stats__fprintf(&session->evlist->stats, fp);
2031         return ret;
2032 }
2033
2034 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2035 {
2036         /*
2037          * FIXME: Here we have to actually print all the machines in this
2038          * session, not just the host...
2039          */
2040         return machine__fprintf(&session->machines.host, fp);
2041 }
2042
2043 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
2044                                               unsigned int type)
2045 {
2046         struct perf_evsel *pos;
2047
2048         evlist__for_each_entry(session->evlist, pos) {
2049                 if (pos->attr.type == type)
2050                         return pos;
2051         }
2052         return NULL;
2053 }
2054
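/*
 * Build a bitmap of the CPUs named in cpu_list (the -C option),
 * first checking that every event type present in the file actually
 * sampled PERF_SAMPLE_CPU, since filtering by CPU is meaningless
 * otherwise. The caller's cpu_bitmap must have room for MAX_NR_CPUS
 * bits.
 */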
2055 int perf_session__cpu_bitmap(struct perf_session *session,
2056                              const char *cpu_list, unsigned long *cpu_bitmap)
2057 {
2058         int i, err = -1;
2059         struct cpu_map *map;
2060
2061         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2062                 struct perf_evsel *evsel;
2063
2064                 evsel = perf_session__find_first_evtype(session, i);
2065                 if (!evsel)
2066                         continue;
2067
2068                 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
2069                         pr_err("File does not contain CPU events. "
2070                                "Remove -C option to proceed.\n");
2071                         return -1;
2072                 }
2073         }
2074
2075         map = cpu_map__new(cpu_list);
2076         if (map == NULL) {
2077                 pr_err("Invalid cpu_list\n");
2078                 return -1;
2079         }
2080
2081         for (i = 0; i < map->nr; i++) {
2082                 int cpu = map->map[i];
2083
2084                 if (cpu >= MAX_NR_CPUS) {
2085                         pr_err("Requested CPU %d too large. "
2086                                "Consider raising MAX_NR_CPUS\n", cpu);
2087                         goto out_delete_map;
2088                 }
2089
2090                 set_bit(cpu, cpu_bitmap);
2091         }
2092
2093         err = 0;
2094
2095 out_delete_map:
2096         cpu_map__put(map);
2097         return err;
2098 }
2099
2100 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2101                                 bool full)
2102 {
2103         if (session == NULL || fp == NULL)
2104                 return;
2105
2106         fprintf(fp, "# ========\n");
2107         perf_header__fprintf_info(session, fp, full);
2108         fprintf(fp, "# ========\n#\n");
2109 }
2110
2111
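/*
 * Attach handler callbacks to tracepoint evsels by name. A
 * (hypothetical) caller sketch:
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch, },
 *	};
 *
 *	err = __perf_session__set_tracepoints_handlers(session, handlers,
 *						       ARRAY_SIZE(handlers));
 *
 * Names not found in the session are silently skipped; an evsel that
 * already has a handler makes the whole call fail with -EEXIST.
 */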
2112 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2113                                              const struct perf_evsel_str_handler *assocs,
2114                                              size_t nr_assocs)
2115 {
2116         struct perf_evsel *evsel;
2117         size_t i;
2118         int err;
2119
2120         for (i = 0; i < nr_assocs; i++) {
2121                 /*
2122                  * Adding a handler for an event not in the session,
2123                  * just ignore it.
2124                  */
2125                 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2126                 if (evsel == NULL)
2127                         continue;
2128
2129                 err = -EEXIST;
2130                 if (evsel->handler != NULL)
2131                         goto out;
2132                 evsel->handler = assocs[i].handler;
2133         }
2134
2135         err = 0;
2136 out:
2137         return err;
2138 }
2139
2140 int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
2141                                  union perf_event *event,
2142                                  struct perf_session *session)
2143 {
2144         struct perf_evlist *evlist = session->evlist;
2145         struct id_index_event *ie = &event->id_index;
2146         size_t i, nr, max_nr;
2147
2148         max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2149                  sizeof(struct id_index_entry);
2150         nr = ie->nr;
2151         if (nr > max_nr)
2152                 return -EINVAL;
2153
2154         if (dump_trace)
2155                 fprintf(stdout, " nr: %zu\n", nr);
2156
2157         for (i = 0; i < nr; i++) {
2158                 struct id_index_entry *e = &ie->entries[i];
2159                 struct perf_sample_id *sid;
2160
2161                 if (dump_trace) {
2162                         fprintf(stdout, " ... id: %"PRIu64, e->id);
2163                         fprintf(stdout, "  idx: %"PRIu64, e->idx);
2164                         fprintf(stdout, "  cpu: %"PRId64, e->cpu);
2165                         fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
2166                 }
2167
2168                 sid = perf_evlist__id2sid(evlist, e->id);
2169                 if (!sid)
2170                         return -ENOENT;
2171                 sid->idx = e->idx;
2172                 sid->cpu = e->cpu;
2173                 sid->tid = e->tid;
2174         }
2175         return 0;
2176 }
2177
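/*
 * Synthesize PERF_RECORD_ID_INDEX events mapping each sample id to
 * its idx/cpu/tid. Because the event size field is a u16, at most
 * max_nr entries fit in one event, so the ids are emitted in batches
 * through process() and the final, possibly shorter, batch is sized
 * accordingly.
 */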
2178 int perf_event__synthesize_id_index(struct perf_tool *tool,
2179                                     perf_event__handler_t process,
2180                                     struct perf_evlist *evlist,
2181                                     struct machine *machine)
2182 {
2183         union perf_event *ev;
2184         struct perf_evsel *evsel;
2185         size_t nr = 0, i = 0, sz, max_nr, n;
2186         int err;
2187
2188         pr_debug2("Synthesizing id index\n");
2189
2190         max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2191                  sizeof(struct id_index_entry);
2192
2193         evlist__for_each_entry(evlist, evsel)
2194                 nr += evsel->ids;
2195
2196         n = nr > max_nr ? max_nr : nr;
2197         sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2198         ev = zalloc(sz);
2199         if (!ev)
2200                 return -ENOMEM;
2201
2202         ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2203         ev->id_index.header.size = sz;
2204         ev->id_index.nr = n;
2205
2206         evlist__for_each_entry(evlist, evsel) {
2207                 u32 j;
2208
2209                 for (j = 0; j < evsel->ids; j++) {
2210                         struct id_index_entry *e;
2211                         struct perf_sample_id *sid;
2212
2213                         if (i >= n) {
2214                                 err = process(tool, ev, NULL, machine);
2215                                 if (err)
2216                                         goto out_err;
2217                                 nr -= n;
2218                                 i = 0;
2219                         }
2220
2221                         e = &ev->id_index.entries[i++];
2222
2223                         e->id = evsel->id[j];
2224
2225                         sid = perf_evlist__id2sid(evlist, e->id);
2226                         if (!sid) {
2227                                 free(ev);
2228                                 return -ENOENT;
2229                         }
2230
2231                         e->idx = sid->idx;
2232                         e->cpu = sid->cpu;
2233                         e->tid = sid->tid;
2234                 }
2235         }
2236
2237         sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2238         ev->id_index.header.size = sz;
2239         ev->id_index.nr = nr;
2240
2241         err = process(tool, ev, NULL, machine);
2242 out_err:
2243         free(ev);
2244
2245         return err;
2246 }