/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/types.h>

#include "thread-stack.h"
#include "callchain.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct perf_evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;
	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;
	struct itrace_synth_opts synth_opts;
	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;
	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;
	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;
	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;
	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;
	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u64 cyc_bit;
	u64 noretcomp_bit;
	u64 tsc_ctc_ratio_n;
	u64 tsc_ctc_ratio_d;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;
	unsigned long num_events;
	char *filter;
	struct addr_filters filts;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	struct intel_pt_decoder *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	pid_t pid, tid;
	int cpu;
	enum switch_state switch_state;
	pid_t next_tid;
	struct thread *thread;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}

static void intel_pt_log_event(union perf_event *event)
{
	FILE *f = intel_pt_log_fp();

	if (!intel_pt_enable_logging || !f)
		return;

	perf_event__fprintf(event, f);
}

static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive);
	if (!start)
		return -EINVAL;

	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}

/* This function assumes data is processed sequentially only */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	bool might_overlap;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}

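/*
 * Illustrative note (not in the original source): in snapshot or sampling
 * mode, consecutive AUX buffers can contain overlapping trace, so before
 * buffer data is handed to the decoder the overlap is trimmed off via
 * intel_pt_do_fix_overlap().  When data is not consecutive, a new trace_nr
 * signals the decoder to re-synchronize at the next PSB packet instead of
 * assuming the byte stream continues where the last buffer ended.
 */
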
struct intel_pt_cache_entry {
	struct auxtrace_cache_entry	entry;
	u64				insn_cnt;
	u64				byte_cnt;
	enum intel_pt_insn_op		op;
	enum intel_pt_insn_branch	branch;
	int				length;
	int32_t				rel;
	char				insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}
	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}

static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}

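/*
 * Illustrative example (not in the original source): 32 - __builtin_clz(size)
 * is floor(log2(size)) + 1, so the cache gets roughly twice size/divisor
 * buckets, rounded to a power of two and clamped to 2^10..2^21.  E.g. a
 * 32 MiB DSO with the default divisor of 64 gives size = 512 KiB = 2^19,
 * hence 20 bits and 2^20 hash buckets.
 */
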
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
{
	return ip >= pt->kernel_start ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}

static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;

	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	cpumode = intel_pt_cpumode(ptq->pt, *ip);

	thread = ptq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = ptq->pt->unknown_thread;
	}

	while (1) {
		if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn,
				       INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip)
				goto out_no_cache;

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}

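/*
 * Illustrative note (not in the original source): the decoder uses this
 * callback to "execute" instructions from *ip until the next branch, or
 * until to_ip or max_insn_cnt is reached, reading instruction bytes from
 * the DSO backing each map.  Spans that end in a branch within a single
 * map are memoized in the per-dso auxtrace cache, so re-walking hot code
 * costs a hash lookup instead of repeated instruction decode.
 */
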
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter   = false;
	bool hit_tracestop = false;
	bool hit_filter    = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}

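/*
 * Illustrative note (not in the original source): a true return means the
 * TIP.PGD address falls outside the user's address filters, i.e. it either
 * landed in a trace-stop region, or filter regions exist and none matched.
 * The decoder uses this to decide whether a TIP.PGD packet marks code that
 * was filtered out of the trace.
 */
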
static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	if (ip >= ptq->pt->kernel_start)
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
		return -EINVAL;

	offset = al.map->map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
				     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}

static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & 1) && !(config & 0x2000))
			return true;
	}
	return false;
}

static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem  = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
		pt->tc.time_mult;
}

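/*
 * Illustrative note (not in the original source): this inverts the
 * kernel's TSC-to-nanosecond conversion, ns = (ticks * time_mult) >>
 * time_shift, so ticks = (ns << time_shift) / time_mult.  Splitting ns
 * into quotient and remainder first keeps the intermediate left shift
 * from overflowing 64 bits.  E.g. with time_shift = 31 and time_mult =
 * 715827883 (a ~3 GHz TSC), intel_pt_ns_to_ticks(1000000000) returns
 * roughly 3000000000 ticks.
 */
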
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct perf_env *env = pt->machine->env;
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		size_t sz = sizeof(struct ip_callchain);

		/* Add 1 to callchain_sz for callchain context */
		sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
		ptq->chain = zalloc(sz);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += pt->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		ptq->last_branch = zalloc(sz);
		if (!ptq->last_branch)
			goto out_free;
		ptq->last_branch_rb = zalloc(sz);
		if (!ptq->last_branch_rb)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

	if (pt->filts.cnt > 0)
		params.pgd_ip = intel_pt_pgd_ip;

	if (pt->synth_opts.instructions) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
						INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
		params.flags |= INTEL_PT_FUP_WITH_NLIP;

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}

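/*
 * Illustrative note (not in the original source): the period setup above
 * is driven by the --itrace option, e.g. "perf script --itrace=i100ns"
 * requests one synthesized instruction sample per 100ns.  Nanosecond
 * periods are converted with intel_pt_ns_to_ticks() because the decoder
 * counts time in TSC ticks, not nanoseconds.
 */
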
static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
}

static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}

static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		ptq->insn_len = 0;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}

	if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
		ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
	if (ptq->state->type & INTEL_PT_TRACE_END)
		ptq->flags |= PERF_IP_FLAG_TRACE_END;
}

static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		if (pt->sampling_mode && !pt->snapshot_mode &&
		    pt->timeless_decoding)
			ptq->step_through_buffers = true;

		ptq->sync_switch = pt->sync_switch;
	}

	if (!ptq->on_heap &&
	    (!ptq->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}

static int intel_pt_setup_queues(struct intel_pt *pt)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
		if (ret)
			return ret;
	}
	return 0;
}

static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
	struct branch_stack *bs_src = ptq->last_branch_rb;
	struct branch_stack *bs_dst = ptq->last_branch;
	size_t nr = 0;

	bs_dst->nr = bs_src->nr;

	if (!bs_src->nr)
		return;

	nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[ptq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * ptq->last_branch_pos);
	}
}

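/*
 * Illustrative example (not in the original source): last_branch_rb is a
 * ring buffer filled backwards, with the newest entry at last_branch_pos,
 * so the copy above unrolls it into newest-first order.  E.g. with
 * last_branch_sz = 4, last_branch_pos = 3 and a full ring
 * [b4, b3, b2, b5] (b5 newest, at index 3), the first memcpy copies
 * index 3 and the second indexes 0..2, yielding [b5, b4, b3, b2], the
 * order expected of a PERF_SAMPLE_BRANCH_STACK payload.
 */
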
static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
{
	ptq->last_branch_pos = 0;
	ptq->last_branch_rb->nr = 0;
}

static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct branch_stack *bs = ptq->last_branch_rb;
	struct branch_entry *be;

	if (!ptq->last_branch_pos)
		ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

	ptq->last_branch_pos -= 1;

	be = &bs->entries[ptq->last_branch_pos];
	be->from = state->from_ip;
	be->to = state->to_ip;
	be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
	be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
	/* No support for mispredict */
	be->flags.mispred = ptq->pt->mispred_all;

	if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
		bs->nr += 1;
}

static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events++ < pt->synth_opts.initial_skip;
}

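/*
 * Illustrative note (not in the original source): initial_skip comes from
 * the --itrace "s" option, e.g. "perf script --itrace=i0nss1000000" skips
 * synthesizing the first million events, which helps get past startup
 * noise in a large trace.
 */
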
static void intel_pt_prep_b_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	if (!pt->timeless_decoding)
		sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample->ip = ptq->state->from_ip;
	sample->cpumode = intel_pt_cpumode(pt, sample->ip);
	sample->pid = ptq->pid;
	sample->tid = ptq->tid;
	sample->addr = ptq->state->to_ip;
	sample->period = 1;
	sample->cpu = ptq->cpu;
	sample->flags = ptq->flags;
	sample->insn_len = ptq->insn_len;
	memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = sample->cpumode;
	event->sample.header.size = sizeof(struct perf_event_header);
}

static int intel_pt_inject_event(union perf_event *event,
				 struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

static inline int intel_pt_opt_inject(struct intel_pt *pt,
				      union perf_event *event,
				      struct perf_sample *sample, u64 type)
{
	if (!pt->synth_opts.inject)
		return 0;

	return intel_pt_inject_event(event, sample, type);
}

static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
					  union perf_event *event,
					  struct perf_sample *sample, u64 type)
{
	int ret;

	ret = intel_pt_opt_inject(pt, event, sample, type);
	if (ret)
		return ret;

	ret = perf_session__deliver_synth_event(pt->session, event, sample);
	if (ret)
		pr_err("Intel PT: failed to deliver event, error %d\n", ret);

	return ret;
}

static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct dummy_branch_stack {
		u64			nr;
		struct branch_entry	entries;
	} dummy_bs;

	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
		return 0;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_b_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->branches_id;
	sample.stream_id = ptq->pt->branches_id;

	/*
	 * perf report cannot handle events without a branch stack when using
	 * SORT_MODE__BRANCH so make a dummy one.
	 */
	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	return intel_pt_deliver_synth_b_event(pt, event, &sample,
					      pt->branches_sample_type);
}

static void intel_pt_prep_sample(struct intel_pt *pt,
				 struct intel_pt_queue *ptq,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	intel_pt_prep_b_sample(pt, ptq, event, sample);

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
				     pt->synth_opts.callchain_sz + 1,
				     sample->ip, pt->kernel_start);
		sample->callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		intel_pt_copy_last_branch_rb(ptq);
		sample->branch_stack = ptq->last_branch;
	}
}

static inline int intel_pt_deliver_synth_event(struct intel_pt *pt,
					       struct intel_pt_queue *ptq,
					       union perf_event *event,
					       struct perf_sample *sample,
					       u64 type)
{
	int ret;

	ret = intel_pt_deliver_synth_b_event(pt, event, sample, type);

	if (pt->synth_opts.last_branch)
		intel_pt_reset_last_branch_rb(ptq);

	return ret;
}

static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->instructions_id;
	sample.stream_id = ptq->pt->instructions_id;
	sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;

	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->instructions_sample_type);
}

static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->transactions_id;
	sample.stream_id = ptq->pt->transactions_id;

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->transactions_sample_type);
}

static void intel_pt_prep_p_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	intel_pt_prep_sample(pt, ptq, event, sample);

	/*
	 * Zero IP is used to mean "trace start" but that is not the case for
	 * power or PTWRITE events with no IP, so clear the flags.
	 */
	if (!sample->ip)
		sample->flags = 0;
}

static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_ptwrite raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->ptwrites_id;
	sample.stream_id = ptq->pt->ptwrites_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
	raw.payload = cpu_to_le64(ptq->state->ptw_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->ptwrites_sample_type);
}

static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_cbr raw;
	u32 flags;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->cbr_id;
	sample.stream_id = ptq->pt->cbr_id;

	flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
	raw.flags = cpu_to_le32(flags);
	raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
	raw.reserved3 = 0;

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_mwait raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->mwait_id;
	sample.stream_id = ptq->pt->mwait_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->mwait_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwre raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwre_id;
	sample.stream_id = ptq->pt->pwre_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwre_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_exstop raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->exstop_id;
	sample.stream_id = ptq->pt->exstop_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwrx raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwrx_id;
	sample.stream_id = ptq->pt->pwrx_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwrx_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
				pid_t pid, pid_t tid, u64 ip)
{
	union perf_event event;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	int err;

	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     code, cpu, pid, tid, ip, msg);

	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);

	return err;
}

static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}

static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
	struct intel_pt *pt = ptq->pt;

	return ip == pt->switch_ip &&
	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}

#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
			  INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT | \
			  INTEL_PT_CBR_CHG)

static int intel_pt_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!ptq->have_sample)
		return 0;

	ptq->have_sample = false;

	if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) {
		if (state->type & INTEL_PT_CBR_CHG) {
			err = intel_pt_synth_cbr_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_MWAIT_OP) {
			err = intel_pt_synth_mwait_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_PWR_ENTRY) {
			err = intel_pt_synth_pwre_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_EX_STOP) {
			err = intel_pt_synth_exstop_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_PWR_EXIT) {
			err = intel_pt_synth_pwrx_sample(ptq);
			if (err)
				return err;
		}
	}

	if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
		err = intel_pt_synth_instruction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
		err = intel_pt_synth_transaction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
		err = intel_pt_synth_ptwrite_sample(ptq);
		if (err)
			return err;
	}

	if (!(state->type & INTEL_PT_BRANCH))
		return 0;

	if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
		thread_stack__event(ptq->thread, ptq->cpu, ptq->flags, state->from_ip,
				    state->to_ip, ptq->insn_len,
				    state->trace_nr);
	else
		thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);

	if (pt->sample_branches) {
		err = intel_pt_synth_branch_sample(ptq);
		if (err)
			return err;
	}

	if (pt->synth_opts.last_branch)
		intel_pt_update_last_branch_rb(ptq);

	if (!ptq->sync_switch)
		return 0;

	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
		switch (ptq->switch_state) {
		case INTEL_PT_SS_NOT_TRACING:
		case INTEL_PT_SS_UNKNOWN:
		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
			err = intel_pt_next_tid(pt, ptq);
			if (err)
				return err;
			ptq->switch_state = INTEL_PT_SS_TRACING;
			break;
		default:
			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
			break;
		}
	} else if (!state->to_ip) {
		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
		   state->to_ip == pt->ptss_ip &&
		   (ptq->flags & PERF_IP_FLAG_CALL)) {
		ptq->switch_state = INTEL_PT_SS_TRACING;
	}

	return 0;
}

static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
	struct machine *machine = pt->machine;
	struct map *map;
	struct symbol *sym, *start;
	u64 ip, switch_ip = 0;
	const char *ptss;

	if (ptss_ip)
		*ptss_ip = 0;

	map = machine__kernel_map(machine);
	if (!map)
		return 0;

	if (map__load(map))
		return 0;

	start = dso__first_symbol(map->dso);

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding == STB_GLOBAL &&
		    !strcmp(sym->name, "__switch_to")) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				switch_ip = ip;
				break;
			}
		}
	}

	if (!switch_ip || !ptss_ip)
		return 0;

	if (pt->have_sched_switch == 1)
		ptss = "perf_trace_sched_switch";
	else
		ptss = "__perf_event_task_sched_out";

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (!strcmp(sym->name, ptss)) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				*ptss_ip = ip;
				break;
			}
		}
	}

	return switch_ip;
}

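/*
 * Illustrative note (not in the original source): "sync switch" ties
 * context-switch events to the exact point the trace enters __switch_to.
 * intel_pt_switch_ip() resolves the kernel address of __switch_to (the
 * switch ip) and of the tracepoint or callback that emits the switch
 * event (the ptss ip); intel_pt_sample() then defers moving a queue to
 * the next tid until the decoder actually reaches the switch ip, so that
 * samples are attributed to the correct thread on both sides of a
 * context switch.
 */
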
static void intel_pt_enable_sync_switch(struct intel_pt *pt)
{
	unsigned int i;

	pt->sync_switch = true;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq)
			ptq->sync_switch = true;
	}
}

static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!pt->kernel_start) {
		pt->kernel_start = machine__kernel_start(pt->machine);
		if (pt->per_cpu_mmaps &&
		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
		    !pt->sampling_mode) {
			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
			if (pt->switch_ip) {
				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
					     pt->switch_ip, pt->ptss_ip);
				intel_pt_enable_sync_switch(pt);
			}
		}
	}

	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
	while (1) {
		err = intel_pt_sample(ptq);
		if (err)
			return err;

		state = intel_pt_decode(ptq->decoder);
		if (state->err) {
			if (state->err == INTEL_PT_ERR_NODATA)
				return 1;
			if (ptq->sync_switch &&
			    state->from_ip >= pt->kernel_start) {
				ptq->sync_switch = false;
				intel_pt_next_tid(pt, ptq);
			}
			if (pt->synth_opts.errors) {
				err = intel_pt_synth_error(pt, state->err,
							   ptq->cpu, ptq->pid,
							   ptq->tid,
							   state->from_ip);
				if (err)
					return err;
			}
			continue;
		}

		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);

		/* Use estimated TSC upon return to user space */
		if (pt->est_tsc &&
		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
		    state->to_ip && state->to_ip < pt->kernel_start) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		/* Use estimated TSC in unknown switch state */
		} else if (ptq->sync_switch &&
			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
			   ptq->next_tid == -1) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		} else if (state->timestamp > ptq->timestamp) {
			ptq->timestamp = state->timestamp;
		}

		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
			*timestamp = ptq->timestamp;
			return 0;
		}
	}
	return 0;
}

static inline int intel_pt_update_queues(struct intel_pt *pt)
{
	if (pt->queues.new_data) {
		pt->queues.new_data = false;
		return intel_pt_setup_queues(pt);
	}

	return 0;
}

static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct intel_pt_queue *ptq;

		if (!pt->heap.heap_cnt)
			return 0;

		if (pt->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = pt->heap.heap_array[0].queue_nr;
		queue = &pt->queues.queue_array[queue_nr];
		ptq = queue->priv;

		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
			     queue_nr, pt->heap.heap_array[0].ordinal,
			     timestamp);

		auxtrace_heap__pop(&pt->heap);

		if (pt->heap.heap_cnt) {
			ts = pt->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		intel_pt_set_pid_tid_cpu(pt, queue);

		ret = intel_pt_run_decoder(ptq, &ts);

		if (ret < 0) {
			auxtrace_heap__add(&pt->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			ptq->on_heap = false;
		}
	}

	return 0;
}

static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}

static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0);
}

static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}

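/*
 * Illustrative note (not in the original source): with per-cpu mmaps,
 * queue_array[cpu] is normally the queue for that cpu, so the lookup tries
 * index 'cpu' first and only then scans downwards and then upwards.  The
 * scans are a fallback for when queue numbering does not line up with cpu
 * numbering, e.g. sparse cpu sets.
 */
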
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
				u64 timestamp)
{
	struct intel_pt_queue *ptq;
	int err;

	if (!pt->sync_switch)
		return 1;

	ptq = intel_pt_cpu_to_ptq(pt, cpu);
	if (!ptq || !ptq->sync_switch)
		return 1;

	switch (ptq->switch_state) {
	case INTEL_PT_SS_NOT_TRACING:
		ptq->next_tid = -1;
		break;
	case INTEL_PT_SS_UNKNOWN:
	case INTEL_PT_SS_TRACING:
		ptq->next_tid = tid;
		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
		return 0;
	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
		if (!ptq->on_heap) {
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
						 ptq->timestamp);
			if (err)
				return err;
			ptq->on_heap = true;
		}
		ptq->switch_state = INTEL_PT_SS_TRACING;
		break;
	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
		ptq->next_tid = tid;
		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
		break;
	default:
		break;
	}

	return 1;
}

static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct perf_evsel *evsel;
	pid_t tid;
	int cpu, ret;

	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
	if (evsel != pt->switch_evsel)
		return 0;

	tid = perf_evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
		     &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}

static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return 0;
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1) {
		pr_err("context_switch event has no tid\n");
		return -EINVAL;
	}

	intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
		     &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}

static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}

static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
		     event->header.type, sample->cpu, sample->time, timestamp);
	intel_pt_log_event(event);

	return err;
}

static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}

static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}

static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	thread__put(pt->unknown_thread);
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
}

static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		int err;

		if (perf_data__is_pipe(session->data)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}

static int intel_pt_synth_event(struct perf_session *session, const char *name,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;
	int err;

	pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
		 name, id, (u64)attr->sample_type);

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					  &id, intel_pt_event_synth);
	if (err)
		pr_err("%s: failed to synthesize '%s' event type\n",
		       __func__, name);

	return err;
}

static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id,
				    const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->id && evsel->id[0] == id) {
			if (evsel->name)
				zfree(&evsel->name);
			evsel->name = strdup(name);
			break;
		}
	}
}

static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt,
					 struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == pt->pmu_type && evsel->ids)
			return evsel;
	}

	return NULL;
}

static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel = intel_pt_evsel(pt, evlist);
	struct perf_event_attr attr;
	u64 id;
	int err;

	if (!evsel) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = intel_pt_synth_event(session, "branches", &attr, id);
		if (err)
			return err;
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (pt->synth_opts.callchain)
		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
	if (pt->synth_opts.last_branch)
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		err = intel_pt_synth_event(session, "instructions", &attr, id);
		if (err)
			return err;
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
	attr.sample_period = 1;

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		err = intel_pt_synth_event(session, "transactions", &attr, id);
		if (err)
			return err;
		pt->sample_transactions = true;
		pt->transactions_sample_type = attr.sample_type;
		pt->transactions_id = id;
		intel_pt_set_event_name(evlist, id, "transactions");
		id += 1;
	}

	attr.type = PERF_TYPE_SYNTH;
	attr.sample_type |= PERF_SAMPLE_RAW;

	if (pt->synth_opts.ptwrites) {
		attr.config = PERF_SYNTH_INTEL_PTWRITE;
		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
		if (err)
			return err;
		pt->sample_ptwrites = true;
		pt->ptwrites_sample_type = attr.sample_type;
		pt->ptwrites_id = id;
		intel_pt_set_event_name(evlist, id, "ptwrite");
		id += 1;
	}

	if (pt->synth_opts.pwr_events) {
		pt->sample_pwr_events = true;
		pt->pwr_events_sample_type = attr.sample_type;

		attr.config = PERF_SYNTH_INTEL_CBR;
		err = intel_pt_synth_event(session, "cbr", &attr, id);
		if (err)
			return err;
		pt->cbr_id = id;
		intel_pt_set_event_name(evlist, id, "cbr");
		id += 1;
	}

	if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
		attr.config = PERF_SYNTH_INTEL_MWAIT;
		err = intel_pt_synth_event(session, "mwait", &attr, id);
		if (err)
			return err;
		pt->mwait_id = id;
		intel_pt_set_event_name(evlist, id, "mwait");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRE;
		err = intel_pt_synth_event(session, "pwre", &attr, id);
		if (err)
			return err;
		pt->pwre_id = id;
		intel_pt_set_event_name(evlist, id, "pwre");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_EXSTOP;
		err = intel_pt_synth_event(session, "exstop", &attr, id);
		if (err)
			return err;
		pt->exstop_id = id;
		intel_pt_set_event_name(evlist, id, "exstop");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRX;
		err = intel_pt_synth_event(session, "pwrx", &attr, id);
		if (err)
			return err;
		pt->pwrx_id = id;
		intel_pt_set_event_name(evlist, id, "pwrx");
		id += 1;
	}

	return 0;
}

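/*
 * Illustrative note (not in the original source): which of the above
 * events get synthesized is selected per --itrace flag, e.g.
 * "perf script --itrace=bwpe" asks for branches (b), ptwrites (w), power
 * events (p) and errors (e).  The mwait/pwre/exstop/pwrx group is
 * additionally gated on the PMU's pwr_evt config bit (0x10) having been
 * set at record time.
 */
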
static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel) {
		const char *name = perf_evsel__name(evsel);

		if (!strcmp(name, "sched:sched_switch"))
			return evsel;
	}

	return NULL;
}

static bool intel_pt_find_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.context_switch)
			return true;
	}

	return false;
}

static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
	struct intel_pt *pt = data;

	if (!strcmp(var, "intel-pt.mispred-all"))
		pt->mispred_all = perf_config_bool(var, value);

	return 0;
}

static const char * const intel_pt_info_fmts[] = {
	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
	[INTEL_PT_MAX_NONTURBO_RATIO]	= "  Max non-turbo ratio %"PRIu64"\n",
	[INTEL_PT_FILTER_STR_LEN]	= "  Filter string len.  %"PRIu64"\n",
};

static void intel_pt_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
}

static void intel_pt_print_info_str(const char *name, const char *str)
{
	if (!dump_trace)
		return;

	fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
}

static bool intel_pt_has(struct auxtrace_info_event *auxtrace_info, int pos)
{
	return auxtrace_info->header.size >=
		sizeof(struct auxtrace_info_event) + (sizeof(u64) * (pos + 1));
}

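/*
 * Illustrative note (not in the original source): the AUXTRACE_INFO
 * payload grew over time (MTC/CYC fields, max non-turbo ratio, filter
 * string), so intel_pt_has() feature-tests by size rather than by a
 * version number, which lets a newer perf read older perf.data files: a
 * priv[pos] field is only trusted if the recorded header is large enough
 * to contain it.
 */
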
int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	void *info_end;
	u64 *info;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	addr_filters__init(&pt->filts);

	err = perf_config(intel_pt_perf_config, pt);
	if (err)
		goto err_free;

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
		pt->max_non_turbo_ratio =
			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_MAX_NONTURBO_RATIO,
				    INTEL_PT_MAX_NONTURBO_RATIO);
	}

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
	info_end = (void *)auxtrace_info + auxtrace_info->header.size;

	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
		size_t len;

		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = false;
	pt->est_tsc = !pt->timeless_decoding;

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree nor in a
	 * list, initialize its list node so that at thread__put() the
	 * current thread lifetime assumption is kept and we don't segfault
	 * at list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&pt->synth_opts,
				session->itrace_synth_opts->default_no_sample);
		if (use_browser != -1) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
		}
		if (session->itrace_synth_opts)
			pt->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
	}

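	/*
	 * Illustrative example (not in the original source): with a 2.4 GHz
	 * TSC, tsc_freq = intel_pt_ns_to_ticks(pt, 1e9) ~= 2400000000, so
	 * the default max_non_turbo_ratio = (2400000000 + 50000000) /
	 * 100000000 = 24 (the +50 MHz term rounds to the nearest 100 MHz
	 * bus multiple), and cbr2khz = 2400000000 / 24 / 1000 = 100000,
	 * i.e. each core-bus-ratio step in a CBR packet is 100 MHz.
	 */
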
	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
		}
	}

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
	return err;
}