// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <errno.h>
#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <cpuid.h>

#include "../../../util/session.h"
#include "../../../util/event.h"
#include "../../../util/evlist.h"
#include "../../../util/evsel.h"
#include "../../../util/evsel_config.h"
#include "../../../util/cpumap.h"
#include "../../../util/mmap.h"
#include <subcmd/parse-options.h>
#include "../../../util/parse-events.h"
#include "../../../util/pmu.h"
#include "../../../util/debug.h"
#include "../../../util/auxtrace.h"
#include "../../../util/perf_api_probe.h"
#include "../../../util/record.h"
#include "../../../util/target.h"
#include "../../../util/tsc.h"
#include <internal/lib.h> // page_size
#include "../../../util/intel-pt.h"

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)

#define INTEL_PT_PSB_PERIOD_NEAR	256

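/*
 * INTEL_PT_PSB_PERIOD_NEAR is the slack used when checking that a snapshot
 * or sample size comfortably exceeds the PSB period: decoding can only start
 * at a PSB packet, so a buffer close to one PSB period may contain none.
 */
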
struct intel_pt_snapshot_ref {
	void *ref_buf;
	size_t ref_offset;
	bool wrapped;
};

struct intel_pt_recording {
	struct auxtrace_record		itr;
	struct perf_pmu			*intel_pt_pmu;
	int				have_sched_switch;
	struct evlist			*evlist;
	bool				snapshot_mode;
	bool				snapshot_init_done;
	size_t				snapshot_size;
	size_t				snapshot_ref_buf_size;
	int				snapshot_ref_cnt;
	struct intel_pt_snapshot_ref	*snapshot_refs;
	size_t				priv_size;
};

static int intel_pt_parse_terms_with_default(const char *pmu_name,
					     struct list_head *formats,
					     const char *str,
					     u64 *config)
{
	struct list_head *terms;
	struct perf_event_attr attr = { .size = 0, };
	int err;

	terms = malloc(sizeof(struct list_head));
	if (!terms)
		return -ENOMEM;

	INIT_LIST_HEAD(terms);

	err = parse_events_terms(terms, str);
	if (err)
		goto out_free;

	attr.config = *config;
	err = perf_pmu__config_terms(pmu_name, formats, &attr, terms, true,
				     NULL);
	if (err)
		goto out_free;

	*config = attr.config;
out_free:
	parse_events_terms__delete(terms);
	return err;
}

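/*
 * Example (illustrative): a terms string such as "tsc,mtc,mtc_period=3" is
 * parsed against the PMU's sysfs format descriptions and folded into
 * attr.config on top of whatever default *config already holds.
 */
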
static int intel_pt_parse_terms(const char *pmu_name, struct list_head *formats,
				const char *str, u64 *config)
{
	*config = 0;
	return intel_pt_parse_terms_with_default(pmu_name, formats, str,
						 config);
}

static u64 intel_pt_masked_bits(u64 mask, u64 bits)
{
	const u64 top_bit = 1ULL << 63;
	u64 res = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & top_bit) {
			res <<= 1;
			if (bits & top_bit)
				res |= 1;
		}
		mask <<= 1;
		bits <<= 1;
	}

	return res;
}

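/*
 * Example: mask 0b1100, bits 0b0100 -> 0b01, i.e. the config field selected
 * by the mask is returned right-justified.
 */
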
static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
				struct evlist *evlist, u64 *res)
{
	struct evsel *evsel;
	u64 mask;

	*res = 0;

	mask = perf_pmu__format_bits(&intel_pt_pmu->format, str);
	if (!mask)
		return -EINVAL;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type) {
			*res = intel_pt_masked_bits(mask, evsel->core.attr.config);
			return 0;
		}
	}

	return -EINVAL;
}

static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
				  struct evlist *evlist)
{
	u64 val;
	int err, topa_multiple_entries;
	size_t psb_period;

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
				"%d", &topa_multiple_entries) != 1)
		topa_multiple_entries = 0;

	/*
	 * Use caps/topa_multiple_entries to indicate early hardware that had
	 * extra frequent PSBs.
	 */
	if (!topa_multiple_entries) {
		psb_period = 256;
		goto out;
	}

	err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
	if (err)
		val = 0;

	psb_period = 1 << (val + 11);
out:
	pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
	return psb_period;
}

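/*
 * The psb_period config term encodes an exponent: a PSB is emitted roughly
 * every 2^(11 + psb_period) bytes of trace, so 0 -> 2KiB and 3 -> 16KiB.
 */
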
static int intel_pt_pick_bit(int bits, int target)
{
	int pos, pick = -1;

	for (pos = 0; bits; bits >>= 1, pos++) {
		if (bits & 1) {
			if (pos <= target || pick < 0)
				pick = pos;
			if (pos >= target)
				break;
		}
	}

	return pick;
}

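/*
 * Example: bits 0x30 (only positions 4 and 5 set), target 3 -> 4: the
 * nearest set position is chosen, preferring one <= target when it exists.
 */
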
static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
{
	char buf[256];
	int mtc, mtc_periods = 0, mtc_period;
	int psb_cyc, psb_periods, psb_period;
	int pos = 0;
	u64 config;
	char c;

	pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
				&mtc) != 1)
		mtc = 1;
	if (mtc) {
		if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
					&mtc_periods) != 1)
			mtc_periods = 0;
		if (mtc_periods) {
			mtc_period = intel_pt_pick_bit(mtc_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",mtc,mtc_period=%d", mtc_period);
		}
	}

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
				&psb_cyc) != 1)
		psb_cyc = 1;

	if (psb_cyc && mtc_periods) {
		if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
					&psb_periods) != 1)
			psb_periods = 0;
		if (psb_periods) {
			psb_period = intel_pt_pick_bit(psb_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",psb_period=%d", psb_period);
		}
	}

	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
	    perf_pmu__scan_file(intel_pt_pmu, "format/branch", "%c", &c) == 1)
		pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");

	pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);

	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, buf,
			     &config);

	return config;
}

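/*
 * On hardware with all of the above capabilities this typically yields a
 * default config string of "tsc,mtc,mtc_period=3,psb_period=3,pt,branch".
 */
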
static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
					   struct record_opts *opts,
					   const char *str)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	ptr->snapshot_size = snapshot_size;

	return 0;
}

struct perf_event_attr *
intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
{
	struct perf_event_attr *attr;

	attr = zalloc(sizeof(struct perf_event_attr));
	if (!attr)
		return NULL;

	attr->config = intel_pt_default_config(intel_pt_pmu);

	intel_pt_pmu->selectable = true;

	return attr;
}

static const char *intel_pt_find_filter(struct evlist *evlist,
					struct perf_pmu *intel_pt_pmu)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type)
			return evsel->filter;
	}

	return NULL;
}

static size_t intel_pt_filter_bytes(const char *filter)
{
	size_t len = filter ? strlen(filter) : 0;

	return len ? roundup(len + 1, 8) : 0;
}

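/*
 * The filter string lives in the u64 priv[] area, so its length (plus NUL
 * terminator) is rounded up to a multiple of 8: e.g. a 19-character filter
 * occupies 24 bytes.
 */
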
static size_t
intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	const char *filter = intel_pt_find_filter(evlist, ptr->intel_pt_pmu);

	ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) +
			 intel_pt_filter_bytes(filter);
	ptr->priv_size += sizeof(u64); /* Cap Event Trace */

	return ptr->priv_size;
}

static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	__get_cpuid(0x15, &eax, &ebx, &ecx, &edx);

	*n = ebx;
	*d = eax;
}

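/*
 * CPUID leaf 0x15 reports the TSC to core crystal clock ratio: EBX holds the
 * numerator and EAX the denominator. Intel PT CTC packets tick with the
 * crystal clock, so the decoder uses this ratio to convert CTC to TSC.
 */
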
static int intel_pt_info_fill(struct auxtrace_record *itr,
			      struct perf_session *session,
			      struct perf_record_auxtrace_info *auxtrace_info,
			      size_t priv_size)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	struct perf_event_mmap_page *pc;
	struct perf_tsc_conversion tc = { .time_mult = 0, };
	bool cap_user_time_zero = false, per_cpu_mmaps;
	u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
	u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
	unsigned long max_non_turbo_ratio;
	size_t filter_str_len;
	const char *filter;
	int event_trace;
	__u64 *info;
	int err;

	if (priv_size != ptr->priv_size)
		return -EINVAL;

	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
			     "tsc", &tsc_bit);
	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
			     "noretcomp", &noretcomp_bit);
	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
			     "mtc", &mtc_bit);
	mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
					      "mtc_period");
	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
			     "cyc", &cyc_bit);

	intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);

	if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio",
				"%lu", &max_non_turbo_ratio) != 1)
		max_non_turbo_ratio = 0;
	if (perf_pmu__scan_file(intel_pt_pmu, "caps/event_trace",
				"%d", &event_trace) != 1)
		event_trace = 0;

	filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
	filter_str_len = filter ? strlen(filter) : 0;

	if (!session->evlist->core.nr_mmaps)
		return -EINVAL;

	pc = session->evlist->mmap[0].core.base;
	if (pc) {
		err = perf_read_tsc_conversion(pc, &tc);
		if (err) {
			if (err != -EOPNOTSUPP)
				return err;
		} else {
			cap_user_time_zero = tc.time_mult != 0;
		}
		if (!cap_user_time_zero)
			ui__warning("Intel Processor Trace: TSC not available\n");
	}

	per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus);

	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
	auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
	auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
	auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
	auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
	auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
	auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
	auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
	auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
	auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
	auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
	auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
	auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;
	auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO] = max_non_turbo_ratio;
	auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] = filter_str_len;

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;

	if (filter_str_len) {
		size_t len = intel_pt_filter_bytes(filter);

		strncpy((char *)info, filter, len);
		info += len >> 3;
	}

	*info++ = event_trace;

	return 0;
}

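/*
 * The layout written above must match intel_pt_info_priv_size():
 * INTEL_PT_AUXTRACE_PRIV_MAX u64 slots, then the u64-padded filter string,
 * then one u64 for the Event Trace capability.
 */
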
static int intel_pt_track_switches(struct evlist *evlist)
{
	const char *sched_switch = "sched:sched_switch";
	struct evsel *evsel;
	int err;

	if (!evlist__can_select_event(evlist, sched_switch))
		return -EPERM;

	err = parse_events(evlist, sched_switch, NULL);
	if (err) {
		pr_debug2("%s: failed to parse %s, error %d\n",
			  __func__, sched_switch, err);
		return err;
	}

	evsel = evlist__last(evlist);

	evsel__set_sample_bit(evsel, CPU);
	evsel__set_sample_bit(evsel, TIME);

	evsel->core.system_wide = true;
	evsel->no_aux_samples = true;
	evsel->immediate = true;

	return 0;
}

static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
	unsigned int val, last = 0, state = 1;
	int p = 0;

	str[0] = '\0';

	for (val = 0; val <= 64; val++, valid >>= 1) {
		if (valid & 1) {
			last = val;
			switch (state) {
			case 0:
				p += scnprintf(str + p, len - p, ",");
				/* Fall through */
			case 1:
				p += scnprintf(str + p, len - p, "%u", val);
				state = 2;
				break;
			case 2:
				state = 3;
				break;
			case 3:
				state = 4;
				break;
			default:
				break;
			}
		} else {
			switch (state) {
			case 3:
				p += scnprintf(str + p, len - p, ",%u", last);
				state = 0;
				break;
			case 4:
				p += scnprintf(str + p, len - p, "-%u", last);
				state = 0;
				break;
			default:
				break;
			}
			if (state != 1)
				state = 0;
		}
	}
}

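/*
 * Examples: valid 0x7 -> "0-2", valid 0x5 -> "0,2". Runs of three or more
 * consecutive valid values are collapsed into a range.
 */
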
static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
				    const char *caps, const char *name,
				    const char *supported, u64 config)
{
	char valid_str[256];
	unsigned int shift;
	unsigned long long valid;
	u64 bits;
	int ok;

	if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
		valid = 0;

	if (supported &&
	    perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
		valid = 0;

	valid |= 1; /* '0' is always valid */

	bits = perf_pmu__format_bits(&intel_pt_pmu->format, name);

	config &= bits;

	for (shift = 0; bits && !(bits & 1); shift++)
		bits >>= 1;

	config >>= shift;

	if (config > 63)
		goto out_err;

	if (valid & (1ULL << config)) /* 1ULL: config can exceed 31 */
		return 0;
out_err:
	intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
	pr_err("Invalid %s for %s. Valid values are: %s\n",
	       name, INTEL_PT_PMU_NAME, valid_str);
	return -EINVAL;
}

static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
				    struct evsel *evsel)
{
	int err;
	char c;

	if (!evsel)
		return 0;

	/*
	 * If supported, force pass-through config term (pt=1) even if user
	 * sets pt=0, which avoids senseless kernel errors.
	 */
	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
	    !(evsel->core.attr.config & 1)) {
		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
		evsel->core.attr.config |= 1;
	}

	err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
				       "cyc_thresh", "caps/psb_cyc",
				       evsel->core.attr.config);
	if (err)
		return err;

	err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
				       "mtc_period", "caps/mtc",
				       evsel->core.attr.config);
	if (err)
		return err;

	return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
					"psb_period", "caps/psb_cyc",
					evsel->core.attr.config);
}

static void intel_pt_config_sample_mode(struct perf_pmu *intel_pt_pmu,
					struct evsel *evsel)
{
	u64 user_bits = 0, bits;
	struct evsel_config_term *term = evsel__get_config_term(evsel, CFG_CHG);

	if (term)
		user_bits = term->val.cfg_chg;

	bits = perf_pmu__format_bits(&intel_pt_pmu->format, "psb_period");

	/* Did user change psb_period */
	if (bits & user_bits)
		return;

	/* Set psb_period to 0 */
	evsel->core.attr.config &= ~bits;
}

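/*
 * An AUX area sample can only be decoded if it contains a PSB, so
 * intel_pt_config_sample_mode() defaults to the smallest PSB period
 * (0 -> 2KiB) unless the user explicitly configured one.
 */
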
static void intel_pt_min_max_sample_sz(struct evlist *evlist,
				       size_t *min_sz, size_t *max_sz)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		size_t sz = evsel->core.attr.aux_sample_size;

		if (!sz)
			continue;
		if (min_sz && (sz < *min_sz || !*min_sz))
			*min_sz = sz;
		if (max_sz && sz > *max_sz)
			*max_sz = sz;
	}
}

/*
 * Currently, there is not enough information to disambiguate different PEBS
 * events, so only allow one.
 */
static bool intel_pt_too_many_aux_output(struct evlist *evlist)
{
	struct evsel *evsel;
	int aux_output_cnt = 0;

	evlist__for_each_entry(evlist, evsel)
		aux_output_cnt += !!evsel->core.attr.aux_output;

	if (aux_output_cnt > 1) {
		pr_err(INTEL_PT_PMU_NAME " supports at most one event with aux-output\n");
		return true;
	}

	return false;
}

static int intel_pt_recording_options(struct auxtrace_record *itr,
				      struct evlist *evlist,
				      struct record_opts *opts)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	bool have_timing_info, need_immediate = false;
	struct evsel *evsel, *intel_pt_evsel = NULL;
	const struct perf_cpu_map *cpus = evlist->core.cpus;
	bool privileged = perf_event_paranoid_check(-1);
	u64 tsc_bit;
	int err;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type) {
			if (intel_pt_evsel) {
				pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
				return -EINVAL;
			}
			evsel->core.attr.freq = 0;
			evsel->core.attr.sample_period = 1;
			evsel->no_aux_samples = true;
			intel_pt_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
		pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
		return -EINVAL;
	}

	if (opts->auxtrace_snapshot_mode && opts->auxtrace_sample_mode) {
		pr_err("Snapshot mode (" INTEL_PT_PMU_NAME " PMU) and sample trace cannot be used together\n");
		return -EINVAL;
	}

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
		return -EINVAL;
	}

	if (intel_pt_too_many_aux_output(evlist))
		return -EINVAL;

	if (!opts->full_auxtrace)
		return 0;

	if (opts->auxtrace_sample_mode)
		intel_pt_config_sample_mode(intel_pt_pmu, intel_pt_evsel);

	err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
	if (err)
		return err;

	/* Set default sizes for snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);

		if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages = KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}
		if (!opts->auxtrace_snapshot_size)
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
		pr_debug2("Intel PT snapshot size: %zu\n",
			  opts->auxtrace_snapshot_size);
		if (psb_period &&
		    opts->auxtrace_snapshot_size <= psb_period +
						    INTEL_PT_PSB_PERIOD_NEAR)
			ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
				    opts->auxtrace_snapshot_size, psb_period);
	}

	/* Set default sizes for sample mode */
	if (opts->auxtrace_sample_mode) {
		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
		size_t min_sz = 0, max_sz = 0;

		intel_pt_min_max_sample_sz(evlist, &min_sz, &max_sz);
		if (!opts->auxtrace_mmap_pages && !privileged &&
		    opts->mmap_pages == UINT_MAX)
			opts->mmap_pages = KiB(256) / page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = round_up(max_sz, page_size) / page_size;

			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (max_sz > opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Sample size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       max_sz,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		pr_debug2("Intel PT min. sample size: %zu max. sample size: %zu\n",
			  min_sz, max_sz);
		if (psb_period &&
		    min_sz <= psb_period + INTEL_PT_PSB_PERIOD_NEAR)
			ui__warning("Intel PT sample size (%zu) may be too small for PSB period (%zu)\n",
				    min_sz, psb_period);
	}

	/* Set default sizes for full trace mode */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz;

		if (opts->auxtrace_snapshot_mode || opts->auxtrace_sample_mode)
			min_sz = KiB(4);
		else
			min_sz = KiB(8);

		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}

	if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
		u32 aux_watermark = opts->auxtrace_mmap_pages * page_size / 4;

		intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
	}

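	/*
	 * The aux_watermark set above requests a wakeup whenever a quarter of
	 * the AUX buffer has filled, so data is consumed before it can be
	 * overwritten.
	 */
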
	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
			     "tsc", &tsc_bit);

	if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
		have_timing_info = true;
	else
		have_timing_info = false;

	/*
	 * Per-cpu recording needs sched_switch events to distinguish different
	 * threads.
	 */
	if (have_timing_info && !perf_cpu_map__empty(cpus) &&
	    !record_opts__no_switch_events(opts)) {
		if (perf_can_record_switch_events()) {
			bool cpu_wide = !target__none(&opts->target) &&
					!target__has_task(&opts->target);

			if (!cpu_wide && perf_can_record_cpu_wide()) {
				struct evsel *switch_evsel;

				err = parse_events(evlist, "dummy:u", NULL);
				if (err)
					return err;

				switch_evsel = evlist__last(evlist);

				switch_evsel->core.attr.freq = 0;
				switch_evsel->core.attr.sample_period = 1;
				switch_evsel->core.attr.context_switch = 1;

				switch_evsel->core.system_wide = true;
				switch_evsel->no_aux_samples = true;
				switch_evsel->immediate = true;

				evsel__set_sample_bit(switch_evsel, TID);
				evsel__set_sample_bit(switch_evsel, TIME);
				evsel__set_sample_bit(switch_evsel, CPU);
				evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);

				opts->record_switch_events = false;
				ptr->have_sched_switch = 3;
			} else {
				opts->record_switch_events = true;
				need_immediate = true;
				if (cpu_wide)
					ptr->have_sched_switch = 3;
				else
					ptr->have_sched_switch = 2;
			}
		} else {
			err = intel_pt_track_switches(evlist);
			if (err == -EPERM)
				pr_debug2("Unable to select sched:sched_switch\n");
			else if (err)
				return err;
			else
				ptr->have_sched_switch = 1;
		}
	}

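	/*
	 * have_sched_switch values consumed by the decoder: 1 = sched_switch
	 * tracepoint, 2 = PERF_RECORD_SWITCH, 3 = PERF_RECORD_SWITCH_CPU_WIDE.
	 */
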
	if (have_timing_info && !intel_pt_evsel->core.attr.exclude_kernel &&
	    perf_can_record_text_poke_events() && perf_can_record_cpu_wide())
		opts->text_poke = true;

	if (intel_pt_evsel) {
		/*
		 * To obtain the auxtrace buffer file descriptor, the auxtrace
		 * event must come first.
		 */
		evlist__to_front(evlist, intel_pt_evsel);
		/*
		 * In the case of per-cpu mmaps, we need the CPU on the
		 * AUX event.
		 */
		if (!perf_cpu_map__empty(cpus))
			evsel__set_sample_bit(intel_pt_evsel, CPU);
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct evsel *tracking_evsel;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			return err;

		tracking_evsel = evlist__last(evlist);

		evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->core.attr.freq = 0;
		tracking_evsel->core.attr.sample_period = 1;

		tracking_evsel->no_aux_samples = true;
		if (need_immediate)
			tracking_evsel->immediate = true;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!perf_cpu_map__empty(cpus)) {
			evsel__set_sample_bit(tracking_evsel, TIME);
			/* And the CPU for switch events */
			evsel__set_sample_bit(tracking_evsel, CPU);
		}
		evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
	}

	/*
	 * Warn the user when we do not have enough information to decode i.e.
	 * per-cpu with no sched_switch (except workload-only).
	 */
	if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
	    !target__none(&opts->target) &&
	    !intel_pt_evsel->core.attr.exclude_user)
		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");

	return 0;
}

static int intel_pt_snapshot_start(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
			return evsel__disable(evsel);
	}
	return -EINVAL;
}

static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
			return evsel__enable(evsel);
	}
	return -EINVAL;
}

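/*
 * Snapshot control: the Intel PT event is disabled while the AUX buffer
 * contents are captured, then re-enabled here once the snapshot is taken.
 */
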
static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
{
	const size_t sz = sizeof(struct intel_pt_snapshot_ref);
	int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
	struct intel_pt_snapshot_ref *refs;

	if (!new_cnt)
		new_cnt = 16;

	while (new_cnt <= idx)
		new_cnt *= 2;

	refs = calloc(new_cnt, sz);
	if (!refs)
		return -ENOMEM;

	memcpy(refs, ptr->snapshot_refs, cnt * sz);
	free(ptr->snapshot_refs); /* Release the old, smaller array */

	ptr->snapshot_refs = refs;
	ptr->snapshot_ref_cnt = new_cnt;

	return 0;
}

static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
{
	int i;

	for (i = 0; i < ptr->snapshot_ref_cnt; i++)
		zfree(&ptr->snapshot_refs[i].ref_buf);
	zfree(&ptr->snapshot_refs);
}

static void intel_pt_recording_free(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);

	intel_pt_free_snapshot_refs(ptr);
	free(ptr);
}

static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
					size_t snapshot_buf_size)
{
	size_t ref_buf_size = ptr->snapshot_ref_buf_size;
	void *ref_buf;

	ref_buf = zalloc(ref_buf_size);
	if (!ref_buf)
		return -ENOMEM;

	ptr->snapshot_refs[idx].ref_buf = ref_buf;
	ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;

	return 0;
}

static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
					     size_t snapshot_buf_size)
{
	const size_t max_size = 256 * 1024;
	size_t buf_size = 0, psb_period;

	if (ptr->snapshot_size <= 64 * 1024)
		return 0;

	psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
	if (psb_period)
		buf_size = psb_period * 2;

	if (!buf_size || buf_size > max_size)
		buf_size = max_size;

	if (buf_size >= snapshot_buf_size)
		return 0;

	if (buf_size >= ptr->snapshot_size / 2)
		return 0;

	return buf_size;
}

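/*
 * The reference buffer spans roughly two PSB periods, capped at 256KiB. For
 * snapshots of 64KiB or less, or when the reference would cover half or more
 * of the snapshot, wrap detection falls back to intel_pt_first_wrap().
 */
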
static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
				  size_t snapshot_buf_size)
{
	if (ptr->snapshot_init_done)
		return 0;

	ptr->snapshot_init_done = true;

	ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
							snapshot_buf_size);

	return 0;
}

/**
 * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
 * @buf1: first buffer
 * @compare_size: number of bytes to compare
 * @buf2: second buffer (a circular buffer)
 * @offs2: offset in second buffer
 * @buf2_size: size of second buffer
 *
 * The comparison allows for the possibility that the bytes to compare in the
 * circular buffer are not contiguous. It is assumed that @compare_size <=
 * @buf2_size. This function returns %false if the bytes are identical, %true
 * otherwise.
 */
static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
				     void *buf2, size_t offs2, size_t buf2_size)
{
	size_t end2 = offs2 + compare_size, part_size;

	if (end2 <= buf2_size)
		return memcmp(buf1, buf2 + offs2, compare_size);

	part_size = end2 - buf2_size;
	if (memcmp(buf1, buf2 + offs2, part_size))
		return true;

	compare_size -= part_size;

	return memcmp(buf1 + part_size, buf2, compare_size);
}

static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
				 size_t ref_size, size_t buf_size,
				 void *data, size_t head)
{
	size_t ref_end = ref_offset + ref_size;

	if (ref_end > buf_size) {
		if (head > ref_offset || head < ref_end - buf_size)
			return true;
	} else if (head > ref_offset && head < ref_end) {
		return true;
	}

	return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
					buf_size);
}

static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
			      void *data, size_t head)
{
	if (head >= ref_size) {
		memcpy(ref_buf, data + head - ref_size, ref_size);
	} else {
		memcpy(ref_buf, data, head);
		ref_size -= head;
		memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
	}
}

static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 head)
{
	struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
	bool wrapped;

	wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
				       ptr->snapshot_ref_buf_size, mm->len,
				       data, head);
	intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
			  data, head);

	return wrapped;
}

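/*
 * If the saved reference bytes no longer match, or the new head has moved
 * into the reference region, the buffer must have wrapped since the previous
 * snapshot. A fresh reference is saved for the next comparison either way.
 */
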
static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
{
	int i, a, b;

	b = buf_size >> 3;
	a = b - 512;
	if (a < 0)
		a = 0;

	for (i = a; i < b; i++) {
		if (data[i])
			return true;
	}

	return false;
}

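/*
 * The AUX buffer starts out zero-filled, so non-zero data among the last 512
 * u64s (the final 4KiB) means the trace has wrapped at least once.
 */
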
static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
				  struct auxtrace_mmap *mm, unsigned char *data,
				  u64 *head, u64 *old)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	bool wrapped;
	int err;

	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head);

	err = intel_pt_snapshot_init(ptr, mm->len);
	if (err)
		goto out_err;

	if (idx >= ptr->snapshot_ref_cnt) {
		err = intel_pt_alloc_snapshot_refs(ptr, idx);
		if (err)
			goto out_err;
	}

	if (ptr->snapshot_ref_buf_size) {
		if (!ptr->snapshot_refs[idx].ref_buf) {
			err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
			if (err)
				goto out_err;
		}
		wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
	} else {
		wrapped = ptr->snapshot_refs[idx].wrapped;
		if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
			ptr->snapshot_refs[idx].wrapped = true;
			wrapped = true;
		}
	}

	/*
	 * In full trace mode 'head' continually increases. However in snapshot
	 * mode 'head' is an offset within the buffer. Here 'old' and 'head'
	 * are adjusted to match the full trace case which expects that 'old'
	 * is always less than 'head'.
	 */
	if (wrapped) {
		*old = *head;
		*head += mm->len;
	} else {
		if (mm->mask)
			*old &= mm->mask;
		else
			*old %= mm->len;
		if (*old > *head)
			*head += mm->len;
	}

	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

	return 0;

out_err:
	pr_err("%s: failed, error %d\n", __func__, err);
	return err;
}

static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
{
	return rdtsc();
}

struct auxtrace_record *intel_pt_recording_init(int *err)
{
	struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
	struct intel_pt_recording *ptr;

	if (!intel_pt_pmu)
		return NULL;

	if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
		*err = -errno;
		return NULL;
	}

	ptr = zalloc(sizeof(struct intel_pt_recording));
	if (!ptr) {
		*err = -ENOMEM;
		return NULL;
	}

	ptr->intel_pt_pmu = intel_pt_pmu;
	ptr->itr.pmu = intel_pt_pmu;
	ptr->itr.recording_options = intel_pt_recording_options;
	ptr->itr.info_priv_size = intel_pt_info_priv_size;
	ptr->itr.info_fill = intel_pt_info_fill;
	ptr->itr.free = intel_pt_recording_free;
	ptr->itr.snapshot_start = intel_pt_snapshot_start;
	ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
	ptr->itr.find_snapshot = intel_pt_find_snapshot;
	ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
	ptr->itr.reference = intel_pt_reference;
	ptr->itr.read_finish = auxtrace_record__read_finish;
	/*
	 * Decoding starts at a PSB packet. Minimum PSB period is 2K so 4K
	 * should give at least 1 PSB per sample.
	 */
	ptr->itr.default_aux_sample_size = 4096;

	return &ptr->itr;
}