 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include "callchain.h"
#include "thread_map.h"
#include "perf_regs.h"
#include "trace-event.h"
#include "util/parse-branch-options.h"
#include "sane_ctype.h"
struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

void __weak test_attr__ready(void) { }

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};
int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{
	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

	if (init)
		perf_evsel__object.init = init;

	if (fini)
		perf_evsel__object.fini = fini;

	return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}
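/*
 * Worked example (illustrative sketch, not part of the original file): for
 * sample_type == PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_ID, the id is the 4th u64 of the sample array, so id_pos is 3;
 * in a non-sample event neither CPU nor STREAM_ID follow it, so counting
 * backwards from the end, is_pos is 1.
 */
static void __maybe_unused example_id_pos(void)
{
	u64 st = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
		 PERF_SAMPLE_ID;

	if (__perf_evsel__calc_id_pos(st) != 3 ||
	    __perf_evsel__calc_is_pos(st) != 1)
		pr_debug("unexpected id_pos/is_pos\n");
}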
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool perf_evsel__is_function_event(struct perf_evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"
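	/*
	 * Note: comparing sizeof(FUNCTION_EVENT) bytes includes the trailing
	 * NUL, so only the exact name "ftrace:function" matches.
	 */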
	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->tracking = !idx;
	evsel->leader = evsel;
	evsel->evlist = NULL;
	INIT_LIST_HEAD(&evsel->node);
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_expr   = NULL;
	evsel->metric_name   = NULL;
	evsel->metric_events = NULL;
	evsel->collect_stat  = false;
	evsel->pmu_name      = NULL;
}

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	perf_evsel__init(evsel, attr, idx);

	if (perf_evsel__is_bpf_output(evsel)) {
		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		evsel->attr.sample_period = 1;
	}

	if (perf_evsel__is_clock(evsel)) {
		/*
		 * The evsel->unit points to the static alias->unit,
		 * so it's OK to use a static string here.
		 */
		static const char *unit = "msec";

		evsel->unit = unit;
		evsel->scale = 1e-6;
	}

	return evsel;
}
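/*
 * Note (an assumption based on the sysctl's documented semantics, not part of
 * the original file): perf_event_paranoid() reads
 * /proc/sys/kernel/perf_event_paranoid, and a value of -1 ("not paranoid at
 * all") lets even unprivileged users profile the kernel, which is what the
 * check below relies on.
 */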
static bool perf_event_can_profile_kernel(void)
{
	return geteuid() == 0 || perf_event_paranoid() == -1;
}

struct perf_evsel *perf_evsel__new_cycles(bool precise)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.exclude_kernel	= !perf_event_can_profile_kernel(),
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	/*
	 * Unnamed union member, not supported as struct member named
	 * initializer in older compilers such as gcc 4.4.7
	 *
	 * Just for probing the precise_ip:
	 */
	attr.sample_period = 1;

	perf_event_attr__set_max_precise_ip(&attr);
	/*
	 * Now let the usual logic to set up the perf_event_attr defaults
	 * kick in when we return, before perf_evsel__open() is called.
	 */
	attr.sample_period = 0;

	evsel = perf_evsel__new(&attr);

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%s%s%.*s",
		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
		     attr.exclude_kernel ? "u" : "",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0) {
		perf_evsel__delete(evsel);
		evsel = NULL;
	}

	return evsel;
}
/*
 * Returns a pointer with the error encoded via the <linux/err.h> interface.
 */
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
	return ERR_PTR(err);
}

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while (0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));

	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
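/*
 * Example (sketch, not part of the original file): with
 * attr.exclude_kernel = 1 and attr.exclude_hv = 1, MOD_PRINT() emits only the
 * 'u' modifier and a hardware cycles event is rendered as "cycles:u"; with
 * precise_ip = 2 on top it becomes "cycles:upp".
 */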
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));

	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);

	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
	{ "LLC",	"L2",							},
	{ "dTLB",	"d-tlb",	"Data-TLB",				},
	{ "iTLB",	"i-tlb",	"Instruction-TLB",			},
	{ "branch",	"branches",	"bpu",		"btb",	"bpc",		},
	{ "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
	{ "load",	"loads",	"read",					},
	{ "store",	"stores",	"write",				},
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",		"access",		},
	{ "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stats
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]		= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)]	= (CACHE_READ),
	[C(BPU)]	= (CACHE_READ),
	[C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */

	return false;		/* invalid */
}
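/*
 * Illustrative sketch (not part of the original file): per the table above,
 * writes to the read-only instruction cache are rejected while writes to the
 * data cache are allowed.
 */
static bool __maybe_unused cache_op_valid_example(void)
{
	return !perf_evsel__is_cache_op_valid(C(L1I), C(OP_WRITE)) && /* invalid */
		perf_evsel__is_cache_op_valid(C(L1D), C(OP_WRITE));   /* valid */
}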
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);

	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
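/*
 * Illustrative sketch (not part of the original file): the config value packs
 * (type, op, result) into its low three bytes, as decoded above, so
 * "L1-dcache-load-misses" corresponds to:
 */
static u64 __maybe_unused l1d_load_misses_config(void)
{
	return C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16);
}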
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);

	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

/*
 * Returns the group details for the specified leader,
 * with the following rules.
 *
 *  For record -e '{cycles,instructions}'
 *    'anon group { cycles:u, instructions:u }'
 *
 *  For record -e 'cycles,instructions' and report --group
 *    'cycles:u, instructions:u'
 */
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret = 0;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	if (!evsel->forced_leader)
		ret = scnprintf(buf, size, "%s { ", group_name);

	ret += scnprintf(buf + ret, size - ret, "%s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	if (!evsel->forced_leader)
		ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}
static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
					   struct record_opts *opts,
					   struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user |= PERF_REGS_MASK;
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

void perf_evsel__config_callchain(struct perf_evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *param)
{
	if (param->enabled)
		return __perf_evsel__config_callchain(evsel, opts, param);
}

static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}
static void apply_config_terms(struct perf_evsel *evsel,
			       struct record_opts *opts, bool track)
{
	struct perf_evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->attr;
	/* callgraph default */
	struct callchain_param param = {
		.record_mode = callchain_param.record_mode,
	};
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case PERF_EVSEL__CONFIG_TERM_PERIOD:
			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
				attr->sample_period = term->val.period;
				attr->freq = 0;
				perf_evsel__reset_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_FREQ:
			if (!(term->weak && opts->user_freq != UINT_MAX)) {
				attr->sample_freq = term->val.freq;
				attr->freq = 1;
				perf_evsel__set_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				perf_evsel__set_sample_bit(evsel, TIME);
			else
				perf_evsel__reset_sample_bit(evsel, TIME);
			break;
		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.callgraph;
			break;
		case PERF_EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.branch && strcmp(term->val.branch, "no")) {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.branch,
						 &attr->branch_sample_type);
			} else
				perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case PERF_EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * perf_evsel__config. If the user explicitly set
			 * inherit using config terms, override the global
			 * opts->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		bool sample_address = false;

		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
			}
		}
		if (param.record_mode == CALLCHAIN_DWARF)
			sample_address = true;

		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If the global callgraph was set, clear it */
		if (callchain_param.enabled)
			perf_evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled) {
			if (sample_address) {
				perf_evsel__set_sample_bit(evsel, ADDR);
				perf_evsel__set_sample_bit(evsel, DATA_SRC);
				evsel->attr.mmap_data = track;
			}
			perf_evsel__config_callchain(evsel, opts, &param);
		}
	}
}

static bool is_dummy_event(struct perf_evsel *evsel)
{
	return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
	       (evsel->attr.config == PERF_COUNT_SW_DUMMY);
}
/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *      - all independent events and group leaders are disabled
 *      - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *      - all independent events and group leaders have
 *        enable_on_exec set
 *      - we don't specifically enable or disable any event during
 *        the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *      - we specifically enable or disable all events during
 *        the record command
 *
 *     When attaching events to an already running traced program we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
			struct callchain_param *callchain)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->write_backward = opts->overwrite ? 1 : 0;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of a single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->freq           = 0;
		attr->sample_freq    = 0;
		attr->sample_period  = 0;
		attr->write_backward = 0;
		attr->sample_id_all  = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING |
			PERF_FORMAT_ID;
		attr->inherit_stat = 1;
	}

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for the function trace
	 * event, due to issues with page faults while tracing the page
	 * fault handler, and its overall tricky nature.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		perf_evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (opts->sample_user_regs) {
		attr->sample_regs_user |= opts->sample_user_regs;
		perf_evsel__set_sample_bit(evsel, REGS_USER);
	}

	if (target__has_cpu(&opts->target) || opts->sample_cpu)
		perf_evsel__set_sample_bit(evsel, CPU);

	/*
	 * When the user explicitly disabled time, don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->sample_phys_addr)
		perf_evsel__set_sample_bit(evsel, PHYS_ADDR);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->record_namespaces)
		attr->namespaces = track;

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for programs executed (traced) by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		perf_event_attr__set_max_precise_ip(attr);

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Apply event-specific term settings; they overload any global
	 * configuration.
	 */
	apply_config_terms(evsel, opts, track);

	evsel->ignore_missing_thread = opts->ignore_missing_thread;

	/* The --period option takes precedence. */
	if (opts->period_set) {
		if (opts->period)
			perf_evsel__set_sample_bit(evsel, PERIOD);
		else
			perf_evsel__reset_sample_bit(evsel, PERIOD);
	}

	/*
	 * For initial_delay, a dummy event is added implicitly.
	 * The software dummy event will error out with -EOPNOTSUPP
	 * if the BRANCH_STACK bit is set.
	 */
	if (opts->initial_delay && is_dummy_event(evsel))
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
}
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int cpu, thread;

		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc, void *arg)
{
	int cpu, thread;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

static int perf_evsel__append_filter(struct perf_evsel *evsel,
				     const char *fmt, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return perf_evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);
}

int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "%s,%s", filter);
}
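/*
 * Illustrative example (not part of the original file): starting from an
 * evsel whose filter is "pid == 1", appending the tracepoint filter
 * "prio > 0" yields "(pid == 1) && (prio > 0)", while the address filter
 * variant simply joins the two strings with a comma.
 */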
int perf_evsel__enable(struct perf_evsel *evsel)
{
	return perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	return perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_DISABLE,
				     0);
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
{
	struct perf_evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
		list_del(&term->list);
		free(term);
	}
}

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	int cpu, thread;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
		for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	assert(evsel->evlist == NULL);
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	perf_evsel__free_config_terms(evsel);
	cgroup__put(evsel->cgrp);
	cpu_map__put(evsel->cpus);
	cpu_map__put(evsel->own_cpus);
	thread_map__put(evsel->threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}
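/*
 * Worked example (sketch, not part of the original file): with scale == true,
 * val = 1000, ena = 200 and run = 100, the event was scheduled in for only
 * half of its enabled time, so the count is extrapolated to
 * 1000 * 200 / 100 + 0.5 -> 2000 and *pscaled is set to 1; run == 0 instead
 * yields val = 0 and *pscaled = -1.
 */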
static int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}
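/*
 * Worked example (sketch, not part of the original file): for a group leader
 * with read_format == PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING and
 * nr_members == 2, each entry is 8 (value) + 8 (id) = 16 bytes, and the total
 * read size is 8 (time_enabled) + 8 (time_running) + 8 (nr) + 2 * 16 = 56
 * bytes.
 */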
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);

	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
		return -errno;

	return 0;
}

static int
perf_evsel__read_one(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);

	return perf_evsel__read(evsel, cpu, thread, count);
}

static void
perf_evsel__set_count(struct perf_evsel *counter, int cpu, int thread,
		      u64 val, u64 ena, u64 run)
{
	struct perf_counts_values *count;

	count = perf_counts(counter->counts, cpu, thread);

	count->val    = val;
	count->ena    = ena;
	count->run    = run;
	count->loaded = true;
}

static int
perf_evsel__process_group_data(struct perf_evsel *leader,
			       int cpu, int thread, u64 *data)
{
	u64 read_format = leader->attr.read_format;
	struct sample_read_value *v;
	u64 nr, ena = 0, run = 0, i;

	nr = *data++;

	if (nr != (u64) leader->nr_members)
		return -EINVAL;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		ena = *data++;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		run = *data++;

	v = (struct sample_read_value *) data;

	perf_evsel__set_count(leader, cpu, thread,
			      v[0].value, ena, run);

	for (i = 1; i < nr; i++) {
		struct perf_evsel *counter;

		counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
		if (!counter)
			return -EINVAL;

		perf_evsel__set_count(counter, cpu, thread,
				      v[i].value, ena, run);
	}

	return 0;
}

static int
perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
{
	struct perf_stat_evsel *ps = leader->stats;
	u64 read_format = leader->attr.read_format;
	int size = perf_evsel__read_size(leader);
	u64 *data = ps->group_data;

	if (!(read_format & PERF_FORMAT_ID))
		return -EINVAL;

	if (!perf_evsel__is_group_leader(leader))
		return -EINVAL;

	if (!data) {
		data = zalloc(size);
		if (!data)
			return -ENOMEM;

		ps->group_data = data;
	}

	if (FD(leader, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(leader, cpu, thread), data, size) <= 0)
		return -errno;

	return perf_evsel__process_group_data(leader, cpu, thread, data);
}

int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread)
{
	u64 read_format = evsel->attr.read_format;

	if (read_format & PERF_FORMAT_GROUP)
		return perf_evsel__read_group(evsel, cpu, thread);

	return perf_evsel__read_one(evsel, cpu, thread);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must already be processed/open;
	 * if not, it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
struct bit_names {
	int bit;
	const char *name;
};

static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
	bool first_bit = true;
	int i = 0;

	do {
		if (value & bits[i].bit) {
			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);
}

static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
		bit_name(WEIGHT), bit_name(PHYS_ADDR),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

static void __p_branch_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
	struct bit_names bits[] = {
		bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
		bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
		bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
		bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
		bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

#define BUF_SIZE		1024

#define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv)
{
	char buf[BUF_SIZE];
	int ret = 0;

	PRINT_ATTRf(type, p_unsigned);
	PRINT_ATTRf(size, p_unsigned);
	PRINT_ATTRf(config, p_hex);
	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
	PRINT_ATTRf(sample_type, p_sample_type);
	PRINT_ATTRf(read_format, p_read_format);

	PRINT_ATTRf(disabled, p_unsigned);
	PRINT_ATTRf(inherit, p_unsigned);
	PRINT_ATTRf(pinned, p_unsigned);
	PRINT_ATTRf(exclusive, p_unsigned);
	PRINT_ATTRf(exclude_user, p_unsigned);
	PRINT_ATTRf(exclude_kernel, p_unsigned);
	PRINT_ATTRf(exclude_hv, p_unsigned);
	PRINT_ATTRf(exclude_idle, p_unsigned);
	PRINT_ATTRf(mmap, p_unsigned);
	PRINT_ATTRf(comm, p_unsigned);
	PRINT_ATTRf(freq, p_unsigned);
	PRINT_ATTRf(inherit_stat, p_unsigned);
	PRINT_ATTRf(enable_on_exec, p_unsigned);
	PRINT_ATTRf(task, p_unsigned);
	PRINT_ATTRf(watermark, p_unsigned);
	PRINT_ATTRf(precise_ip, p_unsigned);
	PRINT_ATTRf(mmap_data, p_unsigned);
	PRINT_ATTRf(sample_id_all, p_unsigned);
	PRINT_ATTRf(exclude_host, p_unsigned);
	PRINT_ATTRf(exclude_guest, p_unsigned);
	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
	PRINT_ATTRf(mmap2, p_unsigned);
	PRINT_ATTRf(comm_exec, p_unsigned);
	PRINT_ATTRf(use_clockid, p_unsigned);
	PRINT_ATTRf(context_switch, p_unsigned);
	PRINT_ATTRf(write_backward, p_unsigned);
	PRINT_ATTRf(namespaces, p_unsigned);

	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);
	PRINT_ATTRf(aux_watermark, p_unsigned);
	PRINT_ATTRf(sample_max_stack, p_unsigned);

	return ret;
}

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, "  %-32s %s\n", name, val);
}
static void perf_evsel__remove_fd(struct perf_evsel *pos,
				  int nr_cpus, int nr_threads,
				  int thread_idx)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++)
		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
}

static int update_fds(struct perf_evsel *evsel,
		      int nr_cpus, int cpu_idx,
		      int nr_threads, int thread_idx)
{
	struct perf_evsel *pos;

	if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
		return -EINVAL;

	evlist__for_each_entry(evsel->evlist, pos) {
		nr_cpus = pos != evsel ? nr_cpus : cpu_idx;

		perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);

		/*
		 * Since the fds for the next evsel have not been created yet,
		 * there is no need to iterate over the whole event list.
		 */
		if (pos == evsel)
			break;
	}
	return 0;
}

static bool ignore_missing_thread(struct perf_evsel *evsel,
				  int nr_cpus, int cpu,
				  struct thread_map *threads,
				  int thread, int err)
{
	pid_t ignore_pid = thread_map__pid(threads, thread);

	if (!evsel->ignore_missing_thread)
		return false;

	/* The system wide setup does not work with threads. */
	if (evsel->system_wide)
		return false;

	/* -ESRCH is the perf event syscall errno for PIDs that are not found. */
	if (err != -ESRCH)
		return false;

	/* If there's only one thread, let it fail. */
	if (threads->nr == 1)
		return false;

	/*
	 * We must remove the fd for the missing thread first,
	 * because thread_map__remove() will decrease threads->nr.
	 */
	if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
		return false;

	if (thread_map__remove(threads, thread))
		return false;

	pr_warning("WARNING: Ignored open failure for pid %d\n",
		   ignore_pid);
	return true;
}
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (perf_missing_features.write_backward && evsel->attr.write_backward)
		return -EINVAL;

	if (cpus == NULL) {
		static struct cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = thread_map__new_by_tid(-1);
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.clockid_wrong)
		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->attr.use_clockid = 0;
		evsel->attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
	if (perf_missing_features.lbr_flags)
		evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
						    PERF_SAMPLE_BRANCH_NO_CYCLES);
	if (perf_missing_features.group_read && evsel->attr.inherit)
		evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int fd, group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
				  pid, cpus->map[cpu], group_fd, flags);

			test_attr__ready();

			fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
						 group_fd, flags);

			FD(evsel, cpu, thread) = fd;

			if (fd < 0) {
				err = -errno;

				if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
					/*
					 * We just removed 1 thread, so take a step
					 * back on thread index and lower the upper
					 * nthreads limit.
					 */
					nthreads--;
					thread--;

					/* ... and pretend like nothing has happened. */
					err = 0;
					continue;
				}

				pr_debug2("\nsys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}

			pr_debug2(" = %d\n", fd);

			if (evsel->bpf_fd >= 0) {
				int evt_fd = fd;
				int bpf_fd = evsel->bpf_fd;

				err = ioctl(evt_fd,
					    PERF_EVENT_IOC_SET_BPF,
					    bpf_fd);
				if (err && errno != EEXIST) {
					pr_err("failed to attach bpf fd %d: %s\n",
					       bpf_fd, strerror(errno));
					err = -EINVAL;
					goto out_close;
				}
			}

			set_rlimit = NO_CHANGE;

			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have perf_evsel__open_strerror() print us a nice
			 * error message.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE) {
				l.rlim_cur = l.rlim_max;
			} else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
		perf_missing_features.write_backward = true;
		pr_debug2("switching off write_backward\n");
		goto out_close;
	} else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		pr_debug2("switching off clockid\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
		perf_missing_features.clockid = true;
		pr_debug2("switching off use_clockid\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		pr_debug2("switching off cloexec flag\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		pr_debug2("switching off mmap2\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		pr_debug2("switching off exclude_guest, exclude_host\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		pr_debug2("switching off sample_id_all\n");
		goto retry_sample_id;
	} else if (!perf_missing_features.lbr_flags &&
		   (evsel->attr.branch_sample_type &
		    (PERF_SAMPLE_BRANCH_NO_CYCLES |
		     PERF_SAMPLE_BRANCH_NO_FLAGS))) {
		perf_missing_features.lbr_flags = true;
		pr_debug2("switching off branch sample type no (cycles/flags)\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.group_read &&
		   evsel->attr.inherit &&
		   (evsel->attr.read_format & PERF_FORMAT_GROUP) &&
		   perf_evsel__is_group_leader(evsel)) {
		perf_missing_features.group_read = true;
		pr_debug2("switching off group read\n");
		goto fallback_missing_features;
	}
out_close:
	if (err)
		threads->err_thread = thread;

	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}
void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return perf_evsel__open(evsel, cpus, NULL);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return perf_evsel__open(evsel, NULL, threads);
}

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}

static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
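/*
 * Usage sketch (not part of the original file): the parsers below walk
 * 'array' through the event payload and bounds-check each field against
 * 'endp' before dereferencing it, e.g.:
 *
 *	OVERFLOW_CHECK_u64(array);
 *	data->id = *array;
 *	array++;
 */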
static int
perf_event__check_size(union perf_event *event, unsigned int sample_size)
{
	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	return 0;
}

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;
	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	data->misc    = event->header.misc;
	data->data_src = PERF_MEM_DATA_SRC_NONE;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}
	array = event->sample.array;

	if (perf_event__check_size(event, evsel->sample_size))
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
						 sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
				(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (evsel__has_callchain(evsel)) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;

		/*
		 * Undo swap of u64, then swap on individual u32s,
		 * get the size of the raw area and undo all of the
		 * swap. The pevent interface handles endianness by
		 * itself.
		 */
		if (swapped) {
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];

		/*
		 * The raw data is aligned on 64 bits including the
		 * u32 size, so it's safe to use mem_bswap_64.
		 */
		if (swapped)
			mem_bswap_64((void *) array, data->raw_size);

		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}
	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					   - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	data->phys_addr = 0;
	if (type & PERF_SAMPLE_PHYS_ADDR) {
		data->phys_addr = *array;
		array++;
	}

	return 0;
}
2364 int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
2365 union perf_event *event,
2368 u64 type = evsel->attr.sample_type;
2371 if (!(type & PERF_SAMPLE_TIME))
2374 if (event->header.type != PERF_RECORD_SAMPLE) {
2375 struct perf_sample data = {
2379 if (!evsel->attr.sample_id_all)
2381 if (perf_evsel__parse_id_sample(evsel, event, &data))
2384 *timestamp = data.time;
2388 array = event->sample.array;
2390 if (perf_event__check_size(event, evsel->sample_size))
2393 if (type & PERF_SAMPLE_IDENTIFIER)
2396 if (type & PERF_SAMPLE_IP)
2399 if (type & PERF_SAMPLE_TID)
2402 if (type & PERF_SAMPLE_TIME)
2403 *timestamp = *array;
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		result += sizeof(u64);

	return result;
}
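
/**
 * perf_event__synthesize_sample - encode a parsed sample back into an event.
 * @event: output buffer, sized via perf_event__sample_event_size()
 * @type: attr.sample_type bits selecting the fields to emit
 * @read_format: attr.read_format bits governing the PERF_SAMPLE_READ layout
 * @sample: the source sample
 *
 * Inverse of perf_evsel__parse_sample(): writes the fields in the exact
 * order the kernel emits them, so a consumer with the same attr can parse
 * the result.
 */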
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR) {
		*array = sample->phys_addr;
		array++;
	}

	return 0;
}
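
/*
 * Look up a tracepoint field by name in this evsel's event format, as parsed
 * by libtraceevent from the tracefs format file.
 */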
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return tep_find_field(evsel->tp_format, name);
}
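
/*
 * Return a pointer to the raw payload of field @name within a tracepoint
 * sample, resolving FIELD_IS_DYNAMIC entries, for which the record instead
 * stores the payload's offset into the data area in the low 16 bits at the
 * field's nominal position.
 */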
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
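
/*
 * Read a tracepoint field as an unsigned integer, byte-swapping it when the
 * recorded data comes from a host of the opposite endianness.
 */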
u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
			 bool needs_swap)
{
	u64 value;
	void *ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1: return *(u8 *)ptr;
	case 2: value = *(u16 *)ptr; break;
	case 4: value = *(u32 *)ptr; break;
	case 8: memcpy(&value, ptr, sizeof(u64)); break;
	default: return 0;
	}

	if (!needs_swap)
		return value;

	switch (field->size) {
	case 2: return bswap_16(value);
	case 4: return bswap_32(value);
	case 8: return bswap_64(value);
	default: return 0;
	}
}
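
/*
 * Convenience wrapper: look the tracepoint field up by name and read it as
 * an integer, returning 0 when the field does not exist.
 */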
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);

	return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}
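
/*
 * Try to degrade gracefully when sys_perf_event_open() fails: returns true
 * after adjusting the evsel (and filling @msg) so that the caller can retry
 * with the weakened configuration.
 */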
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = perf_evsel__name(evsel);
		char *new_name;
		const char *sep = ":";

		/* Is there already a separator in the name? */
		if (strchr(name, '/') ||
		    strchr(name, ':'))
			sep = "";

		if (asprintf(&new_name, "%s%su", name, sep) < 0)
			return false;

		if (evsel->name)
			free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize,
"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
		evsel->attr.exclude_kernel = 1;

		return true;
	}

	return false;
}
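
/*
 * Scan /proc for a process whose comm matches @name; used to detect
 * conflicting profilers such as oprofiled.
 */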
static bool find_process(const char *name)
{
	size_t len = strlen(name);
	DIR *dir;
	struct dirent *d;
	int ret = -1;

	dir = opendir(procfs__mountpoint());
	if (!dir)
		return false;

	/* Walk through the directory. */
	while (ret && (d = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		char *data;
		size_t size;

		if ((d->d_type != DT_DIR) ||
		    !strcmp(".", d->d_name) ||
		    !strcmp("..", d->d_name))
			continue;

		scnprintf(path, sizeof(path), "%s/%s/comm",
			  procfs__mountpoint(), d->d_name);

		if (filename__read_str(path, &data, &size))
			continue;

		ret = strncmp(name, data, len);
		free(data);
	}

	closedir(dir);
	return ret ? false : true;
}
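
/*
 * Build a human-readable explanation for a sys_perf_event_open() failure,
 * keyed on errno, before falling back to a generic strerror message.
 */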
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	int printed = 0;

	switch (err) {
	case EPERM:
	case EACCES:
		if (err == EPERM)
			printed = scnprintf(msg, size,
				"No permission to enable %s event.\n\n",
				perf_evsel__name(evsel));

		return scnprintf(msg + printed, size - printed,
		 "You may not have permission to collect %sstats.\n\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
		 "which controls use of the performance events system by\n"
		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
		 "The current value is %d:\n\n"
		 "  -1: Allow use of (almost) all events by all users\n"
		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
		 ">= 0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN\n"
		 "      Disallow raw tracepoint access by users without CAP_SYS_ADMIN\n"
		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
		 "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
		 "	kernel.perf_event_paranoid = -1\n",
				 target->system_wide ? "system-wide " : "",
				 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if (evsel__has_callchain(evsel) &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
					 "Not enough memory to setup event with callchain.\n"
					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
					 "Hint: Current value: %d", sysctl__max_stack());
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.sample_period != 0)
			return scnprintf(msg, size,
	"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
					 perf_evsel__name(evsel));
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (evsel->attr.write_backward && perf_missing_features.write_backward)
			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg | grep -i perf may provide additional information.\n",
			 err, str_error_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}
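
/*
 * Return the perf_env associated with this evsel's evlist, if any.
 */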
struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
{
	if (evsel && evsel->evlist)
		return evsel->evlist->env;
	return NULL;
}