4 * Builtin stat command: give a precise performance counter summary
5 * of any workload, CPU or specific PID.
9 $ perf stat ./hackbench 10
13 Performance counter stats for './hackbench 10':
15 1708.761321 task-clock # 11.037 CPUs utilized
16 41,190 context-switches # 0.024 M/sec
17 6,735 CPU-migrations # 0.004 M/sec
18 17,318 page-faults # 0.010 M/sec
19 5,205,202,243 cycles # 3.046 GHz
20 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
21 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
22 2,603,501,247 instructions # 0.50 insns per cycle
23 # 1.48 stalled cycles per insn
24 484,357,498 branches # 283.455 M/sec
25 6,388,934 branch-misses # 1.32% of all branches
27 0.154822978 seconds time elapsed
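
  (The metrics in the right-hand column are plain ratios of the raw
   counts: 0.50 insns per cycle is 2,603,501,247 instructions divided
   by 5,205,202,243 cycles, and 1.48 stalled cycles per insn is
   3,856,436,920 frontend stall cycles divided by the same instruction
   count.)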
30 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
32 * Improvements and fixes by:
34 * Arjan van de Ven <arjan@linux.intel.com>
35 * Yanmin Zhang <yanmin.zhang@intel.com>
36 * Wu Fengguang <fengguang.wu@intel.com>
37 * Mike Galbraith <efault@gmx.de>
38 * Paul Mackerras <paulus@samba.org>
39 * Jaswinder Singh Rajput <jaswinder@kernel.org>
41 * Released under the GPL v2. (and only v2, not any later version)
46 #include "util/cgroup.h"
47 #include "util/util.h"
48 #include <subcmd/parse-options.h>
49 #include "util/parse-events.h"
51 #include "util/event.h"
52 #include "util/evlist.h"
53 #include "util/evsel.h"
54 #include "util/debug.h"
55 #include "util/drv_configs.h"
56 #include "util/color.h"
57 #include "util/stat.h"
58 #include "util/header.h"
59 #include "util/cpumap.h"
60 #include "util/thread.h"
61 #include "util/thread_map.h"
62 #include "util/counts.h"
63 #include "util/group.h"
64 #include "util/session.h"
65 #include "util/tool.h"
66 #include "util/string2.h"
67 #include "util/metricgroup.h"
70 #include <linux/time64.h>
71 #include <api/fs/fs.h>
75 #include <sys/prctl.h>
79 #include <sys/types.h>
84 #include <sys/resource.h>
87 #include "sane_ctype.h"
89 #define DEFAULT_SEPARATOR " "
90 #define CNTR_NOT_SUPPORTED "<not supported>"
91 #define CNTR_NOT_COUNTED "<not counted>"
92 #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
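/*
 * Relative to the sysfs mount point; the sysfs__read_int() and
 * sysfs__write_int() calls below resolve this to (typically)
 * /sys/devices/cpu/freeze_on_smi.
 */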
94 static void print_counters(struct timespec *ts, int argc, const char **argv);
96 /* Default events used for perf stat -T */
97 static const char *transaction_attrs = {
109 /* More limited version when the CPU does not have all events. */
110 static const char *transaction_limited_attrs = {
120 static const char *topdown_attrs[] = {
121 "topdown-total-slots",
122 "topdown-slots-retired",
123 "topdown-recovery-bubbles",
124 "topdown-fetch-bubbles",
125 "topdown-slots-issued",
129 static const char *smi_cost_attrs = {
137 static struct perf_evlist *evsel_list;
139 static struct rblist metric_events;
141 static struct target target = {
145 typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
147 static int run_count = 1;
148 static bool no_inherit = false;
149 static volatile pid_t child_pid = -1;
150 static bool null_run = false;
151 static int detailed_run = 0;
152 static bool transaction_run;
153 static bool topdown_run = false;
154 static bool smi_cost = false;
155 static bool smi_reset = false;
156 static bool big_num = true;
157 static int big_num_opt = -1;
158 static const char *csv_sep = NULL;
159 static bool csv_output = false;
160 static bool group = false;
161 static const char *pre_cmd = NULL;
162 static const char *post_cmd = NULL;
163 static bool sync_run = false;
164 static unsigned int initial_delay = 0;
165 static unsigned int unit_width = 4; /* strlen("unit") */
166 static bool forever = false;
167 static bool metric_only = false;
168 static bool force_metric_only = false;
169 static bool no_merge = false;
170 static bool walltime_run_table = false;
171 static struct timespec ref_time;
172 static struct cpu_map *aggr_map;
173 static aggr_get_id_t aggr_get_id;
174 static bool append_file;
175 static bool interval_count;
176 static const char *output_name;
177 static int output_fd;
178 static int print_free_counters_hint;
179 static int print_mixed_hw_group_error;
180 static u64 *walltime_run;
181 static bool ru_display = false;
182 static struct rusage ru_data;
186 struct perf_data data;
187 struct perf_session *session;
189 struct perf_tool tool;
191 struct cpu_map *cpus;
192 struct thread_map *threads;
193 enum aggr_mode aggr_mode;
196 static struct perf_stat perf_stat;
197 #define STAT_RECORD perf_stat.record
199 static volatile int done = 0;
201 static struct perf_stat_config stat_config = {
202 .aggr_mode = AGGR_GLOBAL,
206 static bool is_duration_time(struct perf_evsel *evsel)
208 return !strcmp(evsel->name, "duration_time");
211 static inline void diff_timespec(struct timespec *r, struct timespec *a,
214 r->tv_sec = a->tv_sec - b->tv_sec;
215 if (a->tv_nsec < b->tv_nsec) {
216 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
219 r->tv_nsec = a->tv_nsec - b->tv_nsec;
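/*
 * Worked example: a = {2, 100000000} minus b = {1, 900000000} borrows
 * from tv_sec and yields r = {0, 200000000}, i.e. 0.2s.
 */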
223 static void perf_stat__reset_stats(void)
227 perf_evlist__reset_stats(evsel_list);
228 perf_stat__reset_shadow_stats();
230 for (i = 0; i < stat_config.stats_num; i++)
231 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
234 static int create_perf_stat_counter(struct perf_evsel *evsel)
236 struct perf_event_attr *attr = &evsel->attr;
237 struct perf_evsel *leader = evsel->leader;
239 if (stat_config.scale) {
240 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
241 PERF_FORMAT_TOTAL_TIME_RUNNING;
245 * The event is part of a non-trivial group, so let's enable
246 * the group read (for the leader) and ID retrieval for all
249 if (leader->nr_members > 1)
250 attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
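/*
 * With PERF_FORMAT_GROUP a single read() on the leader returns the
 * counts of all group members, each tagged with its PERF_FORMAT_ID
 * value so it can be matched back to the right evsel (see the group
 * read handling in read_counter() below).
 */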
252 attr->inherit = !no_inherit;
255 * Some events get initialized with sample_(period/type) set,
256 * like tracepoints. Clear these for counting.
258 attr->sample_period = 0;
261 * But set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
262 * while avoiding confusing messages from older tools.
264 * However for pipe sessions we need to keep it zero,
265 * because script's perf_evsel__check_attr is triggered
266 * by attr->sample_type != 0, and we can't run it on
269 if (!(STAT_RECORD && perf_stat.data.is_pipe))
270 attr->sample_type = PERF_SAMPLE_IDENTIFIER;
273 * Disable all counters initially; they will be enabled
274 * either manually by us or by the kernel via enable_on_exec
277 if (perf_evsel__is_group_leader(evsel)) {
281 * In case of initial_delay we enable tracee
284 if (target__none(&target) && !initial_delay)
285 attr->enable_on_exec = 1;
288 if (target__has_cpu(&target) && !target__has_per_thread(&target))
289 return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
291 return perf_evsel__open_per_thread(evsel, evsel_list->threads);
295 * Does the counter have nsecs as a unit?
297 static inline int nsec_counter(struct perf_evsel *evsel)
299 if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
300 perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
306 static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
307 union perf_event *event,
308 struct perf_sample *sample __maybe_unused,
309 struct machine *machine __maybe_unused)
311 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
312 pr_err("failed to write perf data, error: %m\n");
316 perf_stat.bytes_written += event->header.size;
320 static int write_stat_round_event(u64 tm, u64 type)
322 return perf_event__synthesize_stat_round(NULL, tm, type,
323 process_synthesized_event,
327 #define WRITE_STAT_ROUND_EVENT(time, interval) \
328 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
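/*
 * E.g. WRITE_STAT_ROUND_EVENT(ns, INTERVAL) expands to
 * write_stat_round_event(ns, PERF_STAT_ROUND_TYPE__INTERVAL),
 * as used from process_interval() below.
 */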
330 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
333 perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
334 struct perf_counts_values *count)
336 struct perf_sample_id *sid = SID(counter, cpu, thread);
338 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
339 process_synthesized_event, NULL);
343 * Read out the results of a single counter:
344 * do not aggregate counts across CPUs in system-wide mode
346 static int read_counter(struct perf_evsel *counter)
348 int nthreads = thread_map__nr(evsel_list->threads);
349 int ncpus, cpu, thread;
351 if (target__has_cpu(&target) && !target__has_per_thread(&target))
352 ncpus = perf_evsel__nr_cpus(counter);
356 if (!counter->supported)
359 if (counter->system_wide)
362 for (thread = 0; thread < nthreads; thread++) {
363 for (cpu = 0; cpu < ncpus; cpu++) {
364 struct perf_counts_values *count;
366 count = perf_counts(counter->counts, cpu, thread);
369 * The leader's group read loads data into its group members
370 * (via perf_evsel__read_counter) and sets their count->loaded.
372 if (!count->loaded &&
373 perf_evsel__read_counter(counter, cpu, thread)) {
374 counter->counts->scaled = -1;
375 perf_counts(counter->counts, cpu, thread)->ena = 0;
376 perf_counts(counter->counts, cpu, thread)->run = 0;
380 count->loaded = false;
383 if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
384 pr_err("failed to write stat event\n");
390 fprintf(stat_config.output,
391 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
392 perf_evsel__name(counter),
394 count->val, count->ena, count->run);
402 static void read_counters(void)
404 struct perf_evsel *counter;
407 evlist__for_each_entry(evsel_list, counter) {
408 ret = read_counter(counter);
410 pr_debug("failed to read counter %s\n", counter->name);
412 if (ret == 0 && perf_stat_process_counter(&stat_config, counter))
413 pr_warning("failed to process counter %s\n", counter->name);
417 static void process_interval(void)
419 struct timespec ts, rs;
423 clock_gettime(CLOCK_MONOTONIC, &ts);
424 diff_timespec(&rs, &ts, &ref_time);
427 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
428 pr_err("failed to write stat round event\n");
431 init_stats(&walltime_nsecs_stats);
432 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
433 print_counters(&rs, 0, NULL);
436 static void enable_counters(void)
439 usleep(initial_delay * USEC_PER_MSEC);
442 * We need to enable counters only if:
443 * - we don't have a tracee (we are attaching to a task or cpu), or
444 * - we have an initial delay configured
446 if (!target__none(&target) || initial_delay)
447 perf_evlist__enable(evsel_list);
450 static void disable_counters(void)
453 * If we don't have a tracee (i.e. we are attaching to a task or cpu), counters may
454 * still be running. To get accurate group ratios, we must stop groups
455 * from counting before reading their constituent counters.
457 if (!target__none(&target))
458 perf_evlist__disable(evsel_list);
461 static volatile int workload_exec_errno;
464 * perf_evlist__prepare_workload will send a SIGUSR1
465 * if the fork fails, since we asked for it by setting its
466 * want_signal to true.
468 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
469 void *ucontext __maybe_unused)
471 workload_exec_errno = info->si_value.sival_int;
474 static int perf_stat_synthesize_config(bool is_pipe)
479 err = perf_event__synthesize_attrs(NULL, perf_stat.session,
480 process_synthesized_event);
482 pr_err("Couldn't synthesize attrs.\n");
487 err = perf_event__synthesize_extra_attr(NULL,
489 process_synthesized_event,
492 err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
493 process_synthesized_event,
496 pr_err("Couldn't synthesize thread map.\n");
500 err = perf_event__synthesize_cpu_map(NULL, evsel_list->cpus,
501 process_synthesized_event, NULL);
503 pr_err("Couldn't synthesize thread map.\n");
507 err = perf_event__synthesize_stat_config(NULL, &stat_config,
508 process_synthesized_event, NULL);
510 pr_err("Couldn't synthesize config.\n");
517 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
519 static int __store_counter_ids(struct perf_evsel *counter)
523 for (cpu = 0; cpu < xyarray__max_x(counter->fd); cpu++) {
524 for (thread = 0; thread < xyarray__max_y(counter->fd);
526 int fd = FD(counter, cpu, thread);
528 if (perf_evlist__id_add_fd(evsel_list, counter,
529 cpu, thread, fd) < 0)
537 static int store_counter_ids(struct perf_evsel *counter)
539 struct cpu_map *cpus = counter->cpus;
540 struct thread_map *threads = counter->threads;
542 if (perf_evsel__alloc_id(counter, cpus->nr, threads->nr))
545 return __store_counter_ids(counter);
548 static bool perf_evsel__should_store_id(struct perf_evsel *counter)
550 return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
553 static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
555 struct perf_evsel *c2, *leader;
558 leader = evsel->leader;
559 pr_debug("Weak group for %s/%d failed\n",
560 leader->name, leader->nr_members);
563 * for_each_group_member doesn't work here because it doesn't
564 * include the first entry.
566 evlist__for_each_entry(evsel_list, c2) {
569 if (c2->leader == leader) {
571 perf_evsel__close(c2);
579 static int __run_perf_stat(int argc, const char **argv, int run_idx)
581 int interval = stat_config.interval;
582 int times = stat_config.times;
583 int timeout = stat_config.timeout;
585 unsigned long long t0, t1;
586 struct perf_evsel *counter;
590 const bool forks = (argc > 0);
591 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
592 struct perf_evsel_config_term *err_term;
595 ts.tv_sec = interval / MSEC_PER_SEC;
596 ts.tv_nsec = (interval % MSEC_PER_SEC) * NSEC_PER_MSEC;
597 } else if (timeout) {
598 ts.tv_sec = timeout / MSEC_PER_SEC;
599 ts.tv_nsec = (timeout % MSEC_PER_SEC) * NSEC_PER_MSEC;
606 if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
607 workload_exec_failed_signal) < 0) {
608 perror("failed to prepare workload");
611 child_pid = evsel_list->workload.pid;
615 perf_evlist__set_leader(evsel_list);
617 evlist__for_each_entry(evsel_list, counter) {
619 if (create_perf_stat_counter(counter) < 0) {
621 /* Weak group failed. Reset the group. */
622 if ((errno == EINVAL || errno == EBADF) &&
623 counter->leader != counter &&
624 counter->weak_group) {
625 counter = perf_evsel__reset_weak_group(counter);
630 * PPC returns ENXIO for HW counters until 2.6.37
631 * (behavior changed with commit b0a873e).
633 if (errno == EINVAL || errno == ENOSYS ||
634 errno == ENOENT || errno == EOPNOTSUPP ||
637 ui__warning("%s event is not supported by the kernel.\n",
638 perf_evsel__name(counter));
639 counter->supported = false;
641 if ((counter->leader != counter) ||
642 !(counter->leader->nr_members > 1))
644 } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
646 ui__warning("%s\n", msg);
648 } else if (target__has_per_thread(&target) &&
649 evsel_list->threads &&
650 evsel_list->threads->err_thread != -1) {
652 * For global --per-thread case, skip the current error thread.
655 if (!thread_map__remove(evsel_list->threads,
656 evsel_list->threads->err_thread)) {
657 evsel_list->threads->err_thread = -1;
662 perf_evsel__open_strerror(counter, &target,
663 errno, msg, sizeof(msg));
664 ui__error("%s\n", msg);
667 kill(child_pid, SIGTERM);
671 counter->supported = true;
673 l = strlen(counter->unit);
677 if (perf_evsel__should_store_id(counter) &&
678 store_counter_ids(counter))
682 if (perf_evlist__apply_filters(evsel_list, &counter)) {
683 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
684 counter->filter, perf_evsel__name(counter), errno,
685 str_error_r(errno, msg, sizeof(msg)));
689 if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) {
690 pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
691 err_term->val.drv_cfg, perf_evsel__name(counter), errno,
692 str_error_r(errno, msg, sizeof(msg)));
697 int err, fd = perf_data__fd(&perf_stat.data);
700 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
702 err = perf_session__write_header(perf_stat.session, evsel_list,
709 err = perf_stat_synthesize_config(is_pipe);
715 * Enable counters and exec the command:
718 clock_gettime(CLOCK_MONOTONIC, &ref_time);
721 perf_evlist__start_workload(evsel_list);
724 if (interval || timeout) {
725 while (!waitpid(child_pid, &status, WNOHANG)) {
726 nanosleep(&ts, NULL);
730 if (interval_count && !(--times))
734 wait4(child_pid, &status, 0, &ru_data);
736 if (workload_exec_errno) {
737 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
738 pr_err("Workload failed: %s\n", emsg);
742 if (WIFSIGNALED(status))
743 psignal(WTERMSIG(status), argv[0]);
747 nanosleep(&ts, NULL);
752 if (interval_count && !(--times))
762 if (walltime_run_table)
763 walltime_run[run_idx] = t1 - t0;
765 update_stats(&walltime_nsecs_stats, t1 - t0);
768 * Closing a group leader splits the group, and as we only disable
769 * group leaders, the remaining events would become enabled. To
770 * avoid arbitrary skew, we must read all counters before closing any group leader.
774 perf_evlist__close(evsel_list);
776 return WEXITSTATUS(status);
779 static int run_perf_stat(int argc, const char **argv, int run_idx)
784 ret = system(pre_cmd);
792 ret = __run_perf_stat(argc, argv, run_idx);
797 ret = system(post_cmd);
805 static void print_running(u64 run, u64 ena)
808 fprintf(stat_config.output, "%s%" PRIu64 "%s%.2f",
812 ena ? 100.0 * run / ena : 100.0);
813 } else if (run != ena) {
814 fprintf(stat_config.output, " (%.2f%%)", 100.0 * run / ena);
818 static void print_noise_pct(double total, double avg)
820 double pct = rel_stddev_stats(total, avg);
823 fprintf(stat_config.output, "%s%.2f%%", csv_sep, pct);
825 fprintf(stat_config.output, " ( +-%6.2f%% )", pct);
828 static void print_noise(struct perf_evsel *evsel, double avg)
830 struct perf_stat_evsel *ps;
836 print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
839 static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
841 switch (stat_config.aggr_mode) {
843 fprintf(stat_config.output, "S%d-C%*d%s%*d%s",
844 cpu_map__id_to_socket(id),
846 cpu_map__id_to_cpu(id),
853 fprintf(stat_config.output, "S%*d%s%*d%s",
862 fprintf(stat_config.output, "CPU%*d%s",
864 perf_evsel__cpus(evsel)->map[id], csv_sep);
867 fprintf(stat_config.output, "%*s-%*d%s",
869 thread_map__comm(evsel->threads, id),
871 thread_map__pid(evsel->threads, id),
887 struct perf_evsel *evsel;
890 #define METRIC_LEN 35
892 static void new_line_std(void *ctx)
894 struct outstate *os = ctx;
899 static void do_new_line_std(struct outstate *os)
902 fputs(os->prefix, os->fh);
903 aggr_printout(os->evsel, os->id, os->nr);
904 if (stat_config.aggr_mode == AGGR_NONE)
905 fprintf(os->fh, " ");
906 fprintf(os->fh, " ");
909 static void print_metric_std(void *ctx, const char *color, const char *fmt,
910 const char *unit, double val)
912 struct outstate *os = ctx;
915 bool newline = os->newline;
919 if (unit == NULL || fmt == NULL) {
920 fprintf(out, "%-*s", METRIC_LEN, "");
927 n = fprintf(out, " # ");
929 n += color_fprintf(out, color, fmt, val);
931 n += fprintf(out, fmt, val);
932 fprintf(out, " %-*s", METRIC_LEN - n - 1, unit);
935 static void new_line_csv(void *ctx)
937 struct outstate *os = ctx;
942 fprintf(os->fh, "%s%s", os->prefix, csv_sep);
943 aggr_printout(os->evsel, os->id, os->nr);
944 for (i = 0; i < os->nfields; i++)
945 fputs(csv_sep, os->fh);
948 static void print_metric_csv(void *ctx,
949 const char *color __maybe_unused,
950 const char *fmt, const char *unit, double val)
952 struct outstate *os = ctx;
954 char buf[64], *vals, *ends;
956 if (unit == NULL || fmt == NULL) {
957 fprintf(out, "%s%s", csv_sep, csv_sep);
960 snprintf(buf, sizeof(buf), fmt, val);
961 ends = vals = ltrim(buf);
962 while (isdigit(*ends) || *ends == '.')
965 while (isspace(*unit))
967 fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit);
970 #define METRIC_ONLY_LEN 20
972 /* Filter out some columns that don't work well in metrics only mode */
974 static bool valid_only_metric(const char *unit)
978 if (strstr(unit, "/sec") ||
979 strstr(unit, "hz") ||
980 strstr(unit, "Hz") ||
981 strstr(unit, "CPUs utilized"))
986 static const char *fixunit(char *buf, struct perf_evsel *evsel,
989 if (!strncmp(unit, "of all", 6)) {
990 snprintf(buf, 1024, "%s %s", perf_evsel__name(evsel),
997 static void print_metric_only(void *ctx, const char *color, const char *fmt,
998 const char *unit, double val)
1000 struct outstate *os = ctx;
1004 unsigned mlen = METRIC_ONLY_LEN;
1006 if (!valid_only_metric(unit))
1008 unit = fixunit(buf, os->evsel, unit);
1010 n = color_fprintf(out, color, fmt, val);
1012 n = fprintf(out, fmt, val);
1013 if (n > METRIC_ONLY_LEN)
1014 n = METRIC_ONLY_LEN;
1015 if (mlen < strlen(unit))
1016 mlen = strlen(unit) + 1;
1017 fprintf(out, "%*s", mlen - n, "");
1020 static void print_metric_only_csv(void *ctx, const char *color __maybe_unused,
1022 const char *unit, double val)
1024 struct outstate *os = ctx;
1026 char buf[64], *vals, *ends;
1029 if (!valid_only_metric(unit))
1031 unit = fixunit(tbuf, os->evsel, unit);
1032 snprintf(buf, sizeof buf, fmt, val);
1033 ends = vals = ltrim(buf);
1034 while (isdigit(*ends) || *ends == '.')
1037 fprintf(out, "%s%s", vals, csv_sep);
1040 static void new_line_metric(void *ctx __maybe_unused)
1044 static void print_metric_header(void *ctx, const char *color __maybe_unused,
1045 const char *fmt __maybe_unused,
1046 const char *unit, double val __maybe_unused)
1048 struct outstate *os = ctx;
1051 if (!valid_only_metric(unit))
1053 unit = fixunit(tbuf, os->evsel, unit);
1055 fprintf(os->fh, "%s%s", unit, csv_sep);
1057 fprintf(os->fh, "%-*s ", METRIC_ONLY_LEN, unit);
1060 static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
1062 FILE *output = stat_config.output;
1063 double msecs = avg / NSEC_PER_MSEC;
1064 const char *fmt_v, *fmt_n;
1067 fmt_v = csv_output ? "%.6f%s" : "%18.6f%s";
1068 fmt_n = csv_output ? "%s" : "%-25s";
1070 aggr_printout(evsel, id, nr);
1072 scnprintf(name, sizeof(name), "%s%s",
1073 perf_evsel__name(evsel), csv_output ? "" : " (msec)");
1075 fprintf(output, fmt_v, msecs, csv_sep);
1078 fprintf(output, "%s%s", evsel->unit, csv_sep);
1080 fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep);
1082 fprintf(output, fmt_n, name);
1085 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
1088 static int first_shadow_cpu(struct perf_evsel *evsel, int id)
1095 if (stat_config.aggr_mode == AGGR_NONE)
1098 if (stat_config.aggr_mode == AGGR_GLOBAL)
1101 for (i = 0; i < perf_evsel__nr_cpus(evsel); i++) {
1102 int cpu2 = perf_evsel__cpus(evsel)->map[i];
1104 if (aggr_get_id(evsel_list->cpus, cpu2) == id)
1110 static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
1112 FILE *output = stat_config.output;
1113 double sc = evsel->scale;
1117 fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s";
1120 fmt = floor(sc) != sc ? "%'18.2f%s" : "%'18.0f%s";
1122 fmt = floor(sc) != sc ? "%18.2f%s" : "%18.0f%s";
1125 aggr_printout(evsel, id, nr);
1127 fprintf(output, fmt, avg, csv_sep);
1130 fprintf(output, "%-*s%s",
1131 csv_output ? 0 : unit_width,
1132 evsel->unit, csv_sep);
1134 fprintf(output, "%-*s", csv_output ? 0 : 25, perf_evsel__name(evsel));
1137 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
1140 static bool is_mixed_hw_group(struct perf_evsel *counter)
1142 struct perf_evlist *evlist = counter->evlist;
1143 u32 pmu_type = counter->attr.type;
1144 struct perf_evsel *pos;
1146 if (counter->nr_members < 2)
1149 evlist__for_each_entry(evlist, pos) {
1150 /* software events can be part of any hardware group */
1151 if (pos->attr.type == PERF_TYPE_SOFTWARE)
1153 if (pmu_type == PERF_TYPE_SOFTWARE) {
1154 pmu_type = pos->attr.type;
1157 if (pmu_type != pos->attr.type)
1164 static void printout(int id, int nr, struct perf_evsel *counter, double uval,
1165 char *prefix, u64 run, u64 ena, double noise,
1166 struct runtime_stat *st)
1168 struct perf_stat_output_ctx out;
1169 struct outstate os = {
1170 .fh = stat_config.output,
1171 .prefix = prefix ? prefix : "",
1176 print_metric_t pm = print_metric_std;
1180 nl = new_line_metric;
1182 pm = print_metric_only_csv;
1184 pm = print_metric_only;
1188 if (csv_output && !metric_only) {
1189 static int aggr_fields[] = {
1197 pm = print_metric_csv;
1200 os.nfields += aggr_fields[stat_config.aggr_mode];
1204 if (run == 0 || ena == 0 || counter->counts->scaled == -1) {
1206 pm(&os, NULL, "", "", 0);
1209 aggr_printout(counter, id, nr);
1211 fprintf(stat_config.output, "%*s%s",
1212 csv_output ? 0 : 18,
1213 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
1216 if (counter->supported) {
1217 print_free_counters_hint = 1;
1218 if (is_mixed_hw_group(counter))
1219 print_mixed_hw_group_error = 1;
1222 fprintf(stat_config.output, "%-*s%s",
1223 csv_output ? 0 : unit_width,
1224 counter->unit, csv_sep);
1226 fprintf(stat_config.output, "%*s",
1227 csv_output ? 0 : -25,
1228 perf_evsel__name(counter));
1231 fprintf(stat_config.output, "%s%s",
1232 csv_sep, counter->cgrp->name);
1235 pm(&os, NULL, NULL, "", 0);
1236 print_noise(counter, noise);
1237 print_running(run, ena);
1239 pm(&os, NULL, NULL, "", 0);
1245 else if (nsec_counter(counter))
1246 nsec_printout(id, nr, counter, uval);
1248 abs_printout(id, nr, counter, uval);
1250 out.print_metric = pm;
1253 out.force_header = false;
1255 if (csv_output && !metric_only) {
1256 print_noise(counter, noise);
1257 print_running(run, ena);
1260 perf_stat__print_shadow_stats(counter, uval,
1261 first_shadow_cpu(counter, id),
1262 &out, &metric_events, st);
1263 if (!csv_output && !metric_only) {
1264 print_noise(counter, noise);
1265 print_running(run, ena);
1269 static void aggr_update_shadow(void)
1273 struct perf_evsel *counter;
1275 for (s = 0; s < aggr_map->nr; s++) {
1276 id = aggr_map->map[s];
1277 evlist__for_each_entry(evsel_list, counter) {
1279 for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
1280 s2 = aggr_get_id(evsel_list->cpus, cpu);
1283 val += perf_counts(counter->counts, cpu, 0)->val;
1285 perf_stat__update_shadow_stats(counter, val,
1286 first_shadow_cpu(counter, id),
1292 static void uniquify_event_name(struct perf_evsel *counter)
1297 if (counter->uniquified_name ||
1298 !counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
1299 strlen(counter->pmu_name)))
1302 config = strchr(counter->name, '/');
1304 if (asprintf(&new_name,
1305 "%s%s", counter->pmu_name, config) > 0) {
1306 free(counter->name);
1307 counter->name = new_name;
1310 if (asprintf(&new_name,
1311 "%s [%s]", counter->name, counter->pmu_name) > 0) {
1312 free(counter->name);
1313 counter->name = new_name;
1317 counter->uniquified_name = true;
1320 static void collect_all_aliases(struct perf_evsel *counter,
1321 void (*cb)(struct perf_evsel *counter, void *data,
1325 struct perf_evsel *alias;
1327 alias = list_prepare_entry(counter, &(evsel_list->entries), node);
1328 list_for_each_entry_continue (alias, &evsel_list->entries, node) {
1329 if (strcmp(perf_evsel__name(alias), perf_evsel__name(counter)) ||
1330 alias->scale != counter->scale ||
1331 alias->cgrp != counter->cgrp ||
1332 strcmp(alias->unit, counter->unit) ||
1333 nsec_counter(alias) != nsec_counter(counter))
1335 alias->merged_stat = true;
1336 cb(alias, data, false);
1340 static bool collect_data(struct perf_evsel *counter,
1341 void (*cb)(struct perf_evsel *counter, void *data,
1345 if (counter->merged_stat)
1347 cb(counter, data, true);
1349 uniquify_event_name(counter);
1350 else if (counter->auto_merge_stats)
1351 collect_all_aliases(counter, cb, data);
1362 static void aggr_cb(struct perf_evsel *counter, void *data, bool first)
1364 struct aggr_data *ad = data;
1367 for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
1368 struct perf_counts_values *counts;
1370 s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
1375 counts = perf_counts(counter->counts, cpu, 0);
1377 * When any result is bad, mark them all bad to give
1378 * consistent output in interval mode.
1380 if (counts->ena == 0 || counts->run == 0 ||
1381 counter->counts->scaled == -1) {
1386 ad->val += counts->val;
1387 ad->ena += counts->ena;
1388 ad->run += counts->run;
1392 static void print_aggr(char *prefix)
1394 FILE *output = stat_config.output;
1395 struct perf_evsel *counter;
1401 if (!(aggr_map || aggr_get_id))
1404 aggr_update_shadow();
1407 * With metric_only everything is on a single line.
1408 * Without it, each counter has its own line.
1410 for (s = 0; s < aggr_map->nr; s++) {
1411 struct aggr_data ad;
1412 if (prefix && metric_only)
1413 fprintf(output, "%s", prefix);
1415 ad.id = id = aggr_map->map[s];
1417 evlist__for_each_entry(evsel_list, counter) {
1418 if (is_duration_time(counter))
1421 ad.val = ad.ena = ad.run = 0;
1423 if (!collect_data(counter, aggr_cb, &ad))
1429 if (first && metric_only) {
1431 aggr_printout(counter, id, nr);
1433 if (prefix && !metric_only)
1434 fprintf(output, "%s", prefix);
1436 uval = val * counter->scale;
1437 printout(id, nr, counter, uval, prefix, run, ena, 1.0,
1440 fputc('\n', output);
1443 fputc('\n', output);
1447 static int cmp_val(const void *a, const void *b)
1449 return ((struct perf_aggr_thread_value *)b)->val -
1450 ((struct perf_aggr_thread_value *)a)->val;
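/* Comparing b to a (not a to b) makes qsort() sort descending by val. */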
1453 static struct perf_aggr_thread_value *sort_aggr_thread(
1454 struct perf_evsel *counter,
1455 int nthreads, int ncpus,
1458 int cpu, thread, i = 0;
1460 struct perf_aggr_thread_value *buf;
1462 buf = calloc(nthreads, sizeof(struct perf_aggr_thread_value));
1466 for (thread = 0; thread < nthreads; thread++) {
1467 u64 ena = 0, run = 0, val = 0;
1469 for (cpu = 0; cpu < ncpus; cpu++) {
1470 val += perf_counts(counter->counts, cpu, thread)->val;
1471 ena += perf_counts(counter->counts, cpu, thread)->ena;
1472 run += perf_counts(counter->counts, cpu, thread)->run;
1475 uval = val * counter->scale;
1478 * Skip zero values when --per-thread is enabled globally,
1479 * otherwise the output contains too many zeros.
1481 if (uval == 0.0 && target__has_per_thread(&target))
1484 buf[i].counter = counter;
1493 qsort(buf, i, sizeof(struct perf_aggr_thread_value), cmp_val);
1501 static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
1503 FILE *output = stat_config.output;
1504 int nthreads = thread_map__nr(counter->threads);
1505 int ncpus = cpu_map__nr(counter->cpus);
1506 int thread, sorted_threads, id;
1507 struct perf_aggr_thread_value *buf;
1509 buf = sort_aggr_thread(counter, nthreads, ncpus, &sorted_threads);
1511 perror("cannot sort aggr thread");
1515 for (thread = 0; thread < sorted_threads; thread++) {
1517 fprintf(output, "%s", prefix);
1519 id = buf[thread].id;
1520 if (stat_config.stats)
1521 printout(id, 0, buf[thread].counter, buf[thread].uval,
1522 prefix, buf[thread].run, buf[thread].ena, 1.0,
1523 &stat_config.stats[id]);
1525 printout(id, 0, buf[thread].counter, buf[thread].uval,
1526 prefix, buf[thread].run, buf[thread].ena, 1.0,
1528 fputc('\n', output);
1535 double avg, avg_enabled, avg_running;
1538 static void counter_aggr_cb(struct perf_evsel *counter, void *data,
1539 bool first __maybe_unused)
1541 struct caggr_data *cd = data;
1542 struct perf_stat_evsel *ps = counter->stats;
1544 cd->avg += avg_stats(&ps->res_stats[0]);
1545 cd->avg_enabled += avg_stats(&ps->res_stats[1]);
1546 cd->avg_running += avg_stats(&ps->res_stats[2]);
1550 * Print out the results of a single counter:
1551 * aggregated counts in system-wide mode
1553 static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
1555 FILE *output = stat_config.output;
1557 struct caggr_data cd = { .avg = 0.0 };
1559 if (!collect_data(counter, counter_aggr_cb, &cd))
1562 if (prefix && !metric_only)
1563 fprintf(output, "%s", prefix);
1565 uval = cd.avg * counter->scale;
1566 printout(-1, 0, counter, uval, prefix, cd.avg_running, cd.avg_enabled,
1569 fprintf(output, "\n");
1572 static void counter_cb(struct perf_evsel *counter, void *data,
1573 bool first __maybe_unused)
1575 struct aggr_data *ad = data;
1577 ad->val += perf_counts(counter->counts, ad->cpu, 0)->val;
1578 ad->ena += perf_counts(counter->counts, ad->cpu, 0)->ena;
1579 ad->run += perf_counts(counter->counts, ad->cpu, 0)->run;
1583 * Print out the results of a single counter:
1584 * does not use aggregated counts in system-wide mode
1586 static void print_counter(struct perf_evsel *counter, char *prefix)
1588 FILE *output = stat_config.output;
1593 for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
1594 struct aggr_data ad = { .cpu = cpu };
1596 if (!collect_data(counter, counter_cb, &ad))
1603 fprintf(output, "%s", prefix);
1605 uval = val * counter->scale;
1606 printout(cpu, 0, counter, uval, prefix, run, ena, 1.0,
1609 fputc('\n', output);
1613 static void print_no_aggr_metric(char *prefix)
1617 struct perf_evsel *counter;
1621 nrcpus = evsel_list->cpus->nr;
1622 for (cpu = 0; cpu < nrcpus; cpu++) {
1626 fputs(prefix, stat_config.output);
1627 evlist__for_each_entry(evsel_list, counter) {
1628 if (is_duration_time(counter))
1631 aggr_printout(counter, cpu, 0);
1634 val = perf_counts(counter->counts, cpu, 0)->val;
1635 ena = perf_counts(counter->counts, cpu, 0)->ena;
1636 run = perf_counts(counter->counts, cpu, 0)->run;
1638 uval = val * counter->scale;
1639 printout(cpu, 0, counter, uval, prefix, run, ena, 1.0,
1642 fputc('\n', stat_config.output);
1646 static int aggr_header_lens[] = {
1654 static const char *aggr_header_csv[] = {
1655 [AGGR_CORE] = "core,cpus,",
1656 [AGGR_SOCKET] = "socket,cpus,",
1657 [AGGR_NONE] = "cpu,",
1658 [AGGR_THREAD] = "comm-pid,",
1662 static void print_metric_headers(const char *prefix, bool no_indent)
1664 struct perf_stat_output_ctx out;
1665 struct perf_evsel *counter;
1666 struct outstate os = {
1667 .fh = stat_config.output
1671 fprintf(stat_config.output, "%s", prefix);
1673 if (!csv_output && !no_indent)
1674 fprintf(stat_config.output, "%*s",
1675 aggr_header_lens[stat_config.aggr_mode], "");
1677 if (stat_config.interval)
1678 fputs("time,", stat_config.output);
1679 fputs(aggr_header_csv[stat_config.aggr_mode],
1680 stat_config.output);
1683 /* Print metrics headers only */
1684 evlist__for_each_entry(evsel_list, counter) {
1685 if (is_duration_time(counter))
1689 out.print_metric = print_metric_header;
1690 out.new_line = new_line_metric;
1691 out.force_header = true;
1693 perf_stat__print_shadow_stats(counter, 0,
1699 fputc('\n', stat_config.output);
1702 static void print_interval(char *prefix, struct timespec *ts)
1704 FILE *output = stat_config.output;
1705 static int num_print_interval;
1707 sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
1709 if (num_print_interval == 0 && !csv_output) {
1710 switch (stat_config.aggr_mode) {
1712 fprintf(output, "# time socket cpus");
1714 fprintf(output, " counts %*s events\n", unit_width, "unit");
1717 fprintf(output, "# time core cpus");
1719 fprintf(output, " counts %*s events\n", unit_width, "unit");
1722 fprintf(output, "# time CPU");
1724 fprintf(output, " counts %*s events\n", unit_width, "unit");
1727 fprintf(output, "# time comm-pid");
1729 fprintf(output, " counts %*s events\n", unit_width, "unit");
1733 fprintf(output, "# time");
1735 fprintf(output, " counts %*s events\n", unit_width, "unit");
1741 if (num_print_interval == 0 && metric_only)
1742 print_metric_headers(" ", true);
1743 if (++num_print_interval == 25)
1744 num_print_interval = 0;
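/* Wrapping at 25 makes the interval header reappear every 25 rows. */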
1747 static void print_header(int argc, const char **argv)
1749 FILE *output = stat_config.output;
1755 fprintf(output, "\n");
1756 fprintf(output, " Performance counter stats for ");
1757 if (target.system_wide)
1758 fprintf(output, "\'system wide");
1759 else if (target.cpu_list)
1760 fprintf(output, "\'CPU(s) %s", target.cpu_list);
1761 else if (!target__has_task(&target)) {
1762 fprintf(output, "\'%s", argv ? argv[0] : "pipe");
1763 for (i = 1; argv && (i < argc); i++)
1764 fprintf(output, " %s", argv[i]);
1765 } else if (target.pid)
1766 fprintf(output, "process id \'%s", target.pid);
1768 fprintf(output, "thread id \'%s", target.tid);
1770 fprintf(output, "\'");
1772 fprintf(output, " (%d runs)", run_count);
1773 fprintf(output, ":\n\n");
1777 static int get_precision(double num)
1782 return lround(ceil(-log10(num)));
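/*
 * E.g. num = 0.0042 gives ceil(-log10(num)) = ceil(2.38) = 3: three
 * decimal places are needed to resolve a stddev of that size.
 */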
1785 static void print_table(FILE *output, int precision, double avg)
1788 int idx, indent = 0;
1790 scnprintf(tmp, 64, " %17.*f", precision, avg);
1791 while (tmp[indent] == ' ')
1794 fprintf(output, "%*s# Table of individual measurements:\n", indent, "");
1796 for (idx = 0; idx < run_count; idx++) {
1797 double run = (double) walltime_run[idx] / NSEC_PER_SEC;
1798 int h, n = 1 + abs((int) (100.0 * (run - avg)/run) / 5);
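/*
 * Histogram width: one '#' plus one more per full 5% deviation from
 * the average, e.g. a run 12% off the mean prints 1 + 12/5 = 3 marks.
 */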
1800 fprintf(output, " %17.*f (%+.*f) ",
1801 precision, run, precision, run - avg);
1803 for (h = 0; h < n; h++)
1804 fprintf(output, "#");
1806 fprintf(output, "\n");
1809 fprintf(output, "\n%*s# Final result:\n", indent, "");
1812 static double timeval2double(struct timeval *t)
1814 return t->tv_sec + (double) t->tv_usec/USEC_PER_SEC;
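/* E.g. { .tv_sec = 5, .tv_usec = 250000 } becomes 5.25 seconds. */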
1817 static void print_footer(void)
1819 double avg = avg_stats(&walltime_nsecs_stats) / NSEC_PER_SEC;
1820 FILE *output = stat_config.output;
1824 fprintf(output, "\n");
1826 if (run_count == 1) {
1827 fprintf(output, " %17.9f seconds time elapsed", avg);
1830 double ru_utime = timeval2double(&ru_data.ru_utime);
1831 double ru_stime = timeval2double(&ru_data.ru_stime);
1833 fprintf(output, "\n\n");
1834 fprintf(output, " %17.9f seconds user\n", ru_utime);
1835 fprintf(output, " %17.9f seconds sys\n", ru_stime);
1838 double sd = stddev_stats(&walltime_nsecs_stats) / NSEC_PER_SEC;
1840 * Display at most 2 more significant
1841 * digits than the stddev inaccuracy.
1843 int precision = get_precision(sd) + 2;
1845 if (walltime_run_table)
1846 print_table(output, precision, avg);
1848 fprintf(output, " %17.*f +- %.*f seconds time elapsed",
1849 precision, avg, precision, sd);
1851 print_noise_pct(sd, avg);
1853 fprintf(output, "\n\n");
1855 if (print_free_counters_hint &&
1856 sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 &&
1859 "Some events weren't counted. Try disabling the NMI watchdog:\n"
1860 " echo 0 > /proc/sys/kernel/nmi_watchdog\n"
1862 " echo 1 > /proc/sys/kernel/nmi_watchdog\n");
1864 if (print_mixed_hw_group_error)
1866 "The events in group usually have to be from "
1867 "the same PMU. Try reorganizing the group.\n");
1870 static void print_counters(struct timespec *ts, int argc, const char **argv)
1872 int interval = stat_config.interval;
1873 struct perf_evsel *counter;
1874 char buf[64], *prefix = NULL;
1876 /* Do not print anything if we record to the pipe. */
1877 if (STAT_RECORD && perf_stat.data.is_pipe)
1881 print_interval(prefix = buf, ts);
1883 print_header(argc, argv);
1886 static int num_print_iv;
1888 if (num_print_iv == 0 && !interval)
1889 print_metric_headers(prefix, false);
1890 if (num_print_iv++ == 25)
1892 if (stat_config.aggr_mode == AGGR_GLOBAL && prefix)
1893 fprintf(stat_config.output, "%s", prefix);
1896 switch (stat_config.aggr_mode) {
1902 evlist__for_each_entry(evsel_list, counter) {
1903 if (is_duration_time(counter))
1905 print_aggr_thread(counter, prefix);
1909 evlist__for_each_entry(evsel_list, counter) {
1910 if (is_duration_time(counter))
1912 print_counter_aggr(counter, prefix);
1915 fputc('\n', stat_config.output);
1919 print_no_aggr_metric(prefix);
1921 evlist__for_each_entry(evsel_list, counter) {
1922 if (is_duration_time(counter))
1924 print_counter(counter, prefix);
1933 if (!interval && !csv_output)
1936 fflush(stat_config.output);
1939 static volatile int signr = -1;
1941 static void skip_signal(int signo)
1943 if ((child_pid == -1) || stat_config.interval)
1948 * Render child_pid harmless so that we
1949 * won't send SIGTERM to a random
1950 * process in case of a race condition
1951 * and fast PID recycling.
1956 static void sig_atexit(void)
1961 * Avoid a race condition with the SIGCHLD handler
1962 * in skip_signal(), which modifies child_pid; the
1963 * goal is to avoid sending SIGTERM to a random
1967 sigaddset(&set, SIGCHLD);
1968 sigprocmask(SIG_BLOCK, &set, &oset);
1970 if (child_pid != -1)
1971 kill(child_pid, SIGTERM);
1973 sigprocmask(SIG_SETMASK, &oset, NULL);
1978 signal(signr, SIG_DFL);
1979 kill(getpid(), signr);
1982 static int stat__set_big_num(const struct option *opt __maybe_unused,
1983 const char *s __maybe_unused, int unset)
1985 big_num_opt = unset ? 0 : 1;
1989 static int enable_metric_only(const struct option *opt __maybe_unused,
1990 const char *s __maybe_unused, int unset)
1992 force_metric_only = true;
1993 metric_only = !unset;
1997 static int parse_metric_groups(const struct option *opt,
1999 int unset __maybe_unused)
2001 return metricgroup__parse_groups(opt, str, &metric_events);
2004 static const struct option stat_options[] = {
2005 OPT_BOOLEAN('T', "transaction", &transaction_run,
2006 "hardware transaction statistics"),
2007 OPT_CALLBACK('e', "event", &evsel_list, "event",
2008 "event selector. use 'perf list' to list available events",
2009 parse_events_option),
2010 OPT_CALLBACK(0, "filter", &evsel_list, "filter",
2011 "event filter", parse_filter),
2012 OPT_BOOLEAN('i', "no-inherit", &no_inherit,
2013 "child tasks do not inherit counters"),
2014 OPT_STRING('p', "pid", &target.pid, "pid",
2015 "stat events on existing process id"),
2016 OPT_STRING('t', "tid", &target.tid, "tid",
2017 "stat events on existing thread id"),
2018 OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
2019 "system-wide collection from all CPUs"),
2020 OPT_BOOLEAN('g', "group", &group,
2021 "put the counters into a counter group"),
2022 OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
2023 OPT_INCR('v', "verbose", &verbose,
2024 "be more verbose (show counter open errors, etc)"),
2025 OPT_INTEGER('r', "repeat", &run_count,
2026 "repeat command and print average + stddev (max: 100, forever: 0)"),
2027 OPT_BOOLEAN(0, "table", &walltime_run_table,
2028 "display details about each run (only with -r option)"),
2029 OPT_BOOLEAN('n', "null", &null_run,
2030 "null run - dont start any counters"),
2031 OPT_INCR('d', "detailed", &detailed_run,
2032 "detailed run - start a lot of events"),
2033 OPT_BOOLEAN('S', "sync", &sync_run,
2034 "call sync() before starting a run"),
2035 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
2036 "print large numbers with thousands\' separators",
2038 OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
2039 "list of cpus to monitor in system-wide"),
2040 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
2041 "disable CPU count aggregation", AGGR_NONE),
2042 OPT_BOOLEAN(0, "no-merge", &no_merge, "Do not merge identical named events"),
2043 OPT_STRING('x', "field-separator", &csv_sep, "separator",
2044 "print counts with custom separator"),
2045 OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
2046 "monitor event in cgroup name only", parse_cgroups),
2047 OPT_STRING('o', "output", &output_name, "file", "output file name"),
2048 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
2049 OPT_INTEGER(0, "log-fd", &output_fd,
2050 "log output to fd, instead of stderr"),
2051 OPT_STRING(0, "pre", &pre_cmd, "command",
2052 "command to run prior to the measured command"),
2053 OPT_STRING(0, "post", &post_cmd, "command",
2054 "command to run after to the measured command"),
2055 OPT_UINTEGER('I', "interval-print", &stat_config.interval,
2056 "print counts at regular interval in ms "
2057 "(overhead is possible for values <= 100ms)"),
2058 OPT_INTEGER(0, "interval-count", &stat_config.times,
2059 "print counts for fixed number of times"),
2060 OPT_UINTEGER(0, "timeout", &stat_config.timeout,
2061 "stop workload and print counts after a timeout period in ms (>= 10ms)"),
2062 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
2063 "aggregate counts per processor socket", AGGR_SOCKET),
2064 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
2065 "aggregate counts per physical processor core", AGGR_CORE),
2066 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
2067 "aggregate counts per thread", AGGR_THREAD),
2068 OPT_UINTEGER('D', "delay", &initial_delay,
2069 "ms to wait before starting measurement after program start"),
2070 OPT_CALLBACK_NOOPT(0, "metric-only", &metric_only, NULL,
2071 "Only print computed metrics. No raw values", enable_metric_only),
2072 OPT_BOOLEAN(0, "topdown", &topdown_run,
2073 "measure topdown level 1 statistics"),
2074 OPT_BOOLEAN(0, "smi-cost", &smi_cost,
2075 "measure SMI cost"),
2076 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
2077 "monitor specified metrics or metric groups (separated by ,)",
2078 parse_metric_groups),
2082 static int perf_stat__get_socket(struct cpu_map *map, int cpu)
2084 return cpu_map__get_socket(map, cpu, NULL);
2087 static int perf_stat__get_core(struct cpu_map *map, int cpu)
2089 return cpu_map__get_core(map, cpu, NULL);
2092 static int cpu_map__get_max(struct cpu_map *map)
2096 for (i = 0; i < map->nr; i++) {
2097 if (map->map[i] > max)
2104 static struct cpu_map *cpus_aggr_map;
2106 static int perf_stat__get_aggr(aggr_get_id_t get_id, struct cpu_map *map, int idx)
2113 cpu = map->map[idx];
2115 if (cpus_aggr_map->map[cpu] == -1)
2116 cpus_aggr_map->map[cpu] = get_id(map, idx);
2118 return cpus_aggr_map->map[cpu];
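/*
 * cpus_aggr_map memoizes the cpu -> aggregation id mapping, so the
 * underlying get_id() callback runs at most once per cpu.
 */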
2121 static int perf_stat__get_socket_cached(struct cpu_map *map, int idx)
2123 return perf_stat__get_aggr(perf_stat__get_socket, map, idx);
2126 static int perf_stat__get_core_cached(struct cpu_map *map, int idx)
2128 return perf_stat__get_aggr(perf_stat__get_core, map, idx);
2131 static int perf_stat_init_aggr_mode(void)
2135 switch (stat_config.aggr_mode) {
2137 if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
2138 perror("cannot build socket map");
2141 aggr_get_id = perf_stat__get_socket_cached;
2144 if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) {
2145 perror("cannot build core map");
2148 aggr_get_id = perf_stat__get_core_cached;
2159 * The evsel_list->cpus is the base we operate on; we take
2160 * the highest cpu number to be the size of
2161 * the aggregation translation cpumap.
2163 nr = cpu_map__get_max(evsel_list->cpus);
2164 cpus_aggr_map = cpu_map__empty_new(nr + 1);
2165 return cpus_aggr_map ? 0 : -ENOMEM;
2168 static void perf_stat__exit_aggr_mode(void)
2170 cpu_map__put(aggr_map);
2171 cpu_map__put(cpus_aggr_map);
2173 cpus_aggr_map = NULL;
2176 static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, int idx)
2183 cpu = map->map[idx];
2185 if (cpu >= env->nr_cpus_avail)
2191 static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
2193 struct perf_env *env = data;
2194 int cpu = perf_env__get_cpu(env, map, idx);
2196 return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
2199 static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
2201 struct perf_env *env = data;
2202 int core = -1, cpu = perf_env__get_cpu(env, map, idx);
2205 int socket_id = env->cpu[cpu].socket_id;
2208 * Encode the socket in the upper 16 bits:
2209 * core_id is relative to its socket, and
2210 * we need a global id. So we combine
2213 core = (socket_id << 16) | (env->cpu[cpu].core_id & 0xffff);
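/*
 * E.g. socket 1, core 2 encodes to (1 << 16) | 2 = 0x10002;
 * cpu_map__id_to_socket() and cpu_map__id_to_cpu() undo the split.
 */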
2219 static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus,
2220 struct cpu_map **sockp)
2222 return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
2225 static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
2226 struct cpu_map **corep)
2228 return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
2231 static int perf_stat__get_socket_file(struct cpu_map *map, int idx)
2233 return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
2236 static int perf_stat__get_core_file(struct cpu_map *map, int idx)
2238 return perf_env__get_core(map, idx, &perf_stat.session->header.env);
2241 static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
2243 struct perf_env *env = &st->session->header.env;
2245 switch (stat_config.aggr_mode) {
2247 if (perf_env__build_socket_map(env, evsel_list->cpus, &aggr_map)) {
2248 perror("cannot build socket map");
2251 aggr_get_id = perf_stat__get_socket_file;
2254 if (perf_env__build_core_map(env, evsel_list->cpus, &aggr_map)) {
2255 perror("cannot build core map");
2258 aggr_get_id = perf_stat__get_core_file;
2271 static int topdown_filter_events(const char **attr, char **str, bool use_group)
2278 for (i = 0; attr[i]; i++) {
2279 if (pmu_have_event("cpu", attr[i])) {
2280 len += strlen(attr[i]) + 1;
2281 attr[i - off] = attr[i];
2285 attr[i - off] = NULL;
2287 *str = malloc(len + 1 + 2);
2297 for (i = 0; attr[i]; i++) {
2310 __weak bool arch_topdown_check_group(bool *warn)
2316 __weak void arch_topdown_group_warn(void)
2321 * Add default attributes, if there were no attributes specified or
2322 * if -d/--detailed, -d -d or -d -d -d is used:
2324 static int add_default_attributes(void)
2327 struct perf_event_attr default_attrs0[] = {
2329 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
2330 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
2331 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
2332 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
2334 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
2336 struct perf_event_attr frontend_attrs[] = {
2337 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
2339 struct perf_event_attr backend_attrs[] = {
2340 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
2342 struct perf_event_attr default_attrs1[] = {
2343 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
2344 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
2345 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
2350 * Detailed stats (-d), covering the L1 and last level data caches:
2352 struct perf_event_attr detailed_attrs[] = {
2354 { .type = PERF_TYPE_HW_CACHE,
2356 PERF_COUNT_HW_CACHE_L1D << 0 |
2357 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
2358 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
2360 { .type = PERF_TYPE_HW_CACHE,
2362 PERF_COUNT_HW_CACHE_L1D << 0 |
2363 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
2364 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
2366 { .type = PERF_TYPE_HW_CACHE,
2368 PERF_COUNT_HW_CACHE_LL << 0 |
2369 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
2370 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
2372 { .type = PERF_TYPE_HW_CACHE,
2374 PERF_COUNT_HW_CACHE_LL << 0 |
2375 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
2376 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
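/*
 * The config values above use the PERF_TYPE_HW_CACHE encoding from
 * perf_event_open(2): (cache id) | (op id << 8) | (result id << 16).
 */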
2380 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
2382 struct perf_event_attr very_detailed_attrs[] = {
2384 { .type = PERF_TYPE_HW_CACHE,
2386 PERF_COUNT_HW_CACHE_L1I << 0 |
2387 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
2388 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
2390 { .type = PERF_TYPE_HW_CACHE,
2392 PERF_COUNT_HW_CACHE_L1I << 0 |
2393 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
2394 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
2396 { .type = PERF_TYPE_HW_CACHE,
2398 PERF_COUNT_HW_CACHE_DTLB << 0 |
2399 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
2400 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
2402 { .type = PERF_TYPE_HW_CACHE,
2404 PERF_COUNT_HW_CACHE_DTLB << 0 |
2405 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
2406 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
2408 { .type = PERF_TYPE_HW_CACHE,
2410 PERF_COUNT_HW_CACHE_ITLB << 0 |
2411 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
2412 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
2414 { .type = PERF_TYPE_HW_CACHE,
2416 PERF_COUNT_HW_CACHE_ITLB << 0 |
2417 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
2418 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
2423 * Very, very detailed stats (-d -d -d), adding prefetch events:
2425 struct perf_event_attr very_very_detailed_attrs[] = {
2427 { .type = PERF_TYPE_HW_CACHE,
2429 PERF_COUNT_HW_CACHE_L1D << 0 |
2430 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
2431 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
2433 { .type = PERF_TYPE_HW_CACHE,
2435 PERF_COUNT_HW_CACHE_L1D << 0 |
2436 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
2437 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
2440 /* Set attrs if no event is selected and !null_run: */
2444 if (transaction_run) {
2445 struct parse_events_error errinfo;
2447 if (pmu_have_event("cpu", "cycles-ct") &&
2448 pmu_have_event("cpu", "el-start"))
2449 err = parse_events(evsel_list, transaction_attrs,
2452 err = parse_events(evsel_list,
2453 transaction_limited_attrs,
2456 fprintf(stderr, "Cannot set up transaction events\n");
2465 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
2466 fprintf(stderr, "freeze_on_smi is not supported.\n");
2471 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
2472 fprintf(stderr, "Failed to set freeze_on_smi.\n");
2478 if (pmu_have_event("msr", "aperf") &&
2479 pmu_have_event("msr", "smi")) {
2480 if (!force_metric_only)
2482 err = parse_events(evsel_list, smi_cost_attrs, NULL);
2484 fprintf(stderr, "To measure SMI cost, it needs "
2485 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
2489 fprintf(stderr, "Cannot set up SMI cost events\n");
2499 if (stat_config.aggr_mode != AGGR_GLOBAL &&
2500 stat_config.aggr_mode != AGGR_CORE) {
2501 pr_err("top down event configuration requires --per-core mode\n");
2504 stat_config.aggr_mode = AGGR_CORE;
2505 if (nr_cgroups || !target__has_cpu(&target)) {
2506 pr_err("top down event configuration requires system-wide mode (-a)\n");
2510 if (!force_metric_only)
2512 if (topdown_filter_events(topdown_attrs, &str,
2513 arch_topdown_check_group(&warn)) < 0) {
2514 pr_err("Out of memory\n");
2517 if (topdown_attrs[0] && str) {
2519 arch_topdown_group_warn();
2520 err = parse_events(evsel_list, str, NULL);
2523 "Cannot set up top down events %s: %d\n",
2529 fprintf(stderr, "System does not support topdown\n");
2535 if (!evsel_list->nr_entries) {
2536 if (target__has_cpu(&target))
2537 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
2539 if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
2541 if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
2542 if (perf_evlist__add_default_attrs(evsel_list,
2543 frontend_attrs) < 0)
2546 if (pmu_have_event("cpu", "stalled-cycles-backend")) {
2547 if (perf_evlist__add_default_attrs(evsel_list,
2551 if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
2555 /* Detailed events get appended to the event list: */
2557 if (detailed_run < 1)
2560 /* Append detailed run extra attributes: */
2561 if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
2564 if (detailed_run < 2)
2567 /* Append very detailed run extra attributes: */
2568 if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
2571 if (detailed_run < 3)
2574 /* Append very, very detailed run extra attributes: */
2575 return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
2578 static const char * const stat_record_usage[] = {
2579 "perf stat record [<options>]",
2583 static void init_features(struct perf_session *session)
2587 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
2588 perf_header__set_feat(&session->header, feat);
2590 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
2591 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
2592 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
2593 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
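/*
 * Presumably because a stat session records counts rather than
 * samples, the sample-oriented features (build ids, tracing data,
 * branch stacks, auxtrace) are cleared from the header.
 */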
2596 static int __cmd_record(int argc, const char **argv)
2598 struct perf_session *session;
2599 struct perf_data *data = &perf_stat.data;
2601 argc = parse_options(argc, argv, stat_options, stat_record_usage,
2602 PARSE_OPT_STOP_AT_NON_OPTION);
2605 data->file.path = output_name;
2607 if (run_count != 1 || forever) {
2608 pr_err("Cannot use -r option with perf stat record.\n");
2612 session = perf_session__new(data, false, NULL);
2613 if (session == NULL) {
2614 pr_err("Perf session creation failed.\n");
2618 init_features(session);
2620 session->evlist = evsel_list;
2621 perf_stat.session = session;
2622 perf_stat.record = true;
2626 static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
2627 union perf_event *event,
2628 struct perf_session *session)
2630 struct stat_round_event *stat_round = &event->stat_round;
2631 struct perf_evsel *counter;
2632 struct timespec tsh, *ts = NULL;
2633 const char **argv = session->header.env.cmdline_argv;
2634 int argc = session->header.env.nr_cmdline;
2636 evlist__for_each_entry(evsel_list, counter)
2637 perf_stat_process_counter(&stat_config, counter);
2639 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
2640 update_stats(&walltime_nsecs_stats, stat_round->time);
2642 if (stat_config.interval && stat_round->time) {
2643 tsh.tv_sec = stat_round->time / NSEC_PER_SEC;
2644 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
2648 print_counters(ts, argc, argv);
static
int process_stat_config_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_session *session __maybe_unused)
{
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	perf_event__read_stat_config(&stat_config, &event->stat_config);

	if (cpu_map__empty(st->cpus)) {
		if (st->aggr_mode != AGGR_UNSET)
			pr_warning("warning: processing task data, aggregation mode not set\n");
		return 0;
	}

	if (st->aggr_mode != AGGR_UNSET)
		stat_config.aggr_mode = st->aggr_mode;

	if (perf_stat.data.is_pipe)
		perf_stat_init_aggr_mode();
	else
		perf_stat_init_aggr_mode_file(st);

	return 0;
}
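
/*
 * Once both the thread map and the cpu map have been received, wire
 * them into the evlist and allocate the per-counter stats. Guarded so
 * a duplicated map event cannot allocate twice.
 */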
static int set_maps(struct perf_stat *st)
{
	if (!st->cpus || !st->threads)
		return 0;

	if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
		return -EINVAL;

	perf_evlist__set_maps(evsel_list, st->cpus, st->threads);

	if (perf_evlist__alloc_stats(evsel_list, true))
		return -ENOMEM;

	st->maps_allocated = true;
	return 0;
}
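
/*
 * Thread and cpu maps arrive as separate events in the recorded file;
 * whichever of the two handlers below runs second triggers set_maps().
 */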
static
int process_thread_map_event(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_session *session __maybe_unused)
{
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	if (st->threads) {
		pr_warning("Extra thread map event, ignoring.\n");
		return 0;
	}

	st->threads = thread_map__new_event(&event->thread_map);
	if (!st->threads)
		return -ENOMEM;

	return set_maps(st);
}
static
int process_cpu_map_event(struct perf_tool *tool,
			  union perf_event *event,
			  struct perf_session *session __maybe_unused)
{
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);
	struct cpu_map *cpus;

	if (st->cpus) {
		pr_warning("Extra cpu map event, ignoring.\n");
		return 0;
	}

	cpus = cpu_map__new_data(&event->cpu_map.data);
	if (!cpus)
		return -ENOMEM;

	st->cpus = cpus;
	return set_maps(st);
}
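
/*
 * With --per-thread in system-wide mode each monitored thread gets its
 * own runtime_stat, so shadow metrics (IPC and friends) are kept from
 * mixing across threads.
 */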
static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
{
	int i;

	config->stats = calloc(nthreads, sizeof(struct runtime_stat));
	if (!config->stats)
		return -1;

	config->stats_num = nthreads;

	for (i = 0; i < nthreads; i++)
		runtime_stat__init(&config->stats[i]);

	return 0;
}
static void runtime_stat_delete(struct perf_stat_config *config)
{
	int i;

	if (!config->stats)
		return;

	for (i = 0; i < config->stats_num; i++)
		runtime_stat__exit(&config->stats[i]);

	free(config->stats);
}
static const char * const stat_report_usage[] = {
	"perf stat report [<options>]",
	NULL,
};
static struct perf_stat perf_stat = {
	.tool = {
		.attr		= perf_event__process_attr,
		.event_update	= perf_event__process_event_update,
		.thread_map	= process_thread_map_event,
		.cpu_map	= process_cpu_map_event,
		.stat_config	= process_stat_config_event,
		.stat		= perf_event__process_stat_event,
		.stat_round	= process_stat_round_event,
	},
	.aggr_mode	= AGGR_UNSET,
};
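
/*
 * 'perf stat report' replays a file written by 'perf stat record'.
 * With no -i argument it reads perf.data, or stdin when that is a
 * pipe, and prints through the same output path as live counting.
 */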
static int __cmd_report(int argc, const char **argv)
{
	struct perf_session *session;
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_END()
	};
	struct stat st;
	int ret;

	argc = parse_options(argc, argv, options, stat_report_usage, 0);

	if (!input_name || !strlen(input_name)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			input_name = "-";
		else
			input_name = "perf.data";
	}

	perf_stat.data.file.path = input_name;
	perf_stat.data.mode      = PERF_DATA_MODE_READ;

	session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
	if (session == NULL)
		return -1;

	perf_stat.session  = session;
	stat_config.output = stderr;
	evsel_list         = session->evlist;

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	perf_session__delete(session);
	return 0;
}
static void setup_system_wide(int forks)
{
	/*
	 * Make system wide (-a) the default target if
	 * no target was specified and one of the following
	 * conditions is met:
	 *
	 *   - there's no workload specified
	 *   - there is a workload specified but all requested
	 *     events are system wide events
	 */
	if (!target__none(&target))
		return;

	if (!forks)
		target.system_wide = true;
	else {
		struct perf_evsel *counter;

		evlist__for_each_entry(evsel_list, counter) {
			if (!counter->system_wide)
				return;
		}

		if (evsel_list->nr_entries)
			target.system_wide = true;
	}
}
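
/*
 * Entry point for 'perf stat'. Dispatches to the record/report
 * subcommands, validates option combinations, builds the event list
 * and target maps, then runs the workload run_count times (or forever
 * with -r 0).
 */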
int cmd_stat(int argc, const char **argv)
{
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx;
	const char *mode;
	FILE *output = stderr;
	unsigned int interval, timeout;
	const char * const stat_subcommands[] = { "record", "report" };

	setlocale(LC_ALL, "");

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	parse_events__shrink_config_terms();
	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
					(const char **) stat_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);
	perf_stat__collect_metric_expr(evsel_list);
	perf_stat__init_shadow_stats();

	if (csv_sep) {
		csv_output = true;
		if (!strcmp(csv_sep, "\\t"))
			csv_sep = "\t";
	} else
		csv_sep = DEFAULT_SEPARATOR;

	if (argc && !strncmp(argv[0], "rec", 3)) {
		argc = __cmd_record(argc, argv);
		if (argc < 0)
			return -1;
	} else if (argc && !strncmp(argv[0], "rep", 3))
		return __cmd_report(argc, argv);

	interval = stat_config.interval;
	timeout = stat_config.timeout;
	/*
	 * For the record command the -o option is already taken care of.
	 */
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
		output = NULL;

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		goto out;
	}

	if (metric_only && stat_config.aggr_mode == AGGR_THREAD) {
		fprintf(stderr, "--metric-only is not supported with --per-thread\n");
		goto out;
	}

	if (metric_only && run_count > 1) {
		fprintf(stderr, "--metric-only is not supported with -r\n");
		goto out;
	}

	if (walltime_run_table && run_count <= 1) {
		fprintf(stderr, "--table is only supported with -r\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		parse_options_usage(NULL, stat_options, "table", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be > 0\n");
		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
		goto out;
	}

	if (!output) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	stat_config.output = output;

	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		big_num = false;

	setup_system_wide(argc);

	/*
	 * Display user/system times only for a single
	 * run and when there's a specified tracee.
	 */
	if ((run_count == 1) && target__none(&target))
		ru_display = true;

	if (run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (run_count == 0) {
		forever = true;
		run_count = 1;
	}

	if (walltime_run_table) {
		walltime_run = zalloc(run_count * sizeof(walltime_run[0]));
		if (!walltime_run) {
			pr_err("failed to setup -r option");
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) &&
		!target__has_task(&target)) {
		if (!target.system_wide || target.cpu_list) {
			fprintf(stderr, "The --per-thread option is only "
				"available when monitoring via -p -t -a "
				"options or only --per-thread.\n");
			parse_options_usage(NULL, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
			goto out;
		}
	}

	/*
	 * no_aggr and cgroup are for system-wide only;
	 * --per-thread is aggregated per thread, we don't mix it with cpu mode
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		goto out;
	}
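
	/*
	 * To illustrate the check above: 'perf stat -a -G mygroup -- sleep 1'
	 * counts only inside the (hypothetical) 'mygroup' cgroup, which is
	 * why -G requires system-wide (-a) counting.
	 */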
	if (add_default_attributes())
		goto out;

	target__validate(&target);

	if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
		target.per_thread = true;

	if (perf_evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads of monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	/*
	 * Initialize thread_map with comm names,
	 * so we can print them out on output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD) {
		thread_map__read_comms(evsel_list->threads);
		if (target.system_wide) {
			if (runtime_stat_new(&stat_config,
					     thread_map__nr(evsel_list->threads))) {
				goto out;
			}
		}
	}

	if (stat_config.times && interval)
		interval_count = true;
	else if (stat_config.times && !interval) {
		pr_err("interval-count option should be used together with "
		       "interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "interval-count", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (timeout && timeout < 100) {
		if (timeout < 10) {
			pr_err("timeout must be >= 10ms.\n");
			parse_options_usage(stat_usage, stat_options, "timeout", 0);
			goto out;
		} else
			pr_warning("timeout < 100ms. "
				   "The overhead percentage could be high in some cases. "
				   "Please proceed with caution.\n");
	}
	if (timeout && interval) {
		pr_err("timeout option is not supported with interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "timeout", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}
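
	/*
	 * To illustrate the checks above: 'perf stat -I 1000 --interval-count 5 -a'
	 * prints five one-second snapshots, while 'perf stat --timeout 2000 -a'
	 * stops counting after two seconds; combining -I with --timeout is
	 * rejected here.
	 */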
	if (perf_evlist__alloc_stats(evsel_list, interval))
		goto out;

	if (perf_stat_init_aggr_mode())
		goto out;

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, while being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT,  skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; forever || run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose > 0)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		status = run_perf_stat(argc, argv, run_idx);
		if (forever && status != -1) {
			print_counters(NULL, argc, argv);
			perf_stat__reset_stats();
		}
	}

	if (!forever && status != -1 && !interval)
		print_counters(NULL, argc, argv);

	if (STAT_RECORD) {
		/*
		 * We synthesize the kernel mmap record just so that older tools
		 * don't emit warnings about not being able to resolve symbols
		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
		 * a saner message about no samples being in the perf.data file.
		 *
		 * This also serves to suppress a warning about f_header.data.size == 0
		 * in header.c at the moment 'perf stat record' gets introduced, which
		 * is not really needed once we start adding the stat specific PERF_RECORD_
		 * records, but the need to suppress the kptr_restrict messages in older
		 * tools remains  -acme
		 */
		int fd = perf_data__fd(&perf_stat.data);
		int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							     process_synthesized_event,
							     &perf_stat.session->machines.host);
		if (err) {
			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
				   "older tools may produce warnings about this file.\n");
		}

		if (!interval) {
			if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
				pr_err("failed to write stat round event\n");
		}

		if (!perf_stat.data.is_pipe) {
			perf_stat.session->header.data_size += perf_stat.bytes_written;
			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
		}

		perf_session__delete(perf_stat.session);
	}

	perf_stat__exit_aggr_mode();
	perf_evlist__free_stats(evsel_list);
out:
	free(walltime_run);

	if (smi_cost && smi_reset)
		sysfs__write_int(FREEZE_ON_SMI_PATH, 0);

	perf_evlist__delete(evsel_list);

	runtime_stat_delete(&stat_config);

	return status;
}