tools/perf/builtin-stat.c
1 /*
2  * builtin-stat.c
3  *
4  * Builtin stat command: Give a precise performance counters summary
5  * overview about any workload, CPU or specific PID.
6  *
7  * Sample output:
8
9    $ perf stat ./hackbench 10
10
11   Time: 0.118
12
13   Performance counter stats for './hackbench 10':
14
15        1708.761321 task-clock                #   11.037 CPUs utilized
16             41,190 context-switches          #    0.024 M/sec
17              6,735 CPU-migrations            #    0.004 M/sec
18             17,318 page-faults               #    0.010 M/sec
19      5,205,202,243 cycles                    #    3.046 GHz
20      3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
21      1,600,790,871 stalled-cycles-backend    #   30.75% backend  cycles idle
22      2,603,501,247 instructions              #    0.50  insns per cycle
23                                              #    1.48  stalled cycles per insn
24        484,357,498 branches                  #  283.455 M/sec
25          6,388,934 branch-misses             #    1.32% of all branches
26
27         0.154822978  seconds time elapsed
28
29  *
30  * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
31  *
32  * Improvements and fixes by:
33  *
34  *   Arjan van de Ven <arjan@linux.intel.com>
35  *   Yanmin Zhang <yanmin.zhang@intel.com>
36  *   Wu Fengguang <fengguang.wu@intel.com>
37  *   Mike Galbraith <efault@gmx.de>
38  *   Paul Mackerras <paulus@samba.org>
39  *   Jaswinder Singh Rajput <jaswinder@kernel.org>
40  *
41  * Released under the GPL v2. (and only v2, not any later version)
42  */
43
44 #include "perf.h"
45 #include "builtin.h"
46 #include "util/cgroup.h"
47 #include "util/util.h"
48 #include <subcmd/parse-options.h>
49 #include "util/parse-events.h"
50 #include "util/pmu.h"
51 #include "util/event.h"
52 #include "util/evlist.h"
53 #include "util/evsel.h"
54 #include "util/debug.h"
55 #include "util/drv_configs.h"
56 #include "util/color.h"
57 #include "util/stat.h"
58 #include "util/header.h"
59 #include "util/cpumap.h"
60 #include "util/thread.h"
61 #include "util/thread_map.h"
62 #include "util/counts.h"
63 #include "util/group.h"
64 #include "util/session.h"
65 #include "util/tool.h"
66 #include "util/string2.h"
67 #include "util/metricgroup.h"
68 #include "util/top.h"
69 #include "asm/bug.h"
70
71 #include <linux/time64.h>
72 #include <api/fs/fs.h>
73 #include <errno.h>
74 #include <signal.h>
75 #include <stdlib.h>
76 #include <sys/prctl.h>
77 #include <inttypes.h>
78 #include <locale.h>
79 #include <math.h>
80 #include <sys/types.h>
81 #include <sys/stat.h>
82 #include <sys/wait.h>
83 #include <unistd.h>
84 #include <sys/time.h>
85 #include <sys/resource.h>
87
88 #include "sane_ctype.h"
89
90 #define DEFAULT_SEPARATOR       " "
91 #define FREEZE_ON_SMI_PATH      "devices/cpu/freeze_on_smi"
92
93 static void print_counters(struct timespec *ts, int argc, const char **argv);
94
95 /* Default events used for perf stat -T */
96 static const char *transaction_attrs = {
97         "task-clock,"
98         "{"
99         "instructions,"
100         "cycles,"
101         "cpu/cycles-t/,"
102         "cpu/tx-start/,"
103         "cpu/el-start/,"
104         "cpu/cycles-ct/"
105         "}"
106 };
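/*
 * Note: in perf's event syntax, a brace-enclosed, comma-separated list
 * such as "{instructions,cycles}" is parsed as a single event group,
 * so the kernel schedules and reads its members together.
 */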
107
108 /* More limited version when the CPU does not have all events. */
109 static const char *transaction_limited_attrs = {
110         "task-clock,"
111         "{"
112         "instructions,"
113         "cycles,"
114         "cpu/cycles-t/,"
115         "cpu/tx-start/"
116         "}"
117 };
118
119 static const char *topdown_attrs[] = {
120         "topdown-total-slots",
121         "topdown-slots-retired",
122         "topdown-recovery-bubbles",
123         "topdown-fetch-bubbles",
124         "topdown-slots-issued",
125         NULL,
126 };
127
128 static const char *smi_cost_attrs = {
129         "{"
130         "msr/aperf/,"
131         "msr/smi/,"
132         "cycles"
133         "}"
134 };
135
136 static struct perf_evlist       *evsel_list;
137
138 static struct target target = {
139         .uid    = UINT_MAX,
140 };
141
142 #define METRIC_ONLY_LEN 20
143
144 static volatile pid_t           child_pid                       = -1;
145 static int                      detailed_run                    =  0;
146 static bool                     transaction_run;
147 static bool                     topdown_run                     = false;
148 static bool                     smi_cost                        = false;
149 static bool                     smi_reset                       = false;
150 static int                      big_num_opt                     =  -1;
151 static bool                     group                           = false;
152 static const char               *pre_cmd                        = NULL;
153 static const char               *post_cmd                       = NULL;
154 static bool                     sync_run                        = false;
155 static bool                     forever                         = false;
156 static bool                     force_metric_only               = false;
157 static struct timespec          ref_time;
158 static bool                     append_file;
159 static bool                     interval_count;
160 static const char               *output_name;
161 static int                      output_fd;
162
163 struct perf_stat {
164         bool                     record;
165         struct perf_data         data;
166         struct perf_session     *session;
167         u64                      bytes_written;
168         struct perf_tool         tool;
169         bool                     maps_allocated;
170         struct cpu_map          *cpus;
171         struct thread_map       *threads;
172         enum aggr_mode           aggr_mode;
173 };
174
175 static struct perf_stat         perf_stat;
176 #define STAT_RECORD             perf_stat.record
177
178 static volatile int done = 0;
179
180 static struct perf_stat_config stat_config = {
181         .aggr_mode              = AGGR_GLOBAL,
182         .scale                  = true,
183         .unit_width             = 4, /* strlen("unit") */
184         .run_count              = 1,
185         .metric_only_len        = METRIC_ONLY_LEN,
186         .walltime_nsecs_stats   = &walltime_nsecs_stats,
187         .big_num                = true,
188 };
189
190 static inline void diff_timespec(struct timespec *r, struct timespec *a,
191                                  struct timespec *b)
192 {
193         r->tv_sec = a->tv_sec - b->tv_sec;
194         if (a->tv_nsec < b->tv_nsec) {
195                 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
196                 r->tv_sec--;
197         } else {
198                 r->tv_nsec = a->tv_nsec - b->tv_nsec;
199         }
200 }
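/*
 * Worked example: for a = {5 s, 100 ns} and b = {3 s, 900 ns} the
 * nanosecond field borrows from the seconds, giving
 * r = {1 s, 999999200 ns}.
 */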
201
202 static void perf_stat__reset_stats(void)
203 {
204         int i;
205
206         perf_evlist__reset_stats(evsel_list);
207         perf_stat__reset_shadow_stats();
208
209         for (i = 0; i < stat_config.stats_num; i++)
210                 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
211 }
212
213 static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
214                                      union perf_event *event,
215                                      struct perf_sample *sample __maybe_unused,
216                                      struct machine *machine __maybe_unused)
217 {
218         if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
219                 pr_err("failed to write perf data, error: %m\n");
220                 return -1;
221         }
222
223         perf_stat.bytes_written += event->header.size;
224         return 0;
225 }
226
227 static int write_stat_round_event(u64 tm, u64 type)
228 {
229         return perf_event__synthesize_stat_round(NULL, tm, type,
230                                                  process_synthesized_event,
231                                                  NULL);
232 }
233
234 #define WRITE_STAT_ROUND_EVENT(time, interval) \
235         write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
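/*
 * The ## token paste expands, for example,
 * WRITE_STAT_ROUND_EVENT(t, INTERVAL) into
 * write_stat_round_event(t, PERF_STAT_ROUND_TYPE__INTERVAL).
 */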
236
237 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
238
239 static int
240 perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
241                              struct perf_counts_values *count)
242 {
243         struct perf_sample_id *sid = SID(counter, cpu, thread);
244
245         return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
246                                            process_synthesized_event, NULL);
247 }
248
249 /*
250  * Read out the results of a single counter:
251  * do not aggregate counts across CPUs in system-wide mode
252  */
253 static int read_counter(struct perf_evsel *counter)
254 {
255         int nthreads = thread_map__nr(evsel_list->threads);
256         int ncpus, cpu, thread;
257
258         if (target__has_cpu(&target) && !target__has_per_thread(&target))
259                 ncpus = perf_evsel__nr_cpus(counter);
260         else
261                 ncpus = 1;
262
263         if (!counter->supported)
264                 return -ENOENT;
265
266         if (counter->system_wide)
267                 nthreads = 1;
268
269         for (thread = 0; thread < nthreads; thread++) {
270                 for (cpu = 0; cpu < ncpus; cpu++) {
271                         struct perf_counts_values *count;
272
273                         count = perf_counts(counter->counts, cpu, thread);
274
275                         /*
276                          * The leader's group read loads data into its group members
277                          * (via perf_evsel__read_counter) and sets their count->loaded.
278                          */
279                         if (!count->loaded &&
280                             perf_evsel__read_counter(counter, cpu, thread)) {
281                                 counter->counts->scaled = -1;
282                                 perf_counts(counter->counts, cpu, thread)->ena = 0;
283                                 perf_counts(counter->counts, cpu, thread)->run = 0;
284                                 return -1;
285                         }
286
287                         count->loaded = false;
288
289                         if (STAT_RECORD) {
290                                 if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
291                                         pr_err("failed to write stat event\n");
292                                         return -1;
293                                 }
294                         }
295
296                         if (verbose > 1) {
297                                 fprintf(stat_config.output,
298                                         "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
299                                                 perf_evsel__name(counter),
300                                                 cpu,
301                                                 count->val, count->ena, count->run);
302                         }
303                 }
304         }
305
306         return 0;
307 }
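/*
 * Note (a sketch of the conventional scaling, not code from this
 * file): when counters are multiplexed, a raw count is scaled by the
 * enabled/running time ratio, scaled = val * ena / run; e.g.
 * val = 1000, ena = 200 ms, run = 100 ms gives scaled = 2000.
 */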
308
309 static void read_counters(void)
310 {
311         struct perf_evsel *counter;
312         int ret;
313
314         evlist__for_each_entry(evsel_list, counter) {
315                 ret = read_counter(counter);
316                 if (ret)
317                         pr_debug("failed to read counter %s\n", counter->name);
318
319                 if (ret == 0 && perf_stat_process_counter(&stat_config, counter))
320                         pr_warning("failed to process counter %s\n", counter->name);
321         }
322 }
323
324 static void process_interval(void)
325 {
326         struct timespec ts, rs;
327
328         read_counters();
329
330         clock_gettime(CLOCK_MONOTONIC, &ts);
331         diff_timespec(&rs, &ts, &ref_time);
332
333         if (STAT_RECORD) {
334                 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
335                         pr_err("failed to write stat round event\n");
336         }
337
338         init_stats(&walltime_nsecs_stats);
339         update_stats(&walltime_nsecs_stats, stat_config.interval * NSEC_PER_MSEC);
340         print_counters(&rs, 0, NULL);
341 }
342
343 static void enable_counters(void)
344 {
345         if (stat_config.initial_delay)
346                 usleep(stat_config.initial_delay * USEC_PER_MSEC);
347
348         /*
349          * We need to enable counters here only if:
350          * - we don't have a tracee (we are attaching to an existing task or cpu), or
351          * - we have an initial delay configured
352          */
353         if (!target__none(&target) || stat_config.initial_delay)
354                 perf_evlist__enable(evsel_list);
355 }
356
357 static void disable_counters(void)
358 {
359         /*
360          * If we don't have a tracee (we attached to a task or cpu), counters may
361          * still be running. To get accurate group ratios, we must stop groups
362          * from counting before reading their constituent counters.
363          */
364         if (!target__none(&target))
365                 perf_evlist__disable(evsel_list);
366 }
367
368 static volatile int workload_exec_errno;
369
370 /*
371  * perf_evlist__prepare_workload will send a SIGUSR1
372  * if the fork fails, since we asked for it by setting its
373  * want_signal to true.
374  */
375 static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
376                                         void *ucontext __maybe_unused)
377 {
378         workload_exec_errno = info->si_value.sival_int;
379 }
380
381 static bool perf_evsel__should_store_id(struct perf_evsel *counter)
382 {
383         return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
384 }
385
386 static bool is_target_alive(struct target *_target,
387                             struct thread_map *threads)
388 {
389         struct stat st;
390         int i;
391
392         if (!target__has_task(_target))
393                 return true;
394
395         for (i = 0; i < threads->nr; i++) {
396                 char path[PATH_MAX];
397
398                 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
399                           threads->map[i].pid);
400
401                 if (!stat(path, &st))
402                         return true;
403         }
404
405         return false;
406 }
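/*
 * Note (assuming the default procfs mount point): the liveness check
 * above amounts to stat("/proc/<pid>") per thread, e.g. "/proc/1234",
 * and the target counts as alive while any one of its threads still
 * has such an entry.
 */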
407
408 static int __run_perf_stat(int argc, const char **argv, int run_idx)
409 {
410         int interval = stat_config.interval;
411         int times = stat_config.times;
412         int timeout = stat_config.timeout;
413         char msg[BUFSIZ];
414         unsigned long long t0, t1;
415         struct perf_evsel *counter;
416         struct timespec ts;
417         size_t l;
418         int status = 0;
419         const bool forks = (argc > 0);
420         bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
421         struct perf_evsel_config_term *err_term;
422
423         if (interval) {
424                 ts.tv_sec  = interval / USEC_PER_MSEC;
425                 ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC;
426         } else if (timeout) {
427                 ts.tv_sec  = timeout / USEC_PER_MSEC;
428                 ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC;
429         } else {
430                 ts.tv_sec  = 1;
431                 ts.tv_nsec = 0;
432         }
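        /*
         * Note: interval/timeout are in milliseconds, so the split above is
         * ms / 1000 seconds plus (ms % 1000) * 1000000 nanoseconds;
         * USEC_PER_MSEC equals 1000, which is also the number of
         * milliseconds per second, so the arithmetic is correct despite
         * the unit in the macro's name.
         */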
433
434         if (forks) {
435                 if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
436                                                   workload_exec_failed_signal) < 0) {
437                         perror("failed to prepare workload");
438                         return -1;
439                 }
440                 child_pid = evsel_list->workload.pid;
441         }
442
443         if (group)
444                 perf_evlist__set_leader(evsel_list);
445
446         evlist__for_each_entry(evsel_list, counter) {
447 try_again:
448                 if (create_perf_stat_counter(counter, &stat_config, &target) < 0) {
449
450                         /* Weak group failed. Reset the group. */
451                         if ((errno == EINVAL || errno == EBADF) &&
452                             counter->leader != counter &&
453                             counter->weak_group) {
454                                 counter = perf_evlist__reset_weak_group(evsel_list, counter);
455                                 goto try_again;
456                         }
457
458                         /*
459                          * PPC returns ENXIO for HW counters until 2.6.37
460                          * (behavior changed with commit b0a873e).
461                          */
462                         if (errno == EINVAL || errno == ENOSYS ||
463                             errno == ENOENT || errno == EOPNOTSUPP ||
464                             errno == ENXIO) {
465                                 if (verbose > 0)
466                                         ui__warning("%s event is not supported by the kernel.\n",
467                                                     perf_evsel__name(counter));
468                                 counter->supported = false;
469
470                                 if ((counter->leader != counter) ||
471                                     !(counter->leader->nr_members > 1))
472                                         continue;
473                         } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
474                                 if (verbose > 0)
475                                         ui__warning("%s\n", msg);
476                                 goto try_again;
477                         } else if (target__has_per_thread(&target) &&
478                                    evsel_list->threads &&
479                                    evsel_list->threads->err_thread != -1) {
480                                 /*
481                                  * In the global --per-thread case, skip the
482                                  * current error thread.
483                                  */
484                                 if (!thread_map__remove(evsel_list->threads,
485                                                         evsel_list->threads->err_thread)) {
486                                         evsel_list->threads->err_thread = -1;
487                                         goto try_again;
488                                 }
489                         }
490
491                         perf_evsel__open_strerror(counter, &target,
492                                                   errno, msg, sizeof(msg));
493                         ui__error("%s\n", msg);
494
495                         if (child_pid != -1)
496                                 kill(child_pid, SIGTERM);
497
498                         return -1;
499                 }
500                 counter->supported = true;
501
502                 l = strlen(counter->unit);
503                 if (l > stat_config.unit_width)
504                         stat_config.unit_width = l;
505
506                 if (perf_evsel__should_store_id(counter) &&
507                     perf_evsel__store_ids(counter, evsel_list))
508                         return -1;
509         }
510
511         if (perf_evlist__apply_filters(evsel_list, &counter)) {
512                 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
513                         counter->filter, perf_evsel__name(counter), errno,
514                         str_error_r(errno, msg, sizeof(msg)));
515                 return -1;
516         }
517
518         if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) {
519                 pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
520                       err_term->val.drv_cfg, perf_evsel__name(counter), errno,
521                       str_error_r(errno, msg, sizeof(msg)));
522                 return -1;
523         }
524
525         if (STAT_RECORD) {
526                 int err, fd = perf_data__fd(&perf_stat.data);
527
528                 if (is_pipe) {
529                         err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
530                 } else {
531                         err = perf_session__write_header(perf_stat.session, evsel_list,
532                                                          fd, false);
533                 }
534
535                 if (err < 0)
536                         return err;
537
538                 err = perf_stat_synthesize_config(&stat_config, NULL, evsel_list,
539                                                   process_synthesized_event, is_pipe);
540                 if (err < 0)
541                         return err;
542         }
543
544         /*
545          * Enable counters and exec the command:
546          */
547         t0 = rdclock();
548         clock_gettime(CLOCK_MONOTONIC, &ref_time);
549
550         if (forks) {
551                 perf_evlist__start_workload(evsel_list);
552                 enable_counters();
553
554                 if (interval || timeout) {
555                         while (!waitpid(child_pid, &status, WNOHANG)) {
556                                 nanosleep(&ts, NULL);
557                                 if (timeout)
558                                         break;
559                                 process_interval();
560                                 if (interval_count && !(--times))
561                                         break;
562                         }
563                 }
564                 wait4(child_pid, &status, 0, &stat_config.ru_data);
565
566                 if (workload_exec_errno) {
567                         const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
568                         pr_err("Workload failed: %s\n", emsg);
569                         return -1;
570                 }
571
572                 if (WIFSIGNALED(status))
573                         psignal(WTERMSIG(status), argv[0]);
574         } else {
575                 enable_counters();
576                 while (!done) {
577                         nanosleep(&ts, NULL);
578                         if (!is_target_alive(&target, evsel_list->threads))
579                                 break;
580                         if (timeout)
581                                 break;
582                         if (interval) {
583                                 process_interval();
584                                 if (interval_count && !(--times))
585                                         break;
586                         }
587                 }
588         }
589
590         disable_counters();
591
592         t1 = rdclock();
593
594         if (stat_config.walltime_run_table)
595                 stat_config.walltime_run[run_idx] = t1 - t0;
596
597         update_stats(&walltime_nsecs_stats, t1 - t0);
598
599         /*
600          * Closing a group leader splits the group, and as we only disable
601          * group leaders, results in remaining events becoming enabled. To
602          * avoid arbitrary skew, we must read all counters before closing any
603          * group leaders.
604          */
605         read_counters();
606         perf_evlist__close(evsel_list);
607
608         return WEXITSTATUS(status);
609 }
610
611 static int run_perf_stat(int argc, const char **argv, int run_idx)
612 {
613         int ret;
614
615         if (pre_cmd) {
616                 ret = system(pre_cmd);
617                 if (ret)
618                         return ret;
619         }
620
621         if (sync_run)
622                 sync();
623
624         ret = __run_perf_stat(argc, argv, run_idx);
625         if (ret)
626                 return ret;
627
628         if (post_cmd) {
629                 ret = system(post_cmd);
630                 if (ret)
631                         return ret;
632         }
633
634         return ret;
635 }
636
637 static void print_counters(struct timespec *ts, int argc, const char **argv)
638 {
639         /* Do not print anything if we record to the pipe. */
640         if (STAT_RECORD && perf_stat.data.is_pipe)
641                 return;
642
643         perf_evlist__print_counters(evsel_list, &stat_config, &target,
644                                     ts, argc, argv);
645 }
646
647 static volatile int signr = -1;
648
649 static void skip_signal(int signo)
650 {
651         if ((child_pid == -1) || stat_config.interval)
652                 done = 1;
653
654         signr = signo;
655         /*
656          * Render child_pid harmless so that a race condition
657          * combined with fast PID recycling cannot make us send
658          * SIGTERM to a random process that happens to reuse the
659          * child's PID.
660          */
661         child_pid = -1;
662 }
663
664 static void sig_atexit(void)
665 {
666         sigset_t set, oset;
667
668         /*
669          * Avoid a race with the SIGCHLD handler in skip_signal(),
670          * which modifies child_pid; the goal, again, is to avoid
671          * sending SIGTERM to a random process that reused the
672          * child's PID.
673          */
674         sigemptyset(&set);
675         sigaddset(&set, SIGCHLD);
676         sigprocmask(SIG_BLOCK, &set, &oset);
677
678         if (child_pid != -1)
679                 kill(child_pid, SIGTERM);
680
681         sigprocmask(SIG_SETMASK, &oset, NULL);
682
683         if (signr == -1)
684                 return;
685
686         signal(signr, SIG_DFL);
687         kill(getpid(), signr);
688 }
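/*
 * Note: restoring SIG_DFL and re-raising is the standard idiom for
 * dying "by" the signal, so the parent shell observes the original
 * termination status instead of a plain exit code.
 */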
689
690 static int stat__set_big_num(const struct option *opt __maybe_unused,
691                              const char *s __maybe_unused, int unset)
692 {
693         big_num_opt = unset ? 0 : 1;
694         return 0;
695 }
696
697 static int enable_metric_only(const struct option *opt __maybe_unused,
698                               const char *s __maybe_unused, int unset)
699 {
700         force_metric_only = true;
701         stat_config.metric_only = !unset;
702         return 0;
703 }
704
705 static int parse_metric_groups(const struct option *opt,
706                                const char *str,
707                                int unset __maybe_unused)
708 {
709         return metricgroup__parse_groups(opt, str, &stat_config.metric_events);
710 }
711
712 static const struct option stat_options[] = {
713         OPT_BOOLEAN('T', "transaction", &transaction_run,
714                     "hardware transaction statistics"),
715         OPT_CALLBACK('e', "event", &evsel_list, "event",
716                      "event selector. use 'perf list' to list available events",
717                      parse_events_option),
718         OPT_CALLBACK(0, "filter", &evsel_list, "filter",
719                      "event filter", parse_filter),
720         OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
721                     "child tasks do not inherit counters"),
722         OPT_STRING('p', "pid", &target.pid, "pid",
723                    "stat events on existing process id"),
724         OPT_STRING('t', "tid", &target.tid, "tid",
725                    "stat events on existing thread id"),
726         OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
727                     "system-wide collection from all CPUs"),
728         OPT_BOOLEAN('g', "group", &group,
729                     "put the counters into a counter group"),
730         OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
731         OPT_INCR('v', "verbose", &verbose,
732                     "be more verbose (show counter open errors, etc)"),
733         OPT_INTEGER('r', "repeat", &stat_config.run_count,
734                     "repeat command and print average + stddev (max: 100, forever: 0)"),
735         OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
736                     "display details about each run (only with -r option)"),
737         OPT_BOOLEAN('n', "null", &stat_config.null_run,
738                     "null run - don't start any counters"),
739         OPT_INCR('d', "detailed", &detailed_run,
740                     "detailed run - start a lot of events"),
741         OPT_BOOLEAN('S', "sync", &sync_run,
742                     "call sync() before starting a run"),
743         OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
744                            "print large numbers with thousands' separators",
745                            stat__set_big_num),
746         OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
747                     "list of cpus to monitor in system-wide mode"),
748         OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
749                     "disable CPU count aggregation", AGGR_NONE),
750         OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identically named events"),
751         OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
752                    "print counts with custom separator"),
753         OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
754                      "monitor event in cgroup name only", parse_cgroups),
755         OPT_STRING('o', "output", &output_name, "file", "output file name"),
756         OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
757         OPT_INTEGER(0, "log-fd", &output_fd,
758                     "log output to fd, instead of stderr"),
759         OPT_STRING(0, "pre", &pre_cmd, "command",
760                         "command to run prior to the measured command"),
761         OPT_STRING(0, "post", &post_cmd, "command",
762                         "command to run after the measured command"),
763         OPT_UINTEGER('I', "interval-print", &stat_config.interval,
764                     "print counts at a regular interval in ms "
765                     "(overhead is possible for values <= 100ms)"),
766         OPT_INTEGER(0, "interval-count", &stat_config.times,
767                     "print counts a fixed number of times"),
768         OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
769                     "clear the screen between intervals"),
770         OPT_UINTEGER(0, "timeout", &stat_config.timeout,
771                     "stop workload and print counts after a timeout period in ms (>= 10ms)"),
772         OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
773                      "aggregate counts per processor socket", AGGR_SOCKET),
774         OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
775                      "aggregate counts per physical processor core", AGGR_CORE),
776         OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
777                      "aggregate counts per thread", AGGR_THREAD),
778         OPT_UINTEGER('D', "delay", &stat_config.initial_delay,
779                      "ms to wait before starting measurement after program start"),
780         OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
781                         "Only print computed metrics. No raw values", enable_metric_only),
782         OPT_BOOLEAN(0, "topdown", &topdown_run,
783                         "measure topdown level 1 statistics"),
784         OPT_BOOLEAN(0, "smi-cost", &smi_cost,
785                         "measure SMI cost"),
786         OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
787                      "monitor specified metrics or metric groups (separated by ,)",
788                      parse_metric_groups),
789         OPT_END()
790 };
791
792 static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
793                                  struct cpu_map *map, int cpu)
794 {
795         return cpu_map__get_socket(map, cpu, NULL);
796 }
797
798 static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
799                                struct cpu_map *map, int cpu)
800 {
801         return cpu_map__get_core(map, cpu, NULL);
802 }
803
804 static int cpu_map__get_max(struct cpu_map *map)
805 {
806         int i, max = -1;
807
808         for (i = 0; i < map->nr; i++) {
809                 if (map->map[i] > max)
810                         max = map->map[i];
811         }
812
813         return max;
814 }
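/*
 * Worked example: for a map holding {0, 2, 5} this returns 5;
 * perf_stat_init_aggr_mode() below sizes cpus_aggr_map as max + 1 so
 * every cpu number in the map can index it directly.
 */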
815
816 static int perf_stat__get_aggr(struct perf_stat_config *config,
817                                aggr_get_id_t get_id, struct cpu_map *map, int idx)
818 {
819         int cpu;
820
821         if (idx >= map->nr)
822                 return -1;
823
824         cpu = map->map[idx];
825
826         if (config->cpus_aggr_map->map[cpu] == -1)
827                 config->cpus_aggr_map->map[cpu] = get_id(config, map, idx);
828
829         return config->cpus_aggr_map->map[cpu];
830 }
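/*
 * Note: cpus_aggr_map memoizes get_id() per cpu, so the cached
 * socket/core wrappers below resolve each cpu's aggregation id once
 * and answer subsequent lookups in O(1).
 */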
831
832 static int perf_stat__get_socket_cached(struct perf_stat_config *config,
833                                         struct cpu_map *map, int idx)
834 {
835         return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
836 }
837
838 static int perf_stat__get_core_cached(struct perf_stat_config *config,
839                                       struct cpu_map *map, int idx)
840 {
841         return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
842 }
843
844 static int perf_stat_init_aggr_mode(void)
845 {
846         int nr;
847
848         switch (stat_config.aggr_mode) {
849         case AGGR_SOCKET:
850                 if (cpu_map__build_socket_map(evsel_list->cpus, &stat_config.aggr_map)) {
851                         perror("cannot build socket map");
852                         return -1;
853                 }
854                 stat_config.aggr_get_id = perf_stat__get_socket_cached;
855                 break;
856         case AGGR_CORE:
857                 if (cpu_map__build_core_map(evsel_list->cpus, &stat_config.aggr_map)) {
858                         perror("cannot build core map");
859                         return -1;
860                 }
861                 stat_config.aggr_get_id = perf_stat__get_core_cached;
862                 break;
863         case AGGR_NONE:
864         case AGGR_GLOBAL:
865         case AGGR_THREAD:
866         case AGGR_UNSET:
867         default:
868                 break;
869         }
870
871         /*
872          * evsel_list->cpus is the base we operate on; the highest
873          * cpu number in it determines the size of the aggregation
874          * translation cpumap.
875          */
876         nr = cpu_map__get_max(evsel_list->cpus);
877         stat_config.cpus_aggr_map = cpu_map__empty_new(nr + 1);
878         return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
879 }
880
881 static void perf_stat__exit_aggr_mode(void)
882 {
883         cpu_map__put(stat_config.aggr_map);
884         cpu_map__put(stat_config.cpus_aggr_map);
885         stat_config.aggr_map = NULL;
886         stat_config.cpus_aggr_map = NULL;
887 }
888
889 static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, int idx)
890 {
891         int cpu;
892
893         if (idx >= map->nr)     /* valid indices are 0 .. nr - 1 */
894                 return -1;
895
896         cpu = map->map[idx];
897
898         if (cpu >= env->nr_cpus_avail)
899                 return -1;
900
901         return cpu;
902 }
903
904 static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
905 {
906         struct perf_env *env = data;
907         int cpu = perf_env__get_cpu(env, map, idx);
908
909         return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
910 }
911
912 static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
913 {
914         struct perf_env *env = data;
915         int core = -1, cpu = perf_env__get_cpu(env, map, idx);
916
917         if (cpu != -1) {
918                 int socket_id = env->cpu[cpu].socket_id;
919
920                 /*
921                  * Encode the socket in the upper 16 bits:
922                  * core_id is only relative to its socket, and
923                  * we need a global id, so we combine the
924                  * socket and core ids.
925                  */
926                 core = (socket_id << 16) | (env->cpu[cpu].core_id & 0xffff);
927         }
928
929         return core;
930 }
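/*
 * Worked example: socket_id 1 with core_id 2 encodes as
 * (1 << 16) | 2 = 0x10002, a system-global core identifier; a core_id
 * above 0xffff would be truncated by the mask.
 */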
931
932 static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus,
933                                       struct cpu_map **sockp)
934 {
935         return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
936 }
937
938 static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
939                                     struct cpu_map **corep)
940 {
941         return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
942 }
943
944 static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
945                                       struct cpu_map *map, int idx)
946 {
947         return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
948 }
949
950 static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
951                                     struct cpu_map *map, int idx)
952 {
953         return perf_env__get_core(map, idx, &perf_stat.session->header.env);
954 }
955
956 static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
957 {
958         struct perf_env *env = &st->session->header.env;
959
960         switch (stat_config.aggr_mode) {
961         case AGGR_SOCKET:
962                 if (perf_env__build_socket_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
963                         perror("cannot build socket map");
964                         return -1;
965                 }
966                 stat_config.aggr_get_id = perf_stat__get_socket_file;
967                 break;
968         case AGGR_CORE:
969                 if (perf_env__build_core_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
970                         perror("cannot build core map");
971                         return -1;
972                 }
973                 stat_config.aggr_get_id = perf_stat__get_core_file;
974                 break;
975         case AGGR_NONE:
976         case AGGR_GLOBAL:
977         case AGGR_THREAD:
978         case AGGR_UNSET:
979         default:
980                 break;
981         }
982
983         return 0;
984 }
985
986 static int topdown_filter_events(const char **attr, char **str, bool use_group)
987 {
988         int off = 0;
989         int i;
990         int len = 0;
991         char *s;
992
993         for (i = 0; attr[i]; i++) {
994                 if (pmu_have_event("cpu", attr[i])) {
995                         len += strlen(attr[i]) + 1;
996                         attr[i - off] = attr[i];
997                 } else
998                         off++;
999         }
1000         attr[i - off] = NULL;
1001
1002         *str = malloc(len + 1 + 2);
1003         if (!*str)
1004                 return -1;
1005         s = *str;
1006         if (i - off == 0) {
1007                 *s = 0;
1008                 return 0;
1009         }
1010         if (use_group)
1011                 *s++ = '{';
1012         for (i = 0; attr[i]; i++) {
1013                 strcpy(s, attr[i]);
1014                 s += strlen(s);
1015                 *s++ = ',';
1016         }
1017         if (use_group) {
1018                 s[-1] = '}';
1019                 *s = 0;
1020         } else
1021                 s[-1] = 0;
1022         return 0;
1023 }
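/*
 * Worked example: if only "topdown-total-slots" and
 * "topdown-slots-retired" are available and use_group is true, the
 * string built here is "{topdown-total-slots,topdown-slots-retired}";
 * without grouping the trailing comma is simply replaced by a NUL.
 */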
1024
1025 __weak bool arch_topdown_check_group(bool *warn)
1026 {
1027         *warn = false;
1028         return false;
1029 }
1030
1031 __weak void arch_topdown_group_warn(void)
1032 {
1033 }
1034
1035 /*
1036  * Add default attributes, if there were no attributes specified or
1037  * if -d/--detailed, -d -d or -d -d -d is used:
1038  */
1039 static int add_default_attributes(void)
1040 {
1041         int err;
1042         struct perf_event_attr default_attrs0[] = {
1043
1044   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK              },
1045   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES        },
1046   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS          },
1047   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS             },
1048
1049   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES              },
1050 };
1051         struct perf_event_attr frontend_attrs[] = {
1052   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
1053 };
1054         struct perf_event_attr backend_attrs[] = {
1055   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND  },
1056 };
1057         struct perf_event_attr default_attrs1[] = {
1058   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS            },
1059   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS     },
1060   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES           },
1061
1062 };
1063
1064 /*
1065  * Detailed stats (-d), covering the L1 and last level data caches:
1066  */
1067         struct perf_event_attr detailed_attrs[] = {
1068
1069   { .type = PERF_TYPE_HW_CACHE,
1070     .config =
1071          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1072         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1073         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1074
1075   { .type = PERF_TYPE_HW_CACHE,
1076     .config =
1077          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1078         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1079         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1080
1081   { .type = PERF_TYPE_HW_CACHE,
1082     .config =
1083          PERF_COUNT_HW_CACHE_LL                 <<  0  |
1084         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1085         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1086
1087   { .type = PERF_TYPE_HW_CACHE,
1088     .config =
1089          PERF_COUNT_HW_CACHE_LL                 <<  0  |
1090         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1091         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1092 };
1093
1094 /*
1095  * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
1096  */
1097         struct perf_event_attr very_detailed_attrs[] = {
1098
1099   { .type = PERF_TYPE_HW_CACHE,
1100     .config =
1101          PERF_COUNT_HW_CACHE_L1I                <<  0  |
1102         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1103         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1104
1105   { .type = PERF_TYPE_HW_CACHE,
1106     .config =
1107          PERF_COUNT_HW_CACHE_L1I                <<  0  |
1108         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1109         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1110
1111   { .type = PERF_TYPE_HW_CACHE,
1112     .config =
1113          PERF_COUNT_HW_CACHE_DTLB               <<  0  |
1114         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1115         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1116
1117   { .type = PERF_TYPE_HW_CACHE,
1118     .config =
1119          PERF_COUNT_HW_CACHE_DTLB               <<  0  |
1120         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1121         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1122
1123   { .type = PERF_TYPE_HW_CACHE,
1124     .config =
1125          PERF_COUNT_HW_CACHE_ITLB               <<  0  |
1126         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1127         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1128
1129   { .type = PERF_TYPE_HW_CACHE,
1130     .config =
1131          PERF_COUNT_HW_CACHE_ITLB               <<  0  |
1132         (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
1133         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1134
1135 };
1136
1137 /*
1138  * Very, very detailed stats (-d -d -d), adding prefetch events:
1139  */
1140         struct perf_event_attr very_very_detailed_attrs[] = {
1141
1142   { .type = PERF_TYPE_HW_CACHE,
1143     .config =
1144          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1145         (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
1146         (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)                          },
1147
1148   { .type = PERF_TYPE_HW_CACHE,
1149     .config =
1150          PERF_COUNT_HW_CACHE_L1D                <<  0  |
1151         (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
1152         (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
1153 };
1154         struct parse_events_error errinfo = { .idx = 0, }; /* zeroed: one error path prints it without a prior parse_events() */
1155
1156         /* Set attrs if no event is selected and !null_run: */
1157         if (stat_config.null_run)
1158                 return 0;
1159
1160         if (transaction_run) {
1161                 /* Handle -T as -M transaction. Once platform-specific metrics
1162                  * support has been added to the json files, all architectures
1163                  * will use this approach. To determine transaction support
1164                  * on an architecture, test for such a metric name.
1165                  */
1166                 if (metricgroup__has_metric("transaction")) {
1167                         struct option opt = { .value = &evsel_list };
1168
1169                         return metricgroup__parse_groups(&opt, "transaction",
1170                                                          &stat_config.metric_events);
1171                 }
1172
1173                 if (pmu_have_event("cpu", "cycles-ct") &&
1174                     pmu_have_event("cpu", "el-start"))
1175                         err = parse_events(evsel_list, transaction_attrs,
1176                                            &errinfo);
1177                 else
1178                         err = parse_events(evsel_list,
1179                                            transaction_limited_attrs,
1180                                            &errinfo);
1181                 if (err) {
1182                         fprintf(stderr, "Cannot set up transaction events\n");
1183                         parse_events_print_error(&errinfo, transaction_attrs);
1184                         return -1;
1185                 }
1186                 return 0;
1187         }
1188
1189         if (smi_cost) {
1190                 int smi;
1191
1192                 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
1193                         fprintf(stderr, "freeze_on_smi is not supported.\n");
1194                         return -1;
1195                 }
1196
1197                 if (!smi) {
1198                         if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
1199                                 fprintf(stderr, "Failed to set freeze_on_smi.\n");
1200                                 return -1;
1201                         }
1202                         smi_reset = true;
1203                 }
1204
1205                 if (pmu_have_event("msr", "aperf") &&
1206                     pmu_have_event("msr", "smi")) {
1207                         if (!force_metric_only)
1208                                 stat_config.metric_only = true;
1209                         err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
1210                 } else {
1211                         fprintf(stderr, "Measuring SMI cost requires "
1212                                 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
1213                         parse_events_print_error(&errinfo, smi_cost_attrs);
1214                         return -1;
1215                 }
1216                 if (err) {
1217                         fprintf(stderr, "Cannot set up SMI cost events\n");
1218                         return -1;
1219                 }
1220                 return 0;
1221         }
1222
1223         if (topdown_run) {
1224                 char *str = NULL;
1225                 bool warn = false;
1226
1227                 if (stat_config.aggr_mode != AGGR_GLOBAL &&
1228                     stat_config.aggr_mode != AGGR_CORE) {
1229                         pr_err("top down event configuration requires --per-core mode\n");
1230                         return -1;
1231                 }
1232                 stat_config.aggr_mode = AGGR_CORE;
1233                 if (nr_cgroups || !target__has_cpu(&target)) {
1234                         pr_err("top down event configuration requires system-wide mode (-a)\n");
1235                         return -1;
1236                 }
1237
1238                 if (!force_metric_only)
1239                         stat_config.metric_only = true;
1240                 if (topdown_filter_events(topdown_attrs, &str,
1241                                 arch_topdown_check_group(&warn)) < 0) {
1242                         pr_err("Out of memory\n");
1243                         return -1;
1244                 }
1245                 if (topdown_attrs[0] && str) {
1246                         if (warn)
1247                                 arch_topdown_group_warn();
1248                         err = parse_events(evsel_list, str, &errinfo);
1249                         if (err) {
1250                                 fprintf(stderr,
1251                                         "Cannot set up top down events %s: %d\n",
1252                                         str, err);
1253                                 free(str);
1254                                 parse_events_print_error(&errinfo, str);
1255                                 return -1;
1256                         }
1257                 } else {
1258                         fprintf(stderr, "System does not support topdown\n");
1259                         return -1;
1260                 }
1261                 free(str);
1262         }
1263
1264         if (!evsel_list->nr_entries) {
1265                 if (target__has_cpu(&target))
1266                         default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
1267
1268                 if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
1269                         return -1;
1270                 if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
1271                         if (perf_evlist__add_default_attrs(evsel_list,
1272                                                 frontend_attrs) < 0)
1273                                 return -1;
1274                 }
1275                 if (pmu_have_event("cpu", "stalled-cycles-backend")) {
1276                         if (perf_evlist__add_default_attrs(evsel_list,
1277                                                 backend_attrs) < 0)
1278                                 return -1;
1279                 }
1280                 if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
1281                         return -1;
1282         }
1283
1284         /* Detailed events get appended to the event list: */
1285
1286         if (detailed_run <  1)
1287                 return 0;
1288
1289         /* Append detailed run extra attributes: */
1290         if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
1291                 return -1;
1292
1293         if (detailed_run < 2)
1294                 return 0;
1295
1296         /* Append very detailed run extra attributes: */
1297         if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
1298                 return -1;
1299
1300         if (detailed_run < 3)
1301                 return 0;
1302
1303         /* Append very, very detailed run extra attributes: */
1304         return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
1305 }
1306
1307 static const char * const stat_record_usage[] = {
1308         "perf stat record [<options>]",
1309         NULL,
1310 };
1311
1312 static void init_features(struct perf_session *session)
1313 {
1314         int feat;
1315
1316         for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1317                 perf_header__set_feat(&session->header, feat);
1318
1319         perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1320         perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1321         perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
1322         perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
1323 }
1324
1325 static int __cmd_record(int argc, const char **argv)
1326 {
1327         struct perf_session *session;
1328         struct perf_data *data = &perf_stat.data;
1329
1330         argc = parse_options(argc, argv, stat_options, stat_record_usage,
1331                              PARSE_OPT_STOP_AT_NON_OPTION);
1332
1333         if (output_name)
1334                 data->file.path = output_name;
1335
1336         if (stat_config.run_count != 1 || forever) {
1337                 pr_err("Cannot use -r option with perf stat record.\n");
1338                 return -1;
1339         }
1340
1341         session = perf_session__new(data, false, NULL);
1342         if (session == NULL) {
1343                 pr_err("Perf session creation failed.\n");
1344                 return -1;
1345         }
1346
1347         init_features(session);
1348
1349         session->evlist   = evsel_list;
1350         perf_stat.session = session;
1351         perf_stat.record  = true;
1352         return argc;
1353 }
1354
1355 static int process_stat_round_event(struct perf_session *session,
1356                                     union perf_event *event)
1357 {
1358         struct stat_round_event *stat_round = &event->stat_round;
1359         struct perf_evsel *counter;
1360         struct timespec tsh, *ts = NULL;
1361         const char **argv = session->header.env.cmdline_argv;
1362         int argc = session->header.env.nr_cmdline;
1363
1364         evlist__for_each_entry(evsel_list, counter)
1365                 perf_stat_process_counter(&stat_config, counter);
1366
1367         if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
1368                 update_stats(&walltime_nsecs_stats, stat_round->time);
1369
1370         if (stat_config.interval && stat_round->time) {
1371                 tsh.tv_sec  = stat_round->time / NSEC_PER_SEC;
1372                 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
1373                 ts = &tsh;
1374         }
1375
1376         print_counters(ts, argc, argv);
1377         return 0;
1378 }
1379
1380 static
1381 int process_stat_config_event(struct perf_session *session,
1382                               union perf_event *event)
1383 {
1384         struct perf_tool *tool = session->tool;
1385         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1386
1387         perf_event__read_stat_config(&stat_config, &event->stat_config);
1388
1389         if (cpu_map__empty(st->cpus)) {
1390                 if (st->aggr_mode != AGGR_UNSET)
1391                         pr_warning("warning: processing task data, aggregation mode not set\n");
1392                 return 0;
1393         }
1394
1395         if (st->aggr_mode != AGGR_UNSET)
1396                 stat_config.aggr_mode = st->aggr_mode;
1397
1398         if (perf_stat.data.is_pipe)
1399                 perf_stat_init_aggr_mode();
1400         else
1401                 perf_stat_init_aggr_mode_file(st);
1402
1403         return 0;
1404 }
1405
1406 static int set_maps(struct perf_stat *st)
1407 {
1408         if (!st->cpus || !st->threads)
1409                 return 0;
1410
1411         if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
1412                 return -EINVAL;
1413
1414         perf_evlist__set_maps(evsel_list, st->cpus, st->threads);
1415
1416         if (perf_evlist__alloc_stats(evsel_list, true))
1417                 return -ENOMEM;
1418
1419         st->maps_allocated = true;
1420         return 0;
1421 }
1422
1423 static
1424 int process_thread_map_event(struct perf_session *session,
1425                              union perf_event *event)
1426 {
1427         struct perf_tool *tool = session->tool;
1428         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1429
1430         if (st->threads) {
1431                 pr_warning("Extra thread map event, ignoring.\n");
1432                 return 0;
1433         }
1434
1435         st->threads = thread_map__new_event(&event->thread_map);
1436         if (!st->threads)
1437                 return -ENOMEM;
1438
1439         return set_maps(st);
1440 }
1441
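/* Replay of a PERF_RECORD_CPU_MAP: reconstruct the recorded CPUs. */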
1442 static
1443 int process_cpu_map_event(struct perf_session *session,
1444                           union perf_event *event)
1445 {
1446         struct perf_tool *tool = session->tool;
1447         struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1448         struct cpu_map *cpus;
1449
1450         if (st->cpus) {
1451                 pr_warning("Extra cpu map event, ignoring.\n");
1452                 return 0;
1453         }
1454
1455         cpus = cpu_map__new_data(&event->cpu_map.data);
1456         if (!cpus)
1457                 return -ENOMEM;
1458
1459         st->cpus = cpus;
1460         return set_maps(st);
1461 }
1462
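/*
 * --per-thread in system-wide mode keeps one runtime_stat per monitored
 * thread, so the shadow metrics can be computed per thread.
 */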
1463 static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
1464 {
1465         int i;
1466
1467         config->stats = calloc(nthreads, sizeof(struct runtime_stat));
1468         if (!config->stats)
1469                 return -1;
1470
1471         config->stats_num = nthreads;
1472
1473         for (i = 0; i < nthreads; i++)
1474                 runtime_stat__init(&config->stats[i]);
1475
1476         return 0;
1477 }
1478
1479 static void runtime_stat_delete(struct perf_stat_config *config)
1480 {
1481         int i;
1482
1483         if (!config->stats)
1484                 return;
1485
1486         for (i = 0; i < config->stats_num; i++)
1487                 runtime_stat__exit(&config->stats[i]);
1488
1489         free(config->stats);
1490 }
1491
1492 static const char * const stat_report_usage[] = {
1493         "perf stat report [<options>]",
1494         NULL,
1495 };
1496
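/*
 * Tool callbacks wired up for 'perf stat report': each PERF_RECORD_STAT_*
 * event in the data file is routed to the process_*() handlers above or
 * to the stock perf_event__process_*() helpers.
 */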
1497 static struct perf_stat perf_stat = {
1498         .tool = {
1499                 .attr           = perf_event__process_attr,
1500                 .event_update   = perf_event__process_event_update,
1501                 .thread_map     = process_thread_map_event,
1502                 .cpu_map        = process_cpu_map_event,
1503                 .stat_config    = process_stat_config_event,
1504                 .stat           = perf_event__process_stat_event,
1505                 .stat_round     = process_stat_round_event,
1506         },
1507         .aggr_mode = AGGR_UNSET,
1508 };
1509
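/*
 * 'perf stat report' replays a file written by 'perf stat record' and
 * prints the counters, optionally re-aggregated, e.g.:
 *
 *   perf stat record -e cycles -- sleep 1
 *   perf stat report --per-core
 *
 * (example invocations only; any events recorded earlier work the same)
 */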
1510 static int __cmd_report(int argc, const char **argv)
1511 {
1512         struct perf_session *session;
1513         const struct option options[] = {
1514         OPT_STRING('i', "input", &input_name, "file", "input file name"),
1515         OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
1516                      "aggregate counts per processor socket", AGGR_SOCKET),
1517         OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
1518                      "aggregate counts per physical processor core", AGGR_CORE),
1519         OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
1520                      "disable CPU count aggregation", AGGR_NONE),
1521         OPT_END()
1522         };
1523         struct stat st;
1524         int ret;
1525
1526         argc = parse_options(argc, argv, options, stat_report_usage, 0);
1527
1528         if (!input_name || !strlen(input_name)) {
1529                 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
1530                         input_name = "-";
1531                 else
1532                         input_name = "perf.data";
1533         }
1534
1535         perf_stat.data.file.path = input_name;
1536         perf_stat.data.mode      = PERF_DATA_MODE_READ;
1537
1538         session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
1539         if (session == NULL)
1540                 return -1;
1541
1542         perf_stat.session  = session;
1543         stat_config.output = stderr;
1544         evsel_list         = session->evlist;
1545
1546         ret = perf_session__process_events(session);
1547         if (ret)
1548                 return ret;
1549
1550         perf_session__delete(session);
1551         return 0;
1552 }
1553
1554 static void setup_system_wide(int forks)
1555 {
1556         /*
1557          * Make system wide (-a) the default target if
1558          * no target was specified and one of following
1559          * no target was specified and one of the following
1560          * conditions is met:
1561          *
1562          *   - there's no workload specified
1563          *   - there is a workload specified but all requested
1564          */
1565         if (!target__none(&target))
1566                 return;
1567
1568         if (!forks)
1569                 target.system_wide = true;
1570         else {
1571                 struct perf_evsel *counter;
1572
1573                 evlist__for_each_entry(evsel_list, counter) {
1574                         if (!counter->system_wide)
1575                                 return;
1576                 }
1577
1578                 if (evsel_list->nr_entries)
1579                         target.system_wide = true;
1580         }
1581 }
1582
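/*
 * Entry point for 'perf stat' and its record/report subcommands. A bare
 * 'perf stat <cmd>' counts the default events while <cmd> runs, e.g.:
 *
 *   perf stat -- sleep 1      (count while the workload runs)
 *   perf stat -I 1000 -a      (print system-wide counts every second)
 *
 * (illustrative invocations; see stat_options for the full set)
 */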
1583 int cmd_stat(int argc, const char **argv)
1584 {
1585         const char * const stat_usage[] = {
1586                 "perf stat [<options>] [<command>]",
1587                 NULL
1588         };
1589         int status = -EINVAL, run_idx;
1590         const char *mode;
1591         FILE *output = stderr;
1592         unsigned int interval, timeout;
1593         const char * const stat_subcommands[] = { "record", "report" };
1594
1595         setlocale(LC_ALL, "");
1596
1597         evsel_list = perf_evlist__new();
1598         if (evsel_list == NULL)
1599                 return -ENOMEM;
1600
1601         parse_events__shrink_config_terms();
1602         argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
1603                                         (const char **) stat_usage,
1604                                         PARSE_OPT_STOP_AT_NON_OPTION);
1605         perf_stat__collect_metric_expr(evsel_list);
1606         perf_stat__init_shadow_stats();
1607
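        /* -x <sep> implies CSV output; a literal "\t" becomes a real tab. */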
1608         if (stat_config.csv_sep) {
1609                 stat_config.csv_output = true;
1610                 if (!strcmp(stat_config.csv_sep, "\\t"))
1611                         stat_config.csv_sep = "\t";
1612         } else
1613                 stat_config.csv_sep = DEFAULT_SEPARATOR;
1614
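        /*
         * Dispatch subcommands: 'report' is handled entirely by
         * __cmd_report(), while 'record' just sets things up and falls
         * through to the normal counting path below.
         */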
1615         if (argc && !strncmp(argv[0], "rec", 3)) {
1616                 argc = __cmd_record(argc, argv);
1617                 if (argc < 0)
1618                         return -1;
1619         } else if (argc && !strncmp(argv[0], "rep", 3))
1620                 return __cmd_report(argc, argv);
1621
1622         interval = stat_config.interval;
1623         timeout = stat_config.timeout;
1624
1625         /*
1626          * For the record command, -o has already been taken care of.
1627          */
1628         if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
1629                 output = NULL;
1630
1631         if (output_name && output_fd) {
1632                 fprintf(stderr, "cannot use both --output and --log-fd\n");
1633                 parse_options_usage(stat_usage, stat_options, "o", 1);
1634                 parse_options_usage(NULL, stat_options, "log-fd", 0);
1635                 goto out;
1636         }
1637
1638         if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
1639                 fprintf(stderr, "--metric-only is not supported with --per-thread\n");
1640                 goto out;
1641         }
1642
1643         if (stat_config.metric_only && stat_config.run_count > 1) {
1644                 fprintf(stderr, "--metric-only is not supported with -r\n");
1645                 goto out;
1646         }
1647
1648         if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
1649                 fprintf(stderr, "--table is only supported with -r\n");
1650                 parse_options_usage(stat_usage, stat_options, "r", 1);
1651                 parse_options_usage(NULL, stat_options, "table", 0);
1652                 goto out;
1653         }
1654
1655         if (output_fd < 0) {
1656                 fprintf(stderr, "argument to --log-fd must be > 0\n");
1657                 parse_options_usage(stat_usage, stat_options, "log-fd", 0);
1658                 goto out;
1659         }
1660
1661         if (!output) {
1662                 struct timespec tm;
1663                 mode = append_file ? "a" : "w";
1664
1665                 output = fopen(output_name, mode);
1666                 if (!output) {
1667                         perror("failed to create output file");
1668                         return -1;
1669                 }
1670                 clock_gettime(CLOCK_REALTIME, &tm);
1671                 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
1672         } else if (output_fd > 0) {
1673                 mode = append_file ? "a" : "w";
1674                 output = fdopen(output_fd, mode);
1675                 if (!output) {
1676                         perror("Failed opening logfd");
1677                         return -errno;
1678                 }
1679         }
1680
1681         stat_config.output = output;
1682
1683         /*
1684          * let the spreadsheet do the pretty-printing
1685          */
1686         if (stat_config.csv_output) {
1687                 /* User explicitly passed -B? */
1688                 if (big_num_opt == 1) {
1689                         fprintf(stderr, "-B option not supported with -x\n");
1690                         parse_options_usage(stat_usage, stat_options, "B", 1);
1691                         parse_options_usage(NULL, stat_options, "x", 1);
1692                         goto out;
1693                 } else /* Nope, so disable big number formatting */
1694                         stat_config.big_num = false;
1695         } else if (big_num_opt == 0) /* User passed --no-big-num */
1696                 stat_config.big_num = false;
1697
1698         setup_system_wide(argc);
1699
1700         /*
1701          * Display user/system times only for a single
1702          * run and when there's a specified tracee.
1703          */
1704         if ((stat_config.run_count == 1) && target__none(&target))
1705                 stat_config.ru_display = true;
1706
1707         if (stat_config.run_count < 0) {
1708                 pr_err("Run count must be a positive number\n");
1709                 parse_options_usage(stat_usage, stat_options, "r", 1);
1710                 goto out;
1711         } else if (stat_config.run_count == 0) {
1712                 forever = true;
1713                 stat_config.run_count = 1;
1714         }
1715
1716         if (stat_config.walltime_run_table) {
1717                 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
1718                 if (!stat_config.walltime_run) {
1719                         pr_err("failed to set up -r option\n");
1720                         goto out;
1721                 }
1722         }
1723
1724         if ((stat_config.aggr_mode == AGGR_THREAD) &&
1725                 !target__has_task(&target)) {
1726                 if (!target.system_wide || target.cpu_list) {
1727                         fprintf(stderr, "The --per-thread option is only "
1728                                 "available when monitoring via the -p, -t or -a "
1729                                 "options, or when --per-thread is used alone.\n");
1730                         parse_options_usage(NULL, stat_options, "p", 1);
1731                         parse_options_usage(NULL, stat_options, "t", 1);
1732                         goto out;
1733                 }
1734         }
1735
1736         /*
1737          * no_aggr and cgroup are for system-wide only;
1738          * --per-thread is aggregated per thread, so we don't mix it with cpu mode
1739          */
1740         if (((stat_config.aggr_mode != AGGR_GLOBAL &&
1741               stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
1742             !target__has_cpu(&target)) {
1743                 fprintf(stderr, "both cgroup and no-aggregation "
1744                         "modes are only available in system-wide mode\n");
1745
1746                 parse_options_usage(stat_usage, stat_options, "G", 1);
1747                 parse_options_usage(NULL, stat_options, "A", 1);
1748                 parse_options_usage(NULL, stat_options, "a", 1);
1749                 goto out;
1750         }
1751
1752         if (add_default_attributes())
1753                 goto out;
1754
1755         target__validate(&target);
1756
1757         if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
1758                 target.per_thread = true;
1759
1760         if (perf_evlist__create_maps(evsel_list, &target) < 0) {
1761                 if (target__has_task(&target)) {
1762                         pr_err("Problems finding threads to monitor\n");
1763                         parse_options_usage(stat_usage, stat_options, "p", 1);
1764                         parse_options_usage(NULL, stat_options, "t", 1);
1765                 } else if (target__has_cpu(&target)) {
1766                         perror("failed to parse CPUs map");
1767                         parse_options_usage(stat_usage, stat_options, "C", 1);
1768                         parse_options_usage(NULL, stat_options, "a", 1);
1769                 }
1770                 goto out;
1771         }
1772
1773         /*
1774          * Initialize thread_map with comm names,
1775          * so we can print them in the output.
1776          */
1777         if (stat_config.aggr_mode == AGGR_THREAD) {
1778                 thread_map__read_comms(evsel_list->threads);
1779                 if (target.system_wide) {
1780                         if (runtime_stat_new(&stat_config,
1781                                 thread_map__nr(evsel_list->threads))) {
1782                                 goto out;
1783                         }
1784                 }
1785         }
1786
1787         if (stat_config.times && interval)
1788                 interval_count = true;
1789         else if (stat_config.times && !interval) {
1790                 pr_err("interval-count option should be used together with "
1791                                 "interval-print.\n");
1792                 parse_options_usage(stat_usage, stat_options, "interval-count", 0);
1793                 parse_options_usage(stat_usage, stat_options, "I", 1);
1794                 goto out;
1795         }
1796
1797         if (timeout && timeout < 100) {
1798                 if (timeout < 10) {
1799                         pr_err("timeout must be >= 10ms.\n");
1800                         parse_options_usage(stat_usage, stat_options, "timeout", 0);
1801                         goto out;
1802                 } else
1803                         pr_warning("timeout < 100ms. "
1804                                    "The overhead percentage could be high in some cases. "
1805                                    "Please proceed with caution.\n");
1806         }
1807         if (timeout && interval) {
1808                 pr_err("timeout option is not supported with interval-print.\n");
1809                 parse_options_usage(stat_usage, stat_options, "timeout", 0);
1810                 parse_options_usage(stat_usage, stat_options, "I", 1);
1811                 goto out;
1812         }
1813
1814         if (perf_evlist__alloc_stats(evsel_list, interval))
1815                 goto out;
1816
1817         if (perf_stat_init_aggr_mode())
1818                 goto out;
1819
1820         /*
1821          * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
1822          * while avoiding confusing messages from older tools.
1823          *
1824          * However, for pipe sessions we need to keep it zero,
1825          * because script's perf_evsel__check_attr is triggered
1826          * by attr->sample_type != 0, and we can't run it on
1827          * stat sessions.
1828          */
1829         stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);
1830
1831         /*
1832          * We don't want to block the signals - that would cause
1833          * child tasks to inherit the mask and Ctrl-C would not work.
1834          * What we want is for Ctrl-C to work in the exec()-ed
1835          * task, while being ignored by perf stat itself:
1836          */
1837         atexit(sig_atexit);
1838         if (!forever)
1839                 signal(SIGINT,  skip_signal);
1840         signal(SIGCHLD, skip_signal);
1841         signal(SIGALRM, skip_signal);
1842         signal(SIGABRT, skip_signal);
1843
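        /*
         * Main measurement loop: -r N repeats the workload N times, while
         * -r 0 set 'forever' above and reruns until interrupted, printing
         * and resetting the stats after each run.
         */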
1844         status = 0;
1845         for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
1846                 if (stat_config.run_count != 1 && verbose > 0)
1847                         fprintf(output, "[ perf stat: executing run #%d ... ]\n",
1848                                 run_idx + 1);
1849
1850                 status = run_perf_stat(argc, argv, run_idx);
1851                 if (forever && status != -1) {
1852                         print_counters(NULL, argc, argv);
1853                         perf_stat__reset_stats();
1854                 }
1855         }
1856
1857         if (!forever && status != -1 && !interval)
1858                 print_counters(NULL, argc, argv);
1859
1860         if (STAT_RECORD) {
1861                 /*
1862                  * We synthesize the kernel mmap record just so that older tools
1863                  * don't emit warnings about not being able to resolve symbols
1864                  * due to /proc/sys/kernel/kptr_restrict settings and instead provide
1865                  * a saner message about no samples being in the perf.data file.
1866                  *
1867                  * This also serves to suppress a warning about f_header.data.size == 0
1868                  * in header.c at the moment 'perf stat record' gets introduced, which
1869                  * is not really needed once we start adding the stat specific PERF_RECORD_
1870                  * records, but the need to suppress the kptr_restrict messages in older
1871                  * tools remains  -acme
1872                  */
1873                 int fd = perf_data__fd(&perf_stat.data);
1874                 int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
1875                                                              process_synthesized_event,
1876                                                              &perf_stat.session->machines.host);
1877                 if (err) {
1878                         pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
1879                                    "older tools may produce warnings about this file.\n");
1880                 }
1881
1882                 if (!interval) {
1883                         if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
1884                                 pr_err("failed to write stat round event\n");
1885                 }
1886
1887                 if (!perf_stat.data.is_pipe) {
1888                         perf_stat.session->header.data_size += perf_stat.bytes_written;
1889                         perf_session__write_header(perf_stat.session, evsel_list, fd, true);
1890                 }
1891
1892                 perf_session__delete(perf_stat.session);
1893         }
1894
1895         perf_stat__exit_aggr_mode();
1896         perf_evlist__free_stats(evsel_list);
1897 out:
1898         free(stat_config.walltime_run);
1899
1900         if (smi_cost && smi_reset)
1901                 sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
1902
1903         perf_evlist__delete(evsel_list);
1904
1905         runtime_stat_delete(&stat_config);
1906
1907         return status;
1908 }