// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include <linux/zalloc.h>
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);
}
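/*
 * Sketch (not part of the original file): one update step of the
 * incremental (Welford) algorithm above with concrete numbers,
 * starting from a zeroed struct stats and feeding val = 10, then 20:
 *
 *	n = 1: delta = 10 - 0  = 10, mean = 0 + 10/1 = 10, M2 = 0
 *	n = 2: delta = 20 - 10 = 10, mean = 10 + 10/2 = 15,
 *	       M2 = 0 + 10 * (20 - 15) = 50
 *
 * Sample variance = M2 / (n - 1) = 50, i.e. stddev ~= 7.07, matching
 * the two-pass formula for the samples {10, 20}.
 */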
double avg_stats(struct stats *stats)
{
	return stats->mean;
}
/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
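/*
 * Sketch (not part of the original file): note that stddev_stats()
 * returns the standard deviation of the *mean* (s / sqrt(n)), not of
 * the samples, so it shrinks as more repetitions are averaged. For
 * the {10, 20} example above:
 *
 *	s      = sqrt(50)       ~= 7.07
 *	s_mean = 7.07 / sqrt(2)  = 5.0
 *
 * The n < 2 guard avoids dividing by zero when only one sample exists.
 */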
double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}
bool __perf_evsel_stat__is(struct evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}
#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,			x),
	ID(CYCLES_IN_TX,		cpu/cycles-t/),
	ID(TRANSACTION_START,		cpu/tx-start/),
	ID(ELISION_START,		cpu/el-start/),
	ID(CYCLES_IN_TX_CP,		cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS,		topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED,	topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED,	topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES,	topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES,	topdown-recovery-bubbles),
	ID(TOPDOWN_RETIRING,		topdown-retiring),
	ID(TOPDOWN_BAD_SPEC,		topdown-bad-spec),
	ID(TOPDOWN_FE_BOUND,		topdown-fe-bound),
	ID(TOPDOWN_BE_BOUND,		topdown-be-bound),
	ID(TOPDOWN_HEAVY_OPS,		topdown-heavy-ops),
	ID(TOPDOWN_BR_MISPREDICT,	topdown-br-mispredict),
	ID(TOPDOWN_FETCH_LAT,		topdown-fetch-lat),
	ID(TOPDOWN_MEM_BOUND,		topdown-mem-bound),
	ID(SMI_NUM,			msr/smi/),
	ID(APERF,			msr/aperf/),
};
#undef ID
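/*
 * Sketch (not part of the original file): the ID() helper stringifies
 * the event name and places it at the index named by the enum, so e.g.
 *
 *	ID(SMI_NUM, msr/smi/)
 *
 * expands to
 *
 *	[PERF_STAT_EVSEL_ID__SMI_NUM] = "msr/smi/"
 *
 * which lets perf_stat_evsel_id_init() below map an evsel's name
 * string back to its enum id with a linear strcmp() scan.
 */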
static void perf_stat_evsel_id_init(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}
static void evsel__reset_stat_priv(struct evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->stats;

	/* res_stats[3]: one struct stats each for value, enabled and running time */
	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}
static int evsel__alloc_stat_priv(struct evsel *evsel)
{
	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->stats == NULL)
		return -ENOMEM;
	evsel__reset_stat_priv(evsel);
	return 0;
}
static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps)
		zfree(&ps->group_data);
	zfree(&evsel->stats);
}
static int evsel__alloc_prev_raw_counts(struct evsel *evsel, int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}
static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}
static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}
static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
	int ncpus = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);

	if (evsel__alloc_stat_priv(evsel) < 0 ||
	    evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}
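/*
 * Sketch (not part of the original file): the three per-evsel buffers
 * allocated above, as used elsewhere in this file:
 *
 *	evsel->stats           - running averages across interval reads
 *	evsel->counts          - current val/ena/run per (cpu, thread)
 *	evsel->prev_raw_counts - previous readings, for computing deltas
 *
 * alloc_raw appears to be set only when deltas between successive
 * reads are needed (e.g. interval mode), which is why prev_raw_counts
 * is optional.
 */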
int evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	evlist__free_stats(evlist);
	return -1;
}
void evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}
void evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}
void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}
static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int ncpus = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		for (int cpu = 0; cpu < ncpus; cpu++) {
			*perf_counts(evsel->counts, cpu, thread) =
				*perf_counts(evsel->prev_raw_counts, cpu,
					     thread);
		}
	}

	evsel->counts->aggr = evsel->prev_raw_counts->aggr;
}
void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_prev_raw_counts(evsel);
}
void evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	/*
	 * To collect the overall statistics for interval mode,
	 * we copy the counts from evsel->prev_raw_counts to
	 * evsel->counts. The perf_stat_process_counter creates
	 * aggr values from per cpu values, but the per cpu values
	 * are 0 for AGGR_GLOBAL. So we use a trick that saves the
	 * previous aggr value to the first member of perf_counts,
	 * then aggr calculation in process_counter_values can work
	 * correctly.
	 */
	evlist__for_each_entry(evlist, evsel) {
		*perf_counts(evsel->prev_raw_counts, 0, 0) =
			evsel->prev_raw_counts->aggr;
	}
}
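/*
 * Sketch (not part of the original file): with AGGR_GLOBAL only
 * ->aggr carries a meaningful total while the per-cpu slots stay
 * zero, so before the final summary pass the total is stashed into
 * slot (0, 0):
 *
 *	prev_raw_counts->aggr  = { val, ena, run }    // real total
 *	perf_counts(..., 0, 0) = { 0, 0, 0 }          // before
 *	perf_counts(..., 0, 0) = { val, ena, run }    // after
 *
 * The summing loop in process_counter_values() then reproduces the
 * correct aggregate from the per-cpu view.
 */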
static void zero_per_pkg(struct evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, cpu__max_cpu());
}
static int check_per_pkg(struct evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(cpu__max_cpu());
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL).socket;
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}
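/*
 * Sketch (not part of the original file): per_pkg events (e.g. uncore
 * counters) report the same package-wide count on every CPU of a
 * socket, so only the first counted CPU per socket is kept.
 * test_and_set_bit() makes that decision idempotent:
 *
 *	CPU 0 (socket 0): bit clear -> set it, *skip = false (keep)
 *	CPU 1 (socket 0): bit set   ->         *skip = true  (drop)
 *	CPU 8 (socket 1): bit clear -> set it, *skip = false (keep)
 */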
static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_DIE:
	case AGGR_SOCKET:
	case AGGR_NODE:
	case AGGR_NONE:
		if (!evsel->snapshot)
			evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
			perf_stat__update_shadow_stats(evsel, count->val,
						       cpu, &rt_stat);
		}

		if (config->aggr_mode == AGGR_THREAD) {
			if (config->stats)
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &config->stats[thread]);
			else
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &rt_stat);
		}
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		aggr->ena += count->ena;
		aggr->run += count->run;
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}
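/*
 * Sketch (not part of the original file): when the kernel multiplexes
 * counters, ena (time enabled) exceeds run (time actually counting),
 * and perf_counts_values__scale() extrapolates:
 *
 *	final = val * ena / run
 *
 * e.g. val = 1000, ena = 100ms, run = 50ms -> scaled count 2000.
 * run == 0 while ena != 0 means the event never got onto the PMU.
 */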
static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->core.system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	/*
	 * We calculate counter's data every interval, and the display
	 * code shows ps->res_stats avg value. We need to zero the
	 * stats for interval mode, otherwise the running average shown
	 * for each interval would also cover all previous intervals.
	 */
	if (config->interval || config->summary) {
		for (i = 0; i < 3; i++)
			init_stats(&ps->res_stats[i]);
	}

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);

	return 0;
}
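/*
 * Sketch (not part of the original file): aggr.values is the
 * { val, ena, run } triple in read_format order, so res_stats[0..2]
 * track running averages of the raw count, enabled time and running
 * time across interval reads. The display code normalizes with these;
 * for instance, the multiplexing percentage perf stat prints is
 * derived from run / ena.
 */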
int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret  = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		       st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}
size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret  = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale     %d\n", sc.scale);
	ret += fprintf(fp, "... interval  %u\n", sc.interval);

	return ret;
}
int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel->leader;

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of a non-trivial group, let's enable
	 * the group read (for leader) and ID retrieval for all
	 * members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
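	/*
	 * Sketch (not part of the original file): with PERF_FORMAT_GROUP
	 * and PERF_FORMAT_ID plus the two time fields, a single read()
	 * on the leader returns:
	 *
	 *	struct {
	 *		u64 nr;            // number of events in the group
	 *		u64 time_enabled;
	 *		u64 time_running;
	 *		struct { u64 value; u64 id; } values[nr];
	 *	};
	 *
	 * One syscall snapshots the whole group consistently, and the id
	 * field lets readers map each value back to its evsel.
	 */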
	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Disabling all counters initially, they will be enabled
	 * either manually by us or by kernel via enable_on_exec
	 * set later on.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(target) && !config->initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}
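/*
 * Sketch (not part of the original file): the typical perf stat flow
 * over the helpers in this file, with error handling elided:
 *
 *	evlist__alloc_stats(evlist, interval);
 *	evlist__for_each_entry(evlist, counter)
 *		create_perf_stat_counter(counter, &config, &target, cpu);
 *	// ... workload runs, counters are read into counter->counts ...
 *	evlist__for_each_entry(evlist, counter)
 *		perf_stat_process_counter(&config, counter);
 *	evlist__free_stats(evlist);
 */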