// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);

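/*
 * Column-width bookkeeping: hists->col_len[] caches the widest string
 * seen so far for each output column, so the stdio/TUI browsers can
 * align columns without another pass over all entries.
 */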
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

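/*
 * Grow the cached column widths to fit one entry, so that by the time
 * the output is rendered every column is wide enough for its longest
 * value.
 */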
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose > 0)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 8);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }

                if (h->branch_info->srcline_from)
                        hists__new_col_len(hists, HISTC_SRCLINE_FROM,
                                        strlen(h->branch_info->srcline_from));
                if (h->branch_info->srcline_to)
                        hists__new_col_len(hists, HISTC_SRCLINE_TO,
                                        strlen(h->branch_info->srcline_to));
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (h->mem_info->iaddr.sym) {
                        symlen = (int)h->mem_info->iaddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }

                hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
                                   unresolved_col_width + 4 + 2);

        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_TIME, 12);

        if (h->srcline) {
                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
                hists__new_col_len(hists, HISTC_SRCLINE, len);
        }

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());

        if (h->trace_output)
                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

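/*
 * Account the period in the bucket matching the sample's cpumode
 * (kernel/user/guest); the overall total is accumulated separately via
 * he_stat__add_period().
 */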
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static long hist_time(unsigned long htime)
{
        unsigned long time_quantum = symbol_conf.time_quantum;
        if (time_quantum)
                return (htime / time_quantum) * time_quantum;
        return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

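/*
 * Exponential decay: scale the accumulated period and event count down
 * to 7/8, so that long-running sessions (as in 'perf top') age out old
 * samples and the display tracks recent activity.
 */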
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        if (!he->depth) {
                hists->stats.total_period -= diff;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period -= diff;
        }

        if (!he->leaf) {
                struct hist_entry *child;
                struct rb_node *node = rb_first_cached(&he->hroot_out);
                while (node) {
                        child = rb_entry(node, struct hist_entry, rb_node);
                        node = rb_next(node);

                        if (hists__decay_entry(hists, child))
                                hists__delete_entry(hists, child);
                }
        }

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        struct rb_root_cached *root_in;
        struct rb_root_cached *root_out;

        if (he->parent_he) {
                root_in  = &he->parent_he->hroot_in;
                root_out = &he->parent_he->hroot_out;
        } else {
                if (hists__has(hists, need_collapse))
                        root_in = &hists->entries_collapsed;
                else
                        root_in = hists->entries_in;
                root_out = &hists->entries;
        }

        rb_erase_cached(&he->rb_node_in, root_in);
        rb_erase_cached(&he->rb_node, root_out);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

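/*
 * Decay all entries, deleting the ones whose period drops to zero.
 * zap_user/zap_kernel force removal of user level ('.') or
 * non-user level entries regardless of their remaining period.
 */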
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int i = 0;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (i == idx)
                        return n;

                next = rb_next(&n->rb_node);
                i++;
        }

        return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */

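/*
 * Deep-copy the parts of the template that outlive the sample being
 * processed: the template is copied by value, then references are
 * taken on the maps and thread, and owned buffers (branch_info,
 * raw_data, srcline, ...) are duplicated.
 */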
static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
                            bool sample_self,
                            size_t callchain_size)
{
        *he = *template;
        he->callchain_size = callchain_size;

        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
                if (he->stat_acc == NULL)
                        return -ENOMEM;
                memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                if (!sample_self)
                        memset(&he->stat, 0, sizeof(he->stat));
        }

        map__get(he->ms.map);

        if (he->branch_info) {
                /*
                 * This branch info is (part of) an allocation made by
                 * sample__resolve_bstack() and will be freed after
                 * adding new entries, so we need to save a copy.
                 */
                he->branch_info = malloc(sizeof(*he->branch_info));
                if (he->branch_info == NULL)
                        goto err;

                memcpy(he->branch_info, template->branch_info,
                       sizeof(*he->branch_info));

                map__get(he->branch_info->from.map);
                map__get(he->branch_info->to.map);
        }

        if (he->mem_info) {
                map__get(he->mem_info->iaddr.map);
                map__get(he->mem_info->daddr.map);
        }

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_init(he->callchain);

        if (he->raw_data) {
                he->raw_data = memdup(he->raw_data, he->raw_size);
                if (he->raw_data == NULL)
                        goto err_infos;
        }

        if (he->srcline) {
                he->srcline = strdup(he->srcline);
                if (he->srcline == NULL)
                        goto err_rawdata;
        }

        if (symbol_conf.res_sample) {
                he->res_samples = calloc(sizeof(struct res_sample),
                                        symbol_conf.res_sample);
                if (!he->res_samples)
                        goto err_srcline;
        }

        INIT_LIST_HEAD(&he->pairs.node);
        thread__get(he->thread);
        he->hroot_in  = RB_ROOT_CACHED;
        he->hroot_out = RB_ROOT_CACHED;

        if (!symbol_conf.report_hierarchy)
                he->leaf = true;

        return 0;

err_srcline:
        zfree(&he->srcline);

err_rawdata:
        zfree(&he->raw_data);

err_infos:
        if (he->branch_info) {
                map__put(he->branch_info->from.map);
                map__put(he->branch_info->to.map);
                zfree(&he->branch_info);
        }
        if (he->mem_info) {
                map__put(he->mem_info->iaddr.map);
                map__put(he->mem_info->daddr.map);
        }
err:
        map__zput(he->ms.map);
        zfree(&he->stat_acc);
        return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
        return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
        free(ptr);
}

static struct hist_entry_ops default_ops = {
        .new    = hist_entry__zalloc,
        .free   = hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        struct hist_entry_ops *ops = template->ops;
        size_t callchain_size = 0;
        struct hist_entry *he;
        int err = 0;

        if (!ops)
                ops = template->ops = &default_ops;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = ops->new(callchain_size);
        if (he) {
                err = hist_entry__init(he, template, sample_self, callchain_size);
                if (err) {
                        ops->free(he);
                        he = NULL;
                }
        }

        return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
        if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
                return;

        he->hists->callchain_period += period;
        if (!he->filtered)
                he->hists->callchain_non_filtered_period += period;
}

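/*
 * Look up 'entry' in the current input tree (hists->entries_in): on a
 * match the periods are aggregated into the existing entry, otherwise
 * a new entry is created with hist_entry__new() and linked into the
 * rbtree.
 */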
static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;
        bool leftmost = true;

        p = &hists->entries_in->rb_root.rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching for an entry regardless of which
                 * sort keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self) {
                                he_stat__add_period(&he->stat, period, weight);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        mem_info__zput(entry->mem_info);

                        block_info__zput(entry->block_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hist_entry__add_callchain_period(he, period);
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

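/*
 * Return a uniformly distributed random number in [0, high).  A plain
 * random() % high would be biased towards smaller values whenever high
 * does not divide the generator's range, so values below 'thresh' are
 * rejected and the dice rerolled.
 */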
static unsigned random_max(unsigned high)
{
        unsigned thresh = -high % high;
        for (;;) {
                unsigned r = random();
                if (r >= thresh)
                        return r % high;
        }
}

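/*
 * Remember time/cpu/tid for this sample so a representative sample can
 * be shown later.  The buffer holds symbol_conf.res_sample slots; once
 * it is full a random slot is overwritten, keeping a bounded random
 * subset of the samples seen.
 */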
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
        struct res_sample *r;
        int j;

        if (he->num_res < symbol_conf.res_sample) {
                j = he->num_res++;
        } else {
                j = random_max(symbol_conf.res_sample);
        }
        r = &he->res_samples[j];
        r->time = sample->time;
        r->cpu = sample->cpu;
        r->tid = sample->tid;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
                   struct addr_location *al,
                   struct symbol *sym_parent,
                   struct branch_info *bi,
                   struct mem_info *mi,
                   struct block_info *block_info,
                   struct perf_sample *sample,
                   bool sample_self,
                   struct hist_entry_ops *ops)
{
        struct namespaces *ns = thread__namespaces(al->thread);
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .cgroup_id = {
                        .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
                        .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
                },
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .srcline = (char *) al->srcline,
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                        .weight = sample->weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .block_info = block_info,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
                .time = hist_time(sample->time),
        }, *he = hists__findnew_entry(hists, &entry, al, sample_self);

        if (!hists->has_callchains && he && he->callchain_size != 0)
                hists->has_callchains = true;
        if (he && symbol_conf.res_sample)
                hists__res_sample(he, sample);
        return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
                                    struct addr_location *al,
                                    struct symbol *sym_parent,
                                    struct branch_info *bi,
                                    struct mem_info *mi,
                                    struct perf_sample *sample,
                                    bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                                  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
                                        struct hist_entry_ops *ops,
                                        struct addr_location *al,
                                        struct symbol *sym_parent,
                                        struct branch_info *bi,
                                        struct mem_info *mi,
                                        struct perf_sample *sample,
                                        bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                                  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
                                          struct addr_location *al,
                                          struct block_info *block_info)
{
        struct hist_entry entry = {
                .block_info = block_info,
                .hists = hists,
        }, *he = hists__findnew_entry(hists, &entry, al, false);

        return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = sample->weight;
        if (!cost)
                cost = 1;

        /*
         * Must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort(), which is based
         * solely on periods.  We want sorting to be done on
         * nr_events * weight, and this is achieved indirectly by
         * passing period=weight here and via the
         * he_stat__add_period() function.
         */
        sample->period = cost;

        he = hists__add_entry(hists, al, iter->parent, NULL, mi,
                              sample, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
                             struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->map = bi[i].to.map;
        al->sym = bi[i].to.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        sample->period = 1;
        sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

        he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursion so that they're
         * accumulated only once, preventing entries from exceeding
         * 100% overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .srcline = (char *) al->srcline,
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

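/*
 * Drive one sample through the ops above: resolve its callchain, then
 * prepare_entry -> add_single_entry -> { next_entry/add_next_entry }*
 * -> finish_entry, invoking add_entry_cb (when set) for each entry
 * added along the way.
 */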
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;
        struct map *alm = NULL;

        if (al)
                alm = map__get(al->map);

        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err) {
                map__put(alm);
                return err;
        }

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        map__put(alm);

        return err;
}

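/*
 * Compare two entries along the configured sort keys:
 * hist_entry__cmp() uses each format's ->cmp for the input stage,
 * while hist_entry__collapse() below uses ->collapse when merging
 * entries in hists__collapse_resort().
 */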
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
        struct hist_entry_ops *ops = he->ops;

        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.map);
                map__zput(he->branch_info->to.map);
                free_srcline(he->branch_info->srcline_from);
                free_srcline(he->branch_info->srcline_to);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.map);
                map__zput(he->mem_info->daddr.map);
                mem_info__zput(he->mem_info);
        }

        if (he->block_info)
                block_info__zput(he->block_info);

        zfree(&he->res_samples);
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
                zfree(&he->srcfile);
        free_callchain(he->callchain);
        zfree(&he->trace_output);
        zfree(&he->raw_data);
        ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
                                   struct perf_hpp_fmt *fmt, int printed)
{
        if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
                const int width = fmt->width(fmt, hpp, he->hists);
                if (printed < width) {
                        advance_hpp(hpp, printed);
                        printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
                }
        }

        return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
                                       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
        return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
                                                enum hist_filter type,
                                                fmt_chk_fn check)
{
        struct perf_hpp_fmt *fmt;
        bool type_match = false;
        struct hist_entry *parent = he->parent_he;

        switch (type) {
        case HIST_FILTER__THREAD:
                if (symbol_conf.comm_list == NULL &&
                    symbol_conf.pid_list == NULL &&
                    symbol_conf.tid_list == NULL)
                        return;
                break;
        case HIST_FILTER__DSO:
                if (symbol_conf.dso_list == NULL)
                        return;
                break;
        case HIST_FILTER__SYMBOL:
                if (symbol_conf.sym_list == NULL)
                        return;
                break;
        case HIST_FILTER__PARENT:
        case HIST_FILTER__GUEST:
        case HIST_FILTER__HOST:
        case HIST_FILTER__SOCKET:
        case HIST_FILTER__C2C:
        default:
                return;
        }

        /* if it's filtered by own fmt, it has to have filter bits */
        perf_hpp_list__for_each_format(he->hpp_list, fmt) {
                if (check(fmt)) {
                        type_match = true;
                        break;
                }
        }

        if (type_match) {
                /*
                 * If the filter is for the current level entry, propagate
                 * the filter marker to its parents.  The marker bit was
                 * already set by default so it only needs to clear
                 * non-filtered entries.
                 */
                if (!(he->filtered & (1 << type))) {
                        while (parent) {
                                parent->filtered &= ~(1 << type);
                                parent = parent->parent_he;
                        }
                }
        } else {
                /*
                 * If the current entry doesn't have matching formats, set
                 * the filter marker for upper level entries.  It will be
                 * cleared if its lower level entries are not filtered.
                 *
                 * For lower-level entries, it inherits the parent's
                 * filter bit so that lower level entries of a
                 * non-filtered entry won't set the filter marker.
                 */
                if (parent == NULL)
                        he->filtered |= (1 << type);
                else
                        he->filtered |= (parent->filtered & (1 << type));
        }
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
        hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
                                            check_thread_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
                                            perf_hpp__is_dso_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
                                            perf_hpp__is_sym_entry);

        hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
                                                 struct rb_root_cached *root,
                                                 struct hist_entry *he,
                                                 struct hist_entry *parent_he,
                                                 struct perf_hpp_list *hpp_list)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter, *new;
        struct perf_hpp_fmt *fmt;
        int64_t cmp;
        bool leftmost = true;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = 0;
                perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, iter, he);
                        if (cmp)
                                break;
                }

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        return iter;
                }

                if (cmp < 0)
                        p = &parent->rb_left;
                else {
                        p = &parent->rb_right;
                        leftmost = false;
                }
        }

        new = hist_entry__new(he, true);
        if (new == NULL)
                return NULL;

        hists->nr_entries++;

        /* save related format list for output */
        new->hpp_list = hpp_list;
        new->parent_he = parent_he;

        hist_entry__apply_hierarchy_filters(new);

        /* some fields are now passed to 'new' */
        perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
                        he->trace_output = NULL;
                else
                        new->trace_output = NULL;

                if (perf_hpp__is_srcline_entry(fmt))
                        he->srcline = NULL;
                else
                        new->srcline = NULL;

                if (perf_hpp__is_srcfile_entry(fmt))
                        he->srcfile = NULL;
                else
                        new->srcfile = NULL;
        }

        rb_link_node(&new->rb_node_in, parent, p);
        rb_insert_color_cached(&new->rb_node_in, root, leftmost);
        return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
                                         struct rb_root_cached *root,
                                         struct hist_entry *he)
{
        struct perf_hpp_list_node *node;
        struct hist_entry *new_he = NULL;
        struct hist_entry *parent = NULL;
        int depth = 0;
        int ret = 0;

        list_for_each_entry(node, &hists->hpp_formats, list) {
                /* skip period (overhead) and elided columns */
                if (node->level == 0 || node->skip)
                        continue;

                /* insert copy of 'he' for each fmt into the hierarchy */
                new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
                if (new_he == NULL) {
                        ret = -1;
                        break;
                }

                root = &new_he->hroot_in;
                new_he->depth = depth++;
                parent = new_he;
        }

        if (new_he) {
                new_he->leaf = true;

                if (hist_entry__has_callchains(new_he) &&
                    symbol_conf.use_callchain) {
                        callchain_cursor_reset(&callchain_cursor);
                        if (callchain_merge(&callchain_cursor,
                                            new_he->callchain,
                                            he->callchain) < 0)
                                ret = -1;
                }
        }

        /* 'he' is no longer used */
        hist_entry__delete(he);

        /* return 0 (or -1) since it already applied filters */
        return ret;
}

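/*
 * Insert 'he' into the collapsed tree (or hand it to the hierarchy
 * code in hierarchy mode).  Returns 1 if a new node was linked in,
 * 0 if 'he' was merged into an existing entry (and freed), and -1 on
 * error.
 */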
1503 static int hists__collapse_insert_entry(struct hists *hists,
1504                                         struct rb_root_cached *root,
1505                                         struct hist_entry *he)
1506 {
1507         struct rb_node **p = &root->rb_root.rb_node;
1508         struct rb_node *parent = NULL;
1509         struct hist_entry *iter;
1510         int64_t cmp;
1511         bool leftmost = true;
1512
1513         if (symbol_conf.report_hierarchy)
1514                 return hists__hierarchy_insert_entry(hists, root, he);
1515
1516         while (*p != NULL) {
1517                 parent = *p;
1518                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1519
1520                 cmp = hist_entry__collapse(iter, he);
1521
1522                 if (!cmp) {
1523                         int ret = 0;
1524
1525                         he_stat__add_stat(&iter->stat, &he->stat);
1526                         if (symbol_conf.cumulate_callchain)
1527                                 he_stat__add_stat(iter->stat_acc, he->stat_acc);
1528
1529                         if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1530                                 callchain_cursor_reset(&callchain_cursor);
1531                                 if (callchain_merge(&callchain_cursor,
1532                                                     iter->callchain,
1533                                                     he->callchain) < 0)
1534                                         ret = -1;
1535                         }
1536                         hist_entry__delete(he);
1537                         return ret;
1538                 }
1539
1540                 if (cmp < 0)
1541                         p = &(*p)->rb_left;
1542                 else {
1543                         p = &(*p)->rb_right;
1544                         leftmost = false;
1545                 }
1546         }
1547         hists->nr_entries++;
1548
1549         rb_link_node(&he->rb_node_in, parent, p);
1550         rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1551         return 1;
1552 }
1553
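/*
 * The input trees are double buffered: hand back the tree filled so far
 * and switch new entries over to the other one, so the collapse pass can
 * walk a stable tree while new samples keep arriving.
 */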
1554 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1555 {
1556         struct rb_root_cached *root;
1557
1558         pthread_mutex_lock(&hists->lock);
1559
1560         root = hists->entries_in;
1561         if (++hists->entries_in > &hists->entries_in_array[1])
1562                 hists->entries_in = &hists->entries_in_array[0];
1563
1564         pthread_mutex_unlock(&hists->lock);
1565
1566         return root;
1567 }
1568
1569 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1570 {
1571         hists__filter_entry_by_dso(hists, he);
1572         hists__filter_entry_by_thread(hists, he);
1573         hists__filter_entry_by_symbol(hists, he);
1574         hists__filter_entry_by_socket(hists, he);
1575 }
1576
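/*
 * Drain the current input tree into the collapsed tree, merging duplicate
 * entries; filters are applied only to entries that were newly inserted
 * rather than merged into an existing one.
 */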
1577 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1578 {
1579         struct rb_root_cached *root;
1580         struct rb_node *next;
1581         struct hist_entry *n;
1582         int ret;
1583
1584         if (!hists__has(hists, need_collapse))
1585                 return 0;
1586
1587         hists->nr_entries = 0;
1588
1589         root = hists__get_rotate_entries_in(hists);
1590
1591         next = rb_first_cached(root);
1592
1593         while (next) {
1594                 if (session_done())
1595                         break;
1596                 n = rb_entry(next, struct hist_entry, rb_node_in);
1597                 next = rb_next(&n->rb_node_in);
1598
1599                 rb_erase_cached(&n->rb_node_in, root);
1600                 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1601                 if (ret < 0)
1602                         return -1;
1603
1604                 if (ret) {
1605                         /*
1606                          * If it wasn't combined with one of the entries already
1607                          * collapsed, we need to apply the filters that may have
1608                          * been set by, say, the hist_browser.
1609                          */
1610                         hists__apply_filters(hists, n);
1611                 }
1612                 if (prog)
1613                         ui_progress__update(prog, 1);
1614         }
1615         return 0;
1616 }
1617
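/* Compare two entries according to the configured output sort order. */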
1618 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1619 {
1620         struct hists *hists = a->hists;
1621         struct perf_hpp_fmt *fmt;
1622         int64_t cmp = 0;
1623
1624         hists__for_each_sort_list(hists, fmt) {
1625                 if (perf_hpp__should_skip(fmt, a->hists))
1626                         continue;
1627
1628                 cmp = fmt->sort(fmt, a, b);
1629                 if (cmp)
1630                         break;
1631         }
1632
1633         return cmp;
1634 }
1635
1636 static void hists__reset_filter_stats(struct hists *hists)
1637 {
1638         hists->nr_non_filtered_entries = 0;
1639         hists->stats.total_non_filtered_period = 0;
1640 }
1641
1642 void hists__reset_stats(struct hists *hists)
1643 {
1644         hists->nr_entries = 0;
1645         hists->stats.total_period = 0;
1646
1647         hists__reset_filter_stats(hists);
1648 }
1649
1650 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1651 {
1652         hists->nr_non_filtered_entries++;
1653         hists->stats.total_non_filtered_period += h->stat.period;
1654 }
1655
1656 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1657 {
1658         if (!h->filtered)
1659                 hists__inc_filter_stats(hists, h);
1660
1661         hists->nr_entries++;
1662         hists->stats.total_period += h->stat.period;
1663 }
1664
1665 static void hierarchy_recalc_total_periods(struct hists *hists)
1666 {
1667         struct rb_node *node;
1668         struct hist_entry *he;
1669
1670         node = rb_first_cached(&hists->entries);
1671
1672         hists->stats.total_period = 0;
1673         hists->stats.total_non_filtered_period = 0;
1674
1675         /*
1676          * Recalculate the total period using top-level entries only,
1677          * since lower level entries only see non-filtered entries
1678          * while upper level entries hold the sum of both.
1679          */
1680         while (node) {
1681                 he = rb_entry(node, struct hist_entry, rb_node);
1682                 node = rb_next(node);
1683
1684                 hists->stats.total_period += he->stat.period;
1685                 if (!he->filtered)
1686                         hists->stats.total_non_filtered_period += he->stat.period;
1687         }
1688 }
1689
1690 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1691                                           struct hist_entry *he)
1692 {
1693         struct rb_node **p = &root->rb_root.rb_node;
1694         struct rb_node *parent = NULL;
1695         struct hist_entry *iter;
1696         struct perf_hpp_fmt *fmt;
1697         bool leftmost = true;
1698
1699         while (*p != NULL) {
1700                 parent = *p;
1701                 iter = rb_entry(parent, struct hist_entry, rb_node);
1702
1703                 if (hist_entry__sort(he, iter) > 0)
1704                         p = &parent->rb_left;
1705                 else {
1706                         p = &parent->rb_right;
1707                         leftmost = false;
1708                 }
1709         }
1710
1711         rb_link_node(&he->rb_node, parent, p);
1712         rb_insert_color_cached(&he->rb_node, root, leftmost);
1713
1714         /* update column width of dynamic entry */
1715         perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1716                 if (perf_hpp__is_dynamic_entry(fmt))
1717                         fmt->sort(fmt, he, NULL);
1718         }
1719 }
1720
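/*
 * Recursively move the hierarchy from the input trees into the output
 * trees, sorting each level and, for leaf entries, sorting the callchain
 * against the minimum hits threshold.
 */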
1721 static void hists__hierarchy_output_resort(struct hists *hists,
1722                                            struct ui_progress *prog,
1723                                            struct rb_root_cached *root_in,
1724                                            struct rb_root_cached *root_out,
1725                                            u64 min_callchain_hits,
1726                                            bool use_callchain)
1727 {
1728         struct rb_node *node;
1729         struct hist_entry *he;
1730
1731         *root_out = RB_ROOT_CACHED;
1732         node = rb_first_cached(root_in);
1733
1734         while (node) {
1735                 he = rb_entry(node, struct hist_entry, rb_node_in);
1736                 node = rb_next(node);
1737
1738                 hierarchy_insert_output_entry(root_out, he);
1739
1740                 if (prog)
1741                         ui_progress__update(prog, 1);
1742
1743                 hists->nr_entries++;
1744                 if (!he->filtered) {
1745                         hists->nr_non_filtered_entries++;
1746                         hists__calc_col_len(hists, he);
1747                 }
1748
1749                 if (!he->leaf) {
1750                         hists__hierarchy_output_resort(hists, prog,
1751                                                        &he->hroot_in,
1752                                                        &he->hroot_out,
1753                                                        min_callchain_hits,
1754                                                        use_callchain);
1755                         continue;
1756                 }
1757
1758                 if (!use_callchain)
1759                         continue;
1760
1761                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1762                         u64 total = he->stat.period;
1763
1764                         if (symbol_conf.cumulate_callchain)
1765                                 total = he->stat_acc->period;
1766
1767                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1768                 }
1769
1770                 callchain_param.sort(&he->sorted_chain, he->callchain,
1771                                      min_callchain_hits, &callchain_param);
1772         }
1773 }
1774
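/*
 * Sort 'he' into the flat output tree, sorting its callchain first when
 * callchains are in use and updating dynamic column widths afterwards.
 */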
1775 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1776                                          struct hist_entry *he,
1777                                          u64 min_callchain_hits,
1778                                          bool use_callchain)
1779 {
1780         struct rb_node **p = &entries->rb_root.rb_node;
1781         struct rb_node *parent = NULL;
1782         struct hist_entry *iter;
1783         struct perf_hpp_fmt *fmt;
1784         bool leftmost = true;
1785
1786         if (use_callchain) {
1787                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1788                         u64 total = he->stat.period;
1789
1790                         if (symbol_conf.cumulate_callchain)
1791                                 total = he->stat_acc->period;
1792
1793                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1794                 }
1795                 callchain_param.sort(&he->sorted_chain, he->callchain,
1796                                       min_callchain_hits, &callchain_param);
1797         }
1798
1799         while (*p != NULL) {
1800                 parent = *p;
1801                 iter = rb_entry(parent, struct hist_entry, rb_node);
1802
1803                 if (hist_entry__sort(he, iter) > 0)
1804                         p = &(*p)->rb_left;
1805                 else {
1806                         p = &(*p)->rb_right;
1807                         leftmost = false;
1808                 }
1809         }
1810
1811         rb_link_node(&he->rb_node, parent, p);
1812         rb_insert_color_cached(&he->rb_node, entries, leftmost);
1813
1814         perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1815                 if (perf_hpp__is_dynamic_entry(fmt) &&
1816                     perf_hpp__defined_dynamic_entry(fmt, he->hists))
1817                         fmt->sort(fmt, he, NULL);  /* update column width */
1818         }
1819 }
1820
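/*
 * Rebuild the output tree from the collapsed entries (or straight from
 * the input tree when no collapsing is needed), recomputing the stats
 * and column widths.  A non-NULL 'cb' can reject entries before they
 * are inserted.
 */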
1821 static void output_resort(struct hists *hists, struct ui_progress *prog,
1822                           bool use_callchain, hists__resort_cb_t cb,
1823                           void *cb_arg)
1824 {
1825         struct rb_root_cached *root;
1826         struct rb_node *next;
1827         struct hist_entry *n;
1828         u64 callchain_total;
1829         u64 min_callchain_hits;
1830
1831         callchain_total = hists->callchain_period;
1832         if (symbol_conf.filter_relative)
1833                 callchain_total = hists->callchain_non_filtered_period;
1834
1835         min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1836
1837         hists__reset_stats(hists);
1838         hists__reset_col_len(hists);
1839
1840         if (symbol_conf.report_hierarchy) {
1841                 hists__hierarchy_output_resort(hists, prog,
1842                                                &hists->entries_collapsed,
1843                                                &hists->entries,
1844                                                min_callchain_hits,
1845                                                use_callchain);
1846                 hierarchy_recalc_total_periods(hists);
1847                 return;
1848         }
1849
1850         if (hists__has(hists, need_collapse))
1851                 root = &hists->entries_collapsed;
1852         else
1853                 root = hists->entries_in;
1854
1855         next = rb_first_cached(root);
1856         hists->entries = RB_ROOT_CACHED;
1857
1858         while (next) {
1859                 n = rb_entry(next, struct hist_entry, rb_node_in);
1860                 next = rb_next(&n->rb_node_in);
1861
1862                 if (cb && cb(n, cb_arg))
1863                         continue;
1864
1865                 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1866                 hists__inc_stats(hists, n);
1867
1868                 if (!n->filtered)
1869                         hists__calc_col_len(hists, n);
1870
1871                 if (prog)
1872                         ui_progress__update(prog, 1);
1873         }
1874 }
1875
1876 void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
1877                                   hists__resort_cb_t cb, void *cb_arg)
1878 {
1879         bool use_callchain;
1880
1881         if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1882                 use_callchain = evsel__has_callchain(evsel);
1883         else
1884                 use_callchain = symbol_conf.use_callchain;
1885
1886         use_callchain |= symbol_conf.show_branchflag_count;
1887
1888         output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1889 }
1890
1891 void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
1892 {
1893         return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
1894 }
1895
1896 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1897 {
1898         output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1899 }
1900
1901 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1902                              hists__resort_cb_t cb)
1903 {
1904         output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1905 }
1906
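/*
 * Decide whether a hierarchy walk may descend into the children of 'he':
 * never for a leaf or an HMD_FORCE_SIBLING move, always for an
 * HMD_FORCE_CHILD move, otherwise only when the entry is unfolded.
 */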
1907 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1908 {
1909         if (he->leaf || hmd == HMD_FORCE_SIBLING)
1910                 return false;
1911
1912         if (he->unfolded || hmd == HMD_FORCE_CHILD)
1913                 return true;
1914
1915         return false;
1916 }
1917
1918 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1919 {
1920         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1921
1922         while (can_goto_child(he, HMD_NORMAL)) {
1923                 node = rb_last(&he->hroot_out.rb_root);
1924                 he = rb_entry(node, struct hist_entry, rb_node);
1925         }
1926         return node;
1927 }
1928
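/*
 * Take one depth-first step through the hierarchy: descend into the first
 * child when allowed, otherwise go to the next sibling, climbing back up
 * through the parents once a subtree is exhausted.
 */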
1929 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1930 {
1931         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1932
1933         if (can_goto_child(he, hmd))
1934                 node = rb_first_cached(&he->hroot_out);
1935         else
1936                 node = rb_next(node);
1937
1938         while (node == NULL) {
1939                 he = he->parent_he;
1940                 if (he == NULL)
1941                         break;
1942
1943                 node = rb_next(&he->rb_node);
1944         }
1945         return node;
1946 }
1947
1948 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
1949 {
1950         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1951
1952         node = rb_prev(node);
1953         if (node)
1954                 return rb_hierarchy_last(node);
1955
1956         he = he->parent_he;
1957         if (he == NULL)
1958                 return NULL;
1959
1960         return &he->rb_node;
1961 }
1962
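/*
 * Report whether the first unfiltered child of 'he' reaches the given
 * percent limit, i.e. whether unfolding the entry would display anything.
 */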
1963 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
1964 {
1965         struct rb_node *node;
1966         struct hist_entry *child;
1967         float percent;
1968
1969         if (he->leaf)
1970                 return false;
1971
1972         node = rb_first_cached(&he->hroot_out);
1973         child = rb_entry(node, struct hist_entry, rb_node);
1974
1975         while (node && child->filtered) {
1976                 node = rb_next(node);
1977                 child = rb_entry(node, struct hist_entry, rb_node);
1978         }
1979
1980         if (node)
1981                 percent = hist_entry__get_percent_limit(child);
1982         else
1983                 percent = 0;
1984
1985         return node && percent >= limit;
1986 }
1987
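/*
 * Clear one filter bit on 'h', adding its stats back to the parents in
 * hierarchy mode; entries that become completely unfiltered are folded
 * again and accounted in the non-filtered totals and column widths.
 */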
1988 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1989                                        enum hist_filter filter)
1990 {
1991         h->filtered &= ~(1 << filter);
1992
1993         if (symbol_conf.report_hierarchy) {
1994                 struct hist_entry *parent = h->parent_he;
1995
1996                 while (parent) {
1997                         he_stat__add_stat(&parent->stat, &h->stat);
1998
1999                         parent->filtered &= ~(1 << filter);
2000
2001                         if (parent->filtered)
2002                                 goto next;
2003
2004                         /* force fold unfiltered entry for simplicity */
2005                         parent->unfolded = false;
2006                         parent->has_no_entry = false;
2007                         parent->row_offset = 0;
2008                         parent->nr_rows = 0;
2009 next:
2010                         parent = parent->parent_he;
2011                 }
2012         }
2013
2014         if (h->filtered)
2015                 return;
2016
2017         /* force fold unfiltered entry for simplicity */
2018         h->unfolded = false;
2019         h->has_no_entry = false;
2020         h->row_offset = 0;
2021         h->nr_rows = 0;
2022
2023         hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2024
2025         hists__inc_filter_stats(hists, h);
2026         hists__calc_col_len(hists, h);
2027 }
2028
2030 static bool hists__filter_entry_by_dso(struct hists *hists,
2031                                        struct hist_entry *he)
2032 {
2033         if (hists->dso_filter != NULL &&
2034             (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
2035                 he->filtered |= (1 << HIST_FILTER__DSO);
2036                 return true;
2037         }
2038
2039         return false;
2040 }
2041
2042 static bool hists__filter_entry_by_thread(struct hists *hists,
2043                                           struct hist_entry *he)
2044 {
2045         if (hists->thread_filter != NULL &&
2046             he->thread != hists->thread_filter) {
2047                 he->filtered |= (1 << HIST_FILTER__THREAD);
2048                 return true;
2049         }
2050
2051         return false;
2052 }
2053
2054 static bool hists__filter_entry_by_symbol(struct hists *hists,
2055                                           struct hist_entry *he)
2056 {
2057         if (hists->symbol_filter_str != NULL &&
2058             (!he->ms.sym || strstr(he->ms.sym->name,
2059                                    hists->symbol_filter_str) == NULL)) {
2060                 he->filtered |= (1 << HIST_FILTER__SYMBOL);
2061                 return true;
2062         }
2063
2064         return false;
2065 }
2066
2067 static bool hists__filter_entry_by_socket(struct hists *hists,
2068                                           struct hist_entry *he)
2069 {
2070         if ((hists->socket_filter > -1) &&
2071             (he->socket != hists->socket_filter)) {
2072                 he->filtered |= (1 << HIST_FILTER__SOCKET);
2073                 return true;
2074         }
2075
2076         return false;
2077 }
2078
2079 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2080
2081 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2082 {
2083         struct rb_node *nd;
2084
2085         hists->stats.nr_non_filtered_samples = 0;
2086
2087         hists__reset_filter_stats(hists);
2088         hists__reset_col_len(hists);
2089
2090         for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2091                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2092
2093                 if (filter(hists, h))
2094                         continue;
2095
2096                 hists__remove_entry_filter(hists, h, type);
2097         }
2098 }
2099
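/*
 * Re-insert 'he' into the new output tree and, for an unfiltered non-leaf
 * entry, rebuild its child tree the same way so the whole hierarchy
 * reflects the post-filter periods.
 */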
2100 static void resort_filtered_entry(struct rb_root_cached *root,
2101                                   struct hist_entry *he)
2102 {
2103         struct rb_node **p = &root->rb_root.rb_node;
2104         struct rb_node *parent = NULL;
2105         struct hist_entry *iter;
2106         struct rb_root_cached new_root = RB_ROOT_CACHED;
2107         struct rb_node *nd;
2108         bool leftmost = true;
2109
2110         while (*p != NULL) {
2111                 parent = *p;
2112                 iter = rb_entry(parent, struct hist_entry, rb_node);
2113
2114                 if (hist_entry__sort(he, iter) > 0)
2115                         p = &(*p)->rb_left;
2116                 else {
2117                         p = &(*p)->rb_right;
2118                         leftmost = false;
2119                 }
2120         }
2121
2122         rb_link_node(&he->rb_node, parent, p);
2123         rb_insert_color_cached(&he->rb_node, root, leftmost);
2124
2125         if (he->leaf || he->filtered)
2126                 return;
2127
2128         nd = rb_first_cached(&he->hroot_out);
2129         while (nd) {
2130                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2131
2132                 nd = rb_next(nd);
2133                 rb_erase_cached(&h->rb_node, &he->hroot_out);
2134
2135                 resort_filtered_entry(&new_root, h);
2136         }
2137
2138         he->hroot_out = new_root;
2139 }
2140
2141 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2142 {
2143         struct rb_node *nd;
2144         struct rb_root_cached new_root = RB_ROOT_CACHED;
2145
2146         hists->stats.nr_non_filtered_samples = 0;
2147
2148         hists__reset_filter_stats(hists);
2149         hists__reset_col_len(hists);
2150
2151         nd = rb_first_cached(&hists->entries);
2152         while (nd) {
2153                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2154                 int ret;
2155
2156                 ret = hist_entry__filter(h, type, arg);
2157
2158                 /*
2159                  * case 1. non-matching type
2160                  * zero out the period, set filter marker and move to child
2161                  */
2162                 if (ret < 0) {
2163                         memset(&h->stat, 0, sizeof(h->stat));
2164                         h->filtered |= (1 << type);
2165
2166                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2167                 }
2168                 /*
2169                  * case 2. matched type (filter out)
2170                  * set filter marker and move to next
2171                  */
2172                 else if (ret == 1) {
2173                         h->filtered |= (1 << type);
2174
2175                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2176                 }
2177                 /*
2178                  * case 3. ok (not filtered)
2179                  * add period to hists and parents, erase the filter marker
2180                  * and move to next sibling
2181                  */
2182                 else {
2183                         hists__remove_entry_filter(hists, h, type);
2184
2185                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2186                 }
2187         }
2188
2189         hierarchy_recalc_total_periods(hists);
2190
2191         /*
2192          * Resort the output after applying a new filter, since a filter at a
2193          * lower hierarchy level can change the periods at an upper level.
2194          */
2195         nd = rb_first_cached(&hists->entries);
2196         while (nd) {
2197                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2198
2199                 nd = rb_next(nd);
2200                 rb_erase_cached(&h->rb_node, &hists->entries);
2201
2202                 resort_filtered_entry(&new_root, h);
2203         }
2204
2205         hists->entries = new_root;
2206 }
2207
2208 void hists__filter_by_thread(struct hists *hists)
2209 {
2210         if (symbol_conf.report_hierarchy)
2211                 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2212                                         hists->thread_filter);
2213         else
2214                 hists__filter_by_type(hists, HIST_FILTER__THREAD,
2215                                       hists__filter_entry_by_thread);
2216 }
2217
2218 void hists__filter_by_dso(struct hists *hists)
2219 {
2220         if (symbol_conf.report_hierarchy)
2221                 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2222                                         hists->dso_filter);
2223         else
2224                 hists__filter_by_type(hists, HIST_FILTER__DSO,
2225                                       hists__filter_entry_by_dso);
2226 }
2227
2228 void hists__filter_by_symbol(struct hists *hists)
2229 {
2230         if (symbol_conf.report_hierarchy)
2231                 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2232                                         hists->symbol_filter_str);
2233         else
2234                 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2235                                       hists__filter_entry_by_symbol);
2236 }
2237
2238 void hists__filter_by_socket(struct hists *hists)
2239 {
2240         if (symbol_conf.report_hierarchy)
2241                 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2242                                         &hists->socket_filter);
2243         else
2244                 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2245                                       hists__filter_entry_by_socket);
2246 }
2247
2248 void events_stats__inc(struct events_stats *stats, u32 type)
2249 {
2250         ++stats->nr_events[0];
2251         ++stats->nr_events[type];
2252 }
2253
2254 void hists__inc_nr_events(struct hists *hists, u32 type)
2255 {
2256         events_stats__inc(&hists->stats, type);
2257 }
2258
2259 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2260 {
2261         events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2262         if (!filtered)
2263                 hists->stats.nr_non_filtered_samples++;
2264 }
2265
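/*
 * Find the entry matching 'pair' in 'hists', or add a zero-stat dummy
 * entry for it so both hists end up with comparable buckets.
 */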
2266 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2267                                                  struct hist_entry *pair)
2268 {
2269         struct rb_root_cached *root;
2270         struct rb_node **p;
2271         struct rb_node *parent = NULL;
2272         struct hist_entry *he;
2273         int64_t cmp;
2274         bool leftmost = true;
2275
2276         if (hists__has(hists, need_collapse))
2277                 root = &hists->entries_collapsed;
2278         else
2279                 root = hists->entries_in;
2280
2281         p = &root->rb_root.rb_node;
2282
2283         while (*p != NULL) {
2284                 parent = *p;
2285                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2286
2287                 cmp = hist_entry__collapse(he, pair);
2288
2289                 if (!cmp)
2290                         goto out;
2291
2292                 if (cmp < 0)
2293                         p = &(*p)->rb_left;
2294                 else {
2295                         p = &(*p)->rb_right;
2296                         leftmost = false;
2297                 }
2298         }
2299
2300         he = hist_entry__new(pair, true);
2301         if (he) {
2302                 memset(&he->stat, 0, sizeof(he->stat));
2303                 he->hists = hists;
2304                 if (symbol_conf.cumulate_callchain)
2305                         memset(he->stat_acc, 0, sizeof(he->stat));
2306                 rb_link_node(&he->rb_node_in, parent, p);
2307                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2308                 hists__inc_stats(hists, he);
2309                 he->dummy = true;
2310         }
2311 out:
2312         return he;
2313 }
2314
2315 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2316                                                     struct rb_root_cached *root,
2317                                                     struct hist_entry *pair)
2318 {
2319         struct rb_node **p;
2320         struct rb_node *parent = NULL;
2321         struct hist_entry *he;
2322         struct perf_hpp_fmt *fmt;
2323         bool leftmost = true;
2324
2325         p = &root->rb_root.rb_node;
2326         while (*p != NULL) {
2327                 int64_t cmp = 0;
2328
2329                 parent = *p;
2330                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2331
2332                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2333                         cmp = fmt->collapse(fmt, he, pair);
2334                         if (cmp)
2335                                 break;
2336                 }
2337                 if (!cmp)
2338                         goto out;
2339
2340                 if (cmp < 0)
2341                         p = &parent->rb_left;
2342                 else {
2343                         p = &parent->rb_right;
2344                         leftmost = false;
2345                 }
2346         }
2347
2348         he = hist_entry__new(pair, true);
2349         if (he) {
2350                 rb_link_node(&he->rb_node_in, parent, p);
2351                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2352
2353                 he->dummy = true;
2354                 he->hists = hists;
2355                 memset(&he->stat, 0, sizeof(he->stat));
2356                 hists__inc_stats(hists, he);
2357         }
2358 out:
2359         return he;
2360 }
2361
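/* Look up the entry in 'hists' that is collapse-equal to 'he'. */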
2362 static struct hist_entry *hists__find_entry(struct hists *hists,
2363                                             struct hist_entry *he)
2364 {
2365         struct rb_node *n;
2366
2367         if (hists__has(hists, need_collapse))
2368                 n = hists->entries_collapsed.rb_root.rb_node;
2369         else
2370                 n = hists->entries_in->rb_root.rb_node;
2371
2372         while (n) {
2373                 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2374                 int64_t cmp = hist_entry__collapse(iter, he);
2375
2376                 if (cmp < 0)
2377                         n = n->rb_left;
2378                 else if (cmp > 0)
2379                         n = n->rb_right;
2380                 else
2381                         return iter;
2382         }
2383
2384         return NULL;
2385 }
2386
2387 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2388                                                       struct hist_entry *he)
2389 {
2390         struct rb_node *n = root->rb_root.rb_node;
2391
2392         while (n) {
2393                 struct hist_entry *iter;
2394                 struct perf_hpp_fmt *fmt;
2395                 int64_t cmp = 0;
2396
2397                 iter = rb_entry(n, struct hist_entry, rb_node_in);
2398                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2399                         cmp = fmt->collapse(fmt, iter, he);
2400                         if (cmp)
2401                                 break;
2402                 }
2403
2404                 if (cmp < 0)
2405                         n = n->rb_left;
2406                 else if (cmp > 0)
2407                         n = n->rb_right;
2408                 else
2409                         return iter;
2410         }
2411
2412         return NULL;
2413 }
2414
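/*
 * Hierarchy flavour of hists__match() below: pair up matching entries
 * level by level, recursing into the children of each matched pair.
 */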
2415 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2416                                    struct rb_root_cached *other_root)
2417 {
2418         struct rb_node *nd;
2419         struct hist_entry *pos, *pair;
2420
2421         for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2422                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2423                 pair = hists__find_hierarchy_entry(other_root, pos);
2424
2425                 if (pair) {
2426                         hist_entry__add_pair(pair, pos);
2427                         hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2428                 }
2429         }
2430 }
2431
2432 /*
2433  * Look for pairs to link to the leader buckets (hist_entries):
2434  */
2435 void hists__match(struct hists *leader, struct hists *other)
2436 {
2437         struct rb_root_cached *root;
2438         struct rb_node *nd;
2439         struct hist_entry *pos, *pair;
2440
2441         if (symbol_conf.report_hierarchy) {
2442                 /* hierarchy report always collapses entries */
2443                 return hists__match_hierarchy(&leader->entries_collapsed,
2444                                               &other->entries_collapsed);
2445         }
2446
2447         if (hists__has(leader, need_collapse))
2448                 root = &leader->entries_collapsed;
2449         else
2450                 root = leader->entries_in;
2451
2452         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2453                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2454                 pair = hists__find_entry(other, pos);
2455
2456                 if (pair)
2457                         hist_entry__add_pair(pair, pos);
2458         }
2459 }
2460
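/*
 * Hierarchy flavour of hists__link() below: every entry in the other
 * hists that is still unpaired gets a dummy leader entry at the same
 * level, then the children are linked recursively.
 */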
2461 static int hists__link_hierarchy(struct hists *leader_hists,
2462                                  struct hist_entry *parent,
2463                                  struct rb_root_cached *leader_root,
2464                                  struct rb_root_cached *other_root)
2465 {
2466         struct rb_node *nd;
2467         struct hist_entry *pos, *leader;
2468
2469         for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2470                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2471
2472                 if (hist_entry__has_pairs(pos)) {
2473                         bool found = false;
2474
2475                         list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2476                                 if (leader->hists == leader_hists) {
2477                                         found = true;
2478                                         break;
2479                                 }
2480                         }
2481                         if (!found)
2482                                 return -1;
2483                 } else {
2484                         leader = add_dummy_hierarchy_entry(leader_hists,
2485                                                            leader_root, pos);
2486                         if (leader == NULL)
2487                                 return -1;
2488
2489                         /* the parent comes from the leader hierarchy, not from 'pos' */
2490                         leader->parent_he = parent;
2491
2492                         hist_entry__add_pair(pos, leader);
2493                 }
2494
2495                 if (!pos->leaf) {
2496                         if (hists__link_hierarchy(leader_hists, leader,
2497                                                   &leader->hroot_in,
2498                                                   &pos->hroot_in) < 0)
2499                                 return -1;
2500                 }
2501         }
2502         return 0;
2503 }
2504
2505 /*
2506  * Look for entries in the other hists that are not present in the leader;
2507  * if we find them, just add a dummy entry on the leader hists, with
2508  * period=0, nr_events=0, to serve as the list header.
2509  */
2510 int hists__link(struct hists *leader, struct hists *other)
2511 {
2512         struct rb_root_cached *root;
2513         struct rb_node *nd;
2514         struct hist_entry *pos, *pair;
2515
2516         if (symbol_conf.report_hierarchy) {
2517                 /* hierarchy report always collapses entries */
2518                 return hists__link_hierarchy(leader, NULL,
2519                                              &leader->entries_collapsed,
2520                                              &other->entries_collapsed);
2521         }
2522
2523         if (hists__has(other, need_collapse))
2524                 root = &other->entries_collapsed;
2525         else
2526                 root = other->entries_in;
2527
2528         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2529                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2530
2531                 if (!hist_entry__has_pairs(pos)) {
2532                         pair = hists__add_dummy_entry(leader, pos);
2533                         if (pair == NULL)
2534                                 return -1;
2535                         hist_entry__add_pair(pos, pair);
2536                 }
2537         }
2538
2539         return 0;
2540 }
2541
2542 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2543                           struct perf_sample *sample, bool nonany_branch_mode)
2544 {
2545         struct branch_info *bi;
2546
2547         /* If we have branch cycles, always annotate them. */
2548         if (bs && bs->nr && bs->entries[0].flags.cycles) {
2549                 int i;
2550
2551                 bi = sample__resolve_bstack(sample, al);
2552                 if (bi) {
2553                         struct addr_map_symbol *prev = NULL;
2554
2555                         /*
2556                          * Ignore errors; we still want to process
2557                          * the other entries.
2558                          *
2559                          * For non-standard branch modes, always
2560                          * force no IPC (prev == NULL).
2561                          *
2562                          * Note that perf stores branches reversed from
2563                          * program order!
2564                          */
2565                         for (i = bs->nr - 1; i >= 0; i--) {
2566                                 addr_map_symbol__account_cycles(&bi[i].from,
2567                                         nonany_branch_mode ? NULL : prev,
2568                                         bi[i].flags.cycles);
2569                                 prev = &bi[i].to;
2570                         }
2571                         free(bi);
2572                 }
2573         }
2574 }
2575
2576 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
2577 {
2578         struct perf_evsel *pos;
2579         size_t ret = 0;
2580
2581         evlist__for_each_entry(evlist, pos) {
2582                 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2583                 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2584         }
2585
2586         return ret;
2587 }
2588
2590 u64 hists__total_period(struct hists *hists)
2591 {
2592         return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2593                 hists->stats.total_period;
2594 }
2595
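/*
 * Format the summary line shown above the hists output: sample and event
 * counts (aggregated over the event group and honouring relative
 * filtering), followed by any active UID/thread/DSO/socket filters.
 */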
2596 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2597 {
2598         char unit;
2599         int printed;
2600         const struct dso *dso = hists->dso_filter;
2601         struct thread *thread = hists->thread_filter;
2602         int socket_id = hists->socket_filter;
2603         unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
2604         u64 nr_events = hists->stats.total_period;
2605         struct perf_evsel *evsel = hists_to_evsel(hists);
2606         const char *ev_name = perf_evsel__name(evsel);
2607         char buf[512], sample_freq_str[64] = "";
2608         size_t buflen = sizeof(buf);
2609         char ref[30] = " show reference callgraph, ";
2610         bool enable_ref = false;
2611
2612         if (symbol_conf.filter_relative) {
2613                 nr_samples = hists->stats.nr_non_filtered_samples;
2614                 nr_events = hists->stats.total_non_filtered_period;
2615         }
2616
2617         if (perf_evsel__is_group_event(evsel)) {
2618                 struct perf_evsel *pos;
2619
2620                 perf_evsel__group_desc(evsel, buf, buflen);
2621                 ev_name = buf;
2622
2623                 for_each_group_member(pos, evsel) {
2624                         struct hists *pos_hists = evsel__hists(pos);
2625
2626                         if (symbol_conf.filter_relative) {
2627                                 nr_samples += pos_hists->stats.nr_non_filtered_samples;
2628                                 nr_events += pos_hists->stats.total_non_filtered_period;
2629                         } else {
2630                                 nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
2631                                 nr_events += pos_hists->stats.total_period;
2632                         }
2633                 }
2634         }
2635
2636         if (symbol_conf.show_ref_callgraph &&
2637             strstr(ev_name, "call-graph=no"))
2638                 enable_ref = true;
2639
2640         if (show_freq)
2641                 scnprintf(sample_freq_str, sizeof(sample_freq_str), " %" PRIu64 " Hz,", evsel->attr.sample_freq);
2642
2643         nr_samples = convert_unit(nr_samples, &unit);
2644         printed = scnprintf(bf, size,
2645                            "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2646                            nr_samples, unit, evsel->nr_members > 1 ? "s" : "",
2647                            ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2648
2650         if (hists->uid_filter_str)
2651                 printed += scnprintf(bf + printed, size - printed,
2652                                     ", UID: %s", hists->uid_filter_str);
2653         if (thread) {
2654                 if (hists__has(hists, thread)) {
2655                         printed += scnprintf(bf + printed, size - printed,
2656                                     ", Thread: %s(%d)",
2657                                      (thread->comm_set ? thread__comm_str(thread) : ""),
2658                                     thread->tid);
2659                 } else {
2660                         printed += scnprintf(bf + printed, size - printed,
2661                                     ", Thread: %s",
2662                                      (thread->comm_set ? thread__comm_str(thread) : ""));
2663                 }
2664         }
2665         if (dso)
2666                 printed += scnprintf(bf + printed, size - printed,
2667                                     ", DSO: %s", dso->short_name);
2668         if (socket_id > -1)
2669                 printed += scnprintf(bf + printed, size - printed,
2670                                     ", Processor Socket: %d", socket_id);
2671
2672         return printed;
2673 }
2674
2675 int parse_filter_percentage(const struct option *opt __maybe_unused,
2676                             const char *arg, int unset __maybe_unused)
2677 {
2678         if (!strcmp(arg, "relative"))
2679                 symbol_conf.filter_relative = true;
2680         else if (!strcmp(arg, "absolute"))
2681                 symbol_conf.filter_relative = false;
2682         else {
2683                 pr_debug("Invalid percentage: %s\n", arg);
2684                 return -1;
2685         }
2686
2687         return 0;
2688 }
2689
2690 int perf_hist_config(const char *var, const char *value)
2691 {
2692         if (!strcmp(var, "hist.percentage"))
2693                 return parse_filter_percentage(NULL, value, 0);
2694
2695         return 0;
2696 }
2697
2698 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2699 {
2700         memset(hists, 0, sizeof(*hists));
2701         hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2702         hists->entries_in = &hists->entries_in_array[0];
2703         hists->entries_collapsed = RB_ROOT_CACHED;
2704         hists->entries = RB_ROOT_CACHED;
2705         pthread_mutex_init(&hists->lock, NULL);
2706         hists->socket_filter = -1;
2707         hists->hpp_list = hpp_list;
2708         INIT_LIST_HEAD(&hists->hpp_formats);
2709         return 0;
2710 }
2711
2712 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2713 {
2714         struct rb_node *node;
2715         struct hist_entry *he;
2716
2717         while (!RB_EMPTY_ROOT(&root->rb_root)) {
2718                 node = rb_first_cached(root);
2719                 rb_erase_cached(node, root);
2720
2721                 he = rb_entry(node, struct hist_entry, rb_node_in);
2722                 hist_entry__delete(he);
2723         }
2724 }
2725
2726 static void hists__delete_all_entries(struct hists *hists)
2727 {
2728         hists__delete_entries(hists);
2729         hists__delete_remaining_entries(&hists->entries_in_array[0]);
2730         hists__delete_remaining_entries(&hists->entries_in_array[1]);
2731         hists__delete_remaining_entries(&hists->entries_collapsed);
2732 }
2733
2734 static void hists_evsel__exit(struct perf_evsel *evsel)
2735 {
2736         struct hists *hists = evsel__hists(evsel);
2737         struct perf_hpp_fmt *fmt, *pos;
2738         struct perf_hpp_list_node *node, *tmp;
2739
2740         hists__delete_all_entries(hists);
2741
2742         list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2743                 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2744                         list_del_init(&fmt->list);
2745                         free(fmt);
2746                 }
2747                 list_del_init(&node->list);
2748                 free(node);
2749         }
2750 }
2751
2752 static int hists_evsel__init(struct perf_evsel *evsel)
2753 {
2754         struct hists *hists = evsel__hists(evsel);
2755
2756         __hists__init(hists, &perf_hpp_list);
2757         return 0;
2758 }
2759
2760 /*
2761  * The hist_entries stored in the rbtrees are freed by hists_evsel__exit()
2762  * above, which is registered as the evsel destructor below.
2763  */
2764
2765 int hists__init(void)
2766 {
2767         int err = perf_evsel__object_config(sizeof(struct hists_evsel),
2768                                             hists_evsel__init,
2769                                             hists_evsel__exit);
2770         if (err)
2771                 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2772
2773         return err;
2774 }
2775
2776 void perf_hpp_list__init(struct perf_hpp_list *list)
2777 {
2778         INIT_LIST_HEAD(&list->fields);
2779         INIT_LIST_HEAD(&list->sorts);
2780 }