perf tools: Add helper functions for some sort keys
tools/perf/util/hist.c [cascardo/linux.git]
1 #include "util.h"
2 #include "build-id.h"
3 #include "hist.h"
4 #include "session.h"
5 #include "sort.h"
6 #include "evlist.h"
7 #include "evsel.h"
8 #include "annotate.h"
9 #include "ui/progress.h"
10 #include <math.h>
11
12 static bool hists__filter_entry_by_dso(struct hists *hists,
13                                        struct hist_entry *he);
14 static bool hists__filter_entry_by_thread(struct hists *hists,
15                                           struct hist_entry *he);
16 static bool hists__filter_entry_by_symbol(struct hists *hists,
17                                           struct hist_entry *he);
18 static bool hists__filter_entry_by_socket(struct hists *hists,
19                                           struct hist_entry *he);
20
21 u16 hists__col_len(struct hists *hists, enum hist_column col)
22 {
23         return hists->col_len[col];
24 }
25
26 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
27 {
28         hists->col_len[col] = len;
29 }
30
31 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
32 {
33         if (len > hists__col_len(hists, col)) {
34                 hists__set_col_len(hists, col, len);
35                 return true;
36         }
37         return false;
38 }
39
40 void hists__reset_col_len(struct hists *hists)
41 {
42         enum hist_column col;
43
44         for (col = 0; col < HISTC_NR_COLS; ++col)
45                 hists__set_col_len(hists, col, 0);
46 }
47
48 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
49 {
50         const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
51
52         if (hists__col_len(hists, dso) < unresolved_col_width &&
53             !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
54             !symbol_conf.dso_list)
55                 hists__set_col_len(hists, dso, unresolved_col_width);
56 }
57
58 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
59 {
60         const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
61         int symlen;
62         u16 len;
63
64         /*
65          * +4 accounts for '[x] ' priv level info
66          * +2 accounts for 0x prefix on raw addresses
67          * +3 accounts for ' y ' symtab origin info
68          */
69         if (h->ms.sym) {
70                 symlen = h->ms.sym->namelen + 4;
71                 if (verbose)
72                         symlen += BITS_PER_LONG / 4 + 2 + 3;
73                 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
74         } else {
75                 symlen = unresolved_col_width + 4 + 2;
76                 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
77                 hists__set_unres_dso_col_len(hists, HISTC_DSO);
78         }
79
80         len = thread__comm_len(h->thread);
81         if (hists__new_col_len(hists, HISTC_COMM, len))
82                 hists__set_col_len(hists, HISTC_THREAD, len + 6);
83
84         if (h->ms.map) {
85                 len = dso__name_len(h->ms.map->dso);
86                 hists__new_col_len(hists, HISTC_DSO, len);
87         }
88
89         if (h->parent)
90                 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
91
92         if (h->branch_info) {
93                 if (h->branch_info->from.sym) {
94                         symlen = (int)h->branch_info->from.sym->namelen + 4;
95                         if (verbose)
96                                 symlen += BITS_PER_LONG / 4 + 2 + 3;
97                         hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
98
99                         symlen = dso__name_len(h->branch_info->from.map->dso);
100                         hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
101                 } else {
102                         symlen = unresolved_col_width + 4 + 2;
103                         hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
104                         hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
105                 }
106
107                 if (h->branch_info->to.sym) {
108                         symlen = (int)h->branch_info->to.sym->namelen + 4;
109                         if (verbose)
110                                 symlen += BITS_PER_LONG / 4 + 2 + 3;
111                         hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
112
113                         symlen = dso__name_len(h->branch_info->to.map->dso);
114                         hists__new_col_len(hists, HISTC_DSO_TO, symlen);
115                 } else {
116                         symlen = unresolved_col_width + 4 + 2;
117                         hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
118                         hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
119                 }
120         }
121
122         if (h->mem_info) {
123                 if (h->mem_info->daddr.sym) {
124                         symlen = (int)h->mem_info->daddr.sym->namelen + 4
125                                + unresolved_col_width + 2;
126                         hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
127                                            symlen);
128                         hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
129                                            symlen + 1);
130                 } else {
131                         symlen = unresolved_col_width + 4 + 2;
132                         hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
133                                            symlen);
134                         hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
135                                            symlen);
136                 }
137
138                 if (h->mem_info->iaddr.sym) {
139                         symlen = (int)h->mem_info->iaddr.sym->namelen + 4
140                                + unresolved_col_width + 2;
141                         hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
142                                            symlen);
143                 } else {
144                         symlen = unresolved_col_width + 4 + 2;
145                         hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
146                                            symlen);
147                 }
148
149                 if (h->mem_info->daddr.map) {
150                         symlen = dso__name_len(h->mem_info->daddr.map->dso);
151                         hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
152                                            symlen);
153                 } else {
154                         symlen = unresolved_col_width + 4 + 2;
155                         hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
156                 }
157         } else {
158                 symlen = unresolved_col_width + 4 + 2;
159                 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
160                 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
161                 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
162         }
163
164         hists__new_col_len(hists, HISTC_CPU, 3);
165         hists__new_col_len(hists, HISTC_SOCKET, 6);
166         hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
167         hists__new_col_len(hists, HISTC_MEM_TLB, 22);
168         hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
169         hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
170         hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
171         hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
172
173         if (h->srcline)
174                 hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
175
176         if (h->srcfile)
177                 hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
178
179         if (h->transaction)
180                 hists__new_col_len(hists, HISTC_TRANSACTION,
181                                    hist_entry__transaction_len());
182
183         if (h->trace_output)
184                 hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
185 }
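/*
 * Editor's note, illustrative arithmetic (not part of the original source):
 * using the constants explained in the comment above, a resolved kernel
 * symbol named "do_sys_open" (namelen = 11) on a 64-bit build with verbose
 * output requests a HISTC_SYMBOL width of
 *
 *	11 + 4			("[k] " priv level)
 *	   + 64 / 4 + 2		(0x-prefixed raw address)
 *	   + 3			(" y " symtab origin)
 *	= 36 characters
 *
 * while an unresolved sample falls back to the 16 + 4 + 2 = 22 character
 * "unresolved" width.  hists__new_col_len() only ever grows a column.
 */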
186
187 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
188 {
189         struct rb_node *next = rb_first(&hists->entries);
190         struct hist_entry *n;
191         int row = 0;
192
193         hists__reset_col_len(hists);
194
195         while (next && row++ < max_rows) {
196                 n = rb_entry(next, struct hist_entry, rb_node);
197                 if (!n->filtered)
198                         hists__calc_col_len(hists, n);
199                 next = rb_next(&n->rb_node);
200         }
201 }
202
203 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
204                                         unsigned int cpumode, u64 period)
205 {
206         switch (cpumode) {
207         case PERF_RECORD_MISC_KERNEL:
208                 he_stat->period_sys += period;
209                 break;
210         case PERF_RECORD_MISC_USER:
211                 he_stat->period_us += period;
212                 break;
213         case PERF_RECORD_MISC_GUEST_KERNEL:
214                 he_stat->period_guest_sys += period;
215                 break;
216         case PERF_RECORD_MISC_GUEST_USER:
217                 he_stat->period_guest_us += period;
218                 break;
219         default:
220                 break;
221         }
222 }
223
224 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
225                                 u64 weight)
226 {
227
228         he_stat->period         += period;
229         he_stat->weight         += weight;
230         he_stat->nr_events      += 1;
231 }
232
233 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
234 {
235         dest->period            += src->period;
236         dest->period_sys        += src->period_sys;
237         dest->period_us         += src->period_us;
238         dest->period_guest_sys  += src->period_guest_sys;
239         dest->period_guest_us   += src->period_guest_us;
240         dest->nr_events         += src->nr_events;
241         dest->weight            += src->weight;
242 }
243
244 static void he_stat__decay(struct he_stat *he_stat)
245 {
246         he_stat->period = (he_stat->period * 7) / 8;
247         he_stat->nr_events = (he_stat->nr_events * 7) / 8;
248         /* XXX need decay for weight too? */
249 }
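/*
 * Editor's note, worked example (not part of the original source): the 7/8
 * factor gives an exponential decay used by 'perf top' to age out stale
 * entries between refreshes.  A period of 800 decays to 700, then 612,
 * 535, ... and hists__decay_entries() only deletes an entry once integer
 * truncation drives its period all the way to 0.
 */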
250
251 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
252 {
253         u64 prev_period = he->stat.period;
254         u64 diff;
255
256         if (prev_period == 0)
257                 return true;
258
259         he_stat__decay(&he->stat);
260         if (symbol_conf.cumulate_callchain)
261                 he_stat__decay(he->stat_acc);
262         decay_callchain(he->callchain);
263
264         diff = prev_period - he->stat.period;
265
266         hists->stats.total_period -= diff;
267         if (!he->filtered)
268                 hists->stats.total_non_filtered_period -= diff;
269
270         return he->stat.period == 0;
271 }
272
273 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
274 {
275         rb_erase(&he->rb_node, &hists->entries);
276
277         if (sort__need_collapse)
278                 rb_erase(&he->rb_node_in, &hists->entries_collapsed);
279         else
280                 rb_erase(&he->rb_node_in, hists->entries_in);
281
282         --hists->nr_entries;
283         if (!he->filtered)
284                 --hists->nr_non_filtered_entries;
285
286         hist_entry__delete(he);
287 }
288
289 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
290 {
291         struct rb_node *next = rb_first(&hists->entries);
292         struct hist_entry *n;
293
294         while (next) {
295                 n = rb_entry(next, struct hist_entry, rb_node);
296                 next = rb_next(&n->rb_node);
297                 if (((zap_user && n->level == '.') ||
298                      (zap_kernel && n->level != '.') ||
299                      hists__decay_entry(hists, n))) {
300                         hists__delete_entry(hists, n);
301                 }
302         }
303 }
304
305 void hists__delete_entries(struct hists *hists)
306 {
307         struct rb_node *next = rb_first(&hists->entries);
308         struct hist_entry *n;
309
310         while (next) {
311                 n = rb_entry(next, struct hist_entry, rb_node);
312                 next = rb_next(&n->rb_node);
313
314                 hists__delete_entry(hists, n);
315         }
316 }
317
318 /*
319  * histogram, sorted on item, collects periods
320  */
321
322 static struct hist_entry *hist_entry__new(struct hist_entry *template,
323                                           bool sample_self)
324 {
325         size_t callchain_size = 0;
326         struct hist_entry *he;
327
328         if (symbol_conf.use_callchain)
329                 callchain_size = sizeof(struct callchain_root);
330
331         he = zalloc(sizeof(*he) + callchain_size);
332
333         if (he != NULL) {
334                 *he = *template;
335
336                 if (symbol_conf.cumulate_callchain) {
337                         he->stat_acc = malloc(sizeof(he->stat));
338                         if (he->stat_acc == NULL) {
339                                 free(he);
340                                 return NULL;
341                         }
342                         memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
343                         if (!sample_self)
344                                 memset(&he->stat, 0, sizeof(he->stat));
345                 }
346
347                 map__get(he->ms.map);
348
349                 if (he->branch_info) {
350                         /*
351                          * This branch info is (part of an array) allocated by
352                          * sample__resolve_bstack() and will be freed after
353                          * adding new entries.  So we need to save a copy.
354                          */
355                         he->branch_info = malloc(sizeof(*he->branch_info));
356                         if (he->branch_info == NULL) {
357                                 map__zput(he->ms.map);
358                                 free(he->stat_acc);
359                                 free(he);
360                                 return NULL;
361                         }
362
363                         memcpy(he->branch_info, template->branch_info,
364                                sizeof(*he->branch_info));
365
366                         map__get(he->branch_info->from.map);
367                         map__get(he->branch_info->to.map);
368                 }
369
370                 if (he->mem_info) {
371                         map__get(he->mem_info->iaddr.map);
372                         map__get(he->mem_info->daddr.map);
373                 }
374
375                 if (symbol_conf.use_callchain)
376                         callchain_init(he->callchain);
377
378                 if (he->raw_data) {
379                         he->raw_data = memdup(he->raw_data, he->raw_size);
380
381                         if (he->raw_data == NULL) {
382                                 map__put(he->ms.map);
383                                 if (he->branch_info) {
384                                         map__put(he->branch_info->from.map);
385                                         map__put(he->branch_info->to.map);
386                                         free(he->branch_info);
387                                 }
388                                 if (he->mem_info) {
389                                         map__put(he->mem_info->iaddr.map);
390                                         map__put(he->mem_info->daddr.map);
391                                 }
392                                 free(he->stat_acc);
393                                 free(he);
394                                 return NULL;
395                         }
396                 }
397                 INIT_LIST_HEAD(&he->pairs.node);
398                 thread__get(he->thread);
399         }
400
401         return he;
402 }
403
404 static u8 symbol__parent_filter(const struct symbol *parent)
405 {
406         if (symbol_conf.exclude_other && parent == NULL)
407                 return 1 << HIST_FILTER__PARENT;
408         return 0;
409 }
410
411 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
412 {
413         if (!symbol_conf.use_callchain)
414                 return;
415
416         he->hists->callchain_period += period;
417         if (!he->filtered)
418                 he->hists->callchain_non_filtered_period += period;
419 }
420
421 static struct hist_entry *hists__findnew_entry(struct hists *hists,
422                                                struct hist_entry *entry,
423                                                struct addr_location *al,
424                                                bool sample_self)
425 {
426         struct rb_node **p;
427         struct rb_node *parent = NULL;
428         struct hist_entry *he;
429         int64_t cmp;
430         u64 period = entry->stat.period;
431         u64 weight = entry->stat.weight;
432
433         p = &hists->entries_in->rb_node;
434
435         while (*p != NULL) {
436                 parent = *p;
437                 he = rb_entry(parent, struct hist_entry, rb_node_in);
438
439                 /*
440                  * Make sure that it receives arguments in the same order as
441                  * hist_entry__collapse() so that we can use an appropriate
442                  * function when searching an entry regardless of which sort
443                  * keys were used.
444                  */
445                 cmp = hist_entry__cmp(he, entry);
446
447                 if (!cmp) {
448                         if (sample_self) {
449                                 he_stat__add_period(&he->stat, period, weight);
450                                 hist_entry__add_callchain_period(he, period);
451                         }
452                         if (symbol_conf.cumulate_callchain)
453                                 he_stat__add_period(he->stat_acc, period, weight);
454
455                         /*
456                          * This mem info was allocated from sample__resolve_mem
457                          * and will not be used anymore.
458                          */
459                         zfree(&entry->mem_info);
460
461                         /* If the map of an existing hist_entry has
462                          * become out-of-date due to an exec() or
463                          * similar, update it.  Otherwise we will
464                          * mis-adjust symbol addresses when computing
465                          * the history counter to increment.
466                          */
467                         if (he->ms.map != entry->ms.map) {
468                                 map__put(he->ms.map);
469                                 he->ms.map = map__get(entry->ms.map);
470                         }
471                         goto out;
472                 }
473
474                 if (cmp < 0)
475                         p = &(*p)->rb_left;
476                 else
477                         p = &(*p)->rb_right;
478         }
479
480         he = hist_entry__new(entry, sample_self);
481         if (!he)
482                 return NULL;
483
484         if (sample_self)
485                 hist_entry__add_callchain_period(he, period);
486         hists->nr_entries++;
487
488         rb_link_node(&he->rb_node_in, parent, p);
489         rb_insert_color(&he->rb_node_in, hists->entries_in);
490 out:
491         if (sample_self)
492                 he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
493         if (symbol_conf.cumulate_callchain)
494                 he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
495         return he;
496 }
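/*
 * Editor's note (illustrative, not part of the original source): this
 * function and the collapse/lookup paths all descend their rbtrees with
 * the existing node as the left operand of the comparison, e.g.
 *
 *	cmp = hist_entry__cmp(he, entry);	// here
 *	cmp = hist_entry__collapse(iter, he);	// collapse stage and lookup
 *
 * so "cmp < 0" consistently means "go left" in every tree, and searches
 * such as hists__find_entry() keep working no matter which sort keys are
 * active.
 */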
497
498 struct hist_entry *__hists__add_entry(struct hists *hists,
499                                       struct addr_location *al,
500                                       struct symbol *sym_parent,
501                                       struct branch_info *bi,
502                                       struct mem_info *mi,
503                                       struct perf_sample *sample,
504                                       bool sample_self)
505 {
506         struct hist_entry entry = {
507                 .thread = al->thread,
508                 .comm = thread__comm(al->thread),
509                 .ms = {
510                         .map    = al->map,
511                         .sym    = al->sym,
512                 },
513                 .socket  = al->socket,
514                 .cpu     = al->cpu,
515                 .cpumode = al->cpumode,
516                 .ip      = al->addr,
517                 .level   = al->level,
518                 .stat = {
519                         .nr_events = 1,
520                         .period = sample->period,
521                         .weight = sample->weight,
522                 },
523                 .parent = sym_parent,
524                 .filtered = symbol__parent_filter(sym_parent) | al->filtered,
525                 .hists  = hists,
526                 .branch_info = bi,
527                 .mem_info = mi,
528                 .transaction = sample->transaction,
529                 .raw_data = sample->raw_data,
530                 .raw_size = sample->raw_size,
531         };
532
533         return hists__findnew_entry(hists, &entry, al, sample_self);
534 }
535
536 static int
537 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
538                     struct addr_location *al __maybe_unused)
539 {
540         return 0;
541 }
542
543 static int
544 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
545                         struct addr_location *al __maybe_unused)
546 {
547         return 0;
548 }
549
550 static int
551 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
552 {
553         struct perf_sample *sample = iter->sample;
554         struct mem_info *mi;
555
556         mi = sample__resolve_mem(sample, al);
557         if (mi == NULL)
558                 return -ENOMEM;
559
560         iter->priv = mi;
561         return 0;
562 }
563
564 static int
565 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
566 {
567         u64 cost;
568         struct mem_info *mi = iter->priv;
569         struct hists *hists = evsel__hists(iter->evsel);
570         struct perf_sample *sample = iter->sample;
571         struct hist_entry *he;
572
573         if (mi == NULL)
574                 return -EINVAL;
575
576         cost = sample->weight;
577         if (!cost)
578                 cost = 1;
579
580         /*
581          * must pass period=weight in order to get the correct
582          * sorting from hists__collapse_resort() which is solely
583          * based on periods. We want sorting to be done on nr_events * weight
584          * and this is indirectly achieved by passing period=weight here
585          * and the he_stat__add_period() function.
586          */
587         sample->period = cost;
588
589         he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
590                                 sample, true);
591         if (!he)
592                 return -ENOMEM;
593
594         iter->he = he;
595         return 0;
596 }
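/*
 * Editor's note, worked example (not part of the original source): with
 * period = weight, three 'perf mem' samples that hit the same sort keys
 * with weights 40, 10 and 30 accumulate into one entry whose period is 80,
 * so the final sort ranks entries by total memory access cost rather than
 * by raw sample count.
 */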
597
598 static int
599 iter_finish_mem_entry(struct hist_entry_iter *iter,
600                       struct addr_location *al __maybe_unused)
601 {
602         struct perf_evsel *evsel = iter->evsel;
603         struct hists *hists = evsel__hists(evsel);
604         struct hist_entry *he = iter->he;
605         int err = -EINVAL;
606
607         if (he == NULL)
608                 goto out;
609
610         hists__inc_nr_samples(hists, he->filtered);
611
612         err = hist_entry__append_callchain(he, iter->sample);
613
614 out:
615         /*
616          * We don't need to free iter->priv (mem_info) here since the mem info
617          * was either already freed in hists__findnew_entry() or passed to a
618          * new hist entry by hist_entry__new().
619          */
620         iter->priv = NULL;
621
622         iter->he = NULL;
623         return err;
624 }
625
626 static int
627 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
628 {
629         struct branch_info *bi;
630         struct perf_sample *sample = iter->sample;
631
632         bi = sample__resolve_bstack(sample, al);
633         if (!bi)
634                 return -ENOMEM;
635
636         iter->curr = 0;
637         iter->total = sample->branch_stack->nr;
638
639         iter->priv = bi;
640         return 0;
641 }
642
643 static int
644 iter_add_single_branch_entry(struct hist_entry_iter *iter,
645                              struct addr_location *al __maybe_unused)
646 {
647         /* to avoid calling the callback function */
648         iter->he = NULL;
649
650         return 0;
651 }
652
653 static int
654 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
655 {
656         struct branch_info *bi = iter->priv;
657         int i = iter->curr;
658
659         if (bi == NULL)
660                 return 0;
661
662         if (iter->curr >= iter->total)
663                 return 0;
664
665         al->map = bi[i].to.map;
666         al->sym = bi[i].to.sym;
667         al->addr = bi[i].to.addr;
668         return 1;
669 }
670
671 static int
672 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
673 {
674         struct branch_info *bi;
675         struct perf_evsel *evsel = iter->evsel;
676         struct hists *hists = evsel__hists(evsel);
677         struct perf_sample *sample = iter->sample;
678         struct hist_entry *he = NULL;
679         int i = iter->curr;
680         int err = 0;
681
682         bi = iter->priv;
683
684         if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
685                 goto out;
686
687         /*
688          * The report shows the percentage of total branches captured
689          * and not events sampled. Thus we use a pseudo period of 1.
690          */
691         sample->period = 1;
692         sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
693
694         he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
695                                 sample, true);
696         if (he == NULL)
697                 return -ENOMEM;
698
699         hists__inc_nr_samples(hists, he->filtered);
700
701 out:
702         iter->he = he;
703         iter->curr++;
704         return err;
705 }
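/*
 * Editor's note (illustrative, not part of the original source): because
 * every branch entry is added with a pseudo period of 1, a sample whose
 * branch stack holds 16 entries contributes 16 units of period spread
 * over its from/to pairs, and the reported percentages describe the share
 * of captured branches rather than the share of sampled events.
 */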
706
707 static int
708 iter_finish_branch_entry(struct hist_entry_iter *iter,
709                          struct addr_location *al __maybe_unused)
710 {
711         zfree(&iter->priv);
712         iter->he = NULL;
713
714         return iter->curr >= iter->total ? 0 : -1;
715 }
716
717 static int
718 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
719                           struct addr_location *al __maybe_unused)
720 {
721         return 0;
722 }
723
724 static int
725 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
726 {
727         struct perf_evsel *evsel = iter->evsel;
728         struct perf_sample *sample = iter->sample;
729         struct hist_entry *he;
730
731         he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
732                                 sample, true);
733         if (he == NULL)
734                 return -ENOMEM;
735
736         iter->he = he;
737         return 0;
738 }
739
740 static int
741 iter_finish_normal_entry(struct hist_entry_iter *iter,
742                          struct addr_location *al __maybe_unused)
743 {
744         struct hist_entry *he = iter->he;
745         struct perf_evsel *evsel = iter->evsel;
746         struct perf_sample *sample = iter->sample;
747
748         if (he == NULL)
749                 return 0;
750
751         iter->he = NULL;
752
753         hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
754
755         return hist_entry__append_callchain(he, sample);
756 }
757
758 static int
759 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
760                               struct addr_location *al __maybe_unused)
761 {
762         struct hist_entry **he_cache;
763
764         callchain_cursor_commit(&callchain_cursor);
765
766         /*
767          * This is for detecting cycles or recursion so that they're
768          * cumulated only once, preventing entries from exceeding 100%
769          * overhead.
770          */
771         he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
772         if (he_cache == NULL)
773                 return -ENOMEM;
774
775         iter->priv = he_cache;
776         iter->curr = 0;
777
778         return 0;
779 }
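/*
 * Editor's note, illustrative scenario (not part of the original source):
 * for a callchain like main -> f -> g -> f, the he_cache allocated above
 * lets iter_add_next_cumulative_entry() notice that an entry for "f" was
 * already accumulated for this sample and skip the second occurrence, so
 * recursion cannot push a function's cumulative overhead past 100%.
 */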
780
781 static int
782 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
783                                  struct addr_location *al)
784 {
785         struct perf_evsel *evsel = iter->evsel;
786         struct hists *hists = evsel__hists(evsel);
787         struct perf_sample *sample = iter->sample;
788         struct hist_entry **he_cache = iter->priv;
789         struct hist_entry *he;
790         int err = 0;
791
792         he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
793                                 sample, true);
794         if (he == NULL)
795                 return -ENOMEM;
796
797         iter->he = he;
798         he_cache[iter->curr++] = he;
799
800         hist_entry__append_callchain(he, sample);
801
802         /*
803          * We need to re-initialize the cursor since callchain_append()
804          * advanced the cursor to the end.
805          */
806         callchain_cursor_commit(&callchain_cursor);
807
808         hists__inc_nr_samples(hists, he->filtered);
809
810         return err;
811 }
812
813 static int
814 iter_next_cumulative_entry(struct hist_entry_iter *iter,
815                            struct addr_location *al)
816 {
817         struct callchain_cursor_node *node;
818
819         node = callchain_cursor_current(&callchain_cursor);
820         if (node == NULL)
821                 return 0;
822
823         return fill_callchain_info(al, node, iter->hide_unresolved);
824 }
825
826 static int
827 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
828                                struct addr_location *al)
829 {
830         struct perf_evsel *evsel = iter->evsel;
831         struct perf_sample *sample = iter->sample;
832         struct hist_entry **he_cache = iter->priv;
833         struct hist_entry *he;
834         struct hist_entry he_tmp = {
835                 .hists = evsel__hists(evsel),
836                 .cpu = al->cpu,
837                 .thread = al->thread,
838                 .comm = thread__comm(al->thread),
839                 .ip = al->addr,
840                 .ms = {
841                         .map = al->map,
842                         .sym = al->sym,
843                 },
844                 .parent = iter->parent,
845                 .raw_data = sample->raw_data,
846                 .raw_size = sample->raw_size,
847         };
848         int i;
849         struct callchain_cursor cursor;
850
851         callchain_cursor_snapshot(&cursor, &callchain_cursor);
852
853         callchain_cursor_advance(&callchain_cursor);
854
855         /*
856          * Check if there are duplicate entries in the callchain.
857          * It's possible that it has cycles or recursive calls.
858          */
859         for (i = 0; i < iter->curr; i++) {
860                 if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
861                         /* to avoid calling the callback function */
862                         iter->he = NULL;
863                         return 0;
864                 }
865         }
866
867         he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
868                                 sample, false);
869         if (he == NULL)
870                 return -ENOMEM;
871
872         iter->he = he;
873         he_cache[iter->curr++] = he;
874
875         if (symbol_conf.use_callchain)
876                 callchain_append(he->callchain, &cursor, sample->period);
877         return 0;
878 }
879
880 static int
881 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
882                              struct addr_location *al __maybe_unused)
883 {
884         zfree(&iter->priv);
885         iter->he = NULL;
886
887         return 0;
888 }
889
890 const struct hist_iter_ops hist_iter_mem = {
891         .prepare_entry          = iter_prepare_mem_entry,
892         .add_single_entry       = iter_add_single_mem_entry,
893         .next_entry             = iter_next_nop_entry,
894         .add_next_entry         = iter_add_next_nop_entry,
895         .finish_entry           = iter_finish_mem_entry,
896 };
897
898 const struct hist_iter_ops hist_iter_branch = {
899         .prepare_entry          = iter_prepare_branch_entry,
900         .add_single_entry       = iter_add_single_branch_entry,
901         .next_entry             = iter_next_branch_entry,
902         .add_next_entry         = iter_add_next_branch_entry,
903         .finish_entry           = iter_finish_branch_entry,
904 };
905
906 const struct hist_iter_ops hist_iter_normal = {
907         .prepare_entry          = iter_prepare_normal_entry,
908         .add_single_entry       = iter_add_single_normal_entry,
909         .next_entry             = iter_next_nop_entry,
910         .add_next_entry         = iter_add_next_nop_entry,
911         .finish_entry           = iter_finish_normal_entry,
912 };
913
914 const struct hist_iter_ops hist_iter_cumulative = {
915         .prepare_entry          = iter_prepare_cumulative_entry,
916         .add_single_entry       = iter_add_single_cumulative_entry,
917         .next_entry             = iter_next_cumulative_entry,
918         .add_next_entry         = iter_add_next_cumulative_entry,
919         .finish_entry           = iter_finish_cumulative_entry,
920 };
921
922 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
923                          int max_stack_depth, void *arg)
924 {
925         int err, err2;
926
927         err = sample__resolve_callchain(iter->sample, &iter->parent,
928                                         iter->evsel, al, max_stack_depth);
929         if (err)
930                 return err;
931
932         iter->max_stack = max_stack_depth;
933
934         err = iter->ops->prepare_entry(iter, al);
935         if (err)
936                 goto out;
937
938         err = iter->ops->add_single_entry(iter, al);
939         if (err)
940                 goto out;
941
942         if (iter->he && iter->add_entry_cb) {
943                 err = iter->add_entry_cb(iter, al, true, arg);
944                 if (err)
945                         goto out;
946         }
947
948         while (iter->ops->next_entry(iter, al)) {
949                 err = iter->ops->add_next_entry(iter, al);
950                 if (err)
951                         break;
952
953                 if (iter->he && iter->add_entry_cb) {
954                         err = iter->add_entry_cb(iter, al, false, arg);
955                         if (err)
956                                 goto out;
957                 }
958         }
959
960 out:
961         err2 = iter->ops->finish_entry(iter, al);
962         if (!err)
963                 err = err2;
964
965         return err;
966 }
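/*
 * Editor's note, usage sketch (not part of the original source; caller and
 * callback names are illustrative): a sample-processing tool typically
 * fills in the iterator, picks one of the ops tables above, and lets this
 * function walk the sample, e.g.
 *
 *	struct hist_entry_iter iter = {
 *		.evsel        = evsel,
 *		.sample       = sample,
 *		.ops          = &hist_iter_normal,	// or _branch/_mem/_cumulative
 *		.add_entry_cb = my_report_callback,	// hypothetical callback
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, cb_arg);
 *
 * finish_entry() is always called, so iter->priv is released even when an
 * earlier step fails.
 */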
967
968 int64_t
969 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
970 {
971         struct hists *hists = left->hists;
972         struct perf_hpp_fmt *fmt;
973         int64_t cmp = 0;
974
975         hists__for_each_sort_list(hists, fmt) {
976                 cmp = fmt->cmp(fmt, left, right);
977                 if (cmp)
978                         break;
979         }
980
981         return cmp;
982 }
983
984 int64_t
985 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
986 {
987         struct hists *hists = left->hists;
988         struct perf_hpp_fmt *fmt;
989         int64_t cmp = 0;
990
991         hists__for_each_sort_list(hists, fmt) {
992                 cmp = fmt->collapse(fmt, left, right);
993                 if (cmp)
994                         break;
995         }
996
997         return cmp;
998 }
999
1000 void hist_entry__delete(struct hist_entry *he)
1001 {
1002         thread__zput(he->thread);
1003         map__zput(he->ms.map);
1004
1005         if (he->branch_info) {
1006                 map__zput(he->branch_info->from.map);
1007                 map__zput(he->branch_info->to.map);
1008                 zfree(&he->branch_info);
1009         }
1010
1011         if (he->mem_info) {
1012                 map__zput(he->mem_info->iaddr.map);
1013                 map__zput(he->mem_info->daddr.map);
1014                 zfree(&he->mem_info);
1015         }
1016
1017         zfree(&he->stat_acc);
1018         free_srcline(he->srcline);
1019         if (he->srcfile && he->srcfile[0])
1020                 free(he->srcfile);
1021         free_callchain(he->callchain);
1022         free(he->trace_output);
1023         free(he->raw_data);
1024         free(he);
1025 }
1026
1027 /*
1028  * If this is not the last column, then we need to pad it according to the
1029  * pre-calculated max length for this column, otherwise don't bother adding
1030  * spaces because that would break viewing this with, for instance, 'less',
1031  * which would show tons of trailing spaces when a long C++ demangled method
1032  * name is sampled.
1033  */
1034 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1035                                    struct perf_hpp_fmt *fmt, int printed)
1036 {
1037         if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1038                 const int width = fmt->width(fmt, hpp, hists_to_evsel(he->hists));
1039                 if (printed < width) {
1040                         advance_hpp(hpp, printed);
1041                         printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1042                 }
1043         }
1044
1045         return printed;
1046 }
1047
1048 /*
1049  * collapse the histogram
1050  */
1051
1052 int hists__collapse_insert_entry(struct hists *hists, struct rb_root *root,
1053                                  struct hist_entry *he)
1054 {
1055         struct rb_node **p = &root->rb_node;
1056         struct rb_node *parent = NULL;
1057         struct hist_entry *iter;
1058         int64_t cmp;
1059
1060         while (*p != NULL) {
1061                 parent = *p;
1062                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1063
1064                 cmp = hist_entry__collapse(iter, he);
1065
1066                 if (!cmp) {
1067                         int ret = 0;
1068
1069                         he_stat__add_stat(&iter->stat, &he->stat);
1070                         if (symbol_conf.cumulate_callchain)
1071                                 he_stat__add_stat(iter->stat_acc, he->stat_acc);
1072
1073                         if (symbol_conf.use_callchain) {
1074                                 callchain_cursor_reset(&callchain_cursor);
1075                                 if (callchain_merge(&callchain_cursor,
1076                                                     iter->callchain,
1077                                                     he->callchain) < 0)
1078                                         ret = -1;
1079                         }
1080                         hist_entry__delete(he);
1081                         return ret;
1082                 }
1083
1084                 if (cmp < 0)
1085                         p = &(*p)->rb_left;
1086                 else
1087                         p = &(*p)->rb_right;
1088         }
1089         hists->nr_entries++;
1090
1091         rb_link_node(&he->rb_node_in, parent, p);
1092         rb_insert_color(&he->rb_node_in, root);
1093         return 1;
1094 }
1095
1096 struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
1097 {
1098         struct rb_root *root;
1099
1100         pthread_mutex_lock(&hists->lock);
1101
1102         root = hists->entries_in;
1103         if (++hists->entries_in > &hists->entries_in_array[1])
1104                 hists->entries_in = &hists->entries_in_array[0];
1105
1106         pthread_mutex_unlock(&hists->lock);
1107
1108         return root;
1109 }
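/*
 * Editor's note (illustrative, not part of the original source): hists
 * double-buffers its input tree.  entries_in always points at one of the
 * two roots in entries_in_array[]; this helper hands the currently filled
 * root to the collapse pass and flips new insertions over to the other
 * root, so a live tool such as 'perf top' can keep adding samples while
 * the snapshot is being resorted.
 */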
1110
1111 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1112 {
1113         hists__filter_entry_by_dso(hists, he);
1114         hists__filter_entry_by_thread(hists, he);
1115         hists__filter_entry_by_symbol(hists, he);
1116         hists__filter_entry_by_socket(hists, he);
1117 }
1118
1119 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1120 {
1121         struct rb_root *root;
1122         struct rb_node *next;
1123         struct hist_entry *n;
1124         int ret;
1125
1126         if (!sort__need_collapse)
1127                 return 0;
1128
1129         hists->nr_entries = 0;
1130
1131         root = hists__get_rotate_entries_in(hists);
1132
1133         next = rb_first(root);
1134
1135         while (next) {
1136                 if (session_done())
1137                         break;
1138                 n = rb_entry(next, struct hist_entry, rb_node_in);
1139                 next = rb_next(&n->rb_node_in);
1140
1141                 rb_erase(&n->rb_node_in, root);
1142                 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1143                 if (ret < 0)
1144                         return -1;
1145
1146                 if (ret) {
1147                         /*
1148                          * If it wasn't combined with one of the entries already
1149                          * collapsed, we need to apply the filters that may have
1150                          * been set by, say, the hist_browser.
1151                          */
1152                         hists__apply_filters(hists, n);
1153                 }
1154                 if (prog)
1155                         ui_progress__update(prog, 1);
1156         }
1157         return 0;
1158 }
1159
1160 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1161 {
1162         struct hists *hists = a->hists;
1163         struct perf_hpp_fmt *fmt;
1164         int64_t cmp = 0;
1165
1166         hists__for_each_sort_list(hists, fmt) {
1167                 if (perf_hpp__should_skip(fmt, a->hists))
1168                         continue;
1169
1170                 cmp = fmt->sort(fmt, a, b);
1171                 if (cmp)
1172                         break;
1173         }
1174
1175         return cmp;
1176 }
1177
1178 static void hists__reset_filter_stats(struct hists *hists)
1179 {
1180         hists->nr_non_filtered_entries = 0;
1181         hists->stats.total_non_filtered_period = 0;
1182 }
1183
1184 void hists__reset_stats(struct hists *hists)
1185 {
1186         hists->nr_entries = 0;
1187         hists->stats.total_period = 0;
1188
1189         hists__reset_filter_stats(hists);
1190 }
1191
1192 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1193 {
1194         hists->nr_non_filtered_entries++;
1195         hists->stats.total_non_filtered_period += h->stat.period;
1196 }
1197
1198 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1199 {
1200         if (!h->filtered)
1201                 hists__inc_filter_stats(hists, h);
1202
1203         hists->nr_entries++;
1204         hists->stats.total_period += h->stat.period;
1205 }
1206
1207 static void __hists__insert_output_entry(struct rb_root *entries,
1208                                          struct hist_entry *he,
1209                                          u64 min_callchain_hits,
1210                                          bool use_callchain)
1211 {
1212         struct rb_node **p = &entries->rb_node;
1213         struct rb_node *parent = NULL;
1214         struct hist_entry *iter;
1215
1216         if (use_callchain) {
1217                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1218                         u64 total = he->stat.period;
1219
1220                         if (symbol_conf.cumulate_callchain)
1221                                 total = he->stat_acc->period;
1222
1223                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1224                 }
1225                 callchain_param.sort(&he->sorted_chain, he->callchain,
1226                                       min_callchain_hits, &callchain_param);
1227         }
1228
1229         while (*p != NULL) {
1230                 parent = *p;
1231                 iter = rb_entry(parent, struct hist_entry, rb_node);
1232
1233                 if (hist_entry__sort(he, iter) > 0)
1234                         p = &(*p)->rb_left;
1235                 else
1236                         p = &(*p)->rb_right;
1237         }
1238
1239         rb_link_node(&he->rb_node, parent, p);
1240         rb_insert_color(&he->rb_node, entries);
1241 }
1242
1243 static void output_resort(struct hists *hists, struct ui_progress *prog,
1244                           bool use_callchain)
1245 {
1246         struct rb_root *root;
1247         struct rb_node *next;
1248         struct hist_entry *n;
1249         u64 callchain_total;
1250         u64 min_callchain_hits;
1251
1252         callchain_total = hists->callchain_period;
1253         if (symbol_conf.filter_relative)
1254                 callchain_total = hists->callchain_non_filtered_period;
1255
1256         min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1257
1258         if (sort__need_collapse)
1259                 root = &hists->entries_collapsed;
1260         else
1261                 root = hists->entries_in;
1262
1263         next = rb_first(root);
1264         hists->entries = RB_ROOT;
1265
1266         hists__reset_stats(hists);
1267         hists__reset_col_len(hists);
1268
1269         while (next) {
1270                 n = rb_entry(next, struct hist_entry, rb_node_in);
1271                 next = rb_next(&n->rb_node_in);
1272
1273                 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1274                 hists__inc_stats(hists, n);
1275
1276                 if (!n->filtered)
1277                         hists__calc_col_len(hists, n);
1278
1279                 if (prog)
1280                         ui_progress__update(prog, 1);
1281         }
1282 }
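/*
 * Editor's note, worked example (not part of the original source): with
 * callchain_param.min_percent = 0.5, an entry handled in CHAIN_GRAPH_REL
 * mode with a (cumulated) period of 200,000 gets min_callchain_hits
 * recomputed to 1,000 in __hists__insert_output_entry(), so callchain
 * branches accounting for less than 0.5% of that entry are folded away;
 * in the other modes the threshold computed above from the hists-wide
 * callchain period is applied to every entry.
 */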
1283
1284 void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
1285 {
1286         bool use_callchain;
1287
1288         if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1289                 use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1290         else
1291                 use_callchain = symbol_conf.use_callchain;
1292
1293         output_resort(evsel__hists(evsel), prog, use_callchain);
1294 }
1295
1296 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1297 {
1298         output_resort(hists, prog, symbol_conf.use_callchain);
1299 }
1300
1301 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1302                                        enum hist_filter filter)
1303 {
1304         h->filtered &= ~(1 << filter);
1305         if (h->filtered)
1306                 return;
1307
1308         /* force fold unfiltered entry for simplicity */
1309         h->unfolded = false;
1310         h->row_offset = 0;
1311         h->nr_rows = 0;
1312
1313         hists->stats.nr_non_filtered_samples += h->stat.nr_events;
1314
1315         hists__inc_filter_stats(hists, h);
1316         hists__calc_col_len(hists, h);
1317 }
1318
1319
1320 static bool hists__filter_entry_by_dso(struct hists *hists,
1321                                        struct hist_entry *he)
1322 {
1323         if (hists->dso_filter != NULL &&
1324             (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1325                 he->filtered |= (1 << HIST_FILTER__DSO);
1326                 return true;
1327         }
1328
1329         return false;
1330 }
1331
1332 static bool hists__filter_entry_by_thread(struct hists *hists,
1333                                           struct hist_entry *he)
1334 {
1335         if (hists->thread_filter != NULL &&
1336             he->thread != hists->thread_filter) {
1337                 he->filtered |= (1 << HIST_FILTER__THREAD);
1338                 return true;
1339         }
1340
1341         return false;
1342 }
1343
1344 static bool hists__filter_entry_by_symbol(struct hists *hists,
1345                                           struct hist_entry *he)
1346 {
1347         if (hists->symbol_filter_str != NULL &&
1348             (!he->ms.sym || strstr(he->ms.sym->name,
1349                                    hists->symbol_filter_str) == NULL)) {
1350                 he->filtered |= (1 << HIST_FILTER__SYMBOL);
1351                 return true;
1352         }
1353
1354         return false;
1355 }
1356
1357 static bool hists__filter_entry_by_socket(struct hists *hists,
1358                                           struct hist_entry *he)
1359 {
1360         if ((hists->socket_filter > -1) &&
1361             (he->socket != hists->socket_filter)) {
1362                 he->filtered |= (1 << HIST_FILTER__SOCKET);
1363                 return true;
1364         }
1365
1366         return false;
1367 }
1368
1369 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
1370
1371 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
1372 {
1373         struct rb_node *nd;
1374
1375         hists->stats.nr_non_filtered_samples = 0;
1376
1377         hists__reset_filter_stats(hists);
1378         hists__reset_col_len(hists);
1379
1380         for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1381                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1382
1383                 if (filter(hists, h))
1384                         continue;
1385
1386                 hists__remove_entry_filter(hists, h, type);
1387         }
1388 }
1389
1390 void hists__filter_by_thread(struct hists *hists)
1391 {
1392         hists__filter_by_type(hists, HIST_FILTER__THREAD,
1393                               hists__filter_entry_by_thread);
1394 }
1395
1396 void hists__filter_by_dso(struct hists *hists)
1397 {
1398         hists__filter_by_type(hists, HIST_FILTER__DSO,
1399                               hists__filter_entry_by_dso);
1400 }
1401
1402 void hists__filter_by_symbol(struct hists *hists)
1403 {
1404         hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
1405                               hists__filter_entry_by_symbol);
1406 }
1407
1408 void hists__filter_by_socket(struct hists *hists)
1409 {
1410         hists__filter_by_type(hists, HIST_FILTER__SOCKET,
1411                               hists__filter_entry_by_socket);
1412 }
1413
1414 void events_stats__inc(struct events_stats *stats, u32 type)
1415 {
1416         ++stats->nr_events[0];
1417         ++stats->nr_events[type];
1418 }
1419
1420 void hists__inc_nr_events(struct hists *hists, u32 type)
1421 {
1422         events_stats__inc(&hists->stats, type);
1423 }
1424
1425 void hists__inc_nr_samples(struct hists *hists, bool filtered)
1426 {
1427         events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
1428         if (!filtered)
1429                 hists->stats.nr_non_filtered_samples++;
1430 }
1431
1432 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
1433                                                  struct hist_entry *pair)
1434 {
1435         struct rb_root *root;
1436         struct rb_node **p;
1437         struct rb_node *parent = NULL;
1438         struct hist_entry *he;
1439         int64_t cmp;
1440
1441         if (sort__need_collapse)
1442                 root = &hists->entries_collapsed;
1443         else
1444                 root = hists->entries_in;
1445
1446         p = &root->rb_node;
1447
1448         while (*p != NULL) {
1449                 parent = *p;
1450                 he = rb_entry(parent, struct hist_entry, rb_node_in);
1451
1452                 cmp = hist_entry__collapse(he, pair);
1453
1454                 if (!cmp)
1455                         goto out;
1456
1457                 if (cmp < 0)
1458                         p = &(*p)->rb_left;
1459                 else
1460                         p = &(*p)->rb_right;
1461         }
1462
1463         he = hist_entry__new(pair, true);
1464         if (he) {
1465                 memset(&he->stat, 0, sizeof(he->stat));
1466                 he->hists = hists;
1467                 rb_link_node(&he->rb_node_in, parent, p);
1468                 rb_insert_color(&he->rb_node_in, root);
1469                 hists__inc_stats(hists, he);
1470                 he->dummy = true;
1471         }
1472 out:
1473         return he;
1474 }
1475
1476 static struct hist_entry *hists__find_entry(struct hists *hists,
1477                                             struct hist_entry *he)
1478 {
1479         struct rb_node *n;
1480
1481         if (sort__need_collapse)
1482                 n = hists->entries_collapsed.rb_node;
1483         else
1484                 n = hists->entries_in->rb_node;
1485
1486         while (n) {
1487                 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
1488                 int64_t cmp = hist_entry__collapse(iter, he);
1489
1490                 if (cmp < 0)
1491                         n = n->rb_left;
1492                 else if (cmp > 0)
1493                         n = n->rb_right;
1494                 else
1495                         return iter;
1496         }
1497
1498         return NULL;
1499 }
1500
1501 /*
1502  * Look for pairs to link to the leader buckets (hist_entries):
1503  */
1504 void hists__match(struct hists *leader, struct hists *other)
1505 {
1506         struct rb_root *root;
1507         struct rb_node *nd;
1508         struct hist_entry *pos, *pair;
1509
1510         if (sort__need_collapse)
1511                 root = &leader->entries_collapsed;
1512         else
1513                 root = leader->entries_in;
1514
1515         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
1516                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
1517                 pair = hists__find_entry(other, pos);
1518
1519                 if (pair)
1520                         hist_entry__add_pair(pair, pos);
1521         }
1522 }
1523
1524 /*
1525  * Look for entries in the other hists that are not present in the leader, if
1526  * we find them, just add a dummy entry on the leader hists, with period=0,
1527  * nr_events=0, to serve as the list header.
1528  */
1529 int hists__link(struct hists *leader, struct hists *other)
1530 {
1531         struct rb_root *root;
1532         struct rb_node *nd;
1533         struct hist_entry *pos, *pair;
1534
1535         if (sort__need_collapse)
1536                 root = &other->entries_collapsed;
1537         else
1538                 root = other->entries_in;
1539
1540         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
1541                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
1542
1543                 if (!hist_entry__has_pairs(pos)) {
1544                         pair = hists__add_dummy_entry(leader, pos);
1545                         if (pair == NULL)
1546                                 return -1;
1547                         hist_entry__add_pair(pos, pair);
1548                 }
1549         }
1550
1551         return 0;
1552 }
1553
1554 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
1555                           struct perf_sample *sample, bool nonany_branch_mode)
1556 {
1557         struct branch_info *bi;
1558
1559         /* If we have branch cycles always annotate them. */
1560         if (bs && bs->nr && bs->entries[0].flags.cycles) {
1561                 int i;
1562
1563                 bi = sample__resolve_bstack(sample, al);
1564                 if (bi) {
1565                         struct addr_map_symbol *prev = NULL;
1566
1567                         /*
1568                          * Ignore errors, still want to process the
1569                          * other entries.
1570                          *
1571                          * For non-standard branch modes always
1572                          * force no IPC (prev == NULL)
1573                          *
1574                          * Note that perf stores branches reversed from
1575                          * program order!
1576                          */
1577                         for (i = bs->nr - 1; i >= 0; i--) {
1578                                 addr_map_symbol__account_cycles(&bi[i].from,
1579                                         nonany_branch_mode ? NULL : prev,
1580                                         bi[i].flags.cycles);
1581                                 prev = &bi[i].to;
1582                         }
1583                         free(bi);
1584                 }
1585         }
1586 }
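/*
 * Editor's note, illustrative example (not part of the original source):
 * the branch stack is stored newest-first, so the loop above walks it from
 * bs->nr - 1 down to 0 to recover program order.  For two recorded
 * branches A->B followed by C->D, bi[1] is A->B and bi[0] is C->D; when
 * C->D is accounted, prev points at B, letting the annotation code charge
 * the cycle count to the straight-line block executed from B up to the
 * branch at C (or to nothing when non-any branch mode forces prev NULL).
 */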
1587
1588 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
1589 {
1590         struct perf_evsel *pos;
1591         size_t ret = 0;
1592
1593         evlist__for_each(evlist, pos) {
1594                 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
1595                 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
1596         }
1597
1598         return ret;
1599 }
1600
1601
1602 u64 hists__total_period(struct hists *hists)
1603 {
1604         return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
1605                 hists->stats.total_period;
1606 }
1607
1608 int parse_filter_percentage(const struct option *opt __maybe_unused,
1609                             const char *arg, int unset __maybe_unused)
1610 {
1611         if (!strcmp(arg, "relative"))
1612                 symbol_conf.filter_relative = true;
1613         else if (!strcmp(arg, "absolute"))
1614                 symbol_conf.filter_relative = false;
1615         else
1616                 return -1;
1617
1618         return 0;
1619 }
1620
1621 int perf_hist_config(const char *var, const char *value)
1622 {
1623         if (!strcmp(var, "hist.percentage"))
1624                 return parse_filter_percentage(NULL, value, 0);
1625
1626         return 0;
1627 }
1628
1629 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
1630 {
1631         memset(hists, 0, sizeof(*hists));
1632         hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
1633         hists->entries_in = &hists->entries_in_array[0];
1634         hists->entries_collapsed = RB_ROOT;
1635         hists->entries = RB_ROOT;
1636         pthread_mutex_init(&hists->lock, NULL);
1637         hists->socket_filter = -1;
1638         hists->hpp_list = hpp_list;
1639         return 0;
1640 }
1641
1642 static void hists__delete_remaining_entries(struct rb_root *root)
1643 {
1644         struct rb_node *node;
1645         struct hist_entry *he;
1646
1647         while (!RB_EMPTY_ROOT(root)) {
1648                 node = rb_first(root);
1649                 rb_erase(node, root);
1650
1651                 he = rb_entry(node, struct hist_entry, rb_node_in);
1652                 hist_entry__delete(he);
1653         }
1654 }
1655
1656 static void hists__delete_all_entries(struct hists *hists)
1657 {
1658         hists__delete_entries(hists);
1659         hists__delete_remaining_entries(&hists->entries_in_array[0]);
1660         hists__delete_remaining_entries(&hists->entries_in_array[1]);
1661         hists__delete_remaining_entries(&hists->entries_collapsed);
1662 }
1663
1664 static void hists_evsel__exit(struct perf_evsel *evsel)
1665 {
1666         struct hists *hists = evsel__hists(evsel);
1667
1668         hists__delete_all_entries(hists);
1669 }
1670
1671 static int hists_evsel__init(struct perf_evsel *evsel)
1672 {
1673         struct hists *hists = evsel__hists(evsel);
1674
1675         __hists__init(hists, &perf_hpp_list);
1676         return 0;
1677 }
1678
1679 /*
1680  * The hist_entries stored in the rbtrees are freed by hists_evsel__exit()
1681  * above, registered as the evsel destructor via perf_evsel__object_config().
1682  */
1683
1684 int hists__init(void)
1685 {
1686         int err = perf_evsel__object_config(sizeof(struct hists_evsel),
1687                                             hists_evsel__init,
1688                                             hists_evsel__exit);
1689         if (err)
1690                 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
1691
1692         return err;
1693 }
1694
1695 void perf_hpp_list__init(struct perf_hpp_list *list)
1696 {
1697         INIT_LIST_HEAD(&list->fields);
1698         INIT_LIST_HEAD(&list->sorts);
1699 }