/* tools/perf/util/hist.c */

#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

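/*
 * Grow the column widths recorded in hists->col_len so that every field
 * of @h fits.  Widths only ever grow here; hists__reset_col_len() is
 * what shrinks them back to zero before a recalculation.
 */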
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (h->mem_info->iaddr.sym) {
                        symlen = (int)h->mem_info->iaddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->srcline)
                hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        hists->stats.total_period -= diff;
        if (!he->filtered)
                hists->stats.total_non_filtered_period -= diff;

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        rb_erase(&he->rb_node, &hists->entries);

        if (sort__need_collapse)
                rb_erase(&he->rb_node_in, &hists->entries_collapsed);
        else
                rb_erase(&he->rb_node_in, hists->entries_in);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

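/*
 * Age every entry by ~1/8th per call (see he_stat__decay()), deleting
 * those whose period decays to zero as well as, optionally, all user or
 * all kernel entries.  This is what keeps a live, top-style display
 * converging on recent samples.
 */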
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if ((zap_user && n->level == '.') ||
                    (zap_kernel && n->level != '.') ||
                    hists__decay_entry(hists, n)) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

/*
 * histogram, sorted on item, collects periods
 */

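/*
 * Allocate a hist_entry initialized from @template, taking copies of,
 * or references on, everything that must outlive the sample: maps,
 * branch info, raw data and the thread.  When callchains are in use the
 * callchain root lives in the same allocation, right past the entry.
 */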
static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        size_t callchain_size = 0;
        struct hist_entry *he;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (symbol_conf.cumulate_callchain) {
                        he->stat_acc = malloc(sizeof(he->stat));
                        if (he->stat_acc == NULL) {
                                free(he);
                                return NULL;
                        }
                        memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                        if (!sample_self)
                                memset(&he->stat, 0, sizeof(he->stat));
                }

                map__get(he->ms.map);

                if (he->branch_info) {
                        /*
                         * This branch info is part of an array allocated by
                         * sample__resolve_bstack() and will be freed after
                         * adding new entries, so we need to save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                map__zput(he->ms.map);
                                free(he->stat_acc);
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        map__get(he->branch_info->from.map);
                        map__get(he->branch_info->to.map);
                }

                if (he->mem_info) {
                        map__get(he->mem_info->iaddr.map);
                        map__get(he->mem_info->daddr.map);
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                if (he->raw_data) {
                        he->raw_data = memdup(he->raw_data, he->raw_size);

                        if (he->raw_data == NULL) {
                                map__put(he->ms.map);
                                if (he->branch_info) {
                                        map__put(he->branch_info->from.map);
                                        map__put(he->branch_info->to.map);
                                        free(he->branch_info);
                                }
                                if (he->mem_info) {
                                        map__put(he->mem_info->iaddr.map);
                                        map__put(he->mem_info->daddr.map);
                                }
                                free(he->stat_acc);
                                free(he);
                                return NULL;
                        }
                }
                INIT_LIST_HEAD(&he->pairs.node);
                thread__get(he->thread);
        }

        return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

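/*
 * Find the entry in the input tree that compares equal to @entry under
 * hist_entry__cmp() and aggregate the period into it, or link a newly
 * allocated entry at the position where the search fell off the tree.
 */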
static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse(), so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self) {
                                he_stat__add_period(&he->stat, period, weight);
                                hists->stats.total_period += period;
                                if (!he->filtered)
                                        hists->stats.total_non_filtered_period += period;
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hists__inc_stats(hists, he);
        else
                hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      struct perf_sample *sample,
                                      bool sample_self)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                        .weight = sample->weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };

        return hists__findnew_entry(hists, &entry, al, sample_self);
}

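/*
 * A hist_entry_iter turns one sample into one or more hist entries.
 * Each flavor below (mem, branch, normal, cumulative) provides a
 * hist_iter_ops vtable of prepare/add_single/next/add_next/finish
 * callbacks, driven in that order by hist_entry_iter__add().  The nop
 * helpers are for flavors that produce a single entry per sample.
 */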
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = sample->weight;
        if (!cost)
                cost = 1;

        /*
         * We must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort(), which is based solely
         * on periods.  We want sorting to be done on nr_events * weight,
         * and this is indirectly achieved by passing period=weight here
         * and via the he_stat__add_period() function.
         */
        sample->period = cost;

        he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
                                sample, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        /* to avoid calling callback function */
        iter->he = NULL;

        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->map = bi[i].to.map;
        al->sym = bi[i].to.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        sample->period = 1;
        sample->weight = bi[i].flags.cycles ? bi[i].flags.cycles : 1;

        he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                                sample, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursions, so that they're
         * accumulated only once and no entry can end up with more than
         * 100% overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
                                sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

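/*
 * Drive one sample through the ops chosen by the caller.  A minimal
 * usage sketch (the evsel/sample/al/max_stack variables are
 * illustrative, not from this file):
 *
 *      struct hist_entry_iter iter = {
 *              .evsel  = evsel,
 *              .sample = sample,
 *              .ops    = &hist_iter_normal,
 *      };
 *
 *      err = hist_entry_iter__add(&iter, &al, max_stack, NULL);
 */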
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;

        err = sample__resolve_callchain(iter->sample, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err)
                return err;

        iter->max_stack = max_stack_depth;

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.map);
                map__zput(he->branch_info->to.map);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.map);
                map__zput(he->mem_info->daddr.map);
                zfree(&he->mem_info);
        }

        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
                free(he->srcfile);
        free_callchain(he->callchain);
        free(he->trace_output);
        free(he->raw_data);
        free(he);
}

/*
 * collapse the histogram
 */

bool hists__collapse_insert_entry(struct hists *hists,
                                  struct rb_root *root, struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__delete(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

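/*
 * Swap the double-buffered input trees: hand the current entries_in
 * tree over to the collapser and point new additions at the other
 * element of entries_in_array, under hists->lock.
 */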
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
        hists__filter_entry_by_socket(hists, he);
}

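/*
 * Merge entries that compare equal under the collapse keys, e.g. the
 * same symbol sampled in different threads when sorting by symbol only,
 * and re-apply the filters to whatever survives as a distinct entry.
 */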
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        hists->nr_entries = 0;

        root = hists__get_rotate_entries_in(hists);

        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
        struct hists *hists = a->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__should_skip(fmt, a->hists))
                        continue;

                cmp = fmt->sort(fmt, a, b);
                if (cmp)
                        break;
        }

        return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
        hists->nr_non_filtered_entries = 0;
        hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
        hists->nr_entries = 0;
        hists->stats.total_period = 0;

        hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
        hists->nr_non_filtered_entries++;
        hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered)
                hists__inc_filter_stats(hists, h);

        hists->nr_entries++;
        hists->stats.total_period += h->stat.period;
}

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits,
                                         bool use_callchain)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (use_callchain) {
                if (callchain_param.mode == CHAIN_GRAPH_REL) {
                        u64 total = he->stat.period;

                        if (symbol_conf.cumulate_callchain)
                                total = he->stat_acc->period;

                        min_callchain_hits = total * (callchain_param.min_percent / 100);
                }
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                      min_callchain_hits, &callchain_param);
        }

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

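/*
 * Rebuild hists->entries in output order: take each collapsed entry,
 * sort its callchain if requested, insert it according to the output
 * sort keys and refresh the stats and column widths as we go.
 */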
static void output_resort(struct hists *hists, struct ui_progress *prog,
                          bool use_callchain)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists__total_period(hists) * (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists__reset_stats(hists);
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
                hists__inc_stats(hists, n);

                if (!n->filtered)
                        hists__calc_col_len(hists, n);

                if (prog)
                        ui_progress__update(prog, 1);
        }
}

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
        bool use_callchain;

        if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
                use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
        else
                use_callchain = symbol_conf.use_callchain;

        output_resort(evsel__hists(evsel), prog, use_callchain);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
        output_resort(hists, prog, symbol_conf.use_callchain);
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        /* force fold unfiltered entry for simplicity */
        h->unfolded = false;
        h->row_offset = 0;
        h->nr_rows = 0;

        hists->stats.nr_non_filtered_samples += h->stat.nr_events;

        hists__inc_filter_stats(hists, h);
        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he)
{
        if ((hists->socket_filter > -1) &&
            (he->socket != hists->socket_filter)) {
                he->filtered |= (1 << HIST_FILTER__SOCKET);
                return true;
        }

        return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

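/*
 * Re-run one filter predicate over all entries, rebuilding the
 * non-filtered stats and the column widths from scratch for the entries
 * that pass.
 */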
static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (filter(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, type);
        }
}

void hists__filter_by_thread(struct hists *hists)
{
        hists__filter_by_type(hists, HIST_FILTER__THREAD,
                              hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
        hists__filter_by_type(hists, HIST_FILTER__DSO,
                              hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
        hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
                              hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
        hists__filter_by_type(hists, HIST_FILTER__SOCKET,
                              hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
        events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
        if (!filtered)
                hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair, true);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_stats(hists, he);
                he->dummy = true;
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}

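/*
 * Feed per-branch cycle counts to the annotation code so that IPC can
 * be computed per address range.  Only does anything when the first
 * branch entry carries cycles, i.e. when the hardware recorded them.
 */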
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
                          struct perf_sample *sample, bool nonany_branch_mode)
{
        struct branch_info *bi;

        /* If we have branch cycles always annotate them. */
        if (bs && bs->nr && bs->entries[0].flags.cycles) {
                int i;

                bi = sample__resolve_bstack(sample, al);
                if (bi) {
                        struct addr_map_symbol *prev = NULL;

                        /*
                         * Ignore errors, we still want to process the
                         * other entries.
                         *
                         * For non-standard branch modes always
                         * force no IPC (prev == NULL).
                         *
                         * Note that perf stores branches reversed from
                         * program order!
                         */
                        for (i = bs->nr - 1; i >= 0; i--) {
                                addr_map_symbol__account_cycles(&bi[i].from,
                                        nonany_branch_mode ? NULL : prev,
                                        bi[i].flags.cycles);
                                prev = &bi[i].to;
                        }
                        free(bi);
                }
        }
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *pos;
        size_t ret = 0;

        evlist__for_each(evlist, pos) {
                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
                ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
        }

        return ret;
}

u64 hists__total_period(struct hists *hists)
{
        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
                hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
                            const char *arg, int unset __maybe_unused)
{
        if (!strcmp(arg, "relative"))
                symbol_conf.filter_relative = true;
        else if (!strcmp(arg, "absolute"))
                symbol_conf.filter_relative = false;
        else
                return -1;

        return 0;
}

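/*
 * Handle hist.* variables from perfconfig.  A sketch of the config this
 * parses (the value must be one accepted by parse_filter_percentage()):
 *
 *      [hist]
 *              percentage = relative
 */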
int perf_hist_config(const char *var, const char *value)
{
        if (!strcmp(var, "hist.percentage"))
                return parse_filter_percentage(NULL, value, 0);

        return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
        hists->socket_filter = -1;
        hists->hpp_list = hpp_list;
        return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
        struct rb_node *node;
        struct hist_entry *he;

        while (!RB_EMPTY_ROOT(root)) {
                node = rb_first(root);
                rb_erase(node, root);

                he = rb_entry(node, struct hist_entry, rb_node_in);
                hist_entry__delete(he);
        }
}

static void hists__delete_all_entries(struct hists *hists)
{
        hists__delete_entries(hists);
        hists__delete_remaining_entries(&hists->entries_in_array[0]);
        hists__delete_remaining_entries(&hists->entries_in_array[1]);
        hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);

        hists__delete_all_entries(hists);
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);

        __hists__init(hists, &perf_hpp_list);
        return 0;
}

int hists__init(void)
{
        int err = perf_evsel__object_config(sizeof(struct hists_evsel),
                                            hists_evsel__init,
                                            hists_evsel__exit);
        if (err)
                fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

        return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
        INIT_LIST_HEAD(&list->fields);
        INIT_LIST_HEAD(&list->sorts);
}