#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

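/*
 * Example of the width math above: an unresolved location is printed as
 * a raw hex address such as ffffffff81234567, which needs one column per
 * nibble, so on a 64-bit build BITS_PER_LONG / 4 == 64 / 4 == 16.
 */
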
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

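/*
 * Illustration of the decay above, as used by live displays such as
 * 'perf top': each pass multiplies the period by 7/8, so an entry that
 * stops getting samples fades out geometrically, e.g.
 *
 *	1000 -> 875 -> 765 -> 669 -> ...
 *
 * and eventually integer division drives the period to zero, at which
 * point hists__decay_entry() below tells its caller to drop the entry.
 */
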
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);
	else
		rb_erase(&he->rb_node_in, hists->entries_in);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		if (he->raw_data) {
			he->raw_data = memdup(he->raw_data, he->raw_size);
			if (he->raw_data == NULL) {
				map__put(he->ms.map);
				if (he->branch_info) {
					map__put(he->branch_info->from.map);
					map__put(he->branch_info->to.map);
					free(he->branch_info);
				}
				if (he->mem_info) {
					map__put(he->mem_info->iaddr.map);
					map__put(he->mem_info->daddr.map);
				}
				free(he->stat_acc);
				free(he);
				return NULL;
			}
		}
		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}

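/*
 * Note on ownership: hist_entry__new() copies the template by value and
 * then takes its own references on everything the entry points at --
 * map__get()/thread__get() on the refcounted objects, plus private
 * copies of branch_info and raw_data, since those originals belong to
 * the sample being processed.  hist_entry__delete() further below is the
 * symmetric teardown.
 */
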
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      struct perf_sample *sample,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

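/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * after resolving a sample to an addr_location, feed it straight in:
 *
 *	struct hist_entry *he;
 *
 *	he = __hists__add_entry(evsel__hists(evsel), &al, NULL, NULL, NULL,
 *				sample, true);
 *	if (he == NULL)
 *		return -ENOMEM;
 *
 * Real callers normally go through hist_entry_iter__add() below, which
 * also handles branch stacks, mem samples and cumulative callchains.
 */
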
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;

	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

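/*
 * The tables above form a small vtable: every sample flavour walks the
 * same prepare -> add_single -> (next/add_next)* -> finish state machine
 * in hist_entry_iter__add() below, only the callbacks differ.  Adding a
 * new flavour means providing another hist_iter_ops instance; the driver
 * loop itself does not change.
 */
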
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

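/*
 * Sketch of how a tool wires this up (modelled on perf report's sample
 * processing; the surrounding variable names are illustrative):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *
 *	if (sort__mode == SORT_MODE__BRANCH)
 *		iter.ops = &hist_iter_branch;
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, rep);
 *
 * The optional add_entry_cb hook fires once per created entry, with the
 * third argument true only for the single (first) entry.
 */
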
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, hists_to_evsel(he->hists));

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s",
					    width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

int hists__collapse_insert_entry(struct hists *hists, struct rb_root *root,
				 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return 1;
}

struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

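/*
 * Double buffering: entries_in_array[] holds two rbtrees.  New entries
 * always go into the tree *entries_in points at, while this function
 * hands the previous tree to the collapse pass and flips the pointer, so
 * a live tool like 'perf top' can keep inserting under hists->lock while
 * the older batch is being collapsed and resorted.
 */
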
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!sort__need_collapse)
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

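/*
 * Typical pipeline, as driven by the report/top code: samples are first
 * inserted via hists__findnew_entry(), then
 *
 *	hists__collapse_resort(hists, &prog);	// merge by collapse keys
 *	hists__output_resort(hists, &prog);	// sort for display
 *
 * The first pass may merge entries (and their callchains) that compare
 * equal, the second rebuilds hists->entries in output-sort order.
 */
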
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	output_resort(evsel__hists(evsel), prog, use_callchain);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain);
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

void hists__filter_by_thread(struct hists *hists)
{
	hists__filter_by_type(hists, HIST_FILTER__THREAD,
			      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	hists__filter_by_type(hists, HIST_FILTER__DSO,
			      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
			      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	hists__filter_by_type(hists, HIST_FILTER__SOCKET,
			      hists__filter_entry_by_socket);
}

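/*
 * All four entry points share hists__filter_by_type(): the predicate
 * marks matching entries with its HIST_FILTER__* bit and the helper then
 * recomputes the non-filtered stats and column widths.  A new filter
 * only needs a filter_fn_t predicate plus a bit in enum hist_filter.
 */
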
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;

			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

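/*
 * hists__match()/hists__link() are what 'perf diff' builds on: match()
 * pairs up entries present in both histograms, then link() backfills
 * zeroed dummy entries for anything only present in 'other', so both
 * columns of the diff always have a row to print.
 */
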
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

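/*
 * This backs the --percentage option, e.g.:
 *
 *	perf report --percentage relative
 *
 * or, persistently, the hist.percentage key in ~/.perfconfig, which is
 * routed through perf_hist_config() below.  "relative" computes
 * percentages against the filtered total, "absolute" against the total
 * of all samples.
 */
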
int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	hists__delete_all_entries(hists);
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);

	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}