/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/types.h>

#include "thread-stack.h"
#include "callchain.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct perf_evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;
	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;
	struct itrace_synth_opts synth_opts;
	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;
	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;
	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;
	bool synth_needs_swap;
	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u64 cyc_bit;
	u64 noretcomp_bit;
	u64 tsc_ctc_ratio_n;
	u64 tsc_ctc_ratio_d;
	unsigned max_non_turbo_ratio;
	unsigned long num_events;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct intel_pt_decoder *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	pid_t pid, tid;
	int cpu;
	enum switch_state switch_state;
	pid_t next_tid;
	struct thread *thread;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
};

static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
			break;
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

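/*
 * Example of the dump format produced above, for a 16-byte PSB packet
 * (byte values are real for PSB; column spacing approximate):
 *
 *   00000000:  02 82 02 82 02 82 02 82 02 82 02 82 02 82 02 82  PSB
 *
 * i.e. the buffer offset, the raw packet bytes, then the packet description
 * returned by intel_pt_pkt_desc().
 */
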
static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	intel_pt_dump(pt, buf, len);
}

static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	return 0;
}

static void intel_pt_use_buffer_pid_tid(struct intel_pt_queue *ptq,
					struct auxtrace_queue *queue,
					struct auxtrace_buffer *buffer)
{
	if (queue->cpu == -1 && buffer->cpu != -1)
		ptq->cpu = buffer->cpu;

	ptq->pid = buffer->pid;
	ptq->tid = buffer->tid;

	intel_pt_log("queue %u cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);

	thread__zput(ptq->thread);

	if (ptq->tid != -1) {
		if (ptq->pid != -1)
			ptq->thread = machine__findnew_thread(ptq->pt->machine,
							      ptq->pid,
							      ptq->tid);
		else
			ptq->thread = machine__find_thread(ptq->pt->machine, -1,
							   ptq->tid);
	}
}

/* This function assumes data is processed sequentially only */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer, *old_buffer = buffer;
	struct auxtrace_queue *queue;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	if (!buffer->data) {
		int fd = perf_data_file__fd(ptq->pt->session->file);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	if (ptq->pt->snapshot_mode && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	/*
	 * If in snapshot mode and the buffer has no usable data, get next
	 * buffer and again check overlap against old_buffer.
	 */
	if (ptq->pt->snapshot_mode && !b->len)
		return intel_pt_get_trace(b, data);

	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
						      !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	if (ptq->use_buffer_pid_tid && (ptq->pid != buffer->pid ||
					ptq->tid != buffer->tid))
		intel_pt_use_buffer_pid_tid(ptq, queue, buffer);

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (!b->len)
		return intel_pt_get_trace(b, data);

	return 0;
}

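/*
 * Note: the tail calls back into intel_pt_get_trace() above mean that a
 * buffer with no usable data (e.g. one entirely overlapped by the previous
 * snapshot buffer) is skipped transparently, so the decoder only ever sees
 * non-empty trace fragments.
 */
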
struct intel_pt_cache_entry {
	struct auxtrace_cache_entry	entry;
	u64				insn_cnt;
	u64				byte_cnt;
	enum intel_pt_insn_op		op;
	enum intel_pt_insn_branch	branch;
	int				length;
	int32_t				rel;
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}

static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}

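/*
 * Worked example (illustrative numbers): a 4 MiB DSO with the cache divisor
 * at 64 gives size = 65536, so the cache gets
 * 32 - __builtin_clz(65536) = 32 - 15 = 17 bits, i.e. 2^17 buckets.
 * The checks above clamp the result to the range [10, 21].
 */
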
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[1024];
	size_t bufsz;
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	bufsz = intel_pt_insn_max_size();

	if (*ip >= ptq->pt->kernel_start)
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = ptq->pt->unknown_thread;
	}

	while (1) {
		thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al);
		if (!al.map || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map, machine->symbol_filter);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf, bufsz);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip)
				goto out_no_cache;

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * cache entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}

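/*
 * In short: the walker above disassembles forward from *ip one instruction
 * at a time until it reaches to_ip, hits a branch, or exceeds max_insn_cnt,
 * and caches the result per DSO offset so that repeatedly executed code,
 * such as hot loops, does not have to be re-read and re-decoded every time.
 */
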
static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

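/*
 * e.g. if mtc_freq_bits were the mask 0xf000 (illustrative; the real mask
 * comes from the PMU format), the loop above computes shift = 12, and a
 * config value of 0x3000 yields an MTC period of 3.
 */
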
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem  = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
		pt->tc.time_mult;
}

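/*
 * Splitting ns into quotient and remainder above avoids overflowing 64 bits
 * for large ns. Worked example with small illustrative values: time_mult = 4,
 * time_shift = 2, ns = 10 gives quot = 2, rem = 2, so the result is
 * (2 << 2) + (2 << 2) / 4 = 8 + 2 = 10, exactly ns * (1 << shift) / mult.
 */
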
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		size_t sz = sizeof(struct ip_callchain);

		sz += pt->synth_opts.callchain_sz * sizeof(u64);
		ptq->chain = zalloc(sz);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += pt->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		ptq->last_branch = zalloc(sz);
		if (!ptq->last_branch)
			goto out_free;
		ptq->last_branch_rb = zalloc(sz);
		if (!ptq->last_branch_rb)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

	if (pt->synth_opts.instructions) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
						INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}

static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
}

static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}

static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		ptq->insn_len = 0;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
	}
}

static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		if (pt->sampling_mode) {
			if (pt->timeless_decoding)
				ptq->step_through_buffers = true;
			if (pt->timeless_decoding || !pt->have_sched_switch)
				ptq->use_buffer_pid_tid = true;
		}
	}

	if (!ptq->on_heap &&
	    (!pt->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}

static int intel_pt_setup_queues(struct intel_pt *pt)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
		if (ret)
			return ret;
	}
	return 0;
}

static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
	struct branch_stack *bs_src = ptq->last_branch_rb;
	struct branch_stack *bs_dst = ptq->last_branch;
	size_t nr = 0;

	bs_dst->nr = bs_src->nr;

	if (!bs_src->nr)
		return;

	nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[ptq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * ptq->last_branch_pos);
	}
}

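/*
 * Illustration of the copy above: with last_branch_sz = 4, a full ring
 * buffer and last_branch_pos = 1 (the index of the newest entry), the two
 * memcpys produce dst = { src[1], src[2], src[3], src[0] }, i.e. the
 * destination branch stack is linearized newest-to-oldest.
 */
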
static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
{
	ptq->last_branch_pos = 0;
	ptq->last_branch_rb->nr = 0;
}

static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct branch_stack *bs = ptq->last_branch_rb;
	struct branch_entry *be;

	if (!ptq->last_branch_pos)
		ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

	ptq->last_branch_pos -= 1;

	be = &bs->entries[ptq->last_branch_pos];
	be->from = state->from_ip;
	be->to = state->to_ip;
	be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
	be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
	/* No support for mispredict */
	be->flags.mispred = ptq->pt->mispred_all;

	if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
		bs->nr += 1;
}

static int intel_pt_inject_event(union perf_event *event,
				 struct perf_sample *sample, u64 type,
				 bool swapped)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample, swapped);
}

static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct dummy_branch_stack {
		u64			nr;
		struct branch_entry	entries;
	} dummy_bs;

	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
		return 0;

	if (pt->synth_opts.initial_skip &&
	    pt->num_events++ < pt->synth_opts.initial_skip)
		return 0;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->branches_id;
	sample.stream_id = ptq->pt->branches_id;
	sample.period = 1;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;

	/*
	 * perf report cannot handle events without a branch stack when using
	 * SORT_MODE__BRANCH so make a dummy one.
	 */
	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->branches_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver branch event, error %d\n",
		       ret);

	return ret;
}

static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (pt->synth_opts.initial_skip &&
	    pt->num_events++ < pt->synth_opts.initial_skip)
		return 0;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->instructions_id;
	sample.stream_id = ptq->pt->instructions_id;
	sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;

	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip);
		sample.callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		intel_pt_copy_last_branch_rb(ptq);
		sample.branch_stack = ptq->last_branch;
	}

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->instructions_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n",
		       ret);

	if (pt->synth_opts.last_branch)
		intel_pt_reset_last_branch_rb(ptq);

	return ret;
}

static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
	int ret;
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (pt->synth_opts.initial_skip &&
	    pt->num_events++ < pt->synth_opts.initial_skip)
		return 0;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = ptq->state->from_ip;
	sample.pid = ptq->pid;
	sample.tid = ptq->tid;
	sample.addr = ptq->state->to_ip;
	sample.id = ptq->pt->transactions_id;
	sample.stream_id = ptq->pt->transactions_id;
	sample.period = 1;
	sample.cpu = ptq->cpu;
	sample.flags = ptq->flags;
	sample.insn_len = ptq->insn_len;

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip);
		sample.callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		intel_pt_copy_last_branch_rb(ptq);
		sample.branch_stack = ptq->last_branch;
	}

	if (pt->synth_opts.inject) {
		ret = intel_pt_inject_event(event, &sample,
					    pt->transactions_sample_type,
					    pt->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
	if (ret)
		pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
		       ret);

	if (pt->synth_opts.last_branch)
		intel_pt_reset_last_branch_rb(ptq);

	return ret;
}

static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
				pid_t pid, pid_t tid, u64 ip)
{
	union perf_event event;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	int err;

	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     code, cpu, pid, tid, ip, msg);

	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);

	return err;
}

static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}

static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
	struct intel_pt *pt = ptq->pt;

	return ip == pt->switch_ip &&
	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}

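/*
 * In other words, a branch to switch_ip only counts as a context switch
 * point if it is a plain synchronous branch: the conditional, asynchronous,
 * interrupt and transaction-abort flags all disqualify it.
 */
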
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!ptq->have_sample)
		return 0;

	ptq->have_sample = false;

	if (pt->sample_instructions &&
	    (state->type & INTEL_PT_INSTRUCTION) &&
	    (!pt->synth_opts.initial_skip ||
	     pt->num_events++ >= pt->synth_opts.initial_skip)) {
		err = intel_pt_synth_instruction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_transactions &&
	    (state->type & INTEL_PT_TRANSACTION) &&
	    (!pt->synth_opts.initial_skip ||
	     pt->num_events++ >= pt->synth_opts.initial_skip)) {
		err = intel_pt_synth_transaction_sample(ptq);
		if (err)
			return err;
	}

	if (!(state->type & INTEL_PT_BRANCH))
		return 0;

	if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
		thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
				    state->to_ip, ptq->insn_len,
				    state->trace_nr);
	else
		thread_stack__set_trace_nr(ptq->thread, state->trace_nr);

	if (pt->sample_branches) {
		err = intel_pt_synth_branch_sample(ptq);
		if (err)
			return err;
	}

	if (pt->synth_opts.last_branch)
		intel_pt_update_last_branch_rb(ptq);

	if (!pt->sync_switch)
		return 0;

	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
		switch (ptq->switch_state) {
		case INTEL_PT_SS_UNKNOWN:
		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
			err = intel_pt_next_tid(pt, ptq);
			if (err)
				return err;
			ptq->switch_state = INTEL_PT_SS_TRACING;
			break;
		default:
			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
			return 1;
		}
	} else if (!state->to_ip) {
		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
		   state->to_ip == pt->ptss_ip &&
		   (ptq->flags & PERF_IP_FLAG_CALL)) {
		ptq->switch_state = INTEL_PT_SS_TRACING;
	}

	return 0;
}

static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
	struct machine *machine = pt->machine;
	struct map *map;
	struct symbol *sym, *start;
	u64 ip, switch_ip = 0;
	const char *ptss;

	if (ptss_ip)
		*ptss_ip = 0;

	map = machine__kernel_map(machine);
	if (!map)
		return 0;

	if (map__load(map, machine->symbol_filter))
		return 0;

	start = dso__first_symbol(map->dso, MAP__FUNCTION);

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding == STB_GLOBAL &&
		    !strcmp(sym->name, "__switch_to")) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				switch_ip = ip;
				break;
			}
		}
	}

	if (!switch_ip || !ptss_ip)
		return 0;

	if (pt->have_sched_switch == 1)
		ptss = "perf_trace_sched_switch";
	else
		ptss = "__perf_event_task_sched_out";

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (!strcmp(sym->name, ptss)) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				*ptss_ip = ip;
				break;
			}
		}
	}

	return switch_ip;
}

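/*
 * Summary of the lookup above: switch_ip is the address of the kernel's
 * __switch_to(), used to recognize context switches directly in the trace,
 * while ptss_ip is the address through which the corresponding switch event
 * is emitted: the sched_switch tracepoint when have_sched_switch == 1,
 * otherwise __perf_event_task_sched_out().
 */
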
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!pt->kernel_start) {
		pt->kernel_start = machine__kernel_start(pt->machine);
		if (pt->per_cpu_mmaps &&
		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
		    !pt->sampling_mode) {
			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
			if (pt->switch_ip) {
				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
					     pt->switch_ip, pt->ptss_ip);
				pt->sync_switch = true;
			}
		}
	}

	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
	while (1) {
		err = intel_pt_sample(ptq);
		if (err)
			return err;

		state = intel_pt_decode(ptq->decoder);
		if (state->err) {
			if (state->err == INTEL_PT_ERR_NODATA)
				return 1;
			if (pt->sync_switch &&
			    state->from_ip >= pt->kernel_start) {
				pt->sync_switch = false;
				intel_pt_next_tid(pt, ptq);
			}
			if (pt->synth_opts.errors) {
				err = intel_pt_synth_error(pt, state->err,
							   ptq->cpu, ptq->pid,
							   ptq->tid,
							   state->from_ip);
				if (err)
					return err;
			}
			continue;
		}

		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);

		/* Use estimated TSC upon return to user space */
		if (pt->est_tsc &&
		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
		    state->to_ip && state->to_ip < pt->kernel_start) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		/* Use estimated TSC in unknown switch state */
		} else if (pt->sync_switch &&
			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
			   ptq->next_tid == -1) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		} else if (state->timestamp > ptq->timestamp) {
			ptq->timestamp = state->timestamp;
		}

		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
			*timestamp = ptq->timestamp;
			return 0;
		}
	}
}

static inline int intel_pt_update_queues(struct intel_pt *pt)
{
	if (pt->queues.new_data) {
		pt->queues.new_data = false;
		return intel_pt_setup_queues(pt);
	}

	return 0;
}

static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct intel_pt_queue *ptq;

		if (!pt->heap.heap_cnt)
			return 0;

		if (pt->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = pt->heap.heap_array[0].queue_nr;
		queue = &pt->queues.queue_array[queue_nr];
		ptq = queue->priv;

		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
			     queue_nr, pt->heap.heap_array[0].ordinal,
			     timestamp);

		auxtrace_heap__pop(&pt->heap);

		if (pt->heap.heap_cnt) {
			ts = pt->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		intel_pt_set_pid_tid_cpu(pt, queue);

		ret = intel_pt_run_decoder(ptq, &ts);
		if (ret < 0) {
			auxtrace_heap__add(&pt->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			ptq->on_heap = false;
		}
	}

	return 0;
}

static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}

static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0);
}

static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}

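/*
 * The lookup above first tries queue_array[cpu] directly (clamped to the
 * last queue), which covers the common one-queue-per-CPU layout, and only
 * falls back to scanning the remaining queues when that guess misses.
 */
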
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
				u64 timestamp)
{
	struct intel_pt_queue *ptq;
	int err;

	if (!pt->sync_switch)
		return 1;

	ptq = intel_pt_cpu_to_ptq(pt, cpu);
	if (!ptq)
		return 1;

	switch (ptq->switch_state) {
	case INTEL_PT_SS_NOT_TRACING:
		ptq->next_tid = -1;
		break;
	case INTEL_PT_SS_UNKNOWN:
	case INTEL_PT_SS_TRACING:
		ptq->next_tid = tid;
		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
		return 0;
	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
		if (!ptq->on_heap) {
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
						 ptq->timestamp);
			if (err)
				return err;
			ptq->on_heap = true;
		}
		ptq->switch_state = INTEL_PT_SS_TRACING;
		break;
	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
		ptq->next_tid = tid;
		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
		break;
	default:
		break;
	}

	return 1;
}

static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct perf_evsel *evsel;
	pid_t tid;
	int cpu, ret;

	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
	if (evsel != pt->switch_evsel)
		return 0;

	tid = perf_evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
							      &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}

static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return 0;
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1) {
		pr_err("context_switch event has no tid\n");
		return -EINVAL;
	}

	intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
								   &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}

static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}

static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
		     perf_event__name(event->header.type), event->header.type,
		     sample->cpu, sample->time, timestamp);

	return err;
}

static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}

static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}

static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	thread__put(pt->unknown_thread);
	free(pt);
}

static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (pt->sampling_mode)
		return 0;

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data_file__fd(session->file);
		int err;

		if (perf_data_file__is_pipe(session->file)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now that we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}

static int intel_pt_synth_event(struct perf_session *session,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	return perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					   &id, intel_pt_event_synth);
}

static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == pt->pmu_type && evsel->ids) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		pt->instructions_sample_period = attr.sample_period;
		if (pt->synth_opts.callchain)
			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
		if (pt->synth_opts.last_branch)
			attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'instructions' event type\n",
			       __func__);
			return err;
		}
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = 1;
		if (pt->synth_opts.callchain)
			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
		if (pt->synth_opts.last_branch)
			attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'transactions' event type\n",
			       __func__);
			return err;
		}
		pt->sample_transactions = true;
		pt->transactions_sample_type = attr.sample_type;
		pt->transactions_id = id;
		id += 1;
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->id && evsel->id[0] == pt->transactions_id) {
				if (evsel->name)
					zfree(&evsel->name);
				evsel->name = strdup("transactions");
				break;
			}
		}
	}

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN;
		attr.sample_type &= ~(u64)PERF_SAMPLE_BRANCH_STACK;
		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_pt_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'branches' event type\n",
			       __func__);
			return err;
		}
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
	}

	pt->synth_needs_swap = evsel->needs_swap;

	return 0;
}

static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel) {
		const char *name = perf_evsel__name(evsel);

		if (!strcmp(name, "sched:sched_switch"))
			return evsel;
	}

	return NULL;
}

static bool intel_pt_find_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.context_switch)
			return true;
	}

	return false;
}

static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
	struct intel_pt *pt = data;

	if (!strcmp(var, "intel-pt.mispred-all"))
		pt->mispred_all = perf_config_bool(var, value);

	return 0;
}

static const char * const intel_pt_info_fmts[] = {
	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
	[INTEL_PT_MAX_NONTURBO_RATIO]	= "  Max non-turbo ratio %"PRIu64"\n",
};

static void intel_pt_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
}

static bool intel_pt_has(struct auxtrace_info_event *auxtrace_info, int pos)
{
	return auxtrace_info->header.size >=
		sizeof(struct auxtrace_info_event) + (sizeof(u64) * (pos + 1));
}

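/*
 * Example: intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT) below checks that
 * the recorded header is big enough to contain priv[INTEL_PT_CYC_BIT], so
 * perf.data files written by older tools, whose auxtrace info record is
 * shorter, still load correctly and the newer fields are simply skipped.
 */
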
int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	perf_config(intel_pt_perf_config, pt);

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
		pt->max_non_turbo_ratio =
			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_MAX_NONTURBO_RATIO,
				    INTEL_PT_MAX_NONTURBO_RATIO);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = false;
	pt->est_tsc = !pt->timeless_decoding;

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree nor in a
	 * list, initialize its list node so that at thread__put() the
	 * current thread lifetime assumption is kept and we don't segfault
	 * at list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&pt->synth_opts);
		if (use_browser != -1) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
		}
		if (session->itrace_synth_opts)
			pt->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
	}

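	/*
	 * e.g. for an (illustrative) 3.5 GHz TSC, tsc_freq comes out at
	 * 3500000000 and (3500000000 + 50000000) / 100000000 = 35, i.e. the
	 * rounded number of 100 MHz steps.
	 */
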
	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
		}
	}

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	free(pt);
	return err;
}