/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <linux/bitops.h>
#include <api/fs/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>

#include "callchain.h"
#include "thread_map.h"
#include "perf_regs.h"
#include "trace-event.h"
static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
} perf_missing_features;

static clockid_t clockid;
static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};
int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

	perf_evsel__object.init = init;

	perf_evsel__object.fini = fini;
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)

	u64 mask = sample_type & PERF_SAMPLE_MASK;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)

	if (sample_type & PERF_SAMPLE_IDENTIFIER)

	if (!(sample_type & PERF_SAMPLE_ID))

	if (sample_type & PERF_SAMPLE_IP)

	if (sample_type & PERF_SAMPLE_TID)

	if (sample_type & PERF_SAMPLE_TIME)

	if (sample_type & PERF_SAMPLE_ADDR)
/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used, there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)

	if (sample_type & PERF_SAMPLE_IDENTIFIER)

	if (!(sample_type & PERF_SAMPLE_ID))

	if (sample_type & PERF_SAMPLE_CPU)

	if (sample_type & PERF_SAMPLE_STREAM_ID)
void perf_evsel__calc_id_pos(struct perf_evsel *evsel)

	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
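/*
 * Worked example (editorial illustration, not part of the original
 * source): for sample_type == PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_ID, the id follows the ip, tid and
 * time words, so __perf_evsel__calc_id_pos() returns 3.  Counting
 * backwards from the end of a non-sample event, with no
 * PERF_SAMPLE_CPU or PERF_SAMPLE_STREAM_ID set here,
 * __perf_evsel__calc_is_pos() returns 1.
 */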
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)

	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)

	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)

	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)

	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;

	INIT_LIST_HEAD(&evsel->node);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)

	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)

	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	       = PERF_TYPE_TRACEPOINT,
			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
	return NULL;
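/*
 * Example use (an illustrative sketch, assuming the usual
 * perf_evsel__newtp() wrapper that passes idx == 0): create an evsel
 * for the sched:sched_switch tracepoint, then read a field once a
 * sample has been parsed:
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (evsel != NULL)
 *		u64 next_pid = perf_evsel__intval(evsel, &sample, "next_pid");
 */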
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)

	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)

	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod) do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		}							\
	} while (0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)

	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)

	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)

	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)

	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)
/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
bool perf_evsel__is_cache_op_valid(u8 type, u8 op)

	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */

	return false;	/* invalid */
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)

	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)

	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
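/*
 * Illustrative example (editorial, not part of the original source):
 * the cache config encoding is (type | (op << 8) | (result << 16)), so
 *
 *	config = PERF_COUNT_HW_CACHE_L1D |
 *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 *
 * formats as "L1-dcache-load-misses", while RESULT_ACCESS (0) takes the
 * two-part spelling "L1-dcache-loads".
 */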
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)

	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)

	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
const char *perf_evsel__name(struct perf_evsel *evsel)

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
const char *perf_evsel__group_name(struct perf_evsel *evsel)

	return evsel->group_name ?: "anon group";
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)

	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");
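/*
 * Example output (illustrative): for a leader named "cycles" with one
 * member "instructions" and no explicit group name, the buffer ends up
 * containing:
 *
 *	anon group { cycles, instructions }
 */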
static void
perf_evsel__config_callgraph(struct perf_evsel *evsel,
			     struct record_opts *opts)

	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	if (callchain_param.record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (callchain_param.record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = callchain_param.dump_size;
			attr->exclude_callchain_user = 1;
		} else
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
/*
 * The enable_on_exec/disabled value strategy:
 *
 * 1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *      Group members are ruled by group leaders. They need to
 *      be enabled, because the group scheduling relies on that.
 *
 * 2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *      Independent events and group leaders are initially disabled
 *      and get enabled by exec. Group members are ruled by group
 *      leaders as stated in 1).
 *
 * 3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *      When attaching events to an already running traced program
 *      we enable/disable events specifically, as there's no
 *      initial traced exec call.
 */
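/*
 * Illustrative summary of the strategy above (an editorial sketch, not
 * part of the original source):
 *
 *	case				attr->disabled	attr->enable_on_exec
 *	leader, exec'd by perf		1		1
 *	leader, attached (pid/tid)	1		0 (enabled via ioctl)
 *	group member			0		0
 */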
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)

	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);
	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}
	/*
	 * We default some events to have a default interval, but keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}
	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}
	/*
	 * We don't allow user space callchains for function trace
	 * events, due to issues with page faults while tracing page
	 * fault handlers and the overall tricky nature of doing so.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain_param.enabled && !evsel->no_aux_samples)
		perf_evsel__config_callgraph(evsel, opts);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = PERF_REGS_MASK;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);
	/*
	 * When the user explicitly disabled time, don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);
	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {

		attr->wakeup_events = 1;
	}

	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->mmap2 = track && !perf_missing_features.mmap2;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}
	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}
	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}
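/*
 * Example (an illustrative sketch, not from the original source): a
 * minimal record_opts for 4 kHz frequency-based sampling, assuming the
 * remaining fields keep their zero/default values:
 *
 *	struct record_opts opts = {
 *		.freq	       = 4000,
 *		.user_freq     = UINT_MAX,
 *		.user_interval = ULLONG_MAX,
 *	};
 *
 *	perf_evsel__config(evsel, &opts);
 */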
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)

	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;

int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			     const char *filter)

	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)

	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
int perf_evsel__append_filter(struct perf_evsel *evsel,
			      const char *op, const char *filter)

	char *new_filter;

	if (evsel->filter == NULL)
		return perf_evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, "(%s) %s (%s)", evsel->filter, op, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
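/*
 * Example (illustrative): appending "bytes > 1024" to an existing
 * "common_pid != 0" filter with op "&&" yields
 *
 *	(common_pid != 0) && (bytes > 1024)
 */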
int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)

	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)

	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
static void perf_evsel__free_fd(struct perf_evsel *evsel)

	xyarray__delete(evsel->fd);
	evsel->fd = NULL;

static void perf_evsel__free_id(struct perf_evsel *evsel)

	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
void perf_evsel__exit(struct perf_evsel *evsel)

	assert(list_empty(&evsel->node));
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	close_cgroup(evsel->cgrp);
	cpu_map__put(evsel->cpus);
	thread_map__put(evsel->threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);

void perf_evsel__delete(struct perf_evsel *evsel)

	perf_evsel__exit(evsel);
	free(evsel);
void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)

	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)

	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
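/*
 * Worked example (illustrative): with val = 100, ena = 1000 and
 * run = 500 the event was scheduled in for half of its enabled time,
 * so scaling yields val = 100 * 1000 / 500 + 0.5 -> 200, and *pscaled
 * is set to 1 to flag the estimate.
 */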
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)

	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
		return -errno;

	return 0;
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)

	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)

	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must already be processed/open; if not, it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)

	bool first_bit = true;
	int i = 0;

	do {
		if (value & bits[i].bit) {
			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);
static void __p_sample_type(char *buf, size_t size, u64 value)

#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR),

	__p_bits(buf, size, value, bits);
static void __p_read_format(char *buf, size_t size, u64 value)

#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),

	__p_bits(buf, size, value, bits);
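/*
 * Example (illustrative): __p_sample_type() with
 * value == (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME)
 * leaves "IP|TID|TIME" in buf.
 */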
#define BUF_SIZE		1024

#define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv)

	char buf[BUF_SIZE];
	int ret = 0;

	PRINT_ATTRf(type, p_unsigned);
	PRINT_ATTRf(size, p_unsigned);
	PRINT_ATTRf(config, p_hex);
	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
	PRINT_ATTRf(sample_type, p_sample_type);
	PRINT_ATTRf(read_format, p_read_format);

	PRINT_ATTRf(disabled, p_unsigned);
	PRINT_ATTRf(inherit, p_unsigned);
	PRINT_ATTRf(pinned, p_unsigned);
	PRINT_ATTRf(exclusive, p_unsigned);
	PRINT_ATTRf(exclude_user, p_unsigned);
	PRINT_ATTRf(exclude_kernel, p_unsigned);
	PRINT_ATTRf(exclude_hv, p_unsigned);
	PRINT_ATTRf(exclude_idle, p_unsigned);
	PRINT_ATTRf(mmap, p_unsigned);
	PRINT_ATTRf(comm, p_unsigned);
	PRINT_ATTRf(freq, p_unsigned);
	PRINT_ATTRf(inherit_stat, p_unsigned);
	PRINT_ATTRf(enable_on_exec, p_unsigned);
	PRINT_ATTRf(task, p_unsigned);
	PRINT_ATTRf(watermark, p_unsigned);
	PRINT_ATTRf(precise_ip, p_unsigned);
	PRINT_ATTRf(mmap_data, p_unsigned);
	PRINT_ATTRf(sample_id_all, p_unsigned);
	PRINT_ATTRf(exclude_host, p_unsigned);
	PRINT_ATTRf(exclude_guest, p_unsigned);
	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
	PRINT_ATTRf(mmap2, p_unsigned);
	PRINT_ATTRf(comm_exec, p_unsigned);
	PRINT_ATTRf(use_clockid, p_unsigned);

	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);
	PRINT_ATTRf(aux_watermark, p_unsigned);

	return ret;
static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))

	return fprintf(fp, "  %-32s %s\n", name, val);
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)

	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}
fallback_missing_features:
	if (perf_missing_features.clockid_wrong)
		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->attr.use_clockid = 0;
		evsel->attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}
	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				pr_debug2("sys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}
			set_rlimit = NO_CHANGE;
			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have perf_evsel__open_strerror() print us a nice
			 * error.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;
try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them, try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}
	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
		perf_missing_features.clockid = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)

	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)

	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)

	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)

	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
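/*
 * Typical lifecycle (an illustrative sketch using the helpers above;
 * error handling elided):
 *
 *	struct perf_event_attr attr = {
 *		.type	  = PERF_TYPE_SOFTWARE,
 *		.config	  = PERF_COUNT_SW_TASK_CLOCK,
 *		.disabled = 1,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	perf_evsel__open_per_thread(evsel, threads);
 *	perf_evsel__enable(evsel, 1, threads->nr);
 *	... workload runs ...
 *	__perf_evsel__read_on_cpu(evsel, 0, 0, false);
 *	perf_evsel__close(evsel, 1, threads->nr);
 *	perf_evsel__delete(evsel);
 */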
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)

	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)

	return size > max_size || offset + size > endp;

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
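/*
 * Typical use while walking the sample array (illustrative): guard
 * every fixed-size read before dereferencing, e.g.
 *
 *	OVERFLOW_CHECK_u64(array);
 *	data->ip = *array;
 *	array++;
 */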
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)

	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return -ENODATA;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;
	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}
	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}
	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}
	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}
	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}
	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	return 0;
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)

	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	return result;
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)

	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;
	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}
	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}
	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}
	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	return 0;
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)

	return pevent_find_field(evsel->tp_format, name);

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)

	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (field == NULL)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)

	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (field == NULL)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
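/*
 * Example use from tracepoint processing (illustrative; the field names
 * depend on the tracepoint, here sched:sched_switch):
 *
 *	u64 prev_pid = perf_evsel__intval(evsel, &sample, "prev_pid");
 *	char *prev_comm = perf_evsel__rawptr(evsel, &sample, "prev_comm");
 */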
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)

	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;

static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)

	return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)

	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose) {
		printed += perf_event_attr__fprintf(fp, &evsel->attr,
						    __print_attr__fprintf, &first);
	} else if (details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}

out:
	fputc('\n', fp);
	return ++printed;
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to the hrtimer-based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	}

	return false;
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)

	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}
	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));