tools/perf/util/evlist.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

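/*
 * FD() yields the perf_event_open() file descriptor an evsel holds for
 * a (cpu, thread) pair; SID() yields the matching struct perf_sample_id
 * slot. Both are just typed accessors into the evsel's xyarray tables.
 */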
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
        evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
                                     struct thread_map *threads)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist, cpus, threads);

        return evlist;
}
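
/*
 * Sketch of the usual lifecycle (illustrative, not a fixed recipe):
 * allocate with perf_evlist__new(), tear down with perf_evlist__delete(),
 * which also purges and deletes every evsel still on the list:
 *
 *      struct perf_evlist *evlist = perf_evlist__new(cpus, threads);
 *
 *      if (evlist == NULL)
 *              return -ENOMEM;
 *      ...
 *      perf_evlist__delete(evlist);
 */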

void perf_evlist__config_attrs(struct perf_evlist *evlist,
                               struct perf_record_opts *opts)
{
        struct perf_evsel *evsel, *first;

        if (evlist->cpus->map[0] < 0)
                opts->no_inherit = true;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry(evsel, &evlist->entries, node) {
                perf_evsel__config(evsel, opts, first);

                if (evlist->nr_entries > 1)
                        evsel->attr.sample_type |= PERF_SAMPLE_ID;
        }
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
        free(evlist->mmap);
        free(evlist->pollfd);
        evlist->mmap = NULL;
        evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
        list_add_tail(&entry->node, &evlist->entries);
        ++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list,
                                   int nr_entries)
{
        list_splice_tail(list, &evlist->entries);
        evlist->nr_entries += nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel;

        event_attr_init(&attr);

        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL)
                goto error;

        /* use strdup() because free(evsel) assumes name is allocated */
        evsel->name = strdup("cycles");
        if (!evsel->name)
                goto error_free;

        perf_evlist__add(evlist, evsel);
        return 0;
error_free:
        perf_evsel__delete(evsel);
error:
        return -ENOMEM;
}
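
/*
 * A minimal caller sketch, loosely mirroring (as an assumption) how the
 * record/top built-ins fall back to the cycles event when the command
 * line named none:
 *
 *      if (evlist->nr_entries == 0 && perf_evlist__add_default(evlist) < 0)
 *              return -ENOMEM;
 */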

int perf_evlist__add_attrs(struct perf_evlist *evlist,
                           struct perf_event_attr *attrs, size_t nr_attrs)
{
        struct perf_evsel *evsel, *n;
        LIST_HEAD(head);
        size_t i;

        for (i = 0; i < nr_attrs; i++) {
                evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
                if (evsel == NULL)
                        goto out_delete_partial_list;
                list_add_tail(&evsel->node, &head);
        }

        perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

        return 0;

out_delete_partial_list:
        list_for_each_entry_safe(evsel, n, &head, node)
                perf_evsel__delete(evsel);
        return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs)
{
        size_t i;

        for (i = 0; i < nr_attrs; i++)
                event_attr_init(attrs + i);

        return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

static int trace_event__id(const char *evname)
{
        char *filename, *colon;
        int err = -1, fd;

        if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
                return -1;

        colon = strrchr(filename, ':');
        if (colon != NULL)
                *colon = '/';

        fd = open(filename, O_RDONLY);
        if (fd >= 0) {
                char id[16];
                ssize_t ret = read(fd, id, sizeof(id) - 1);

                if (ret > 0) {
                        id[ret] = '\0'; /* NUL-terminate before atoi() */
                        err = atoi(id);
                }
                close(fd);
        }

        free(filename);
        return err;
}

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
                                 const char *tracepoints[],
                                 size_t nr_tracepoints)
{
        int err;
        size_t i;
        struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

        if (attrs == NULL)
                return -1;

        for (i = 0; i < nr_tracepoints; i++) {
                err = trace_event__id(tracepoints[i]);

                if (err < 0)
                        goto out_free_attrs;

                attrs[i].type          = PERF_TYPE_TRACEPOINT;
                attrs[i].config        = err;
                attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                                          PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
                attrs[i].sample_period = 1;
        }

        err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
        free(attrs);
        return err;
}
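
/*
 * Usage sketch; the tracepoint names are examples only and must match
 * "subsystem:event" directories under tracing_events_path, since
 * trace_event__id() rewrites the ':' to '/' before the debugfs lookup:
 *
 *      const char *tp[] = { "sched:sched_switch", "sched:sched_wakeup", };
 *
 *      if (perf_evlist__add_tracepoints(evlist, tp, ARRAY_SIZE(tp)) < 0)
 *              return -1;
 */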

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
                    (int)evsel->attr.config == id)
                        return evsel;
        }

        return NULL;
}

int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
                                          const struct perf_evsel_str_handler *assocs,
                                          size_t nr_assocs)
{
        struct perf_evsel *evsel;
        int err;
        size_t i;

        for (i = 0; i < nr_assocs; i++) {
                err = trace_event__id(assocs[i].name);
                if (err < 0)
                        goto out;

                evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
                if (evsel == NULL)
                        continue;

                err = -EEXIST;
                if (evsel->handler.func != NULL)
                        goto out;
                evsel->handler.func = assocs[i].handler;
        }

        err = 0;
out:
        return err;
}
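
/*
 * Association table sketch (the handler functions are hypothetical);
 * note that a name resolving to no evsel is silently skipped, while
 * re-associating an evsel that already has a handler yields -EEXIST:
 *
 *      static const struct perf_evsel_str_handler handlers[] = {
 *              { "sched:sched_switch", process_sched_switch, },
 *              { "sched:sched_wakeup", process_sched_wakeup, },
 *      };
 *
 *      err = perf_evlist__set_tracepoints_handlers(evlist, handlers,
 *                                                  ARRAY_SIZE(handlers));
 */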

void perf_evlist__disable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_DISABLE, 0);
                }
        }
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_ENABLE, 0);
                }
        }
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
        evlist->nr_fds++;
}
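
/*
 * The pollfd array filled above is meant to go straight to poll(2); a
 * consumer loop might look like this sketch ('done' is a stand-in for
 * whatever exit condition the caller uses):
 *
 *      while (!done) {
 *              poll(evlist->pollfd, evlist->nr_fds, -1);
 *              ... drain each map with perf_evlist__mmap_read() ...
 *      }
 */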

static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
        return 0;
}
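
/*
 * For reference: with PERF_FORMAT_ID set, a read(2) on the event fd
 * returns the counter value first, then TOTAL_TIME_ENABLED and
 * TOTAL_TIME_RUNNING if requested, then the id, which is why id_idx
 * above starts at 1 and is bumped once per enabled time field.
 */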

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct perf_sample_id *sid;
        int hash;

        if (evlist->nr_entries == 1)
                return list_entry(evlist->entries.next, struct perf_evsel, node);

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, pos, head, node)
                if (sid->id == id)
                        return sid->evsel;

        if (!perf_evlist__sample_id_all(evlist))
                return list_entry(evlist->entries.next, struct perf_evsel, node);

        return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
        /* XXX Move this to perf.c, making it generally available */
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */
                        old = head;
                }
        }

        if (old != head) {
                size_t size;

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = &evlist->event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = &evlist->event_copy;
                }

                old += size;
        }

        md->prev = old;

        if (!evlist->overwrite)
                perf_mmap__write_tail(md, old);

        return event;
}
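
/*
 * Draining one map, sketched (idx selects the map, handle() is
 * hypothetical): a NULL return means the buffer is empty, and in
 * !overwrite mode the tail was already advanced for the kernel:
 *
 *      union perf_event *event;
 *
 *      while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL)
 *              handle(event);
 */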

void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                if (evlist->mmap[i].base != NULL) {
                        munmap(evlist->mmap[i].base, evlist->mmap_len);
                        evlist->mmap[i].base = NULL;
                }
        }

        free(evlist->mmap);
        evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
        evlist->nr_mmaps = evlist->cpus->nr;
        if (evlist->cpus->map[0] == -1)
                evlist->nr_mmaps = evlist->threads->nr;
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
{
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED) {
                evlist->mmap[idx].base = NULL;
                return -1;
        }

        perf_evlist__add_pollfd(evlist, fd);
        return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int cpu, thread;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                int output = -1;

                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        list_for_each_entry(evsel, &evlist->entries, node) {
                                int fd = FD(evsel, cpu, thread);

                                if (output == -1) {
                                        output = fd;
                                        if (__perf_evlist__mmap(evlist, cpu,
                                                                prot, mask, output) < 0)
                                                goto out_unmap;
                                } else {
                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                                goto out_unmap;
                                }

                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                                        goto out_unmap;
                        }
                }
        }

        return 0;

out_unmap:
        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                if (evlist->mmap[cpu].base != NULL) {
                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
                        evlist->mmap[cpu].base = NULL;
                }
        }
        return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int thread;

        for (thread = 0; thread < evlist->threads->nr; thread++) {
                int output = -1;

                list_for_each_entry(evsel, &evlist->entries, node) {
                        int fd = FD(evsel, 0, thread);

                        if (output == -1) {
                                output = fd;
                                if (__perf_evlist__mmap(evlist, thread,
                                                        prot, mask, output) < 0)
                                        goto out_unmap;
                        } else {
                                if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                        goto out_unmap;
                        }

                        if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                            perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
                                goto out_unmap;
                }
        }

        return 0;

out_unmap:
        for (thread = 0; thread < evlist->threads->nr; thread++) {
                if (evlist->mmap[thread].base != NULL) {
                        munmap(evlist->mmap[thread].base, evlist->mmap_len);
                        evlist->mmap[thread].base = NULL;
                }
        }
        return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *      struct perf_mmap *m = &evlist->mmap[cpu];
 *      unsigned int head = perf_mmap__read_head(m);
 *
 *      perf_mmap__write_tail(m, head);
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite)
{
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;
        else if (!is_power_of_2(pages))
                return -EINVAL;

        mask = pages * page_size - 1;

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        evlist->overwrite = overwrite;
        evlist->mmap_len = (pages + 1) * page_size;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
                        return -ENOMEM;
        }

        if (evlist->cpus->map[0] == -1)
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
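
/*
 * Setup order, sketched with error handling elided: the events must be
 * open before their fds can be mapped, and UINT_MAX picks the 512 kiB
 * default computed above:
 *
 *      if (perf_evlist__open(evlist, false) < 0)
 *              ...
 *      if (perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
 *              ...
 */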

int perf_evlist__create_maps(struct perf_evlist *evlist,
                             struct perf_target *target)
{
        evlist->threads = thread_map__new_str(target->pid, target->tid,
                                              target->uid);

        if (evlist->threads == NULL)
                return -1;

        if (perf_target__has_task(target))
                evlist->cpus = cpu_map__dummy_new();
        else if (!perf_target__has_cpu(target) && !target->uses_mmap)
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(target->cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        return 0;

out_delete_threads:
        thread_map__delete(evlist->threads);
        return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->cpus    = NULL;
        evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
        const struct thread_map *threads = evlist->threads;
        const struct cpu_map *cpus = evlist->cpus;
        struct perf_evsel *evsel;
        char *filter;
        int thread;
        int cpu;
        int err;
        int fd;

        list_for_each_entry(evsel, &evlist->entries, node) {
                filter = evsel->filter;
                if (!filter)
                        continue;
                for (cpu = 0; cpu < cpus->nr; cpu++) {
                        for (thread = 0; thread < threads->nr; thread++) {
                                fd = FD(evsel, cpu, thread);
                                err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
                                if (err)
                                        return err;
                        }
                }
        }

        return 0;
}

bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *first;

        pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_type != pos->attr.sample_type)
                        return false;
        }

        return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
        struct perf_evsel *first;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);
        return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
{
        struct perf_evsel *first;
        struct perf_sample *data;
        u64 sample_type;
        u16 size = 0;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);

        if (!first->attr.sample_id_all)
                goto out;

        sample_type = first->attr.sample_type;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;
out:
        return size;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *first;

        pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }

        return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
        struct perf_evsel *first;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);
        return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist, bool group)
{
        struct perf_evsel *evsel, *first;
        int err, ncpus, nthreads;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry(evsel, &evlist->entries, node) {
                struct xyarray *group_fd = NULL;

                if (group && evsel != first)
                        group_fd = first->fd;

                err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
                                       group, group_fd);
                if (err < 0)
                        goto out_err;
        }

        return 0;
out_err:
        ncpus = evlist->cpus ? evlist->cpus->nr : 1;
        nthreads = evlist->threads ? evlist->threads->nr : 1;

        list_for_each_entry_reverse(evsel, &evlist->entries, node)
                perf_evsel__close(evsel, ncpus, nthreads);

        errno = -err;
        return err;
}
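
/*
 * With 'group' true the first evsel is the group leader: its fd table
 * is passed as group_fd to every subsequent open, so the whole list
 * lands in one event group. On failure everything already opened is
 * closed again, so a caller can simply do:
 *
 *      err = perf_evlist__open(evlist, true);
 *      if (err < 0)
 *              return err;
 */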

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                                  struct perf_record_opts *opts,
                                  const char *argv[])
{
        int child_ready_pipe[2], go_pipe[2];
        char bf;

        if (pipe(child_ready_pipe) < 0) {
                perror("failed to create 'ready' pipe");
                return -1;
        }

        if (pipe(go_pipe) < 0) {
                perror("failed to create 'go' pipe");
                goto out_close_ready_pipe;
        }

        evlist->workload.pid = fork();
        if (evlist->workload.pid < 0) {
                perror("failed to fork");
                goto out_close_pipes;
        }

        if (!evlist->workload.pid) {
                if (opts->pipe_output)
                        dup2(2, 1);

                close(child_ready_pipe[0]);
                close(go_pipe[1]);
                fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

                /*
                 * Do a dummy execvp to get the PLT entry resolved,
                 * so we avoid the resolver overhead on the real
                 * execvp call.
                 */
                execvp("", (char **)argv);

                /*
                 * Tell the parent we're ready to go
                 */
                close(child_ready_pipe[1]);

                /*
                 * Wait until the parent tells us to go.
                 */
                if (read(go_pipe[0], &bf, 1) == -1)
                        perror("unable to read pipe");

                execvp(argv[0], (char **)argv);

                perror(argv[0]);
                kill(getppid(), SIGUSR1);
                exit(-1);
        }

        if (perf_target__none(&opts->target))
                evlist->threads->map[0] = evlist->workload.pid;

        close(child_ready_pipe[1]);
        close(go_pipe[0]);
        /*
         * wait for child to settle
         */
        if (read(child_ready_pipe[0], &bf, 1) == -1) {
                perror("unable to read pipe");
                goto out_close_pipes;
        }

        evlist->workload.cork_fd = go_pipe[1];
        close(child_ready_pipe[0]);
        return 0;

out_close_pipes:
        close(go_pipe[0]);
        close(go_pipe[1]);
out_close_ready_pipe:
        close(child_ready_pipe[0]);
        close(child_ready_pipe[1]);
        return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
        if (evlist->workload.cork_fd > 0) {
                /*
                 * Remove the cork, let it rip!
                 */
                return close(evlist->workload.cork_fd);
        }

        return 0;
}
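
/*
 * prepare/start pairing, sketched (opts and argv as in the prototypes
 * above): the forked child sits blocked on the "go" pipe until the
 * events are set up, then closing the cork lets its execvp() proceed:
 *
 *      if (perf_evlist__prepare_workload(evlist, opts, argv) < 0)
 *              return -1;
 *      ... open and mmap the events ...
 *      perf_evlist__start_workload(evlist);
 */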

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample, bool swapped)
{
        struct perf_evsel *e = list_entry(evlist->entries.next, struct perf_evsel, node);

        return perf_evsel__parse_sample(e, event, sample, swapped);
}