tools/perf: Add support for record transaction flags
tools/perf/tests/hists_link.c
#include "perf.h"
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "thread.h"
#include "parse-events.h"

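/*
 * Fake machine fixtures: three threads (two "perf" tasks and one "bash"
 * task) that are registered on a synthetic host machine below.
 */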
static struct {
        u32 pid;
        const char *comm;
} fake_threads[] = {
        { 100, "perf" },
        { 200, "perf" },
        { 300, "bash" },
};

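/*
 * Fake memory layout: every task maps its own binary at 0x40000, libc at
 * 0x50000 and the kernel image at 0xf0000, so a sample ip can be written
 * as "map start + symbol offset" in the tables further down.
 */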
static struct {
        u32 pid;
        u64 start;
        const char *filename;
} fake_mmap_info[] = {
        { 100, 0x40000, "perf" },
        { 100, 0x50000, "libc" },
        { 100, 0xf0000, "[kernel]" },
        { 200, 0x40000, "perf" },
        { 200, 0x50000, "libc" },
        { 200, 0xf0000, "[kernel]" },
        { 300, 0x40000, "bash" },
        { 300, 0x50000, "libc" },
        { 300, 0xf0000, "[kernel]" },
};

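/*
 * Fake symbol tables: each DSO gets three 100-byte symbols starting at
 * offsets 700, 800 and 900, so every fake ip resolves to exactly one of
 * them.
 */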
struct fake_sym {
        u64 start;
        u64 length;
        const char *name;
};

static struct fake_sym perf_syms[] = {
        { 700, 100, "main" },
        { 800, 100, "run_command" },
        { 900, 100, "cmd_record" },
};

static struct fake_sym bash_syms[] = {
        { 700, 100, "main" },
        { 800, 100, "xmalloc" },
        { 900, 100, "xfree" },
};

static struct fake_sym libc_syms[] = {
        { 700, 100, "malloc" },
        { 800, 100, "free" },
        { 900, 100, "realloc" },
};

static struct fake_sym kernel_syms[] = {
        { 700, 100, "schedule" },
        { 800, 100, "page_fault" },
        { 900, 100, "sys_perf_event_open" },
};

static struct {
        const char *dso_name;
        struct fake_sym *syms;
        size_t nr_syms;
} fake_symbols[] = {
        { "perf", perf_syms, ARRAY_SIZE(perf_syms) },
        { "bash", bash_syms, ARRAY_SIZE(bash_syms) },
        { "libc", libc_syms, ARRAY_SIZE(libc_syms) },
        { "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) },
};

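/*
 * Build the host machine for the test: register the fake threads, feed
 * synthetic MMAP events for the fake mappings and insert the fake symbols
 * into each DSO as if dso__load() had run.
 */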
static struct machine *setup_fake_machine(struct machines *machines)
{
        struct machine *machine = machines__find(machines, HOST_KERNEL_ID);
        size_t i;

        if (machine == NULL) {
                pr_debug("Not enough memory for machine setup\n");
                return NULL;
        }

        for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
                struct thread *thread;

                thread = machine__findnew_thread(machine, fake_threads[i].pid,
                                                 fake_threads[i].pid);
                if (thread == NULL)
                        goto out;

                thread__set_comm(thread, fake_threads[i].comm);
        }

        for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
                union perf_event fake_mmap_event = {
                        .mmap = {
                                .header = { .misc = PERF_RECORD_MISC_USER, },
                                .pid = fake_mmap_info[i].pid,
                                .start = fake_mmap_info[i].start,
                                .len = 0x1000ULL,
                                .pgoff = 0ULL,
                        },
                };

                strcpy(fake_mmap_event.mmap.filename,
                       fake_mmap_info[i].filename);

                machine__process_mmap_event(machine, &fake_mmap_event);
        }

        for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) {
                size_t k;
                struct dso *dso;

                dso = __dsos__findnew(&machine->user_dsos,
                                      fake_symbols[i].dso_name);
                if (dso == NULL)
                        goto out;

                /* emulate dso__load() */
                dso__set_loaded(dso, MAP__FUNCTION);

                for (k = 0; k < fake_symbols[i].nr_syms; k++) {
                        struct symbol *sym;
                        struct fake_sym *fsym = &fake_symbols[i].syms[k];

                        sym = symbol__new(fsym->start, fsym->length,
                                          STB_GLOBAL, fsym->name);
                        if (sym == NULL)
                                goto out;

                        symbols__insert(&dso->symbols[MAP__FUNCTION], sym);
                }
        }

        return machine;

out:
        pr_debug("Not enough memory for machine setup\n");
        /*
         * The host machine is owned by 'machines' (on the caller's stack)
         * and is torn down by machines__exit(), so only drop its threads
         * here instead of freeing the machine itself.
         */
        machine__delete_threads(machine);
        return NULL;
}

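/*
 * One synthetic sample.  The thread/map/sym fields start out empty and are
 * filled in by add_hist_entries() with the resolved addr_location, so the
 * validators below can compare hist entries against them by pointer.
 */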
struct sample {
        u32 pid;
        u64 ip;
        struct thread *thread;
        struct map *map;
        struct symbol *sym;
};

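/*
 * Samples that are added to both evsels, so hists__match() is expected to
 * pair the resulting entries across the two hists.
 */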
static struct sample fake_common_samples[] = {
        /* perf [kernel] schedule() */
        { .pid = 100, .ip = 0xf0000 + 700, },
        /* perf [perf]   main() */
        { .pid = 200, .ip = 0x40000 + 700, },
        /* perf [perf]   cmd_record() */
        { .pid = 200, .ip = 0x40000 + 900, },
        /* bash [bash]   xmalloc() */
        { .pid = 300, .ip = 0x40000 + 800, },
        /* bash [libc]   malloc() */
        { .pid = 300, .ip = 0x50000 + 700, },
};

static struct sample fake_samples[][5] = {
        {
                /* perf [perf]   run_command() */
                { .pid = 100, .ip = 0x40000 + 800, },
                /* perf [libc]   malloc() */
                { .pid = 100, .ip = 0x50000 + 700, },
                /* perf [kernel] page_fault() */
                { .pid = 100, .ip = 0xf0000 + 800, },
                /* perf [kernel] sys_perf_event_open() */
                { .pid = 200, .ip = 0xf0000 + 900, },
                /* bash [libc]   free() */
                { .pid = 300, .ip = 0x50000 + 800, },
        },
        {
                /* perf [libc]   free() */
                { .pid = 200, .ip = 0x50000 + 800, },
                /* bash [libc]   malloc() */
                { .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */
                /* bash [bash]   xfree() */
                { .pid = 300, .ip = 0x40000 + 900, },
                /* bash [libc]   realloc() */
                { .pid = 300, .ip = 0x50000 + 900, },
                /* bash [kernel] page_fault() */
                { .pid = 300, .ip = 0xf0000 + 800, },
        },
};

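/*
 * Resolve each fake sample through perf_event__preprocess_sample() and add
 * a hist entry for it to the evsel's hists, recording where it landed for
 * the validation passes below.
 */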
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
        struct perf_evsel *evsel;
        struct addr_location al;
        struct hist_entry *he;
        struct perf_sample sample = { .cpu = 0, };
        size_t i = 0, k;

        /*
         * Each evsel will have 10 samples: 5 common and 5 distinct.
         * However, the second evsel also has a collapsed entry for
         * "bash [libc] malloc", so only 9 entries end up in its tree.
         */
        list_for_each_entry(evsel, &evlist->entries, node) {
                for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
                        const union perf_event event = {
                                .header = {
                                        .misc = PERF_RECORD_MISC_USER,
                                },
                        };

                        sample.pid = fake_common_samples[k].pid;
                        sample.ip = fake_common_samples[k].ip;
                        if (perf_event__preprocess_sample(&event, machine, &al,
                                                          &sample) < 0)
                                goto out;

                        he = __hists__add_entry(&evsel->hists, &al, NULL,
                                                1, 1, 0);
                        if (he == NULL)
                                goto out;

                        fake_common_samples[k].thread = al.thread;
                        fake_common_samples[k].map = al.map;
                        fake_common_samples[k].sym = al.sym;
                }

                for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
                        const union perf_event event = {
                                .header = {
                                        .misc = PERF_RECORD_MISC_USER,
                                },
                        };

                        sample.pid = fake_samples[i][k].pid;
                        sample.ip = fake_samples[i][k].ip;
                        if (perf_event__preprocess_sample(&event, machine, &al,
                                                          &sample) < 0)
                                goto out;

                        he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1,
                                                0);
                        if (he == NULL)
                                goto out;

                        fake_samples[i][k].thread = al.thread;
                        fake_samples[i][k].map = al.map;
                        fake_samples[i][k].sym = al.sym;
                }
                i++;
        }

        return 0;

out:
        pr_debug("Not enough memory for adding a hist entry\n");
        return -1;
}

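/* Return 1 if the (thread, map, symbol) triple matches one of the samples. */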
static int find_sample(struct sample *samples, size_t nr_samples,
                       struct thread *t, struct map *m, struct symbol *s)
{
        while (nr_samples--) {
                if (samples->thread == t && samples->map == m &&
                    samples->sym == s)
                        return 1;
                samples++;
        }
        return 0;
}

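/*
 * After hists__match(), exactly the five entries that came from
 * fake_common_samples should have been paired; validate_match() checks
 * this for both the leader and the other hists.
 */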
static int __validate_match(struct hists *hists)
{
        size_t count = 0;
        struct rb_root *root;
        struct rb_node *node;

        /*
         * Only entries from fake_common_samples should have a pair.
         */
        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        node = rb_first(root);
        while (node) {
                struct hist_entry *he;

                he = rb_entry(node, struct hist_entry, rb_node_in);

                if (hist_entry__has_pairs(he)) {
                        if (find_sample(fake_common_samples,
                                        ARRAY_SIZE(fake_common_samples),
                                        he->thread, he->ms.map, he->ms.sym)) {
                                count++;
                        } else {
                                pr_debug("Can't find the matched entry\n");
                                return -1;
                        }
                }

                node = rb_next(node);
        }

        if (count != ARRAY_SIZE(fake_common_samples)) {
                pr_debug("Invalid count for matched entries: %zd of %zd\n",
                         count, ARRAY_SIZE(fake_common_samples));
                return -1;
        }

        return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
        return __validate_match(leader) || __validate_match(other);
}

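/*
 * After hists__link(), the leader (idx = 0) gains a dummy entry for every
 * unmatched entry in the other hists: 4 of them, since one of the other
 * evsel's 5 distinct samples collapses into a common entry.  Every entry
 * in the other hists (idx = 1) must have a pair and no dummies of its own.
 */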
static int __validate_link(struct hists *hists, int idx)
{
        size_t count = 0;
        size_t count_pair = 0;
        size_t count_dummy = 0;
        struct rb_root *root;
        struct rb_node *node;

        /*
         * The leader hists (idx = 0) will have dummy entries imported from
         * the other hists, and some of its entries will have no pair.
         * However, every entry in the other hists should have a (dummy) pair.
         */
        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        node = rb_first(root);
        while (node) {
                struct hist_entry *he;

                he = rb_entry(node, struct hist_entry, rb_node_in);

                if (hist_entry__has_pairs(he)) {
                        if (!find_sample(fake_common_samples,
                                         ARRAY_SIZE(fake_common_samples),
                                         he->thread, he->ms.map, he->ms.sym) &&
                            !find_sample(fake_samples[idx],
                                         ARRAY_SIZE(fake_samples[idx]),
                                         he->thread, he->ms.map, he->ms.sym)) {
                                count_dummy++;
                        }
                        count_pair++;
                } else if (idx) {
                        pr_debug("An entry in the other hists should have a pair\n");
                        return -1;
                }

                count++;
                node = rb_next(node);
        }

        /*
         * Note that we have an entry collapsed in the other (idx = 1) hists.
         */
        if (idx == 0) {
                if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
                        pr_debug("Invalid count of dummy entries: %zd of %zd\n",
                                 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
                        return -1;
                }
                if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
                        pr_debug("Invalid count of total leader entries: %zd of %zd\n",
                                 count, count_pair + ARRAY_SIZE(fake_samples[0]));
                        return -1;
                }
        } else {
                if (count != count_pair) {
                        pr_debug("Invalid count of total other entries: %zd of %zd\n",
                                 count, count_pair);
                        return -1;
                }
                if (count_dummy > 0) {
                        pr_debug("Other hists should not have dummy entries: %zd\n",
                                 count_dummy);
                        return -1;
                }
        }

        return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
        return __validate_link(leader, 0) || __validate_link(other, 1);
}

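/*
 * Debug helper: walk the (possibly collapsed) rb tree and print the comm,
 * DSO, symbol and period of every entry.  Only used at high verbosity.
 */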
static void print_hists(struct hists *hists)
{
        int i = 0;
        struct rb_root *root;
        struct rb_node *node;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        pr_info("----- %s --------\n", __func__);
        node = rb_first(root);
        while (node) {
                struct hist_entry *he;

                he = rb_entry(node, struct hist_entry, rb_node_in);

                pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
                        i, he->thread->comm, he->ms.map->dso->short_name,
                        he->ms.sym->name, he->stat.period);

                i++;
                node = rb_next(node);
        }
}

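/*
 * The test itself: create two software events, fill their hists from the
 * fake machine, collapse/resort them and then verify hists__match() and
 * hists__link() against the expectations recorded above.
 */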
int test__hists_link(void)
{
        int err = -1;
        struct machines machines;
        struct machine *machine = NULL;
        struct perf_evsel *evsel, *first;
        struct perf_evlist *evlist = perf_evlist__new();

        if (evlist == NULL)
                return -ENOMEM;

        err = parse_events(evlist, "cpu-clock");
        if (err)
                goto out;
        err = parse_events(evlist, "task-clock");
        if (err)
                goto out;

        /* make sure a sorting failure is reported as a test failure */
        err = -1;

        /* default sort order (comm,dso,sym) will be used */
        if (setup_sorting() < 0)
                goto out;

        machines__init(&machines);

        /* setup threads/dso/map/symbols also */
        machine = setup_fake_machine(&machines);
        if (!machine)
                goto out;

        if (verbose > 1)
                machine__fprintf(machine, stderr);

        /* process sample events */
        err = add_hist_entries(evlist, machine);
        if (err < 0)
                goto out;

        list_for_each_entry(evsel, &evlist->entries, node) {
                hists__collapse_resort(&evsel->hists);

                if (verbose > 2)
                        print_hists(&evsel->hists);
        }

        first = perf_evlist__first(evlist);
        evsel = perf_evlist__last(evlist);

        /* match common entries */
        hists__match(&first->hists, &evsel->hists);
        err = validate_match(&first->hists, &evsel->hists);
        if (err)
                goto out;

        /* link common and/or dummy entries */
        hists__link(&first->hists, &evsel->hists);
        err = validate_link(&first->hists, &evsel->hists);
        if (err)
                goto out;

        err = 0;

out:
        /* tear down everything */
        perf_evlist__delete(evlist);
        machines__exit(&machines);

        return err;
}