/* tools/perf/builtin-sched.c */

#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/evlist.h"
#include "util/cache.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/prctl.h>
#include <sys/resource.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096
#define COMM_LEN		20
#define SYM_LEN			129
#define MAX_PID			65536

struct sched_atom;

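/*
 * One replay descriptor per recorded PID: an ordered array of sched_atoms
 * (the task's timeline) plus the semaphores its replay thread synchronizes on.
 */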
struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

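/*
 * The four kinds of atoms a replayed task can carry; MIGRATION is recorded
 * but is a no-op at replay time.
 */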
enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

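/*
 * Latency bookkeeping: one work_atom spans sched-out -> wake-up -> sched-in;
 * the wakeup latency charged to a thread is sched_in_time - wake_up_time.
 */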
struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_at;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};

struct perf_sched {
	struct perf_tool tool;
	const char	 *sort_order;
	unsigned long	 nr_tasks;
	struct task_desc *pid_to_task[MAX_PID];
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	pthread_mutex_t	 start_work_mutex;
	pthread_mutex_t	 work_done_wait_mutex;
	int		 profile_cpu;
	/*
	 * Track the current task - that way we can know whether there are any
	 * weird events, such as a task being switched away that is not current.
	 */
	int		 max_cpu;
	u32		 curr_pid[MAX_CPUS];
	struct thread	 *curr_thread[MAX_CPUS];
	char		 next_shortname1;
	char		 next_shortname2;
	unsigned int	 replay_repeat;
	unsigned long	 nr_run_events;
	unsigned long	 nr_sleep_events;
	unsigned long	 nr_wakeup_events;
	unsigned long	 nr_sleep_corrections;
	unsigned long	 nr_run_events_optimized;
	unsigned long	 targetless_wakeups;
	unsigned long	 multitarget_wakeups;
	unsigned long	 nr_runs;
	unsigned long	 nr_timestamps;
	unsigned long	 nr_unordered_timestamps;
	unsigned long	 nr_context_switch_bugs;
	unsigned long	 nr_events;
	unsigned long	 nr_lost_chunks;
	unsigned long	 nr_lost_events;
	u64		 run_measurement_overhead;
	u64		 sleep_measurement_overhead;
	u64		 start_time;
	u64		 cpu_usage;
	u64		 runavg_cpu_usage;
	u64		 parent_cpu_usage;
	u64		 runavg_parent_cpu_usage;
	u64		 sum_runtime;
	u64		 sum_fluct;
	u64		 run_avg;
	u64		 all_runtime;
	u64		 all_count;
	u64		 cpu_last_switched[MAX_CPUS];
	struct rb_root	 atom_root, sorted_atom_root;
	struct list_head sort_list, cmp_pid;
};

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

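/*
 * Busy-wait for roughly @nsecs, compensating for the calibrated cost of the
 * measurement itself.
 */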
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
}

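/*
 * Estimate the fixed timing overhead by taking the minimum of ten samples;
 * the minimum, rather than the mean, filters out preemption noise.
 */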
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

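/*
 * Append a new atom to @task's timeline; the atoms array is grown one
 * element at a time with realloc().
 */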
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * into it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}

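/*
 * Look up (or lazily create) the replay descriptor for @pid; the flat
 * pid_to_task[] table is why replay is bounded by MAX_PID.
 */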
static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}

static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(sched, atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_MIGRATION:
			break;
		default:
			BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static int self_open_counters(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0)
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n", fd, strerror(errno));
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc  *task;
	struct perf_sched *sched;
};

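/*
 * Body of each replay thread: signal ready_for_work, block on the parent's
 * start_work_mutex gate, replay this task's atoms while measuring its own
 * task-clock, report via work_done_sem, then loop for the next iteration.
 */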
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	fd = self_open_counters();
	if (fd < 0)
		return NULL;
again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		perf_sched__process_event(sched, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

static void create_tasks(struct perf_sched *sched)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}

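/*
 * Parent side of the handshake: both mutexes were taken in create_tasks(),
 * so releasing start_work_mutex starts an iteration and re-taking it (after
 * collecting every work_done_sem) parks the workers for the next round.
 */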
static void wait_for_tasks(struct perf_sched *sched)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	pthread_mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 +
					 sched->parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

static void run_one_test(struct perf_sched *sched)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * 9 + delta) / 10;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0);

	printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage/1e6,
		(double)sched->runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, 1e6);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
		    struct perf_evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = perf_evsel__strval(evsel, sample, "comm");
	const u32 pid	 = perf_evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}

static int replay_switch_event(struct perf_sched *sched,
			       struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm  = perf_evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm  = perf_evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}

static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
				 child, parent);
		return 0;
	}

	if (verbose) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
		printf("...  child: %s/%d\n", thread__comm_str(child), child->tid);
	}

	register_pid(sched, parent->tid, thread__comm_str(parent));
	register_pid(sched, child->tid, thread__comm_str(child));
	return 0;
}

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
			 struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

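/*
 * Insert @data into the latency rbtree, ordered by the chain of sort
 * dimensions in @sort_list (pid for the working tree, the user's --sort
 * keys for the sorted output tree).
 */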
static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			 struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}

static char sched_out_state(u64 prev_state)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[prev_state];
}

static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}

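/*
 * Latency mode sched_switch handler: close the outgoing task's atom with its
 * exit state, and if the incoming task was waiting for a CPU, record its
 * scheduling-in time so the wakeup latency can be accounted.
 */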
static int latency_switch_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			return -1;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			return -1;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		return -1;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			return -1;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			return -1;
		}
		/*
		 * A task came in that we have not heard about yet,
		 * add an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			return -1;
	}
	add_sched_in_event(in_events, timestamp);

	return 0;
}

static int latency_runtime_event(struct perf_sched *sched,
				 struct perf_evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid	   = perf_evsel__intval(evsel, sample, "pid");
	const u64 runtime  = perf_evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			return -1;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("in-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			return -1;
	}

	add_runtime_event(atoms, runtime, timestamp);
	return 0;
}

static int latency_wakeup_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid	  = perf_evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;

	wakee = machine__findnew_thread(machine, -1, pid);
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			return -1;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * A wakeup event is not guaranteed to arrive while the task is off
	 * the run queue: it can also fire while the task is still runnable,
	 * merely flipping ->state back to TASK_RUNNING. In that case we must
	 * not reset ->wake_up_time for a task that is on the run queue.
	 *
	 * However, you WILL be missing events if you've recorded only one
	 * CPU, or are looking at only one, so don't skip in that case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		return 0;

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;
		return 0;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
	return 0;
}

static int latency_migrate_task_event(struct perf_sched *sched,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, -1, pid);
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, migrant))
			return -1;
		register_pid(sched, migrant->tid, thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (!atoms) {
			pr_err("migration-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;

	return 0;
}

static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
		return;

	sched->all_runtime += work_list->total_runtime;
	sched->all_count   += work_list->nb_atoms;

	ret = printf("  %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13.6f s\n",
		(double)work_list->total_runtime / 1e6,
		work_list->nb_atoms, (double)avg / 1e6,
		(double)work_list->max_lat / 1e6,
		(double)work_list->max_lat_at / 1e9);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->tid < r->thread->tid)
		return -1;
	if (l->thread->tid > r->thread->tid)
		return 1;

	return 0;
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp  = avg_cmp,
	};
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp  = max_cmp,
	};
	static struct sort_dimension pid_sort_dimension = {
		.name = "pid",
		.cmp  = pid_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp  = runtime_cmp,
	};
	static struct sort_dimension switch_sort_dimension = {
		.name = "switch",
		.cmp  = switch_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,
	};

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void perf_sched__sort_lat(struct perf_sched *sched)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&sched->atom_root);
		if (!node)
			break;

		rb_erase(node, &sched->atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
	}
}

static int process_sched_wakeup_event(struct perf_tool *tool,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

	return 0;
}

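/*
 * 'perf sched map' output: one column per CPU, a '*' marking the CPU that
 * just switched, and a two-character shortname per thread ('.' is idle).
 */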
static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)
{
	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_in;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int cpu, this_cpu = sample->cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu];
	sched->cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_in = machine__findnew_thread(machine, -1, next_pid);

	sched->curr_thread[this_cpu] = sched_in;

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			sched_in->shortname[0] = '.';
			sched_in->shortname[1] = ' ';
		} else {
			sched_in->shortname[0] = sched->next_shortname1;
			sched_in->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= sched->max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (sched->curr_thread[cpu])
			printf("%2s ", sched->curr_thread[cpu]->shortname);
		else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
		       sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
	} else {
		printf("\n");
	}

	return 0;
}

static int process_sched_switch_event(struct perf_tool *tool,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = perf_evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;
	}

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;
	return err;
}

static int process_sched_runtime_event(struct perf_tool *tool,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

	return 0;
}

static int perf_sched__process_fork_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	/* run the fork event through the perf machinery */
	perf_event__process_fork(tool, event, sample, machine);

	/* and then run additional processing needed for this command */
	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, event, machine);

	return 0;
}

static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct perf_evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	evsel->hists.stats.total_period += sample->period;
	hists__inc_nr_samples(&evsel->hists, true);

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(tool, evsel, sample, machine);
	}

	return err;
}

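/*
 * Wire each sched tracepoint to its handler and replay the recorded session
 * through them; lost-event statistics are kept for later warnings.
 */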
static int perf_sched__read_events(struct perf_sched *sched,
				   struct perf_session **psession)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
	};

	session = perf_session__new(&file, false, &sched->tool);
	if (session == NULL) {
		pr_debug("No memory for session\n");
		return -1;
	}

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session, &sched->tool);
		if (err) {
			pr_err("Failed to process events, error %d\n", err);
			goto out_delete;
		}

		sched->nr_events      = session->stats.nr_events[0];
		sched->nr_lost_events = session->stats.total_lost;
		sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST];
	}

	if (psession)
		*psession = session;
	else
		perf_session__delete(session);

	return 0;

out_delete:
	perf_session__delete(session);
	return -1;
}

static void print_bad_events(struct perf_sched *sched)
{
	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
			sched->nr_unordered_timestamps, sched->nr_timestamps);
	}
	if (sched->nr_lost_events && sched->nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
	}
	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
			sched->nr_context_switch_bugs, sched->nr_timestamps);
		if (sched->nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}

static int perf_sched__lat(struct perf_sched *sched)
{
        struct rb_node *next;
        struct perf_session *session;

        setup_pager();

        /* save session -- references to threads are held in work_list */
        if (perf_sched__read_events(sched, &session))
                return -1;

        perf_sched__sort_lat(sched);

        printf("\n -----------------------------------------------------------------------------------------------------------------\n");
        printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at       |\n");
        printf(" -----------------------------------------------------------------------------------------------------------------\n");

        next = rb_first(&sched->sorted_atom_root);

        while (next) {
                struct work_atoms *work_list;

                work_list = rb_entry(next, struct work_atoms, node);
                output_lat_thread(sched, work_list);
                next = rb_next(next);
        }

        printf(" -----------------------------------------------------------------------------------------------------------------\n");
        printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
                (double)sched->all_runtime / 1e6, sched->all_count);

        printf(" ---------------------------------------------------\n");

        print_bad_events(sched);
        printf("\n");

        perf_session__delete(session);
        return 0;
}

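/*
 * 'perf sched map': print a textual, one-column-per-CPU view of which
 * task was running on each CPU over time. The CPU count is taken from
 * sysconf(_SC_NPROCESSORS_CONF); the actual drawing is done by the
 * map_ops switch handler while the events are being read.
 */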
static int perf_sched__map(struct perf_sched *sched)
{
        sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);

        setup_pager();
        if (perf_sched__read_events(sched, NULL))
                return -1;
        print_bad_events(sched);
        return 0;
}

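/*
 * 'perf sched replay': re-create the recorded scheduling pattern with
 * real threads. Run/sleep measurement overheads are calibrated first
 * so the simulated durations can be corrected, then the reconstructed
 * workload is executed replay_repeat times.
 */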
static int perf_sched__replay(struct perf_sched *sched)
{
        unsigned long i;

        calibrate_run_measurement_overhead(sched);
        calibrate_sleep_measurement_overhead(sched);

        test_calibrations(sched);

        if (perf_sched__read_events(sched, NULL))
                return -1;

        printf("nr_run_events:        %lu\n", sched->nr_run_events);
        printf("nr_sleep_events:      %lu\n", sched->nr_sleep_events);
        printf("nr_wakeup_events:     %lu\n", sched->nr_wakeup_events);

        if (sched->targetless_wakeups)
                printf("target-less wakeups:  %lu\n", sched->targetless_wakeups);
        if (sched->multitarget_wakeups)
                printf("multi-target wakeups: %lu\n", sched->multitarget_wakeups);
        if (sched->nr_run_events_optimized)
                printf("run atoms optimized: %lu\n",
                        sched->nr_run_events_optimized);

        print_task_traces(sched);
        add_cross_task_wakeups(sched);

        create_tasks(sched);
        printf("------------------------------------------------------------\n");
        for (i = 0; i < sched->replay_repeat; i++)
                run_one_test(sched);

        return 0;
}

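/*
 * Parse the comma/space separated --sort key list into sched->sort_list.
 * A "pid" comparison key is always added to sched->cmp_pid, which the
 * latency code relies on to group work atoms by thread.
 */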
static void setup_sorting(struct perf_sched *sched, const struct option *options,
                          const char * const usage_msg[])
{
        char *tmp, *tok, *str = strdup(sched->sort_order);

        /*
         * Guard against strdup() failure: handing a NULL string to the
         * first strtok_r() call would leave the save pointer
         * uninitialized.
         */
        if (str != NULL) {
                for (tok = strtok_r(str, ", ", &tmp);
                                tok; tok = strtok_r(NULL, ", ", &tmp)) {
                        if (sort_dimension__add(tok, &sched->sort_list) < 0) {
                                error("Unknown --sort key: `%s'", tok);
                                usage_with_options(usage_msg, options);
                        }
                }
                free(str);
        }

        sort_dimension__add("pid", &sched->cmp_pid);
}

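/*
 * Fork out to 'perf record' with a fixed set of scheduler tracepoints
 * enabled, appending any extra arguments the user passed, e.g.:
 *
 *      perf sched record -- sleep 1
 *
 * System-wide mode (-a) and raw samples (-R) are forced so that a
 * later 'perf sched latency/map/replay' run sees every context switch.
 */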
static int __cmd_record(int argc, const char **argv)
{
        unsigned int rec_argc, i, j;
        const char **rec_argv;
        const char * const record_args[] = {
                "record",
                "-a",
                "-R",
                "-m", "1024",
                "-c", "1",
                "-e", "sched:sched_switch",
                "-e", "sched:sched_stat_wait",
                "-e", "sched:sched_stat_sleep",
                "-e", "sched:sched_stat_iowait",
                "-e", "sched:sched_stat_runtime",
                "-e", "sched:sched_process_fork",
                "-e", "sched:sched_wakeup",
                "-e", "sched:sched_wakeup_new",
                "-e", "sched:sched_migrate_task",
        };

        rec_argc = ARRAY_SIZE(record_args) + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));

        if (rec_argv == NULL)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(record_args); i++) {
                rec_argv[i] = strdup(record_args[i]);
                if (rec_argv[i] == NULL)
                        return -ENOMEM;
        }

        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];

        BUG_ON(i != rec_argc);

        return cmd_record(i, rec_argv, NULL);
}

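/*
 * Entry point for 'perf sched'. The subcommand ('record', 'latency',
 * 'map', 'replay' or 'script') selects both the option table that is
 * parsed and the trace_sched_handler vtable used to interpret the
 * scheduler tracepoints.
 */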
int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
        const char default_sort_order[] = "avg, max, switch, runtime";
        struct perf_sched sched = {
                .tool = {
                        .sample          = perf_sched__process_tracepoint_sample,
                        .comm            = perf_event__process_comm,
                        .lost            = perf_event__process_lost,
                        .fork            = perf_sched__process_fork_event,
                        .ordered_samples = true,
                },
                .cmp_pid              = LIST_HEAD_INIT(sched.cmp_pid),
                .sort_list            = LIST_HEAD_INIT(sched.sort_list),
                .start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
                .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
                .sort_order           = default_sort_order,
                .replay_repeat        = 10,
                .profile_cpu          = -1,
                .next_shortname1      = 'A',
                .next_shortname2      = '0',
        };
        const struct option latency_options[] = {
        OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
                   "sort by key(s): runtime, switch, avg, max"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_INTEGER('C', "CPU", &sched.profile_cpu,
                    "CPU to profile on"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
        OPT_END()
        };
        const struct option replay_options[] = {
        OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
                     "repeat the workload replay N times (-1: infinite)"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
        OPT_END()
        };
        const struct option sched_options[] = {
        OPT_STRING('i', "input", &input_name, "file",
                    "input file name"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
        OPT_END()
        };
        const char * const latency_usage[] = {
                "perf sched latency [<options>]",
                NULL
        };
        const char * const replay_usage[] = {
                "perf sched replay [<options>]",
                NULL
        };
        const char *const sched_subcommands[] = { "record", "latency", "map",
                                                  "replay", "script", NULL };
        const char *sched_usage[] = {
                NULL,
                NULL
        };
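        /*
         * Per-subcommand tracepoint handlers: 'latency' needs wakeup,
         * switch, runtime and migration events; 'map' only cares about
         * switches; 'replay' additionally hooks fork so newly spawned
         * tasks can be modelled.
         */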
        struct trace_sched_handler lat_ops  = {
                .wakeup_event       = latency_wakeup_event,
                .switch_event       = latency_switch_event,
                .runtime_event      = latency_runtime_event,
                .migrate_task_event = latency_migrate_task_event,
        };
        struct trace_sched_handler map_ops  = {
                .switch_event       = map_switch_event,
        };
        struct trace_sched_handler replay_ops  = {
                .wakeup_event       = replay_wakeup_event,
                .switch_event       = replay_switch_event,
                .fork_event         = replay_fork_event,
        };
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
                sched.curr_pid[i] = -1;

        argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
                                        sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc)
                usage_with_options(sched_usage, sched_options);

        /*
         * Aliased to 'perf script' for now:
         */
        if (!strcmp(argv[0], "script"))
                return cmd_script(argc, argv, prefix);

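        /*
         * strncmp() prefix matching below lets 'rec', 'lat' and 'rep'
         * work as abbreviations for the full subcommand names.
         */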
        symbol__init();
        if (!strncmp(argv[0], "rec", 3)) {
                return __cmd_record(argc, argv);
        } else if (!strncmp(argv[0], "lat", 3)) {
                sched.tp_handler = &lat_ops;
                if (argc > 1) {
                        argc = parse_options(argc, argv, latency_options, latency_usage, 0);
                        if (argc)
                                usage_with_options(latency_usage, latency_options);
                }
                setup_sorting(&sched, latency_options, latency_usage);
                return perf_sched__lat(&sched);
        } else if (!strcmp(argv[0], "map")) {
                sched.tp_handler = &map_ops;
                setup_sorting(&sched, latency_options, latency_usage);
                return perf_sched__map(&sched);
        } else if (!strncmp(argv[0], "rep", 3)) {
                sched.tp_handler = &replay_ops;
                if (argc) {
                        argc = parse_options(argc, argv, replay_options, replay_usage, 0);
                        if (argc)
                                usage_with_options(replay_usage, replay_options);
                }
                return perf_sched__replay(&sched);
        } else {
                usage_with_options(sched_usage, sched_options);
        }

        return 0;
}