#include "builtin.h"
#include "perf.h"
#include "util/evlist.h"
#include "util/cache.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/debug.h"
#include <sys/prctl.h>
#include <sys/resource.h>
#include <semaphore.h>
#include <pthread.h>
#include <api/fs/fs.h>
#define PR_SET_NAME		15               /* Set process name */
#define COMM_LEN		20
#define MAX_CPUS		4096
#define MAX_PID			1024000
struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;
	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE,
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_at;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
	int			num_merged;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
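
/*
 * Per-subcommand tracepoint callbacks: 'perf sched latency', 'map' and
 * 'replay' each fill in one of these vtables (see lat_ops, map_ops and
 * replay_ops near cmd_sched() below).
 */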
struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	int			*comp_cpus;
	bool			comp;
};

struct perf_sched {
	struct perf_tool tool;
	const char	 *sort_order;
	unsigned long	 nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	pthread_mutex_t	 start_work_mutex;
	pthread_mutex_t	 work_done_wait_mutex;
	int		 profile_cpu;
	/*
	 * Track the current task - that way we can tell whether there are
	 * any weird events, such as a task being switched away that is not
	 * current.
	 */
	int		 max_cpu;
	u32		 curr_pid[MAX_CPUS];
	struct thread	 *curr_thread[MAX_CPUS];
	char		 next_shortname1;
	char		 next_shortname2;
	unsigned int	 replay_repeat;
	unsigned long	 nr_run_events;
	unsigned long	 nr_sleep_events;
	unsigned long	 nr_wakeup_events;
	unsigned long	 nr_sleep_corrections;
	unsigned long	 nr_run_events_optimized;
	unsigned long	 targetless_wakeups;
	unsigned long	 multitarget_wakeups;
	unsigned long	 nr_runs;
	unsigned long	 nr_timestamps;
	unsigned long	 nr_unordered_timestamps;
	unsigned long	 nr_context_switch_bugs;
	unsigned long	 nr_events;
	unsigned long	 nr_lost_chunks;
	unsigned long	 nr_lost_events;
	u64		 run_measurement_overhead;
	u64		 sleep_measurement_overhead;
	u64		 sum_runtime;
	u64		 sum_fluct;
	u64		 run_avg;
	u64		 all_runtime;
	u64		 all_count;
	u64		 cpu_usage;
	u64		 runavg_cpu_usage;
	u64		 parent_cpu_usage;
	u64		 runavg_parent_cpu_usage;
	u64		 start_time;
	u64		 cpu_last_switched[MAX_CPUS];
	struct rb_root	 atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	bool		 force;
	bool		 skip_merge;
	struct perf_sched_map map;
};
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
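
/*
 * Spin for 'nsecs': the loop below exits run_measurement_overhead nsecs
 * early to compensate for the cost of the get_nsecs() calls themselves,
 * as measured by calibrate_run_measurement_overhead().
 */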
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}
static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
}
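
/*
 * Both calibration routines below take the minimum delta over ten
 * trials, so that a stray preemption in one trial does not inflate the
 * measured overhead.
 */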
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * with it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}
static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;
	static int pid_max;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
			pid_max = MAX_PID;
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	}
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;
	}

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}
static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(sched, atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_MIGRATION:
			break;
		default:
			BUG_ON(1);
	}
}
static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec * 1e9 + ru.ru_utime.tv_usec * 1e3;
	sum += ru.ru_stime.tv_sec * 1e9 + ru.ru_stime.tv_usec * 1e3;

	return sum;
}
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			if (sched->force) {
				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
				limit.rlim_cur += sched->nr_tasks - cur_task;
				if (limit.rlim_cur > limit.rlim_max) {
					limit.rlim_max = limit.rlim_cur;
					need_privilege = true;
				}
				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
					if (need_privilege && errno == EPERM)
						strcpy(info, "Need privilege\n");
				} else
					goto force_again;
			} else
				strcpy(info, "Have a try with -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       strerror_r(errno, sbuf, sizeof(sbuf)), info);
		exit(EXIT_FAILURE);
	}
	return fd;
}
static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}
struct sched_thread_parms {
	struct task_desc  *task;
	struct perf_sched *sched;
	int fd;
};
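
/*
 * Body of each replay thread: announce readiness via ready_for_work,
 * block until wait_for_tasks() releases start_work_mutex, replay this
 * task's atoms while measuring its task-clock usage, post
 * work_done_sem, then park on work_done_wait_mutex for the next round.
 */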
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd = parms->fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	if (fd < 0)
		return NULL;
again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		perf_sched__process_event(sched, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}
static void create_tasks(struct perf_sched *sched)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}
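
/*
 * Note that create_tasks() takes both mutexes before spawning any
 * threads, so every thread_func() instance blocks right after
 * announcing readiness; wait_for_tasks() below then drops
 * start_work_mutex to start all replay threads at once.
 */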
static void wait_for_tasks(struct perf_sched *sched)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	pthread_mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					  sched->parent_cpu_usage) / sched->replay_repeat;

	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}
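
/*
 * The runavg_* updates above (and run_avg in run_one_test() below) keep
 * an exponentially weighted running average:
 *
 *	avg = (avg * (replay_repeat - 1) + sample) / replay_repeat
 */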
static void run_one_test(struct perf_sched *sched)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0);

	printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / 1e6,
		(double)sched->runavg_parent_cpu_usage / 1e6);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}
static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, 1e6);
	T1 = get_nsecs();
	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();
	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}
static int
replay_wakeup_event(struct perf_sched *sched,
		    struct perf_evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = perf_evsel__strval(evsel, sample, "comm");
	const u32 pid	 = perf_evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", evsel);
		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}
static int replay_switch_event(struct perf_sched *sched,
			       struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm  = perf_evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm  = perf_evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRId64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}
static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
			 child, parent);
		goto out_put;
	}

	if (verbose) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
		printf("...  child: %s/%d\n", thread__comm_str(child), child->tid);
	}

	register_pid(sched, parent->tid, thread__comm_str(parent));
	register_pid(sched, child->tid, thread__comm_str(child));
out_put:
	thread__put(child);
	thread__put(parent);
	return 0;
}
struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}
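
/*
 * Standard kernel rbtree insertion: walk down to a leaf slot comparing
 * with thread_lat_cmp(), then link the new node and rebalance.
 */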
static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread__get(thread);
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}
static char sched_out_state(u64 prev_state)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[prev_state];
}
static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}
static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}
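
/*
 * The latency_*_event() handlers below turn sched tracepoint samples
 * into per-thread work atoms; scheduling delay is accounted as
 * sched_in_time - wake_up_time in add_sched_in_event() above.
 */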
static int latency_switch_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)
		goto out_put;

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			goto out_put;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			goto out_put;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		return -1;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			goto out_put;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		/*
		 * A task came in that we have not heard about yet -
		 * add an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			goto out_put;
	}
	add_sched_in_event(in_events, timestamp);
	err = 0;
out_put:
	thread__put(sched_out);
	thread__put(sched_in);
	return err;
}
static int latency_runtime_event(struct perf_sched *sched,
				 struct perf_evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid	  = perf_evsel__intval(evsel, sample, "pid");
	const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	if (thread == NULL)
		return -1;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	add_runtime_event(atoms, runtime, timestamp);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static int latency_wakeup_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;
	int err = -1;

	wakee = machine__findnew_thread(machine, -1, pid);
	if (wakee == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * A wakeup event is not guaranteed to arrive only while the task
	 * is off the run queue: it can also fire while the task is on the
	 * run queue and merely flips ->state to TASK_RUNNING. In that
	 * case we must not update ->wake_up_time.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't
	 * skip in this case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		goto out_ok;

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;
		goto out_ok;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
out_ok:
	err = 0;
out_put:
	thread__put(wakee);
	return err;
}
static int latency_migrate_task_event(struct perf_sched *sched,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;
	int err = -1;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, -1, pid);
	if (migrant == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, migrant))
			goto out_put;
		register_pid(sched, migrant->tid, thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (!atoms) {
			pr_err("migration-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;
	err = 0;
out_put:
	thread__put(migrant);
	return err;
}
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
		return;

	sched->all_runtime += work_list->total_runtime;
	sched->all_count   += work_list->nb_atoms;

	if (work_list->num_merged > 1)
		ret = printf("  %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
	else
		ret = printf("  %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13.6f s\n",
	       (double)work_list->total_runtime / 1e6,
	       work_list->nb_atoms, (double)avg / 1e6,
	       (double)work_list->max_lat / 1e6,
	       (double)work_list->max_lat_at / 1e9);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread == r->thread)
		return 0;
	if (l->thread->tid < r->thread->tid)
		return -1;
	if (l->thread->tid > r->thread->tid)
		return 1;
	return (int)(l->thread - r->thread);
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;
	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;
	return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;
	return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;
	return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;
	return 0;
}
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp  = avg_cmp,
	};
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp  = max_cmp,
	};
	static struct sort_dimension pid_sort_dimension = {
		.name = "pid",
		.cmp  = pid_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp  = runtime_cmp,
	};
	static struct sort_dimension switch_sort_dimension = {
		.name = "switch",
		.cmp  = switch_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,
	};

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}
static void perf_sched__sort_lat(struct perf_sched *sched)
{
	struct rb_node *node;
	struct rb_root *root = &sched->atom_root;
again:
	for (;;) {
		struct work_atoms *data;
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
	}
	if (root == &sched->atom_root) {
		root = &sched->merged_atom_root;
		goto again;
	}
}
static int process_sched_wakeup_event(struct perf_tool *tool,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

	return 0;
}
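
/*
 * 'perf sched map' output: one column per CPU, each thread identified
 * by a two-character shortname ('A0'..'Z9', '.' for the idle swapper),
 * with '*' marking the CPU where the current switch event occurred.
 */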
static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)
{
	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_in;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int i, this_cpu = sample->cpu;
	int cpus_nr;
	bool new_cpu = false;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	if (sched->map.comp) {
		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
		if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
			sched->map.comp_cpus[cpus_nr++] = this_cpu;
			new_cpu = true;
		}
	} else
		cpus_nr = sched->max_cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu];
	sched->cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_in == NULL)
		return -1;

	sched->curr_thread[this_cpu] = thread__get(sched_in);

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			sched_in->shortname[0] = '.';
			sched_in->shortname[1] = ' ';
		} else {
			sched_in->shortname[0] = sched->next_shortname1;
			sched_in->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (i = 0; i < cpus_nr; i++) {
		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;

		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (sched->curr_thread[cpu])
			printf("%2s ", sched->curr_thread[cpu]->shortname);
		else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp / 1e9);
	if (new_shortname) {
		printf("%s => %s:%d",
		       sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
	}

	if (sched->map.comp && new_cpu)
		printf(" (CPU %d)", this_cpu);

	printf("\n");

	thread__put(sched_in);

	return 0;
}
static int process_sched_switch_event(struct perf_tool *tool,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = perf_evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;
	}

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;
	return err;
}
static int process_sched_runtime_event(struct perf_tool *tool,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

	return 0;
}
static int perf_sched__process_fork_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	/* run the fork event through the perf machinery */
	perf_event__process_fork(tool, event, sample, machine);

	/* and then run additional processing needed for this command */
	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, event, machine);

	return 0;
}
static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

	return 0;
}
typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct perf_evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(tool, evsel, sample, machine);
	}

	return err;
}
static int perf_sched__read_events(struct perf_sched *sched)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;
	struct perf_data_file file = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = sched->force,
	};
	int rc = -1;

	session = perf_session__new(&file, false, &sched->tool);
	if (session == NULL) {
		pr_debug("No memory for session\n");
		return -1;
	}

	symbol__init(&session->header.env);

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			goto out_delete;
		}

		sched->nr_events      = session->evlist->stats.nr_events[0];
		sched->nr_lost_events = session->evlist->stats.total_lost;
		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
	}

	rc = 0;
out_delete:
	perf_session__delete(session);
	return rc;
}
static void print_bad_events(struct perf_sched *sched)
{
	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
			sched->nr_unordered_timestamps, sched->nr_timestamps);
	}
	if (sched->nr_lost_events && sched->nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
	}
	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
			sched->nr_context_switch_bugs, sched->nr_timestamps);
		if (sched->nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}
static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct work_atoms *this;
	const char *comm = thread__comm_str(data->thread), *this_comm;

	while (*new) {
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		this_comm = thread__comm_str(this->thread);
		cmp = strcmp(comm, this_comm);
		if (cmp > 0) {
			new = &((*new)->rb_left);
		} else if (cmp < 0) {
			new = &((*new)->rb_right);
		} else {
			this->num_merged++;
			this->total_runtime += data->total_runtime;
			this->nb_atoms += data->nb_atoms;
			this->total_lat += data->total_lat;
			list_splice(&data->work_list, &this->work_list);
			if (this->max_lat < data->max_lat) {
				this->max_lat = data->max_lat;
				this->max_lat_at = data->max_lat_at;
			}
			zfree(&data);
			return;
		}
	}

	data->num_merged++;
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
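
/*
 * By default, atoms are merged per comm before sorting, so all threads
 * sharing a name are reported on one line; 'perf sched latency -p' sets
 * skip_merge to keep per-pid statistics instead.
 */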
static void perf_sched__merge_lat(struct perf_sched *sched)
{
	struct work_atoms *data;
	struct rb_node *node;

	if (sched->skip_merge)
		return;

	while ((node = rb_first(&sched->atom_root))) {
		rb_erase(node, &sched->atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__merge_work_atoms(&sched->merged_atom_root, data);
	}
}
static int perf_sched__lat(struct perf_sched *sched)
{
	struct rb_node *next;

	setup_pager();

	if (perf_sched__read_events(sched))
		return -1;

	perf_sched__merge_lat(sched);
	perf_sched__sort_lat(sched);

	printf("\n -----------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at       |\n");
	printf(" -----------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sched->sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(sched, work_list);
		next = rb_next(next);
		thread__zput(work_list->thread);
	}

	printf(" -----------------------------------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)sched->all_runtime / 1e6, sched->all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events(sched);
	printf("\n");

	return 0;
}
static int setup_map_cpus(struct perf_sched *sched)
{
	sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	if (sched->map.comp) {
		sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
		return sched->map.comp_cpus ? 0 : -1;
	}

	return 0;
}

static int perf_sched__map(struct perf_sched *sched)
{
	if (setup_map_cpus(sched))
		return -1;

	setup_pager();

	if (perf_sched__read_events(sched))
		return -1;

	print_bad_events(sched);
	return 0;
}
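
/*
 * Replay mode: rebuild the recorded workload as real threads, one per
 * recorded task, then re-execute its run/sleep/wakeup pattern
 * replay_repeat times, comparing measured runtime against the trace.
 */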
static int perf_sched__replay(struct perf_sched *sched)
{
	unsigned long i;

	calibrate_run_measurement_overhead(sched);
	calibrate_sleep_measurement_overhead(sched);

	test_calibrations(sched);

	if (perf_sched__read_events(sched))
		return -1;

	printf("nr_run_events:        %ld\n", sched->nr_run_events);
	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);

	if (sched->targetless_wakeups)
		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
	if (sched->multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
	if (sched->nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			sched->nr_run_events_optimized);

	print_task_traces(sched);
	add_cross_task_wakeups(sched);

	create_tasks(sched);
	printf("------------------------------------------------------------\n");
	for (i = 0; i < sched->replay_repeat; i++)
		run_one_test(sched);

	return 0;
}
static void setup_sorting(struct perf_sched *sched, const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(sched->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
			usage_with_options_msg(usage_msg, options,
					"Unknown --sort key: `%s'", tok);
		}
	}

	free(str);

	sort_dimension__add("pid", &sched->cmp_pid);
}
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-a",
		"-R",
		"-m", "1024",
		"-c", "1",
		"-e", "sched:sched_switch",
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
		"-e", "sched:sched_stat_runtime",
		"-e", "sched:sched_process_fork",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_wakeup_new",
		"-e", "sched:sched_migrate_task",
	};

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}
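
/*
 * With no extra arguments, the argv assembled above is roughly
 * equivalent to running:
 *
 *	perf record -a -R -m 1024 -c 1 -e sched:sched_switch \
 *		-e sched:sched_stat_wait ... -e sched:sched_migrate_task
 */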
int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.tool = {
			.sample		 = perf_sched__process_tracepoint_sample,
			.comm		 = perf_event__process_comm,
			.lost		 = perf_event__process_lost,
			.fork		 = perf_sched__process_fork_event,
			.ordered_events	 = true,
		},
		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
		.sort_order	      = default_sort_order,
		.replay_repeat	      = 10,
		.profile_cpu	      = -1,
		.next_shortname1      = 'A',
		.next_shortname2      = '0',
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
		    "latency stats per pid instead of per comm"),
	OPT_END()
	};
	const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
	OPT_END()
	};
	const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
	};
	const struct option map_options[] = {
	OPT_BOOLEAN(0, "compact", &sched.map.comp,
		    "map output in compact mode"),
	OPT_END()
	};
	const char * const latency_usage[] = {
		"perf sched latency [<options>]",
		NULL
	};
	const char * const replay_usage[] = {
		"perf sched replay [<options>]",
		NULL
	};
	const char * const map_usage[] = {
		"perf sched map [<options>]",
		NULL
	};
	const char *const sched_subcommands[] = { "record", "latency", "map",
						  "replay", "script", NULL };
	const char *sched_usage[] = {
		NULL,
		NULL
	};
	struct trace_sched_handler lat_ops  = {
		.wakeup_event	    = latency_wakeup_event,
		.switch_event	    = latency_switch_event,
		.runtime_event	    = latency_runtime_event,
		.migrate_task_event = latency_migrate_task_event,
	};
	struct trace_sched_handler map_ops  = {
		.switch_event	    = map_switch_event,
	};
	struct trace_sched_handler replay_ops  = {
		.wakeup_event	    = replay_wakeup_event,
		.switch_event	    = replay_switch_event,
		.fork_event	    = replay_fork_event,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
		sched.curr_pid[i] = -1;
	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__lat(&sched);
	} else if (!strcmp(argv[0], "map")) {
		if (argc) {
			argc = parse_options(argc, argv, map_options, map_usage, 0);
			if (argc)
				usage_with_options(map_usage, map_options);
		}
		sched.tp_handler = &map_ops;
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__map(&sched);
	} else if (!strncmp(argv[0], "rep", 3)) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return perf_sched__replay(&sched);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}
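
/*
 * Typical workflow (illustrative):
 *
 *	perf sched record -- sleep 1	# record system-wide sched events
 *	perf sched latency --sort max	# per-task wakeup latency table
 *	perf sched map --compact	# per-CPU context-switch map
 *	perf sched replay -r 10		# re-run the recorded workload
 */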