// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf-sys.h"

#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/mutex.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/event.h"
#include "util/util.h"

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <inttypes.h>

#include <errno.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>

#include <linux/ctype.h>

#define PR_SET_NAME     15      /* Set process name */
#define MAX_CPUS        4096
#define COMM_LEN        20
#define SYM_LEN         129
#define MAX_PID         1024000

static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

struct sched_atom;

struct task_desc {
        unsigned long nr;
        unsigned long pid;
        char comm[COMM_LEN];

        unsigned long nr_events;
        unsigned long curr_event;
        struct sched_atom **atoms;

        pthread_t thread;
        sem_t sleep_sem;

        sem_t ready_for_work;
        sem_t work_done_sem;

        u64 cpu_usage;
};

enum sched_event_type {
        SCHED_EVENT_RUN,
        SCHED_EVENT_SLEEP,
        SCHED_EVENT_WAKEUP,
        SCHED_EVENT_MIGRATION,
};

struct sched_atom {
        enum sched_event_type type;
        int specific_wait;
        u64 timestamp;
        u64 duration;
        unsigned long nr;
        sem_t *wait_sem;
        struct task_desc *wakee;
};

enum thread_state {
        THREAD_SLEEPING = 0,
        THREAD_WAIT_CPU,
        THREAD_SCHED_IN,
        THREAD_IGNORE
};

struct work_atom {
        struct list_head list;
        enum thread_state state;
        u64 sched_out_time;
        u64 wake_up_time;
        u64 sched_in_time;
        u64 runtime;
};

struct work_atoms {
        struct list_head work_list;
        struct thread *thread;
        struct rb_node node;
        u64 max_lat;
        u64 max_lat_start;
        u64 max_lat_end;
        u64 total_lat;
        u64 nb_atoms;
        u64 total_runtime;
        int num_merged;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
        int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
                            struct perf_sample *sample, struct machine *machine);

        int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
                             struct perf_sample *sample, struct machine *machine);

        int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
                            struct perf_sample *sample, struct machine *machine);

        /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
        int (*fork_event)(struct perf_sched *sched, union perf_event *event,
                          struct machine *machine);

        int (*migrate_task_event)(struct perf_sched *sched,
                                  struct evsel *evsel,
                                  struct perf_sample *sample,
                                  struct machine *machine);
};

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
        DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
        struct perf_cpu *comp_cpus;
        bool comp;
        struct perf_thread_map *color_pids;
        const char *color_pids_str;
        struct perf_cpu_map *color_cpus;
        const char *color_cpus_str;
        const char *task_name;
        struct strlist *task_names;
        bool fuzzy;
        struct perf_cpu_map *cpus;
        const char *cpus_str;
};

struct perf_sched {
        struct perf_tool tool;
        const char *sort_order;
        unsigned long nr_tasks;
        struct task_desc **pid_to_task;
        struct task_desc **tasks;
        const struct trace_sched_handler *tp_handler;
        struct mutex start_work_mutex;
        struct mutex work_done_wait_mutex;
        int profile_cpu;
/*
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
 */
        struct perf_cpu max_cpu;
        u32 *curr_pid;
        struct thread **curr_thread;
        struct thread **curr_out_thread;
        char next_shortname1;
        char next_shortname2;
        unsigned int replay_repeat;
        unsigned long nr_run_events;
        unsigned long nr_sleep_events;
        unsigned long nr_wakeup_events;
        unsigned long nr_sleep_corrections;
        unsigned long nr_run_events_optimized;
        unsigned long targetless_wakeups;
        unsigned long multitarget_wakeups;
        unsigned long nr_runs;
        unsigned long nr_timestamps;
        unsigned long nr_unordered_timestamps;
        unsigned long nr_context_switch_bugs;
        unsigned long nr_events;
        unsigned long nr_lost_chunks;
        unsigned long nr_lost_events;
        u64 run_measurement_overhead;
        u64 sleep_measurement_overhead;
        u64 start_time;
        u64 cpu_usage;
        u64 runavg_cpu_usage;
        u64 parent_cpu_usage;
        u64 runavg_parent_cpu_usage;
        u64 sum_runtime;
        u64 sum_fluct;
        u64 run_avg;
        u64 all_runtime;
        u64 all_count;
        u64 *cpu_last_switched;
        struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
        struct list_head sort_list, cmp_pid;
        bool force;
        bool skip_merge;
        struct perf_sched_map map;

        /* options for timehist command */
        bool summary;
        bool summary_only;
        bool idle_hist;
        bool show_callchain;
        unsigned int max_stack;
        bool show_cpu_visual;
        bool show_wakeups;
        bool show_next;
        bool show_migrations;
        bool show_state;
        u64 skipped_samples;
        const char *time_str;
        struct perf_time_interval ptime;
        struct perf_time_interval hist_time;
        volatile bool thread_funcs_exit;
};

/* per thread run time data */
struct thread_runtime {
        u64 last_time;      /* time of previous sched in/out event */
        u64 dt_run;         /* run time */
        u64 dt_sleep;       /* time between CPU access by sleep (off cpu) */
        u64 dt_iowait;      /* time between CPU access by iowait (off cpu) */
        u64 dt_preempt;     /* time between CPU access by preempt (off cpu) */
        u64 dt_delay;       /* time between wakeup and sched-in */
        u64 ready_to_run;   /* time of wakeup */

        struct stats run_stats;
        u64 total_run_time;
        u64 total_sleep_time;
        u64 total_iowait_time;
        u64 total_preempt_time;
        u64 total_delay_time;

        char last_state;

        char shortname[3];
        bool comm_changed;

        u64 migrations;
};

/* per event run time data */
struct evsel_runtime {
        u64 *last_time; /* time this event was last seen per cpu */
        u32 ncpu;       /* highest cpu slot allocated */
};

/* per cpu idle time data */
struct idle_thread_runtime {
        struct thread_runtime tr;
        struct thread *last_thread;
        struct rb_root_cached sorted_root;
        struct callchain_root callchain;
        struct callchain_cursor cursor;
};

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";

static u64 get_nsecs(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);

        return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

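/*
 * Busy-loop for 'nsecs', compensating for the calibrated cost of the
 * time measurement itself (run_measurement_overhead), so that replayed
 * RUN atoms consume roughly the amount of CPU time seen in the trace.
 */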
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
        u64 T0 = get_nsecs(), T1;

        do {
                T1 = get_nsecs();
        } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
        struct timespec ts;

        ts.tv_nsec = nsecs % 999999999;
        ts.tv_sec = nsecs / 999999999;

        nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
        u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
        int i;

        for (i = 0; i < 10; i++) {
                T0 = get_nsecs();
                burn_nsecs(sched, 0);
                T1 = get_nsecs();
                delta = T1-T0;
                min_delta = min(min_delta, delta);
        }
        sched->run_measurement_overhead = min_delta;

        printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
        u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
        int i;

        for (i = 0; i < 10; i++) {
                T0 = get_nsecs();
                sleep_nsecs(10000);
                T1 = get_nsecs();
                delta = T1-T0;
                min_delta = min(min_delta, delta);
        }
        min_delta -= 10000;
        sched->sleep_measurement_overhead = min_delta;

        printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
        struct sched_atom *event = zalloc(sizeof(*event));
        unsigned long idx = task->nr_events;
        size_t size;

        event->timestamp = timestamp;
        event->nr = idx;

        task->nr_events++;
        size = sizeof(struct sched_atom *) * task->nr_events;
        task->atoms = realloc(task->atoms, size);
        BUG_ON(!task->atoms);

        task->atoms[idx] = event;

        return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
        if (!task->nr_events)
                return NULL;

        return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
                                u64 timestamp, u64 duration)
{
        struct sched_atom *event, *curr_event = last_event(task);

        /*
         * optimize an existing RUN event by merging this one
         * to it:
         */
        if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
                sched->nr_run_events_optimized++;
                curr_event->duration += duration;
                return;
        }

        event = get_new_event(task, timestamp);

        event->type = SCHED_EVENT_RUN;
        event->duration = duration;

        sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
                                   u64 timestamp, struct task_desc *wakee)
{
        struct sched_atom *event, *wakee_event;

        event = get_new_event(task, timestamp);
        event->type = SCHED_EVENT_WAKEUP;
        event->wakee = wakee;

        wakee_event = last_event(wakee);
        if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
                sched->targetless_wakeups++;
                return;
        }
        if (wakee_event->wait_sem) {
                sched->multitarget_wakeups++;
                return;
        }

        wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
        sem_init(wakee_event->wait_sem, 0, 0);
        wakee_event->specific_wait = 1;
        event->wait_sem = wakee_event->wait_sem;

        sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
                                  u64 timestamp, const char task_state __maybe_unused)
{
        struct sched_atom *event = get_new_event(task, timestamp);

        event->type = SCHED_EVENT_SLEEP;

        sched->nr_sleep_events++;
}

static struct task_desc *register_pid(struct perf_sched *sched,
                                      unsigned long pid, const char *comm)
{
        struct task_desc *task;
        static int pid_max;

        if (sched->pid_to_task == NULL) {
                if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
                        pid_max = MAX_PID;
                BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
        }
        if (pid >= (unsigned long)pid_max) {
                BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
                        sizeof(struct task_desc *))) == NULL);
                while (pid >= (unsigned long)pid_max)
                        sched->pid_to_task[pid_max++] = NULL;
        }

        task = sched->pid_to_task[pid];

        if (task)
                return task;

        task = zalloc(sizeof(*task));
        task->pid = pid;
        task->nr = sched->nr_tasks;
        strcpy(task->comm, comm);
        /*
         * every task starts in sleeping state - this gets ignored
         * if there's no wakeup pointing to this sleep state:
         */
        add_sched_event_sleep(sched, task, 0, 0);

        sched->pid_to_task[pid] = task;
        sched->nr_tasks++;
        sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
        BUG_ON(!sched->tasks);
        sched->tasks[task->nr] = task;

        if (verbose > 0)
                printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

        return task;
}


static void print_task_traces(struct perf_sched *sched)
{
        struct task_desc *task;
        unsigned long i;

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
                       task->nr, task->comm, task->pid, task->nr_events);
        }
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
        struct task_desc *task1, *task2;
        unsigned long i, j;

        for (i = 0; i < sched->nr_tasks; i++) {
                task1 = sched->tasks[i];
                j = i + 1;
                if (j == sched->nr_tasks)
                        j = 0;
                task2 = sched->tasks[j];
                add_sched_event_wakeup(sched, task1, 0, task2);
        }
}

static void perf_sched__process_event(struct perf_sched *sched,
                                      struct sched_atom *atom)
{
        int ret = 0;

        switch (atom->type) {
                case SCHED_EVENT_RUN:
                        burn_nsecs(sched, atom->duration);
                        break;
                case SCHED_EVENT_SLEEP:
                        if (atom->wait_sem)
                                ret = sem_wait(atom->wait_sem);
                        BUG_ON(ret);
                        break;
                case SCHED_EVENT_WAKEUP:
                        if (atom->wait_sem)
                                ret = sem_post(atom->wait_sem);
                        BUG_ON(ret);
                        break;
                case SCHED_EVENT_MIGRATION:
                        break;
                default:
                        BUG_ON(1);
        }
}

static u64 get_cpu_usage_nsec_parent(void)
{
        struct rusage ru;
        u64 sum;
        int err;

        err = getrusage(RUSAGE_SELF, &ru);
        BUG_ON(err);

        sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
        sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

        return sum;
}

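/*
 * Open a self-monitoring task-clock software counter for the calling
 * thread. On EMFILE, if -f/--force was given, raise RLIMIT_NOFILE by
 * the number of tasks still to be created and retry; otherwise print
 * a hint and exit.
 */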
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
        struct perf_event_attr attr;
        char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
        int fd;
        struct rlimit limit;
        bool need_privilege = false;

        memset(&attr, 0, sizeof(attr));

        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
        fd = sys_perf_event_open(&attr, 0, -1, -1,
                                 perf_event_open_cloexec_flag());

        if (fd < 0) {
                if (errno == EMFILE) {
                        if (sched->force) {
                                BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
                                limit.rlim_cur += sched->nr_tasks - cur_task;
                                if (limit.rlim_cur > limit.rlim_max) {
                                        limit.rlim_max = limit.rlim_cur;
                                        need_privilege = true;
                                }
                                if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
                                        if (need_privilege && errno == EPERM)
                                                strcpy(info, "Need privilege\n");
                                } else
                                        goto force_again;
                        } else
                                strcpy(info, "Have a try with -f option\n");
                }
                pr_err("Error: sys_perf_event_open() syscall returned "
                       "with %d (%s)\n%s", fd,
                       str_error_r(errno, sbuf, sizeof(sbuf)), info);
                exit(EXIT_FAILURE);
        }
        return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
        u64 runtime;
        int ret;

        ret = read(fd, &runtime, sizeof(runtime));
        BUG_ON(ret != sizeof(runtime));

        return runtime;
}

struct sched_thread_parms {
        struct task_desc *task;
        struct perf_sched *sched;
        int fd;
};

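/*
 * Body of each replay worker thread. Per iteration: post
 * ready_for_work, wait for the parent to release start_work_mutex,
 * replay this task's atoms while measuring its own task-clock usage,
 * post work_done_sem, then block on work_done_wait_mutex until the
 * parent has collected all results.
 */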
static void *thread_func(void *ctx)
{
        struct sched_thread_parms *parms = ctx;
        struct task_desc *this_task = parms->task;
        struct perf_sched *sched = parms->sched;
        u64 cpu_usage_0, cpu_usage_1;
        unsigned long i, ret;
        char comm2[22];
        int fd = parms->fd;

        zfree(&parms);

        sprintf(comm2, ":%s", this_task->comm);
        prctl(PR_SET_NAME, comm2);
        if (fd < 0)
                return NULL;

        while (!sched->thread_funcs_exit) {
                ret = sem_post(&this_task->ready_for_work);
                BUG_ON(ret);
                mutex_lock(&sched->start_work_mutex);
                mutex_unlock(&sched->start_work_mutex);

                cpu_usage_0 = get_cpu_usage_nsec_self(fd);

                for (i = 0; i < this_task->nr_events; i++) {
                        this_task->curr_event = i;
                        perf_sched__process_event(sched, this_task->atoms[i]);
                }

                cpu_usage_1 = get_cpu_usage_nsec_self(fd);
                this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
                ret = sem_post(&this_task->work_done_sem);
                BUG_ON(ret);

                mutex_lock(&sched->work_done_wait_mutex);
                mutex_unlock(&sched->work_done_wait_mutex);
        }
        return NULL;
}

static void create_tasks(struct perf_sched *sched)
        EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
        EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
{
        struct task_desc *task;
        pthread_attr_t attr;
        unsigned long i;
        int err;

        err = pthread_attr_init(&attr);
        BUG_ON(err);
        err = pthread_attr_setstacksize(&attr,
                        (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
        BUG_ON(err);
        mutex_lock(&sched->start_work_mutex);
        mutex_lock(&sched->work_done_wait_mutex);
        for (i = 0; i < sched->nr_tasks; i++) {
                struct sched_thread_parms *parms = malloc(sizeof(*parms));
                BUG_ON(parms == NULL);
                parms->task = task = sched->tasks[i];
                parms->sched = sched;
                parms->fd = self_open_counters(sched, i);
                sem_init(&task->sleep_sem, 0, 0);
                sem_init(&task->ready_for_work, 0, 0);
                sem_init(&task->work_done_sem, 0, 0);
                task->curr_event = 0;
                err = pthread_create(&task->thread, &attr, thread_func, parms);
                BUG_ON(err);
        }
}

static void destroy_tasks(struct perf_sched *sched)
        UNLOCK_FUNCTION(sched->start_work_mutex)
        UNLOCK_FUNCTION(sched->work_done_wait_mutex)
{
        struct task_desc *task;
        unsigned long i;
        int err;

        mutex_unlock(&sched->start_work_mutex);
        mutex_unlock(&sched->work_done_wait_mutex);
        /* Get rid of threads so they won't be upset by mutex destruction */
        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                err = pthread_join(task->thread, NULL);
                BUG_ON(err);
                sem_destroy(&task->sleep_sem);
                sem_destroy(&task->ready_for_work);
                sem_destroy(&task->work_done_sem);
        }
}

static void wait_for_tasks(struct perf_sched *sched)
        EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
        EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
        u64 cpu_usage_0, cpu_usage_1;
        struct task_desc *task;
        unsigned long i, ret;

        sched->start_time = get_nsecs();
        sched->cpu_usage = 0;
        mutex_unlock(&sched->work_done_wait_mutex);

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                ret = sem_wait(&task->ready_for_work);
                BUG_ON(ret);
                sem_init(&task->ready_for_work, 0, 0);
        }
        mutex_lock(&sched->work_done_wait_mutex);

        cpu_usage_0 = get_cpu_usage_nsec_parent();

        mutex_unlock(&sched->start_work_mutex);

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                ret = sem_wait(&task->work_done_sem);
                BUG_ON(ret);
                sem_init(&task->work_done_sem, 0, 0);
                sched->cpu_usage += task->cpu_usage;
                task->cpu_usage = 0;
        }

        cpu_usage_1 = get_cpu_usage_nsec_parent();
        if (!sched->runavg_cpu_usage)
                sched->runavg_cpu_usage = sched->cpu_usage;
        sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

        sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
        if (!sched->runavg_parent_cpu_usage)
                sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
        sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
                                          sched->parent_cpu_usage) / sched->replay_repeat;

        mutex_lock(&sched->start_work_mutex);

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                sem_init(&task->sleep_sem, 0, 0);
                task->curr_event = 0;
        }
}

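/*
 * Run one replay iteration and fold its wall-clock time into a rolling
 * average weighted by the -r/--repeat count:
 *
 *      run_avg = (run_avg * (replay_repeat - 1) + delta) / replay_repeat
 *
 * The CPU-usage averages in wait_for_tasks() are smoothed the same way.
 */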
static void run_one_test(struct perf_sched *sched)
        EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
        EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
        u64 T0, T1, delta, avg_delta, fluct;

        T0 = get_nsecs();
        wait_for_tasks(sched);
        T1 = get_nsecs();

        delta = T1 - T0;
        sched->sum_runtime += delta;
        sched->nr_runs++;

        avg_delta = sched->sum_runtime / sched->nr_runs;
        if (delta < avg_delta)
                fluct = avg_delta - delta;
        else
                fluct = delta - avg_delta;
        sched->sum_fluct += fluct;
        if (!sched->run_avg)
                sched->run_avg = delta;
        sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

        printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

        printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

        printf("cpu: %0.2f / %0.2f",
               (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

#if 0
        /*
         * rusage statistics done by the parent, these are less
         * accurate than the sched->sum_exec_runtime based statistics:
         */
        printf(" [%0.2f / %0.2f]",
               (double)sched->parent_cpu_usage / NSEC_PER_MSEC,
               (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
#endif

        printf("\n");

        if (sched->nr_sleep_corrections)
                printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
        sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
        u64 T0, T1;

        T0 = get_nsecs();
        burn_nsecs(sched, NSEC_PER_MSEC);
        T1 = get_nsecs();

        printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

        T0 = get_nsecs();
        sleep_nsecs(NSEC_PER_MSEC);
        T1 = get_nsecs();

        printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
                    struct evsel *evsel, struct perf_sample *sample,
                    struct machine *machine __maybe_unused)
{
        const char *comm = evsel__strval(evsel, sample, "comm");
        const u32 pid = evsel__intval(evsel, sample, "pid");
        struct task_desc *waker, *wakee;

        if (verbose > 0) {
                printf("sched_wakeup event %p\n", evsel);

                printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
        }

        waker = register_pid(sched, sample->tid, "<unknown>");
        wakee = register_pid(sched, pid, comm);

        add_sched_event_wakeup(sched, waker, sample->time, wakee);
        return 0;
}

static int replay_switch_event(struct perf_sched *sched,
                               struct evsel *evsel,
                               struct perf_sample *sample,
                               struct machine *machine __maybe_unused)
{
        const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"),
                   *next_comm = evsel__strval(evsel, sample, "next_comm");
        const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
                  next_pid = evsel__intval(evsel, sample, "next_pid");
        const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
        struct task_desc *prev, __maybe_unused *next;
        u64 timestamp0, timestamp = sample->time;
        int cpu = sample->cpu;
        s64 delta;

        if (verbose > 0)
                printf("sched_switch event %p\n", evsel);

        if (cpu >= MAX_CPUS || cpu < 0)
                return 0;

        timestamp0 = sched->cpu_last_switched[cpu];
        if (timestamp0)
                delta = timestamp - timestamp0;
        else
                delta = 0;

        if (delta < 0) {
                pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
                return -1;
        }

        pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
                 prev_comm, prev_pid, next_comm, next_pid, delta);

        prev = register_pid(sched, prev_pid, prev_comm);
        next = register_pid(sched, next_pid, next_comm);

        sched->cpu_last_switched[cpu] = timestamp;

        add_sched_event_run(sched, prev, timestamp, delta);
        add_sched_event_sleep(sched, prev, timestamp, prev_state);

        return 0;
}

static int replay_fork_event(struct perf_sched *sched,
                             union perf_event *event,
                             struct machine *machine)
{
        struct thread *child, *parent;

        child = machine__findnew_thread(machine, event->fork.pid,
                                        event->fork.tid);
        parent = machine__findnew_thread(machine, event->fork.ppid,
                                         event->fork.ptid);

        if (child == NULL || parent == NULL) {
                pr_debug("thread does not exist on fork event: child %p, parent %p\n",
                         child, parent);
                goto out_put;
        }

        if (verbose > 0) {
                printf("fork event\n");
                printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent));
                printf("... child: %s/%d\n", thread__comm_str(child), thread__tid(child));
        }

        register_pid(sched, thread__tid(parent), thread__comm_str(parent));
        register_pid(sched, thread__tid(child), thread__comm_str(child));
out_put:
        thread__put(child);
        thread__put(parent);
        return 0;
}

struct sort_dimension {
        const char *name;
        sort_fn_t cmp;
        struct list_head list;
};

/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
        struct thread_runtime *r;

        r = zalloc(sizeof(struct thread_runtime));
        if (!r)
                return NULL;

        init_stats(&r->run_stats);
        thread__set_priv(thread, r);

        return r;
}

static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
        struct thread_runtime *tr;

        tr = thread__priv(thread);
        if (tr == NULL) {
                tr = thread__init_runtime(thread);
                if (tr == NULL)
                        pr_debug("Failed to malloc memory for runtime data.\n");
        }

        return tr;
}

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
        struct sort_dimension *sort;
        int ret = 0;

        BUG_ON(list_empty(list));

        list_for_each_entry(sort, list, list) {
                ret = sort->cmp(l, r);
                if (ret)
                        return ret;
        }

        return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
                    struct list_head *sort_list)
{
        struct rb_node *node = root->rb_root.rb_node;
        struct work_atoms key = { .thread = thread };

        while (node) {
                struct work_atoms *atoms;
                int cmp;

                atoms = container_of(node, struct work_atoms, node);

                cmp = thread_lat_cmp(sort_list, &key, atoms);
                if (cmp > 0)
                        node = node->rb_left;
                else if (cmp < 0)
                        node = node->rb_right;
                else {
                        BUG_ON(thread != atoms->thread);
                        return atoms;
                }
        }
        return NULL;
}

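/*
 * Insert a work_atoms entry into the cached rbtree, ordered by the
 * comparator list (cmp_pid while collecting, the user-selected sort
 * keys when producing output). The 'leftmost' hint keeps
 * rb_first_cached() cheap.
 */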
static void
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
                        struct list_head *sort_list)
{
        struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
        bool leftmost = true;

        while (*new) {
                struct work_atoms *this;
                int cmp;

                this = container_of(*new, struct work_atoms, node);
                parent = *new;

                cmp = thread_lat_cmp(sort_list, data, this);

                if (cmp > 0)
                        new = &((*new)->rb_left);
                else {
                        new = &((*new)->rb_right);
                        leftmost = false;
                }
        }

        rb_link_node(&data->node, parent, new);
        rb_insert_color_cached(&data->node, root, leftmost);
}

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
        struct work_atoms *atoms = zalloc(sizeof(*atoms));
        if (!atoms) {
                pr_err("No memory at %s\n", __func__);
                return -1;
        }

        atoms->thread = thread__get(thread);
        INIT_LIST_HEAD(&atoms->work_list);
        __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
        return 0;
}

static int
add_sched_out_event(struct work_atoms *atoms,
                    char run_state,
                    u64 timestamp)
{
        struct work_atom *atom = zalloc(sizeof(*atom));
        if (!atom) {
1046 pr_err("Non memory at %s", __func__);
                return -1;
        }

        atom->sched_out_time = timestamp;

        if (run_state == 'R') {
                atom->state = THREAD_WAIT_CPU;
                atom->wake_up_time = atom->sched_out_time;
        }

        list_add_tail(&atom->list, &atoms->work_list);
        return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
                  u64 timestamp __maybe_unused)
{
        struct work_atom *atom;

        BUG_ON(list_empty(&atoms->work_list));

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);

        atom->runtime += delta;
        atoms->total_runtime += delta;
}

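/*
 * Complete the most recent atom when its thread is scheduled back in:
 * scheduling latency is sched_in_time - wake_up_time. Accumulate the
 * total for the average and remember the start/end of the worst case
 * for the max-latency columns.
 */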
static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
        struct work_atom *atom;
        u64 delta;

        if (list_empty(&atoms->work_list))
                return;

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);

        if (atom->state != THREAD_WAIT_CPU)
                return;

        if (timestamp < atom->wake_up_time) {
                atom->state = THREAD_IGNORE;
                return;
        }

        atom->state = THREAD_SCHED_IN;
        atom->sched_in_time = timestamp;

        delta = atom->sched_in_time - atom->wake_up_time;
        atoms->total_lat += delta;
        if (delta > atoms->max_lat) {
                atoms->max_lat = delta;
                atoms->max_lat_start = atom->wake_up_time;
                atoms->max_lat_end = timestamp;
        }
        atoms->nb_atoms++;
}

static int latency_switch_event(struct perf_sched *sched,
                                struct evsel *evsel,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
                  next_pid = evsel__intval(evsel, sample, "next_pid");
        const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
        struct work_atoms *out_events, *in_events;
        struct thread *sched_out, *sched_in;
        u64 timestamp0, timestamp = sample->time;
        int cpu = sample->cpu, err = -1;
        s64 delta;

        BUG_ON(cpu >= MAX_CPUS || cpu < 0);

        timestamp0 = sched->cpu_last_switched[cpu];
        sched->cpu_last_switched[cpu] = timestamp;
        if (timestamp0)
                delta = timestamp - timestamp0;
        else
                delta = 0;

        if (delta < 0) {
                pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
                return -1;
        }

        sched_out = machine__findnew_thread(machine, -1, prev_pid);
        sched_in = machine__findnew_thread(machine, -1, next_pid);
        if (sched_out == NULL || sched_in == NULL)
                goto out_put;

        out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
        if (!out_events) {
                if (thread_atoms_insert(sched, sched_out))
                        goto out_put;
                out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
                if (!out_events) {
                        pr_err("out-event: Internal tree error");
                        goto out_put;
                }
        }
        if (add_sched_out_event(out_events, prev_state, timestamp))
                return -1;

        in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
        if (!in_events) {
                if (thread_atoms_insert(sched, sched_in))
                        goto out_put;
                in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
                if (!in_events) {
                        pr_err("in-event: Internal tree error");
                        goto out_put;
                }
                /*
                 * Task came in that we have not heard about yet;
                 * add an initial atom in runnable state:
                 */
                if (add_sched_out_event(in_events, 'R', timestamp))
                        goto out_put;
        }
        add_sched_in_event(in_events, timestamp);
        err = 0;
out_put:
        thread__put(sched_out);
        thread__put(sched_in);
        return err;
}

static int latency_runtime_event(struct perf_sched *sched,
                                 struct evsel *evsel,
                                 struct perf_sample *sample,
                                 struct machine *machine)
{
        const u32 pid = evsel__intval(evsel, sample, "pid");
        const u64 runtime = evsel__intval(evsel, sample, "runtime");
        struct thread *thread = machine__findnew_thread(machine, -1, pid);
        struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
        u64 timestamp = sample->time;
        int cpu = sample->cpu, err = -1;

        if (thread == NULL)
                return -1;

        BUG_ON(cpu >= MAX_CPUS || cpu < 0);
        if (!atoms) {
                if (thread_atoms_insert(sched, thread))
                        goto out_put;
                atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("in-event: Internal tree error");
                        goto out_put;
                }
                if (add_sched_out_event(atoms, 'R', timestamp))
                        goto out_put;
        }

        add_runtime_event(atoms, runtime, timestamp);
        err = 0;
out_put:
        thread__put(thread);
        return err;
}

static int latency_wakeup_event(struct perf_sched *sched,
                                struct evsel *evsel,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        const u32 pid = evsel__intval(evsel, sample, "pid");
        struct work_atoms *atoms;
        struct work_atom *atom;
        struct thread *wakee;
        u64 timestamp = sample->time;
        int err = -1;

        wakee = machine__findnew_thread(machine, -1, pid);
        if (wakee == NULL)
                return -1;
        atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, wakee))
                        goto out_put;
                atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("wakeup-event: Internal tree error");
                        goto out_put;
                }
                if (add_sched_out_event(atoms, 'S', timestamp))
                        goto out_put;
        }

        BUG_ON(list_empty(&atoms->work_list));

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);

        /*
         * A wakeup event is not guaranteed to arrive while the task is
         * off the run queue: it may also fire while the task is already
         * on the run queue and only flips ->state to TASK_RUNNING. In
         * that case we must not set ->wake_up_time for the task.
         *
         * You WILL be missing events if you've recorded only one CPU,
         * or are only looking at one, so don't skip in this case.
         */
        if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
                goto out_ok;

        sched->nr_timestamps++;
        if (atom->sched_out_time > timestamp) {
                sched->nr_unordered_timestamps++;
                goto out_ok;
        }

        atom->state = THREAD_WAIT_CPU;
        atom->wake_up_time = timestamp;
out_ok:
        err = 0;
out_put:
        thread__put(wakee);
        return err;
}

static int latency_migrate_task_event(struct perf_sched *sched,
                                      struct evsel *evsel,
                                      struct perf_sample *sample,
                                      struct machine *machine)
{
        const u32 pid = evsel__intval(evsel, sample, "pid");
        u64 timestamp = sample->time;
        struct work_atoms *atoms;
        struct work_atom *atom;
        struct thread *migrant;
        int err = -1;

        /*
         * Only need to worry about migration when profiling one CPU.
         */
        if (sched->profile_cpu == -1)
                return 0;

        migrant = machine__findnew_thread(machine, -1, pid);
        if (migrant == NULL)
                return -1;
        atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, migrant))
                        goto out_put;
                register_pid(sched, thread__tid(migrant), thread__comm_str(migrant));
                atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("migration-event: Internal tree error");
                        goto out_put;
                }
                if (add_sched_out_event(atoms, 'R', timestamp))
                        goto out_put;
        }

        BUG_ON(list_empty(&atoms->work_list));

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);
        atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

        sched->nr_timestamps++;

        if (atom->sched_out_time > timestamp)
                sched->nr_unordered_timestamps++;
        err = 0;
out_put:
        thread__put(migrant);
        return err;
}

static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
        int i;
        int ret;
        u64 avg;
        char max_lat_start[32], max_lat_end[32];

        if (!work_list->nb_atoms)
                return;
        /*
         * Ignore idle threads:
         */
        if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
                return;

        sched->all_runtime += work_list->total_runtime;
        sched->all_count += work_list->nb_atoms;

        if (work_list->num_merged > 1) {
                ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread),
                             work_list->num_merged);
        } else {
                ret = printf(" %s:%d ", thread__comm_str(work_list->thread),
                             thread__tid(work_list->thread));
        }

        for (i = 0; i < 24 - ret; i++)
                printf(" ");

        avg = work_list->total_lat / work_list->nb_atoms;
        timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
        timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));

        printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
               (double)work_list->total_runtime / NSEC_PER_MSEC,
               work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
               (double)work_list->max_lat / NSEC_PER_MSEC,
               max_lat_start, max_lat_end);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
        pid_t l_tid, r_tid;

        if (RC_CHK_EQUAL(l->thread, r->thread))
                return 0;
        l_tid = thread__tid(l->thread);
        r_tid = thread__tid(r->thread);
        if (l_tid < r_tid)
                return -1;
        if (l_tid > r_tid)
                return 1;
        return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread));
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
        u64 avgl, avgr;

        if (!l->nb_atoms)
                return -1;

        if (!r->nb_atoms)
                return 1;

        avgl = l->total_lat / l->nb_atoms;
        avgr = r->total_lat / r->nb_atoms;

        if (avgl < avgr)
                return -1;
        if (avgl > avgr)
                return 1;

        return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
        if (l->max_lat < r->max_lat)
                return -1;
        if (l->max_lat > r->max_lat)
                return 1;

        return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
        if (l->nb_atoms < r->nb_atoms)
                return -1;
        if (l->nb_atoms > r->nb_atoms)
                return 1;

        return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
        if (l->total_runtime < r->total_runtime)
                return -1;
        if (l->total_runtime > r->total_runtime)
                return 1;

        return 0;
}

static int sort_dimension__add(const char *tok, struct list_head *list)
{
        size_t i;
        static struct sort_dimension avg_sort_dimension = {
                .name = "avg",
                .cmp = avg_cmp,
        };
        static struct sort_dimension max_sort_dimension = {
                .name = "max",
                .cmp = max_cmp,
        };
        static struct sort_dimension pid_sort_dimension = {
                .name = "pid",
                .cmp = pid_cmp,
        };
        static struct sort_dimension runtime_sort_dimension = {
                .name = "runtime",
                .cmp = runtime_cmp,
        };
        static struct sort_dimension switch_sort_dimension = {
                .name = "switch",
                .cmp = switch_cmp,
        };
        struct sort_dimension *available_sorts[] = {
                &pid_sort_dimension,
                &avg_sort_dimension,
                &max_sort_dimension,
                &switch_sort_dimension,
                &runtime_sort_dimension,
        };

        for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
                if (!strcmp(available_sorts[i]->name, tok)) {
                        list_add_tail(&available_sorts[i]->list, list);

                        return 0;
                }
        }

        return -1;
}

static void perf_sched__sort_lat(struct perf_sched *sched)
{
        struct rb_node *node;
        struct rb_root_cached *root = &sched->atom_root;
again:
        for (;;) {
                struct work_atoms *data;
                node = rb_first_cached(root);
                if (!node)
                        break;

                rb_erase_cached(node, root);
                data = rb_entry(node, struct work_atoms, node);
                __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
        }
        if (root == &sched->atom_root) {
                root = &sched->merged_atom_root;
                goto again;
        }
}

static int process_sched_wakeup_event(struct perf_tool *tool,
                                      struct evsel *evsel,
                                      struct perf_sample *sample,
                                      struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

        if (sched->tp_handler->wakeup_event)
                return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

        return 0;
}

static int process_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
                                       struct evsel *evsel __maybe_unused,
                                       struct perf_sample *sample __maybe_unused,
                                       struct machine *machine __maybe_unused)
{
        return 0;
}

union map_priv {
        void *ptr;
        bool color;
};

static bool thread__has_color(struct thread *thread)
{
        union map_priv priv = {
                .ptr = thread__priv(thread),
        };

        return priv.color;
}

static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
{
        struct thread *thread = machine__findnew_thread(machine, pid, tid);
        union map_priv priv = {
                .color = false,
        };

        if (!sched->map.color_pids || !thread || thread__priv(thread))
                return thread;

        if (thread_map__has(sched->map.color_pids, tid))
                priv.color = true;

        thread__set_priv(thread, priv.ptr);
        return thread;
}

static bool sched_match_task(struct perf_sched *sched, const char *comm_str)
{
        bool fuzzy_match = sched->map.fuzzy;
        struct strlist *task_names = sched->map.task_names;
        struct str_node *node;

        strlist__for_each_entry(node, task_names) {
                bool match_found = fuzzy_match ? !!strstr(comm_str, node->s) :
                                                 !strcmp(comm_str, node->s);
                if (match_found)
                        return true;
        }

        return false;
}

static void print_sched_map(struct perf_sched *sched, struct perf_cpu this_cpu, int cpus_nr,
                            const char *color, bool sched_out)
{
        for (int i = 0; i < cpus_nr; i++) {
                struct perf_cpu cpu = {
                        .cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
                };
                struct thread *curr_thread = sched->curr_thread[cpu.cpu];
                struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu];
                struct thread_runtime *curr_tr;
                const char *pid_color = color;
                const char *cpu_color = color;
                char symbol = ' ';
                struct thread *thread_to_check = sched_out ? curr_out_thread : curr_thread;

                if (thread_to_check && thread__has_color(thread_to_check))
                        pid_color = COLOR_PIDS;

                if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
                        cpu_color = COLOR_CPUS;

                if (cpu.cpu == this_cpu.cpu)
                        symbol = '*';

                color_fprintf(stdout, cpu.cpu != this_cpu.cpu ? color : cpu_color, "%c", symbol);

                thread_to_check = sched_out ? sched->curr_out_thread[cpu.cpu] :
                                              sched->curr_thread[cpu.cpu];

                if (thread_to_check) {
                        curr_tr = thread__get_runtime(thread_to_check);
                        if (curr_tr == NULL)
                                return;

                        if (sched_out) {
                                if (cpu.cpu == this_cpu.cpu)
                                        color_fprintf(stdout, color, "- ");
                                else {
                                        curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
                                        if (curr_tr != NULL)
                                                color_fprintf(stdout, pid_color, "%2s ",
                                                              curr_tr->shortname);
                                }
                        } else
                                color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
                } else
                        color_fprintf(stdout, color, " ");
        }
}

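/*
 * 'perf sched map' view: one column per CPU. Each task gets a
 * two-character shortname whose first character cycles 'A'..'Z' and
 * second '0'..'9' ('.' is reserved for swapper); '*' marks the CPU
 * handling the current switch event.
 */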
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
                            struct perf_sample *sample, struct machine *machine)
{
        const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
        const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid");
        struct thread *sched_in, *sched_out;
        struct thread_runtime *tr;
        int new_shortname;
        u64 timestamp0, timestamp = sample->time;
        s64 delta;
        struct perf_cpu this_cpu = {
                .cpu = sample->cpu,
        };
        int cpus_nr;
        int proceed;
        bool new_cpu = false;
        const char *color = PERF_COLOR_NORMAL;
        char stimestamp[32];
        const char *str;

        BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);

        if (this_cpu.cpu > sched->max_cpu.cpu)
                sched->max_cpu = this_cpu;

        if (sched->map.comp) {
                cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
                if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
                        sched->map.comp_cpus[cpus_nr++] = this_cpu;
                        new_cpu = true;
                }
        } else
                cpus_nr = sched->max_cpu.cpu;

        timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
        sched->cpu_last_switched[this_cpu.cpu] = timestamp;
        if (timestamp0)
                delta = timestamp - timestamp0;
        else
                delta = 0;

        if (delta < 0) {
                pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
                return -1;
        }

        sched_in = map__findnew_thread(sched, machine, -1, next_pid);
        sched_out = map__findnew_thread(sched, machine, -1, prev_pid);
        if (sched_in == NULL || sched_out == NULL)
                return -1;

        tr = thread__get_runtime(sched_in);
        if (tr == NULL) {
                thread__put(sched_in);
                return -1;
        }

        sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
        sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);

        str = thread__comm_str(sched_in);
        new_shortname = 0;
        if (!tr->shortname[0]) {
                if (!strcmp(thread__comm_str(sched_in), "swapper")) {
                        /*
                         * Don't allocate a letter-number for swapper:0
                         * as a shortname. Instead, we use '.' for it.
                         */
                        tr->shortname[0] = '.';
                        tr->shortname[1] = ' ';
                } else if (!sched->map.task_name || sched_match_task(sched, str)) {
                        tr->shortname[0] = sched->next_shortname1;
                        tr->shortname[1] = sched->next_shortname2;

                        if (sched->next_shortname1 < 'Z') {
                                sched->next_shortname1++;
                        } else {
                                sched->next_shortname1 = 'A';
                                if (sched->next_shortname2 < '9')
                                        sched->next_shortname2++;
                                else
                                        sched->next_shortname2 = '0';
                        }
                } else {
                        tr->shortname[0] = '-';
                        tr->shortname[1] = ' ';
                }
                new_shortname = 1;
        }

        if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
                goto out;

        proceed = 0;
        str = thread__comm_str(sched_in);
        /*
         * Check which of sched_in and sched_out matches the passed --task-name
         * arguments and call the corresponding print_sched_map.
         */
        if (sched->map.task_name && !sched_match_task(sched, str)) {
                if (!sched_match_task(sched, thread__comm_str(sched_out)))
                        goto out;
                else
                        goto sched_out;

        } else {
                str = thread__comm_str(sched_out);
                if (!(sched->map.task_name && !sched_match_task(sched, str)))
                        proceed = 1;
        }

        printf(" ");

        print_sched_map(sched, this_cpu, cpus_nr, color, false);

        timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
        color_fprintf(stdout, color, " %12s secs ", stimestamp);
        if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) {
                const char *pid_color = color;

                if (thread__has_color(sched_in))
                        pid_color = COLOR_PIDS;

                color_fprintf(stdout, pid_color, "%s => %s:%d",
                              tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in));
                tr->comm_changed = false;
        }

        if (sched->map.comp && new_cpu)
                color_fprintf(stdout, color, " (CPU %d)", this_cpu);

        if (proceed != 1) {
                color_fprintf(stdout, color, "\n");
                goto out;
        }

sched_out:
        if (sched->map.task_name) {
                tr = thread__get_runtime(sched->curr_out_thread[this_cpu.cpu]);
                if (strcmp(tr->shortname, "") == 0)
                        goto out;

                if (proceed == 1)
                        color_fprintf(stdout, color, "\n");

                printf(" ");
                print_sched_map(sched, this_cpu, cpus_nr, color, true);
                timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
                color_fprintf(stdout, color, " %12s secs ", stimestamp);
        }

        color_fprintf(stdout, color, "\n");

out:
        if (sched->map.task_name)
                thread__put(sched_out);

        thread__put(sched_in);

        return 0;
}

static int process_sched_switch_event(struct perf_tool *tool,
                                      struct evsel *evsel,
                                      struct perf_sample *sample,
                                      struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
        int this_cpu = sample->cpu, err = 0;
        u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
            next_pid = evsel__intval(evsel, sample, "next_pid");

        if (sched->curr_pid[this_cpu] != (u32)-1) {
                /*
                 * Are we trying to switch away a PID that is
                 * not current?
                 */
                if (sched->curr_pid[this_cpu] != prev_pid)
                        sched->nr_context_switch_bugs++;
        }

        if (sched->tp_handler->switch_event)
                err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

        sched->curr_pid[this_cpu] = next_pid;
        return err;
}

static int process_sched_runtime_event(struct perf_tool *tool,
                                       struct evsel *evsel,
                                       struct perf_sample *sample,
                                       struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

        if (sched->tp_handler->runtime_event)
                return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

        return 0;
}

static int perf_sched__process_fork_event(struct perf_tool *tool,
                                          union perf_event *event,
                                          struct perf_sample *sample,
                                          struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

        /* run the fork event through the perf machinery */
        perf_event__process_fork(tool, event, sample, machine);

        /* and then run additional processing needed for this command */
        if (sched->tp_handler->fork_event)
                return sched->tp_handler->fork_event(sched, event, machine);

        return 0;
}

static int process_sched_migrate_task_event(struct perf_tool *tool,
                                            struct evsel *evsel,
                                            struct perf_sample *sample,
                                            struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

        if (sched->tp_handler->migrate_task_event)
                return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

        return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
                                  struct evsel *evsel,
                                  struct perf_sample *sample,
                                  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct perf_sample *sample,
                                                 struct evsel *evsel,
                                                 struct machine *machine)
{
        int err = 0;

        if (evsel->handler != NULL) {
                tracepoint_handler f = evsel->handler;
                err = f(tool, evsel, sample, machine);
        }

        return err;
}

static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
                                    union perf_event *event,
                                    struct perf_sample *sample,
                                    struct machine *machine)
{
        struct thread *thread;
        struct thread_runtime *tr;
        int err;

        err = perf_event__process_comm(tool, event, sample, machine);
        if (err)
                return err;

        thread = machine__find_thread(machine, sample->pid, sample->tid);
        if (!thread) {
                pr_err("Internal error: can't find thread\n");
                return -1;
        }

        tr = thread__get_runtime(thread);
        if (tr == NULL) {
                thread__put(thread);
                return -1;
        }

        tr->comm_changed = true;
        thread__put(thread);

        return 0;
}

static int perf_sched__read_events(struct perf_sched *sched)
{
        struct evsel_str_handler handlers[] = {
                { "sched:sched_switch", process_sched_switch_event, },
                { "sched:sched_stat_runtime", process_sched_runtime_event, },
                { "sched:sched_wakeup", process_sched_wakeup_event, },
                { "sched:sched_waking", process_sched_wakeup_event, },
                { "sched:sched_wakeup_new", process_sched_wakeup_event, },
                { "sched:sched_migrate_task", process_sched_migrate_task_event, },
        };
        struct perf_session *session;
        struct perf_data data = {
                .path = input_name,
                .mode = PERF_DATA_MODE_READ,
                .force = sched->force,
        };
        int rc = -1;

        session = perf_session__new(&data, &sched->tool);
        if (IS_ERR(session)) {
                pr_debug("Error creating perf session");
                return PTR_ERR(session);
        }

        symbol__init(&session->header.env);

        /* prefer sched_waking if it is captured */
        if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
                handlers[2].handler = process_sched_wakeup_ignore;

        if (perf_session__set_tracepoints_handlers(session, handlers))
                goto out_delete;

        if (perf_session__has_traces(session, "record -R")) {
                int err = perf_session__process_events(session);
                if (err) {
                        pr_err("Failed to process events, error %d", err);
                        goto out_delete;
                }

                sched->nr_events = session->evlist->stats.nr_events[0];
                sched->nr_lost_events = session->evlist->stats.total_lost;
                sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
        }

        rc = 0;
out_delete:
        perf_session__delete(session);
        return rc;
}

/*
 * scheduling times are printed as msec.usec
 */
static inline void print_sched_time(unsigned long long nsecs, int width)
{
        unsigned long msecs;
        unsigned long usecs;

        msecs = nsecs / NSEC_PER_MSEC;
        nsecs -= msecs * NSEC_PER_MSEC;
        usecs = nsecs / NSEC_PER_USEC;
        printf("%*lu.%03lu ", width, msecs, usecs);
}

/*
 * returns runtime data for event, allocating memory for it the
 * first time it is used.
 */
static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
{
        struct evsel_runtime *r = evsel->priv;

        if (r == NULL) {
                r = zalloc(sizeof(struct evsel_runtime));
                evsel->priv = r;
        }

        return r;
}

/*
 * save last time event was seen per cpu
 */
static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
{
        struct evsel_runtime *r = evsel__get_runtime(evsel);

        if (r == NULL)
                return;

        if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
                int i, n = __roundup_pow_of_two(cpu+1);
                void *p = r->last_time;

                p = realloc(r->last_time, n * sizeof(u64));
                if (!p)
                        return;

                r->last_time = p;
                for (i = r->ncpu; i < n; ++i)
                        r->last_time[i] = (u64) 0;

                r->ncpu = n;
        }

        r->last_time[cpu] = timestamp;
}

/* returns last time this event was seen on the given cpu */
static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
{
        struct evsel_runtime *r = evsel__get_runtime(evsel);

        if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
                return 0;

        return r->last_time[cpu];
}

static int comm_width = 30;

static char *timehist_get_commstr(struct thread *thread)
{
        static char str[32];
        const char *comm = thread__comm_str(thread);
        pid_t tid = thread__tid(thread);
        pid_t pid = thread__pid(thread);
        int n;

        if (pid == 0)
                n = scnprintf(str, sizeof(str), "%s", comm);

        else if (tid != pid)
                n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);

        else
                n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);

        if (n > comm_width)
                comm_width = n;

        return str;
}

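/*
 * Print the timehist column headers: a row of names, a row of units,
 * and a dotted separator. With the default comm_width the layout is
 * roughly:
 *
 *            time    cpu  task name                       wait time  sch delay   run time
 *                         [tid/pid]                          (msec)     (msec)     (msec)
 * --------------- ------  ------------------------------  ---------  ---------  ---------
 */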
2039static void timehist_header(struct perf_sched *sched)
2040{
2041 u32 ncpus = sched->max_cpu.cpu + 1;
2042 u32 i, j;
2043
2044 printf("%15s %6s ", "time", "cpu");
2045
2046 if (sched->show_cpu_visual) {
2047 printf(" ");
2048 for (i = 0, j = 0; i < ncpus; ++i) {
2049 printf("%x", j++);
2050 if (j > 15)
2051 j = 0;
2052 }
2053 printf(" ");
2054 }
2055
2056 printf(" %-*s %9s %9s %9s", comm_width,
2057 "task name", "wait time", "sch delay", "run time");
2058
2059 if (sched->show_state)
2060 printf(" %s", "state");
2061
2062 printf("\n");
2063
2064 /*
2065 * units row
2066 */
2067 printf("%15s %-6s ", "", "");
2068
2069 if (sched->show_cpu_visual)
2070 printf(" %*s ", ncpus, "");
2071
2072 printf(" %-*s %9s %9s %9s", comm_width,
2073 "[tid/pid]", "(msec)", "(msec)", "(msec)");
2074
2075 if (sched->show_state)
2076 printf(" %5s", "");
2077
2078 printf("\n");
2079
2080 /*
2081 * separator
2082 */
2083 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
2084
2085 if (sched->show_cpu_visual)
2086 printf(" %.*s ", ncpus, graph_dotted_line);
2087
2088 printf(" %.*s %.9s %.9s %.9s", comm_width,
2089 graph_dotted_line, graph_dotted_line, graph_dotted_line,
2090 graph_dotted_line);
2091
2092 if (sched->show_state)
2093 printf(" %.5s", graph_dotted_line);
2094
2095 printf("\n");
2096}
2097
2098static void timehist_print_sample(struct perf_sched *sched,
2099 struct evsel *evsel,
2100 struct perf_sample *sample,
2101 struct addr_location *al,
2102 struct thread *thread,
2103 u64 t, const char state)
2104{
2105 struct thread_runtime *tr = thread__priv(thread);
2106 const char *next_comm = evsel__strval(evsel, sample, "next_comm");
2107 const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2108 u32 max_cpus = sched->max_cpu.cpu + 1;
2109 char tstr[64];
2110 char nstr[30];
2111 u64 wait_time;
2112
2113 if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
2114 return;
2115
2116 timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
2117 printf("%15s [%04d] ", tstr, sample->cpu);
2118
2119 if (sched->show_cpu_visual) {
2120 u32 i;
2121 char c;
2122
2123 printf(" ");
2124 for (i = 0; i < max_cpus; ++i) {
2125 /* flag idle times with 'i'; others are sched events */
2126 if (i == sample->cpu)
2127 c = (thread__tid(thread) == 0) ? 'i' : 's';
2128 else
2129 c = ' ';
2130 printf("%c", c);
2131 }
2132 printf(" ");
2133 }
2134
2135 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2136
2137 wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
2138 print_sched_time(wait_time, 6);
2139
2140 print_sched_time(tr->dt_delay, 6);
2141 print_sched_time(tr->dt_run, 6);
2142
2143 if (sched->show_state)
2144 printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);
2145
2146 if (sched->show_next) {
2147 snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
2148 printf(" %-*s", comm_width, nstr);
2149 }
2150
2151 if (sched->show_wakeups && !sched->show_next)
2152 printf(" %-*s", comm_width, "");
2153
2154 if (thread__tid(thread) == 0)
2155 goto out;
2156
2157 if (sched->show_callchain)
2158 printf(" ");
2159
2160 sample__fprintf_sym(sample, al, 0,
2161 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
2162 EVSEL__PRINT_CALLCHAIN_ARROW |
2163 EVSEL__PRINT_SKIP_IGNORED,
2164 get_tls_callchain_cursor(), symbol_conf.bt_stop_list, stdout);
2165
2166out:
2167 printf("\n");
2168}
2169
2170/*
2171 * Explanation of delta-time stats:
2172 *
2173 * t = time of current schedule out event
2174 * tprev = time of previous sched out event
2175 * also time of schedule-in event for current task
2176 * last_time = time of last sched change event for current task
2177 * (i.e, time process was last scheduled out)
2178 * ready_to_run = time of wakeup for current task
2179 *
2180 * -----|------------|------------|------------|------
2181 *    last         ready        tprev          t
2182 *    time         to run
2183 *
2184 *      |-------- dt_wait --------|
2185 *                   |- dt_delay -|-- dt_run --|
2186 *
2187 * dt_run = run time of current task
2188 * dt_wait = time between last schedule out event for task and tprev
2189 * represents time spent off the cpu
2190 * dt_delay = time between wakeup and schedule-in of task
2191 */
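/*
 * Worked example (hypothetical numbers): with last_time = 100us,
 * ready_to_run = 150us, tprev = 180us, t = 200us and last_state 'S',
 * timehist_update_runtime_stats() below yields
 *   dt_run   = t - tprev         = 20us
 *   dt_delay = tprev - ready     = 30us
 *   dt_sleep = tprev - last_time = 80us  ('S': neither 'R' nor 'D')
 */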
2192
2193static void timehist_update_runtime_stats(struct thread_runtime *r,
2194 u64 t, u64 tprev)
2195{
2196 r->dt_delay = 0;
2197 r->dt_sleep = 0;
2198 r->dt_iowait = 0;
2199 r->dt_preempt = 0;
2200 r->dt_run = 0;
2201
2202 if (tprev) {
2203 r->dt_run = t - tprev;
2204 if (r->ready_to_run) {
2205 if (r->ready_to_run > tprev)
2206 pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
2207 else
2208 r->dt_delay = tprev - r->ready_to_run;
2209 }
2210
2211 if (r->last_time > tprev)
2212 pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
2213 else if (r->last_time) {
2214 u64 dt_wait = tprev - r->last_time;
2215
2216 if (r->last_state == 'R')
2217 r->dt_preempt = dt_wait;
2218 else if (r->last_state == 'D')
2219 r->dt_iowait = dt_wait;
2220 else
2221 r->dt_sleep = dt_wait;
2222 }
2223 }
2224
2225 update_stats(&r->run_stats, r->dt_run);
2226
2227 r->total_run_time += r->dt_run;
2228 r->total_delay_time += r->dt_delay;
2229 r->total_sleep_time += r->dt_sleep;
2230 r->total_iowait_time += r->dt_iowait;
2231 r->total_preempt_time += r->dt_preempt;
2232}
2233
2234static bool is_idle_sample(struct perf_sample *sample,
2235 struct evsel *evsel)
2236{
2237 /* pid 0 == swapper == idle task */
2238 if (evsel__name_is(evsel, "sched:sched_switch"))
2239 return evsel__intval(evsel, sample, "prev_pid") == 0;
2240
2241 return sample->pid == 0;
2242}
2243
2244static void save_task_callchain(struct perf_sched *sched,
2245 struct perf_sample *sample,
2246 struct evsel *evsel,
2247 struct machine *machine)
2248{
2249 struct callchain_cursor *cursor;
2250 struct thread *thread;
2251
2252 /* want main thread for process - has maps */
2253 thread = machine__findnew_thread(machine, sample->pid, sample->pid);
2254 if (thread == NULL) {
2255 pr_debug("Failed to get thread for pid %d.\n", sample->pid);
2256 return;
2257 }
2258
2259 if (!sched->show_callchain || sample->callchain == NULL)
2260 return;
2261
2262 cursor = get_tls_callchain_cursor();
2263
2264 if (thread__resolve_callchain(thread, cursor, evsel, sample,
2265 NULL, NULL, sched->max_stack + 2) != 0) {
2266 if (verbose > 0)
2267 pr_err("Failed to resolve callchain. Skipping\n");
2268
2269 return;
2270 }
2271
2272 callchain_cursor_commit(cursor);
2273
2274 while (true) {
2275 struct callchain_cursor_node *node;
2276 struct symbol *sym;
2277
2278 node = callchain_cursor_current(cursor);
2279 if (node == NULL)
2280 break;
2281
2282 sym = node->ms.sym;
2283 if (sym) {
2284 if (!strcmp(sym->name, "schedule") ||
2285 !strcmp(sym->name, "__schedule") ||
2286 !strcmp(sym->name, "preempt_schedule"))
2287 sym->ignore = 1;
2288 }
2289
2290 callchain_cursor_advance(cursor);
2291 }
2292}
2293
2294static int init_idle_thread(struct thread *thread)
2295{
2296 struct idle_thread_runtime *itr;
2297
2298 thread__set_comm(thread, idle_comm, 0);
2299
2300 itr = zalloc(sizeof(*itr));
2301 if (itr == NULL)
2302 return -ENOMEM;
2303
2304 init_stats(&itr->tr.run_stats);
2305 callchain_init(&itr->callchain);
2306 callchain_cursor_reset(&itr->cursor);
2307 thread__set_priv(thread, itr);
2308
2309 return 0;
2310}
2311
2312/*
2313 * Track idle stats per cpu by maintaining a local thread
2314 * struct for the idle task on each cpu.
2315 */
2316static int init_idle_threads(int ncpu)
2317{
2318 int i, ret;
2319
2320 idle_threads = zalloc(ncpu * sizeof(struct thread *));
2321 if (!idle_threads)
2322 return -ENOMEM;
2323
2324 idle_max_cpu = ncpu;
2325
2326 /* allocate the actual thread struct if needed */
2327 for (i = 0; i < ncpu; ++i) {
2328 idle_threads[i] = thread__new(0, 0);
2329 if (idle_threads[i] == NULL)
2330 return -ENOMEM;
2331
2332 ret = init_idle_thread(idle_threads[i]);
2333 if (ret < 0)
2334 return ret;
2335 }
2336
2337 return 0;
2338}
2339
2340static void free_idle_threads(void)
2341{
2342 int i;
2343
2344 if (idle_threads == NULL)
2345 return;
2346
2347 for (i = 0; i < idle_max_cpu; ++i) {
2348 if ((idle_threads[i]))
2349 thread__delete(idle_threads[i]);
2350 }
2351
2352 free(idle_threads);
2353}
2354
2355static struct thread *get_idle_thread(int cpu)
2356{
2357 /*
2358 * expand/allocate array of pointers to local thread
2359 * structs if needed
2360 */
2361 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2362 int i, j = __roundup_pow_of_two(cpu+1);
2363 void *p;
2364
2365 p = realloc(idle_threads, j * sizeof(struct thread *));
2366 if (!p)
2367 return NULL;
2368
2369 idle_threads = (struct thread **) p;
2370 for (i = idle_max_cpu; i < j; ++i)
2371 idle_threads[i] = NULL;
2372
2373 idle_max_cpu = j;
2374 }
2375
2376 /* allocate a new thread struct if needed */
2377 if (idle_threads[cpu] == NULL) {
2378 idle_threads[cpu] = thread__new(0, 0);
2379
2380 if (idle_threads[cpu]) {
2381 if (init_idle_thread(idle_threads[cpu]) < 0)
2382 return NULL;
2383 }
2384 }
2385
2386 return idle_threads[cpu];
2387}
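/*
 * Like evsel__save_time() above, this grows in powers of two: a first
 * sample on cpu 9 (illustrative) would resize idle_threads to 16
 * slots, NULL-filling the new tail before slot 9 is populated.
 */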
2388
2389static void save_idle_callchain(struct perf_sched *sched,
2390 struct idle_thread_runtime *itr,
2391 struct perf_sample *sample)
2392{
2393 struct callchain_cursor *cursor;
2394
2395 if (!sched->show_callchain || sample->callchain == NULL)
2396 return;
2397
2398 cursor = get_tls_callchain_cursor();
2399 if (cursor == NULL)
2400 return;
2401
2402 callchain_cursor__copy(&itr->cursor, cursor);
2403}
2404
2405static struct thread *timehist_get_thread(struct perf_sched *sched,
2406 struct perf_sample *sample,
2407 struct machine *machine,
2408 struct evsel *evsel)
2409{
2410 struct thread *thread;
2411
2412 if (is_idle_sample(sample, evsel)) {
2413 thread = get_idle_thread(sample->cpu);
2414 if (thread == NULL)
2415 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2416
2417 } else {
2418 /* there were samples with tid 0 but non-zero pid */
2419 thread = machine__findnew_thread(machine, sample->pid,
2420 sample->tid ?: sample->pid);
2421 if (thread == NULL) {
2422 pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2423 sample->tid);
2424 }
2425
2426 save_task_callchain(sched, sample, evsel, machine);
2427 if (sched->idle_hist) {
2428 struct thread *idle;
2429 struct idle_thread_runtime *itr;
2430
2431 idle = get_idle_thread(sample->cpu);
2432 if (idle == NULL) {
2433 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2434 return NULL;
2435 }
2436
2437 itr = thread__priv(idle);
2438 if (itr == NULL)
2439 return NULL;
2440
2441 itr->last_thread = thread;
2442
2443 /* copy task callchain when entering to idle */
2444 if (evsel__intval(evsel, sample, "next_pid") == 0)
2445 save_idle_callchain(sched, itr, sample);
2446 }
2447 }
2448
2449 return thread;
2450}
2451
2452static bool timehist_skip_sample(struct perf_sched *sched,
2453 struct thread *thread,
2454 struct evsel *evsel,
2455 struct perf_sample *sample)
2456{
2457 bool rc = false;
2458
2459 if (thread__is_filtered(thread)) {
2460 rc = true;
2461 sched->skipped_samples++;
2462 }
2463
2464 if (sched->idle_hist) {
2465 if (!evsel__name_is(evsel, "sched:sched_switch"))
2466 rc = true;
2467 else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
2468 evsel__intval(evsel, sample, "next_pid") != 0)
2469 rc = true;
2470 }
2471
2472 return rc;
2473}
2474
2475static void timehist_print_wakeup_event(struct perf_sched *sched,
2476 struct evsel *evsel,
2477 struct perf_sample *sample,
2478 struct machine *machine,
2479 struct thread *awakened)
2480{
2481 struct thread *thread;
2482 char tstr[64];
2483
2484 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2485 if (thread == NULL)
2486 return;
2487
2488 /* show wakeup unless both awakee and awaker are filtered */
2489 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2490 timehist_skip_sample(sched, awakened, evsel, sample)) {
2491 return;
2492 }
2493
2494 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2495 printf("%15s [%04d] ", tstr, sample->cpu);
2496 if (sched->show_cpu_visual)
2497 printf(" %*s ", sched->max_cpu.cpu + 1, "");
2498
2499 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2500
2501 /* dt spacer */
2502 printf(" %9s %9s %9s ", "", "", "");
2503
2504 printf("awakened: %s", timehist_get_commstr(awakened));
2505
2506 printf("\n");
2507}
2508
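/*
 * When a trace contains both sched_waking and sched_wakeup, handling
 * both would account every wakeup twice, so perf_sched__timehist()
 * rewires the sched_wakeup handler to this no-op and keeps only
 * sched_waking.
 */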
2509static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
2510 union perf_event *event __maybe_unused,
2511 struct evsel *evsel __maybe_unused,
2512 struct perf_sample *sample __maybe_unused,
2513 struct machine *machine __maybe_unused)
2514{
2515 return 0;
2516}
2517
2518static int timehist_sched_wakeup_event(struct perf_tool *tool,
2519 union perf_event *event __maybe_unused,
2520 struct evsel *evsel,
2521 struct perf_sample *sample,
2522 struct machine *machine)
2523{
2524 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2525 struct thread *thread;
2526 struct thread_runtime *tr = NULL;
2527 /* want pid of awakened task not pid in sample */
2528 const u32 pid = evsel__intval(evsel, sample, "pid");
2529
2530 thread = machine__findnew_thread(machine, 0, pid);
2531 if (thread == NULL)
2532 return -1;
2533
2534 tr = thread__get_runtime(thread);
2535 if (tr == NULL)
2536 return -1;
2537
2538 if (tr->ready_to_run == 0)
2539 tr->ready_to_run = sample->time;
2540
2541 /* show wakeups if requested */
2542 if (sched->show_wakeups &&
2543 !perf_time__skip_sample(&sched->ptime, sample->time))
2544 timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2545
2546 return 0;
2547}
2548
2549static void timehist_print_migration_event(struct perf_sched *sched,
2550 struct evsel *evsel,
2551 struct perf_sample *sample,
2552 struct machine *machine,
2553 struct thread *migrated)
2554{
2555 struct thread *thread;
2556 char tstr[64];
2557 u32 max_cpus;
2558 u32 ocpu, dcpu;
2559
2560 if (sched->summary_only)
2561 return;
2562
2563 max_cpus = sched->max_cpu.cpu + 1;
2564 ocpu = evsel__intval(evsel, sample, "orig_cpu");
2565 dcpu = evsel__intval(evsel, sample, "dest_cpu");
2566
2567 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2568 if (thread == NULL)
2569 return;
2570
2571 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2572 timehist_skip_sample(sched, migrated, evsel, sample)) {
2573 return;
2574 }
2575
2576 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2577 printf("%15s [%04d] ", tstr, sample->cpu);
2578
2579 if (sched->show_cpu_visual) {
2580 u32 i;
2581 char c;
2582
2583 printf(" ");
2584 for (i = 0; i < max_cpus; ++i) {
2585 c = (i == sample->cpu) ? 'm' : ' ';
2586 printf("%c", c);
2587 }
2588 printf(" ");
2589 }
2590
2591 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2592
2593 /* dt spacer */
2594 printf(" %9s %9s %9s ", "", "", "");
2595
2596 printf("migrated: %s", timehist_get_commstr(migrated));
2597 printf(" cpu %d => %d", ocpu, dcpu);
2598
2599 printf("\n");
2600}
2601
2602static int timehist_migrate_task_event(struct perf_tool *tool,
2603 union perf_event *event __maybe_unused,
2604 struct evsel *evsel,
2605 struct perf_sample *sample,
2606 struct machine *machine)
2607{
2608 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2609 struct thread *thread;
2610 struct thread_runtime *tr = NULL;
2611 /* want pid of migrated task not pid in sample */
2612 const u32 pid = evsel__intval(evsel, sample, "pid");
2613
2614 thread = machine__findnew_thread(machine, 0, pid);
2615 if (thread == NULL)
2616 return -1;
2617
2618 tr = thread__get_runtime(thread);
2619 if (tr == NULL)
2620 return -1;
2621
2622 tr->migrations++;
2623
2624 /* show migrations if requested */
2625 timehist_print_migration_event(sched, evsel, sample, machine, thread);
2626
2627 return 0;
2628}
2629
2630static int timehist_sched_change_event(struct perf_tool *tool,
2631 union perf_event *event,
2632 struct evsel *evsel,
2633 struct perf_sample *sample,
2634 struct machine *machine)
2635{
2636 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2637 struct perf_time_interval *ptime = &sched->ptime;
2638 struct addr_location al;
2639 struct thread *thread;
2640 struct thread_runtime *tr = NULL;
2641 u64 tprev, t = sample->time;
2642 int rc = 0;
2643 const char state = evsel__taskstate(evsel, sample, "prev_state");
2644
2645 addr_location__init(&al);
2646 if (machine__resolve(machine, &al, sample) < 0) {
2647 pr_err("problem processing %d event. skipping it\n",
2648 event->header.type);
2649 rc = -1;
2650 goto out;
2651 }
2652
2653 thread = timehist_get_thread(sched, sample, machine, evsel);
2654 if (thread == NULL) {
2655 rc = -1;
2656 goto out;
2657 }
2658
2659 if (timehist_skip_sample(sched, thread, evsel, sample))
2660 goto out;
2661
2662 tr = thread__get_runtime(thread);
2663 if (tr == NULL) {
2664 rc = -1;
2665 goto out;
2666 }
2667
2668 tprev = evsel__get_time(evsel, sample->cpu);
2669
2670 /*
2671 * If start time given:
2672 * - sample time is under window user cares about - skip sample
2673 * - tprev is under window user cares about - reset to start of window
2674 */
2675 if (ptime->start && ptime->start > t)
2676 goto out;
2677
2678 if (tprev && ptime->start > tprev)
2679 tprev = ptime->start;
2680
2681 /*
2682 * If end time given:
2683 * - previous sched event is out of window - we are done
2684 * - sample time is beyond window user cares about - reset it
2685 * to close out stats for the time window of interest
2686 */
2687 if (ptime->end) {
2688 if (tprev > ptime->end)
2689 goto out;
2690
2691 if (t > ptime->end)
2692 t = ptime->end;
2693 }
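 /*
 * Clamp example (hypothetical numbers in the trace clock): with a
 * --time window of 10..20, a slice with tprev = 8 and t = 12 is
 * accounted as 10..12, one with tprev = 18 and t = 25 as 18..20,
 * and one with tprev = 21 was already skipped above.
 */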
2694
2695 if (!sched->idle_hist || thread__tid(thread) == 0) {
2696 if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
2697 timehist_update_runtime_stats(tr, t, tprev);
2698
2699 if (sched->idle_hist) {
2700 struct idle_thread_runtime *itr = (void *)tr;
2701 struct thread_runtime *last_tr;
2702
2703 BUG_ON(thread__tid(thread) != 0);
2704
2705 if (itr->last_thread == NULL)
2706 goto out;
2707
2708 /* add current idle time as last thread's runtime */
2709 last_tr = thread__get_runtime(itr->last_thread);
2710 if (last_tr == NULL)
2711 goto out;
2712
2713 timehist_update_runtime_stats(last_tr, t, tprev);
2714 /*
2715 * Clear the last thread's delta times: they are not updated
2716 * here and would otherwise show a stale value next time.
2717 * Only the total run time and run stats matter.
2718 */
2719 last_tr->dt_run = 0;
2720 last_tr->dt_delay = 0;
2721 last_tr->dt_sleep = 0;
2722 last_tr->dt_iowait = 0;
2723 last_tr->dt_preempt = 0;
2724
2725 if (itr->cursor.nr)
2726 callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2727
2728 itr->last_thread = NULL;
2729 }
2730 }
2731
2732 if (!sched->summary_only)
2733 timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2734
2735out:
2736 if (sched->hist_time.start == 0 && t >= ptime->start)
2737 sched->hist_time.start = t;
2738 if (ptime->end == 0 || t <= ptime->end)
2739 sched->hist_time.end = t;
2740
2741 if (tr) {
2742 /* time of this sched_switch event becomes last time task seen */
2743 tr->last_time = sample->time;
2744
2745 /* last state is used to determine where to account wait time */
2746 tr->last_state = state;
2747
2748 /* task scheduled out: preempted ('R') means ready to run again now, else reset ready_to_run */
2749 if (state == 'R')
2750 tr->ready_to_run = t;
2751 else
2752 tr->ready_to_run = 0;
2753 }
2754
2755 evsel__save_time(evsel, sample->time, sample->cpu);
2756
2757 addr_location__exit(&al);
2758 return rc;
2759}
2760
2761static int timehist_sched_switch_event(struct perf_tool *tool,
2762 union perf_event *event,
2763 struct evsel *evsel,
2764 struct perf_sample *sample,
2765 struct machine *machine __maybe_unused)
2766{
2767 return timehist_sched_change_event(tool, event, evsel, sample, machine);
2768}
2769
2770static int process_lost(struct perf_tool *tool __maybe_unused,
2771 union perf_event *event,
2772 struct perf_sample *sample,
2773 struct machine *machine __maybe_unused)
2774{
2775 char tstr[64];
2776
2777 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2778 printf("%15s ", tstr);
2779 printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2780
2781 return 0;
2782}
2783
2784
2785static void print_thread_runtime(struct thread *t,
2786 struct thread_runtime *r)
2787{
2788 double mean = avg_stats(&r->run_stats);
2789 float stddev;
2790
2791 printf("%*s %5d %9" PRIu64 " ",
2792 comm_width, timehist_get_commstr(t), thread__ppid(t),
2793 (u64) r->run_stats.n);
2794
2795 print_sched_time(r->total_run_time, 8);
2796 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2797 print_sched_time(r->run_stats.min, 6);
2798 printf(" ");
2799 print_sched_time((u64) mean, 6);
2800 printf(" ");
2801 print_sched_time(r->run_stats.max, 6);
2802 printf(" ");
2803 printf("%5.2f", stddev);
2804 printf(" %5" PRIu64, r->migrations);
2805 printf("\n");
2806}
2807
2808static void print_thread_waittime(struct thread *t,
2809 struct thread_runtime *r)
2810{
2811 printf("%*s %5d %9" PRIu64 " ",
2812 comm_width, timehist_get_commstr(t), thread__ppid(t),
2813 (u64) r->run_stats.n);
2814
2815 print_sched_time(r->total_run_time, 8);
2816 print_sched_time(r->total_sleep_time, 6);
2817 printf(" ");
2818 print_sched_time(r->total_iowait_time, 6);
2819 printf(" ");
2820 print_sched_time(r->total_preempt_time, 6);
2821 printf(" ");
2822 print_sched_time(r->total_delay_time, 6);
2823 printf("\n");
2824}
2825
2826struct total_run_stats {
2827 struct perf_sched *sched;
2828 u64 sched_count;
2829 u64 task_count;
2830 u64 total_run_time;
2831};
2832
2833static int show_thread_runtime(struct thread *t, void *priv)
2834{
2835 struct total_run_stats *stats = priv;
2836 struct thread_runtime *r;
2837
2838 if (thread__is_filtered(t))
2839 return 0;
2840
2841 r = thread__priv(t);
2842 if (r && r->run_stats.n) {
2843 stats->task_count++;
2844 stats->sched_count += r->run_stats.n;
2845 stats->total_run_time += r->total_run_time;
2846
2847 if (stats->sched->show_state)
2848 print_thread_waittime(t, r);
2849 else
2850 print_thread_runtime(t, r);
2851 }
2852
2853 return 0;
2854}
2855
2856static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2857{
2858 const char *sep = " <- ";
2859 struct callchain_list *chain;
2860 size_t ret = 0;
2861 char bf[1024];
2862 bool first;
2863
2864 if (node == NULL)
2865 return 0;
2866
2867 ret = callchain__fprintf_folded(fp, node->parent);
2868 first = (ret == 0);
2869
2870 list_for_each_entry(chain, &node->val, list) {
2871 if (chain->ip >= PERF_CONTEXT_MAX)
2872 continue;
2873 if (chain->ms.sym && chain->ms.sym->ignore)
2874 continue;
2875 ret += fprintf(fp, "%s%s", first ? "" : sep,
2876 callchain_list__sym_name(chain, bf, sizeof(bf),
2877 false));
2878 first = false;
2879 }
2880
2881 return ret;
2882}
2883
2884static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
2885{
2886 size_t ret = 0;
2887 FILE *fp = stdout;
2888 struct callchain_node *chain;
2889 struct rb_node *rb_node = rb_first_cached(root);
2890
2891 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
2892 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
2893 graph_dotted_line);
2894
2895 while (rb_node) {
2896 chain = rb_entry(rb_node, struct callchain_node, rb_node);
2897 rb_node = rb_next(rb_node);
2898
2899 ret += fprintf(fp, " ");
2900 print_sched_time(chain->hit, 12);
2901 ret += 16; /* print_sched_time returns 2nd arg + 4 */
2902 ret += fprintf(fp, " %8d ", chain->count);
2903 ret += callchain__fprintf_folded(fp, chain);
2904 ret += fprintf(fp, "\n");
2905 }
2906
2907 return ret;
2908}
2909
2910static void timehist_print_summary(struct perf_sched *sched,
2911 struct perf_session *session)
2912{
2913 struct machine *m = &session->machines.host;
2914 struct total_run_stats totals;
2915 u64 task_count;
2916 struct thread *t;
2917 struct thread_runtime *r;
2918 int i;
2919 u64 hist_time = sched->hist_time.end - sched->hist_time.start;
2920
2921 memset(&totals, 0, sizeof(totals));
2922 totals.sched = sched;
2923
2924 if (sched->idle_hist) {
2925 printf("\nIdle-time summary\n");
2926 printf("%*s parent sched-out ", comm_width, "comm");
2927 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
2928 } else if (sched->show_state) {
2929 printf("\nWait-time summary\n");
2930 printf("%*s parent sched-in ", comm_width, "comm");
2931 printf(" run-time sleep iowait preempt delay\n");
2932 } else {
2933 printf("\nRuntime summary\n");
2934 printf("%*s parent sched-in ", comm_width, "comm");
2935 printf(" run-time min-run avg-run max-run stddev migrations\n");
2936 }
2937 printf("%*s (count) ", comm_width, "");
2938 printf(" (msec) (msec) (msec) (msec) %s\n",
2939 sched->show_state ? "(msec)" : "%");
2940 printf("%.117s\n", graph_dotted_line);
2941
2942 machine__for_each_thread(m, show_thread_runtime, &totals);
2943 task_count = totals.task_count;
2944 if (!task_count)
2945 printf("<no still running tasks>\n");
2946
2947 /* CPU idle stats not tracked when samples were skipped */
2948 if (sched->skipped_samples && !sched->idle_hist)
2949 return;
2950
2951 printf("\nIdle stats:\n");
2952 for (i = 0; i < idle_max_cpu; ++i) {
2953 if (cpu_list && !test_bit(i, cpu_bitmap))
2954 continue;
2955
2956 t = idle_threads[i];
2957 if (!t)
2958 continue;
2959
2960 r = thread__priv(t);
2961 if (r && r->run_stats.n) {
2962 totals.sched_count += r->run_stats.n;
2963 printf(" CPU %2d idle for ", i);
2964 print_sched_time(r->total_run_time, 6);
2965 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
2966 } else
2967 printf(" CPU %2d idle entire time window\n", i);
2968 }
2969
2970 if (sched->idle_hist && sched->show_callchain) {
2971 callchain_param.mode = CHAIN_FOLDED;
2972 callchain_param.value = CCVAL_PERIOD;
2973
2974 callchain_register_param(&callchain_param);
2975
2976 printf("\nIdle stats by callchain:\n");
2977 for (i = 0; i < idle_max_cpu; ++i) {
2978 struct idle_thread_runtime *itr;
2979
2980 t = idle_threads[i];
2981 if (!t)
2982 continue;
2983
2984 itr = thread__priv(t);
2985 if (itr == NULL)
2986 continue;
2987
2988 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
2989 0, &callchain_param);
2990
2991 printf(" CPU %2d:", i);
2992 print_sched_time(itr->tr.total_run_time, 6);
2993 printf(" msec\n");
2994 timehist_print_idlehist_callchain(&itr->sorted_root);
2995 printf("\n");
2996 }
2997 }
2998
2999 printf("\n"
3000 " Total number of unique tasks: %" PRIu64 "\n"
3001 "Total number of context switches: %" PRIu64 "\n",
3002 totals.task_count, totals.sched_count);
3003
3004 printf(" Total run time (msec): ");
3005 print_sched_time(totals.total_run_time, 2);
3006 printf("\n");
3007
3008 printf(" Total scheduling time (msec): ");
3009 print_sched_time(hist_time, 2);
3010 printf(" (x %d)\n", sched->max_cpu.cpu);
3011}
3012
3013typedef int (*sched_handler)(struct perf_tool *tool,
3014 union perf_event *event,
3015 struct evsel *evsel,
3016 struct perf_sample *sample,
3017 struct machine *machine);
3018
3019static int perf_timehist__process_sample(struct perf_tool *tool,
3020 union perf_event *event,
3021 struct perf_sample *sample,
3022 struct evsel *evsel,
3023 struct machine *machine)
3024{
3025 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
3026 int err = 0;
3027 struct perf_cpu this_cpu = {
3028 .cpu = sample->cpu,
3029 };
3030
3031 if (this_cpu.cpu > sched->max_cpu.cpu)
3032 sched->max_cpu = this_cpu;
3033
3034 if (evsel->handler != NULL) {
3035 sched_handler f = evsel->handler;
3036
3037 err = f(tool, event, evsel, sample, machine);
3038 }
3039
3040 return err;
3041}
3042
3043static int timehist_check_attr(struct perf_sched *sched,
3044 struct evlist *evlist)
3045{
3046 struct evsel *evsel;
3047 struct evsel_runtime *er;
3048
3049 list_for_each_entry(evsel, &evlist->core.entries, core.node) {
3050 er = evsel__get_runtime(evsel);
3051 if (er == NULL) {
3052 pr_err("Failed to allocate memory for evsel runtime data\n");
3053 return -1;
3054 }
3055
3056 /* only need to save callchain related to sched_switch event */
3057 if (sched->show_callchain &&
3058 evsel__name_is(evsel, "sched:sched_switch") &&
3059 !evsel__has_callchain(evsel)) {
3060 pr_info("Samples of sched_switch event do not have callchains.\n");
3061 sched->show_callchain = 0;
3062 symbol_conf.use_callchain = 0;
3063 }
3064 }
3065
3066 return 0;
3067}
3068
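/*
 * Entry point for 'perf sched timehist'. Typical workflow
 * (illustrative):
 *
 *   perf sched record -- sleep 1
 *   perf sched timehist
 *
 * The analysis needs at least the sched_switch tracepoint in the
 * input file, and prefers sched_waking over sched_wakeup when both
 * were captured.
 */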
3069static int perf_sched__timehist(struct perf_sched *sched)
3070{
3071 struct evsel_str_handler handlers[] = {
3072 { "sched:sched_switch", timehist_sched_switch_event, },
3073 { "sched:sched_wakeup", timehist_sched_wakeup_event, },
3074 { "sched:sched_waking", timehist_sched_wakeup_event, },
3075 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
3076 };
3077 const struct evsel_str_handler migrate_handlers[] = {
3078 { "sched:sched_migrate_task", timehist_migrate_task_event, },
3079 };
3080 struct perf_data data = {
3081 .path = input_name,
3082 .mode = PERF_DATA_MODE_READ,
3083 .force = sched->force,
3084 };
3085
3086 struct perf_session *session;
3087 struct evlist *evlist;
3088 int err = -1;
3089
3090 /*
3091 * event handlers for timehist option
3092 */
3093 sched->tool.sample = perf_timehist__process_sample;
3094 sched->tool.mmap = perf_event__process_mmap;
3095 sched->tool.comm = perf_event__process_comm;
3096 sched->tool.exit = perf_event__process_exit;
3097 sched->tool.fork = perf_event__process_fork;
3098 sched->tool.lost = process_lost;
3099 sched->tool.attr = perf_event__process_attr;
3100 sched->tool.tracing_data = perf_event__process_tracing_data;
3101 sched->tool.build_id = perf_event__process_build_id;
3102
3103 sched->tool.ordered_events = true;
3104 sched->tool.ordering_requires_timestamps = true;
3105
3106 symbol_conf.use_callchain = sched->show_callchain;
3107
3108 session = perf_session__new(&data, &sched->tool);
3109 if (IS_ERR(session))
3110 return PTR_ERR(session);
3111
3112 if (cpu_list) {
3113 err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
3114 if (err < 0)
3115 goto out;
3116 }
3117
3118 evlist = session->evlist;
3119
3120 symbol__init(&session->header.env);
3121
3122 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
3123 pr_err("Invalid time string\n");
3124 err = -EINVAL;
goto out;
3125 }
3126
3127 if (timehist_check_attr(sched, evlist) != 0)
3128 goto out;
3129
3130 setup_pager();
3131
3132 /* prefer sched_waking if it is captured */
3133 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
3134 handlers[1].handler = timehist_sched_wakeup_ignore;
3135
3136 /* setup per-evsel handlers */
3137 if (perf_session__set_tracepoints_handlers(session, handlers))
3138 goto out;
3139
3140 /* sched_switch event at a minimum needs to exist */
3141 if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
3142 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3143 goto out;
3144 }
3145
3146 if (sched->show_migrations &&
3147 perf_session__set_tracepoints_handlers(session, migrate_handlers))
3148 goto out;
3149
3150 /* pre-allocate struct for per-CPU idle stats */
3151 sched->max_cpu.cpu = session->header.env.nr_cpus_online;
3152 if (sched->max_cpu.cpu == 0)
3153 sched->max_cpu.cpu = 4;
3154 if (init_idle_threads(sched->max_cpu.cpu))
3155 goto out;
3156
3157 /* summary_only implies summary option, but don't overwrite summary if set */
3158 if (sched->summary_only)
3159 sched->summary = sched->summary_only;
3160
3161 if (!sched->summary_only)
3162 timehist_header(sched);
3163
3164 err = perf_session__process_events(session);
3165 if (err) {
3166 pr_err("Failed to process events, error %d", err);
3167 goto out;
3168 }
3169
3170 sched->nr_events = evlist->stats.nr_events[0];
3171 sched->nr_lost_events = evlist->stats.total_lost;
3172 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3173
3174 if (sched->summary)
3175 timehist_print_summary(sched, session);
3176
3177out:
3178 free_idle_threads();
3179 perf_session__delete(session);
3180
3181 return err;
3182}
3183
3184
3185static void print_bad_events(struct perf_sched *sched)
3186{
3187 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3188 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3189 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3190 sched->nr_unordered_timestamps, sched->nr_timestamps);
3191 }
3192 if (sched->nr_lost_events && sched->nr_events) {
3193 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3194 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3195 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3196 }
3197 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3198 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
3199 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3200 sched->nr_context_switch_bugs, sched->nr_timestamps);
3201 if (sched->nr_lost_events)
3202 printf(" (due to lost events?)");
3203 printf("\n");
3204 }
3205}
3206
3207static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3208{
3209 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3210 struct work_atoms *this;
3211 const char *comm = thread__comm_str(data->thread), *this_comm;
3212 bool leftmost = true;
3213
3214 while (*new) {
3215 int cmp;
3216
3217 this = container_of(*new, struct work_atoms, node);
3218 parent = *new;
3219
3220 this_comm = thread__comm_str(this->thread);
3221 cmp = strcmp(comm, this_comm);
3222 if (cmp > 0) {
3223 new = &((*new)->rb_left);
3224 } else if (cmp < 0) {
3225 new = &((*new)->rb_right);
3226 leftmost = false;
3227 } else {
3228 this->num_merged++;
3229 this->total_runtime += data->total_runtime;
3230 this->nb_atoms += data->nb_atoms;
3231 this->total_lat += data->total_lat;
3232 list_splice(&data->work_list, &this->work_list);
3233 if (this->max_lat < data->max_lat) {
3234 this->max_lat = data->max_lat;
3235 this->max_lat_start = data->max_lat_start;
3236 this->max_lat_end = data->max_lat_end;
3237 }
3238 zfree(&data);
3239 return;
3240 }
3241 }
3242
3243 data->num_merged++;
3244 rb_link_node(&data->node, parent, new);
3245 rb_insert_color_cached(&data->node, root, leftmost);
3246}
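/*
 * Merge example (illustrative): three "kworker/0:1" entries with 2, 3
 * and 5 atoms collapse into a single node with nb_atoms == 10 and
 * num_merged == 3; runtimes and latencies are summed and the largest
 * max_lat (with its start/end stamps) is kept.
 */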
3247
3248static void perf_sched__merge_lat(struct perf_sched *sched)
3249{
3250 struct work_atoms *data;
3251 struct rb_node *node;
3252
3253 if (sched->skip_merge)
3254 return;
3255
3256 while ((node = rb_first_cached(&sched->atom_root))) {
3257 rb_erase_cached(node, &sched->atom_root);
3258 data = rb_entry(node, struct work_atoms, node);
3259 __merge_work_atoms(&sched->merged_atom_root, data);
3260 }
3261}
3262
3263static int setup_cpus_switch_event(struct perf_sched *sched)
3264{
3265 unsigned int i;
3266
3267 sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
3268 if (!sched->cpu_last_switched)
3269 return -1;
3270
3271 sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
3272 if (!sched->curr_pid) {
3273 zfree(&sched->cpu_last_switched);
3274 return -1;
3275 }
3276
3277 for (i = 0; i < MAX_CPUS; i++)
3278 sched->curr_pid[i] = -1;
3279
3280 return 0;
3281}
3282
3283static void free_cpus_switch_event(struct perf_sched *sched)
3284{
3285 zfree(&sched->curr_pid);
3286 zfree(&sched->cpu_last_switched);
3287}
3288
3289static int perf_sched__lat(struct perf_sched *sched)
3290{
3291 int rc = -1;
3292 struct rb_node *next;
3293
3294 setup_pager();
3295
3296 if (setup_cpus_switch_event(sched))
3297 return rc;
3298
3299 if (perf_sched__read_events(sched))
3300 goto out_free_cpus_switch_event;
3301
3302 perf_sched__merge_lat(sched);
3303 perf_sched__sort_lat(sched);
3304
3305 printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
3306 printf(" Task | Runtime ms | Count | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n");
3307 printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
3308
3309 next = rb_first_cached(&sched->sorted_atom_root);
3310
3311 while (next) {
3312 struct work_atoms *work_list;
3313
3314 work_list = rb_entry(next, struct work_atoms, node);
3315 output_lat_thread(sched, work_list);
3316 next = rb_next(next);
3317 thread__zput(work_list->thread);
3318 }
3319
3320 printf(" -----------------------------------------------------------------------------------------------------------------\n");
3321 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
3322 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3323
3324 printf(" ---------------------------------------------------\n");
3325
3326 print_bad_events(sched);
3327 printf("\n");
3328
3329 rc = 0;
3330
3331out_free_cpus_switch_event:
3332 free_cpus_switch_event(sched);
3333 return rc;
3334}
3335
3336static int setup_map_cpus(struct perf_sched *sched)
3337{
3338 sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
3339
3340 if (sched->map.comp) {
3341 sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
3342 if (!sched->map.comp_cpus)
3343 return -1;
3344 }
3345
3346 if (sched->map.cpus_str) {
3347 sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
3348 if (!sched->map.cpus) {
3349 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3350 zfree(&sched->map.comp_cpus);
3351 return -1;
3352 }
3353 }
3354
3355 return 0;
3356}
3357
3358static int setup_color_pids(struct perf_sched *sched)
3359{
3360 struct perf_thread_map *map;
3361
3362 if (!sched->map.color_pids_str)
3363 return 0;
3364
3365 map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3366 if (!map) {
3367 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3368 return -1;
3369 }
3370
3371 sched->map.color_pids = map;
3372 return 0;
3373}
3374
3375static int setup_color_cpus(struct perf_sched *sched)
3376{
3377 struct perf_cpu_map *map;
3378
3379 if (!sched->map.color_cpus_str)
3380 return 0;
3381
3382 map = perf_cpu_map__new(sched->map.color_cpus_str);
3383 if (!map) {
3384 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
3385 return -1;
3386 }
3387
3388 sched->map.color_cpus = map;
3389 return 0;
3390}
3391
3392static int perf_sched__map(struct perf_sched *sched)
3393{
3394 int rc = -1;
3395
3396 sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
3397 if (!sched->curr_thread)
3398 return rc;
3399
3400 sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread)));
3401 if (!sched->curr_out_thread)
3402 goto out_free_curr_thread;
3403
3404 if (setup_cpus_switch_event(sched))
3405 goto out_free_curr_thread;
3406
3407 if (setup_map_cpus(sched))
3408 goto out_free_cpus_switch_event;
3409
3410 if (setup_color_pids(sched))
3411 goto out_put_map_cpus;
3412
3413 if (setup_color_cpus(sched))
3414 goto out_put_color_pids;
3415
3416 setup_pager();
3417 if (perf_sched__read_events(sched))
3418 goto out_put_color_cpus;
3419
3420 rc = 0;
3421 print_bad_events(sched);
3422
3423out_put_color_cpus:
3424 perf_cpu_map__put(sched->map.color_cpus);
3425
3426out_put_color_pids:
3427 perf_thread_map__put(sched->map.color_pids);
3428
3429out_put_map_cpus:
3430 zfree(&sched->map.comp_cpus);
3431 perf_cpu_map__put(sched->map.cpus);
3432
3433out_free_cpus_switch_event:
3434 free_cpus_switch_event(sched);
3435
3436out_free_curr_thread:
3437 zfree(&sched->curr_thread);
zfree(&sched->curr_out_thread);
3438 return rc;
3439}
3440
3441static int perf_sched__replay(struct perf_sched *sched)
3442{
3443 int ret;
3444 unsigned long i;
3445
3446 mutex_init(&sched->start_work_mutex);
3447 mutex_init(&sched->work_done_wait_mutex);
3448
3449 ret = setup_cpus_switch_event(sched);
3450 if (ret)
3451 goto out_mutex_destroy;
3452
3453 calibrate_run_measurement_overhead(sched);
3454 calibrate_sleep_measurement_overhead(sched);
3455
3456 test_calibrations(sched);
3457
3458 ret = perf_sched__read_events(sched);
3459 if (ret)
3460 goto out_free_cpus_switch_event;
3461
3462 printf("nr_run_events: %ld\n", sched->nr_run_events);
3463 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
3464 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
3465
3466 if (sched->targetless_wakeups)
3467 printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
3468 if (sched->multitarget_wakeups)
3469 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3470 if (sched->nr_run_events_optimized)
3471 printf("run atoms optimized: %ld\n",
3472 sched->nr_run_events_optimized);
3473
3474 print_task_traces(sched);
3475 add_cross_task_wakeups(sched);
3476
3477 sched->thread_funcs_exit = false;
3478 create_tasks(sched);
3479 printf("------------------------------------------------------------\n");
3480 if (sched->replay_repeat == 0)
3481 sched->replay_repeat = UINT_MAX;
3482
3483 for (i = 0; i < sched->replay_repeat; i++)
3484 run_one_test(sched);
3485
3486 sched->thread_funcs_exit = true;
3487 destroy_tasks(sched);
3488
3489out_free_cpus_switch_event:
3490 free_cpus_switch_event(sched);
3491
3492out_mutex_destroy:
3493 mutex_destroy(&sched->start_work_mutex);
3494 mutex_destroy(&sched->work_done_wait_mutex);
3495 return ret;
3496}
3497
3498static void setup_sorting(struct perf_sched *sched, const struct option *options,
3499 const char * const usage_msg[])
3500{
3501 char *tmp, *tok, *str = strdup(sched->sort_order);
3502
3503 for (tok = strtok_r(str, ", ", &tmp);
3504 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3505 if (sort_dimension__add(tok, &sched->sort_list) < 0) {
3506 usage_with_options_msg(usage_msg, options,
3507 "Unknown --sort key: `%s'", tok);
3508 }
3509 }
3510
3511 free(str);
3512
3513 sort_dimension__add("pid", &sched->cmp_pid);
3514}
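/*
 * Example (illustrative): 'perf sched latency --sort max,avg' queues
 * the max and avg keys on sort_list for display ordering, while the
 * unconditional "pid" key on cmp_pid keeps atom collection keyed by
 * pid regardless of the chosen output sort.
 */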
3515
3516static bool schedstat_events_exposed(void)
3517{
3518 /*
3519 * Select "sched:sched_stat_wait" event to check
3520 * whether schedstat tracepoints are exposed.
3521 */
3522 return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
3523 false : true;
3524}
3525
3526static int __cmd_record(int argc, const char **argv)
3527{
3528 unsigned int rec_argc, i, j;
3529 char **rec_argv;
3530 const char **rec_argv_copy;
3531 const char * const record_args[] = {
3532 "record",
3533 "-a",
3534 "-R",
3535 "-m", "1024",
3536 "-c", "1",
3537 "-e", "sched:sched_switch",
3538 "-e", "sched:sched_stat_runtime",
3539 "-e", "sched:sched_process_fork",
3540 "-e", "sched:sched_wakeup_new",
3541 "-e", "sched:sched_migrate_task",
3542 };
3543
3544 /*
3545 * The tracepoints trace_sched_stat_{wait, sleep, iowait} are
3546 * not exposed to userspace unless CONFIG_SCHEDSTATS is set; to
3547 * keep "perf sched record" from failing on such kernels, only
3548 * request the schedstat events when they are actually exposed.
3549 */
3550 const char * const schedstat_args[] = {
3551 "-e", "sched:sched_stat_wait",
3552 "-e", "sched:sched_stat_sleep",
3553 "-e", "sched:sched_stat_iowait",
3554 };
3555 unsigned int schedstat_argc = schedstat_events_exposed() ?
3556 ARRAY_SIZE(schedstat_args) : 0;
3557
3558 struct tep_event *waking_event;
3559 int ret;
3560
3561 /*
3562 * +2 for either "-e", "sched:sched_wakeup" or
3563 * "-e", "sched:sched_waking"
3564 */
3565 rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
3566 rec_argv = calloc(rec_argc + 1, sizeof(char *));
3567 if (rec_argv == NULL)
3568 return -ENOMEM;
3569 rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
3570 if (rec_argv_copy == NULL) {
3571 free(rec_argv);
3572 return -ENOMEM;
3573 }
3574
3575 for (i = 0; i < ARRAY_SIZE(record_args); i++)
3576 rec_argv[i] = strdup(record_args[i]);
3577
3578 rec_argv[i++] = strdup("-e");
3579 waking_event = trace_event__tp_format("sched", "sched_waking");
3580 if (!IS_ERR(waking_event))
3581 rec_argv[i++] = strdup("sched:sched_waking");
3582 else
3583 rec_argv[i++] = strdup("sched:sched_wakeup");
3584
3585 for (j = 0; j < schedstat_argc; j++)
3586 rec_argv[i++] = strdup(schedstat_args[j]);
3587
3588 for (j = 1; j < (unsigned int)argc; j++, i++)
3589 rec_argv[i] = strdup(argv[j]);
3590
3591 BUG_ON(i != rec_argc);
3592
3593 memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
3594 ret = cmd_record(rec_argc, rec_argv_copy);
3595
3596 for (i = 0; i < rec_argc; i++)
3597 free(rec_argv[i]);
3598 free(rec_argv);
3599 free(rec_argv_copy);
3600
3601 return ret;
3602}
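/*
 * Effective command (illustrative, on a kernel exposing sched_waking
 * and the schedstat tracepoints):
 *
 *   perf record -a -R -m 1024 -c 1 \
 *     -e sched:sched_switch -e sched:sched_stat_runtime \
 *     -e sched:sched_process_fork -e sched:sched_wakeup_new \
 *     -e sched:sched_migrate_task -e sched:sched_waking \
 *     -e sched:sched_stat_wait -e sched:sched_stat_sleep \
 *     -e sched:sched_stat_iowait <user args>
 */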
3603
3604int cmd_sched(int argc, const char **argv)
3605{
3606 static const char default_sort_order[] = "avg, max, switch, runtime";
3607 struct perf_sched sched = {
3608 .tool = {
3609 .sample = perf_sched__process_tracepoint_sample,
3610 .comm = perf_sched__process_comm,
3611 .namespaces = perf_event__process_namespaces,
3612 .lost = perf_event__process_lost,
3613 .fork = perf_sched__process_fork_event,
3614 .ordered_events = true,
3615 },
3616 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
3617 .sort_list = LIST_HEAD_INIT(sched.sort_list),
3618 .sort_order = default_sort_order,
3619 .replay_repeat = 10,
3620 .profile_cpu = -1,
3621 .next_shortname1 = 'A',
3622 .next_shortname2 = '0',
3623 .skip_merge = 0,
3624 .show_callchain = 1,
3625 .max_stack = 5,
3626 };
3627 const struct option sched_options[] = {
3628 OPT_STRING('i', "input", &input_name, "file",
3629 "input file name"),
3630 OPT_INCR('v', "verbose", &verbose,
3631 "be more verbose (show symbol address, etc)"),
3632 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
3633 "dump raw trace in ASCII"),
3634 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
3635 OPT_END()
3636 };
3637 const struct option latency_options[] = {
3638 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
3639 "sort by key(s): runtime, switch, avg, max"),
3640 OPT_INTEGER('C', "CPU", &sched.profile_cpu,
3641 "CPU to profile on"),
3642 OPT_BOOLEAN('p', "pids", &sched.skip_merge,
3643 "latency stats per pid instead of per comm"),
3644 OPT_PARENT(sched_options)
3645 };
3646 const struct option replay_options[] = {
3647 OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
3648 "repeat the workload replay N times (0: infinite)"),
3649 OPT_PARENT(sched_options)
3650 };
3651 const struct option map_options[] = {
3652 OPT_BOOLEAN(0, "compact", &sched.map.comp,
3653 "map output in compact mode"),
3654 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
3655 "highlight given pids in map"),
3656 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
3657 "highlight given CPUs in map"),
3658 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
3659 "display given CPUs in map"),
3660 OPT_STRING(0, "task-name", &sched.map.task_name, "task",
3661 "map output only for the given task name(s)."),
3662 OPT_BOOLEAN(0, "fuzzy-name", &sched.map.fuzzy,
3663 "given command name can be partially matched (fuzzy matching)"),
3664 OPT_PARENT(sched_options)
3665 };
3666 const struct option timehist_options[] = {
3667 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
3668 "file", "vmlinux pathname"),
3669 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
3670 "file", "kallsyms pathname"),
3671 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
3672 "Display call chains if present (default on)"),
3673 OPT_UINTEGER(0, "max-stack", &sched.max_stack,
3674 "Maximum number of functions to display backtrace."),
3675 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
3676 "Look for files with symbols relative to this directory"),
3677 OPT_BOOLEAN('s', "summary", &sched.summary_only,
3678 "Show only a summary of scheduling events with statistics"),
3679 OPT_BOOLEAN('S', "with-summary", &sched.summary,
3680 "Show all scheduling events and a summary with statistics"),
3681 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
3682 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
3683 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
3684 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
3685 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
3686 OPT_STRING(0, "time", &sched.time_str, "str",
3687 "Time span for analysis (start,stop)"),
3688 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
3689 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3690 "analyze events only for given process id(s)"),
3691 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3692 "analyze events only for given thread id(s)"),
3693 OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
3694 OPT_PARENT(sched_options)
3695 };
3696
3697 const char * const latency_usage[] = {
3698 "perf sched latency [<options>]",
3699 NULL
3700 };
3701 const char * const replay_usage[] = {
3702 "perf sched replay [<options>]",
3703 NULL
3704 };
3705 const char * const map_usage[] = {
3706 "perf sched map [<options>]",
3707 NULL
3708 };
3709 const char * const timehist_usage[] = {
3710 "perf sched timehist [<options>]",
3711 NULL
3712 };
3713 const char *const sched_subcommands[] = { "record", "latency", "map",
3714 "replay", "script",
3715 "timehist", NULL };
3716 const char *sched_usage[] = {
3717 NULL,
3718 NULL
3719 };
3720 struct trace_sched_handler lat_ops = {
3721 .wakeup_event = latency_wakeup_event,
3722 .switch_event = latency_switch_event,
3723 .runtime_event = latency_runtime_event,
3724 .migrate_task_event = latency_migrate_task_event,
3725 };
3726 struct trace_sched_handler map_ops = {
3727 .switch_event = map_switch_event,
3728 };
3729 struct trace_sched_handler replay_ops = {
3730 .wakeup_event = replay_wakeup_event,
3731 .switch_event = replay_switch_event,
3732 .fork_event = replay_fork_event,
3733 };
3734 int ret;
3735
3736 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
3737 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3738 if (!argc)
3739 usage_with_options(sched_usage, sched_options);
3740
3741 /*
3742 * Aliased to 'perf script' for now:
3743 */
3744 if (!strcmp(argv[0], "script")) {
3745 return cmd_script(argc, argv);
3746 } else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
3747 return __cmd_record(argc, argv);
3748 } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
3749 sched.tp_handler = &lat_ops;
3750 if (argc > 1) {
3751 argc = parse_options(argc, argv, latency_options, latency_usage, 0);
3752 if (argc)
3753 usage_with_options(latency_usage, latency_options);
3754 }
3755 setup_sorting(&sched, latency_options, latency_usage);
3756 return perf_sched__lat(&sched);
3757 } else if (!strcmp(argv[0], "map")) {
3758 if (argc) {
3759 argc = parse_options(argc, argv, map_options, map_usage, 0);
3760 if (argc)
3761 usage_with_options(map_usage, map_options);
3762
3763 if (sched.map.task_name) {
3764 sched.map.task_names = strlist__new(sched.map.task_name, NULL);
3765 if (sched.map.task_names == NULL) {
3766 fprintf(stderr, "Failed to parse task names\n");
3767 return -1;
3768 }
3769 }
3770 }
3771 sched.tp_handler = &map_ops;
3772 setup_sorting(&sched, latency_options, latency_usage);
3773 return perf_sched__map(&sched);
3774 } else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
3775 sched.tp_handler = &replay_ops;
3776 if (argc) {
3777 argc = parse_options(argc, argv, replay_options, replay_usage, 0);
3778 if (argc)
3779 usage_with_options(replay_usage, replay_options);
3780 }
3781 return perf_sched__replay(&sched);
3782 } else if (!strcmp(argv[0], "timehist")) {
3783 if (argc) {
3784 argc = parse_options(argc, argv, timehist_options,
3785 timehist_usage, 0);
3786 if (argc)
3787 usage_with_options(timehist_usage, timehist_options);
3788 }
3789 if ((sched.show_wakeups || sched.show_next) &&
3790 sched.summary_only) {
3791 pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
3792 parse_options_usage(timehist_usage, timehist_options, "s", true);
3793 if (sched.show_wakeups)
3794 parse_options_usage(NULL, timehist_options, "w", true);
3795 if (sched.show_next)
3796 parse_options_usage(NULL, timehist_options, "n", true);
3797 return -EINVAL;
3798 }
3799 ret = symbol__validate_sym_arguments();
3800 if (ret)
3801 return ret;
3802
3803 return perf_sched__timehist(&sched);
3804 } else {
3805 usage_with_options(sched_usage, sched_options);
3806 }
3807
3808 return 0;
3809}