perf tools: Correct size given to memset
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/data_map.h"

#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

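/*
 * builtin-sched.c: 'perf sched' support - scheduler measurement and
 * replay. The tool consumes scheduler tracepoints recorded with
 * 'perf sched record' and offers three views of the data: 'latency'
 * (per-task wakeup-to-schedule-in delays), 'map' (a per-CPU context
 * switch map) and 'replay' (re-executes the recorded run/sleep/wakeup
 * pattern with real threads).
 */
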
static char const *input_name = "perf.data";

static struct perf_header *header;
static u64 sample_type;

static char default_sort_order[] = "avg, max, switch, runtime";
static char *sort_order = default_sort_order;

static int profile_cpu = -1;

#define PR_SET_NAME	15		/* Set process name */
#define MAX_CPUS	4096

static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;

#define COMM_LEN	20
#define SYM_LEN		129

#define MAX_PID		65536

static unsigned long nr_tasks;

struct sched_atom;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	int			specific_wait;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64 start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static u64 cpu_usage;
static u64 runavg_cpu_usage;
static u64 parent_cpu_usage;
static u64 runavg_parent_cpu_usage;

static unsigned long nr_runs;
static u64 sum_runtime;
static u64 sum_fluct;
static u64 run_avg;

static unsigned long replay_repeat = 10;
static unsigned long nr_timestamps;
static unsigned long nr_unordered_timestamps;
static unsigned long nr_state_machine_bugs;
static unsigned long nr_context_switch_bugs;
static unsigned long nr_events;
static unsigned long nr_lost_chunks;
static unsigned long nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root atom_root, sorted_atom_root;

static u64 all_runtime;
static u64 all_count;

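/*
 * Replay timing helpers: wall-clock time comes from CLOCK_MONOTONIC,
 * and burn_nsecs() spins for a requested duration while compensating
 * for the measured run_measurement_overhead. Both overheads are
 * calibrated once at startup (see calibrate_*_measurement_overhead
 * below).
 */
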
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}

static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}

static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}

static void
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
	int ret = 0;
	u64 now;
	long long delta;

	now = get_nsecs();
	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_MIGRATION:
		break;
	default:
		BUG_ON(1);
	}
}

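/*
 * CPU usage is measured two ways: the parent process uses getrusage(),
 * while each worker thread reads a PERF_COUNT_SW_TASK_CLOCK counter it
 * opens on itself. The counter-based numbers are the ones the replay
 * statistics rely on; the rusage-based ones are noted as less accurate
 * in run_one_test() below.
 */
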
static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static int self_open_counters(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

	if (fd < 0)
		die("Error: sys_perf_event_open() syscall returned "
		    "with %d (%s)\n", fd, strerror(errno));
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	fd = self_open_counters();

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

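/*
 * Worker/parent handshake: each worker posts ready_for_work, then
 * blocks on start_work_mutex (held by the parent) before running its
 * atoms; when done it posts work_done_sem and parks on
 * work_done_wait_mutex until the parent re-arms the next iteration in
 * wait_for_tasks().
 */
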
static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}

static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}

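/*
 * Helpers to pull typed fields out of a raw tracepoint payload by
 * name, using the parsed event format: raw_field_value() for scalars,
 * raw_field_ptr() for arrays such as comm[16].
 */
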
#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)

struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_migrate_task_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 cpu;
};

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);

	void (*migrate_task_event)(struct trace_migrate_task_event *,
				   struct event *,
				   int cpu,
				   u64 timestamp,
				   struct thread *thread);
};

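/*
 * Each subcommand installs its own set of callbacks into this vtable:
 * replay_ops rebuilds a runnable task model, lat_ops collects
 * wakeup-latency atoms, and map_ops draws the per-CPU switch map.
 */
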
static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}

static u64 cpu_last_switched[MAX_CPUS];

static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}

static void
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}

static struct trace_sched_handler replay_ops = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(cmp_pid);

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

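/*
 * Per-thread work_atoms live in an rbtree. During collection the tree
 * is ordered by the cmp_pid key list; for output, sort_lat() moves
 * every node into sorted_atom_root ordered by the user-selected
 * sort_list (see setup_sorting()).
 */
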
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms)
		die("No memory");

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
}

static void
latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   int cpu __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
{
	/* should insert the newcomer */
}

__used
static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

39aeb52f 961add_sched_out_event(struct work_atoms *atoms,
962 char run_state,
963 u64 timestamp)
cdce9d73 964{
36479484 965 struct work_atom *atom = zalloc(sizeof(*atom));
b1ffe8f3 966 if (!atom)
cdce9d73
FW
967 die("Non memory");
968
aa1ab9d2
FW
969 atom->sched_out_time = timestamp;
970
39aeb52f 971 if (run_state == 'R') {
b1ffe8f3 972 atom->state = THREAD_WAIT_CPU;
aa1ab9d2 973 atom->wake_up_time = atom->sched_out_time;
c6ced611
FW
974 }
975
39aeb52f 976 list_add_tail(&atom->list, &atoms->work_list);
cdce9d73
FW
977}
978
static void
add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat)
		atoms->max_lat = delta;
	atoms->nb_atoms++;
}

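/*
 * Scheduling latency as reported by 'perf sched latency' is the delta
 * computed above: the time between a task's wakeup and the moment it
 * is actually scheduled in, accumulated per thread into
 * total_lat/max_lat over nb_atoms wakeups.
 */
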
static void
latency_switch_event(struct trace_switch_event *switch_event,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid);
	sched_in = threads__findnew(switch_event->next_pid);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		thread_atoms_insert(sched_out);
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events)
			die("out-event: Internal tree error");
	}
	add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		thread_atoms_insert(sched_in);
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events)
			die("in-event: Internal tree error");
		/*
		 * Task came in that we have not heard about yet,
		 * add in an initial atom in runnable state:
		 */
		add_sched_out_event(in_events, 'R', timestamp);
	}
	add_sched_in_event(in_events, timestamp);
}

static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct event *event __used,
		      int cpu,
		      u64 timestamp,
		      struct thread *this_thread __used)
{
	struct thread *thread = threads__findnew(runtime_event->pid);
	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms)
			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
}

static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return;

	wakee = threads__findnew(wakeup_event->pid);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms)
			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't
	 * make useless noise.
	 */
	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
}

static void
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
			   struct event *__event __used,
			   int cpu __used,
			   u64 timestamp,
			   struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (profile_cpu == -1)
		return;

	migrant = threads__findnew(migrate_task_event->pid);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(migrant);
		register_pid(migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
		if (!atoms)
			die("migration-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		nr_unordered_timestamps++;
}

static struct trace_sched_handler lat_ops = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
	.migrate_task_event	= latency_migrate_task_event,
};

static void output_lat_thread(struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
	       (double)work_list->total_runtime / 1e6,
	       work_list->nb_atoms, (double)avg / 1e6,
	       (double)work_list->max_lat / 1e6);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name		= "pid",
	.cmp		= pid_cmp,
};

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name		= "avg",
	.cmp		= avg_cmp,
};

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name		= "max",
	.cmp		= max_cmp,
};

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name		= "switch",
	.cmp		= switch_cmp,
};

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name		= "runtime",
	.cmp		= runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void setup_sorting(void);

static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}

static struct trace_sched_handler *trace_handler;

static void
process_sched_wakeup_event(void *data,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, data);

	FILL_ARRAY(wakeup_event, comm, event, data);
	FILL_FIELD(wakeup_event, pid, event, data);
	FILL_FIELD(wakeup_event, prio, event, data);
	FILL_FIELD(wakeup_event, success, event, data);
	FILL_FIELD(wakeup_event, cpu, event, data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}

/*
 * Track the current task - that way we can know whether there's any
 * weird events, such as a task being switched away that is not current.
 */
static int max_cpu;

static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';

static void
map_switch_event(struct trace_switch_event *switch_event,
		 struct event *event __used,
		 int this_cpu,
		 u64 timestamp,
		 struct thread *thread __used)
{
	struct thread *sched_out, *sched_in;
	int new_shortname;
	u64 timestamp0;
	s64 delta;
	int cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid);
	sched_in = threads__findnew(switch_event->next_pid);

	curr_thread[this_cpu] = sched_in;

	printf(" ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1='A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2='0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);
			else
				printf(".  ");
		} else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	} else {
		printf("\n");
	}
}

419ab0d6 1465static void
f48f669d 1466process_sched_switch_event(void *data,
419ab0d6 1467 struct event *event,
0ec04e16 1468 int this_cpu,
419ab0d6
FW
1469 u64 timestamp __used,
1470 struct thread *thread __used)
1471{
1472 struct trace_switch_event switch_event;
1473
f48f669d 1474 FILL_COMMON_FIELDS(switch_event, event, data);
419ab0d6 1475
f48f669d
XG
1476 FILL_ARRAY(switch_event, prev_comm, event, data);
1477 FILL_FIELD(switch_event, prev_pid, event, data);
1478 FILL_FIELD(switch_event, prev_prio, event, data);
1479 FILL_FIELD(switch_event, prev_state, event, data);
1480 FILL_ARRAY(switch_event, next_comm, event, data);
1481 FILL_FIELD(switch_event, next_pid, event, data);
1482 FILL_FIELD(switch_event, next_prio, event, data);
419ab0d6 1483
0ec04e16 1484 if (curr_pid[this_cpu] != (u32)-1) {
c8a37751
IM
1485 /*
1486 * Are we trying to switch away a PID that is
1487 * not current?
1488 */
0ec04e16 1489 if (curr_pid[this_cpu] != switch_event.prev_pid)
c8a37751
IM
1490 nr_context_switch_bugs++;
1491 }
0ec04e16
IM
1492 if (trace_handler->switch_event)
1493 trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);
c8a37751 1494
0ec04e16 1495 curr_pid[this_cpu] = switch_event.next_pid;
419ab0d6
FW
1496}
1497
static void
process_sched_runtime_event(void *data,
			    struct event *event,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, data);
	FILL_FIELD(runtime_event, pid, event, data);
	FILL_FIELD(runtime_event, runtime, event, data);
	FILL_FIELD(runtime_event, vruntime, event, data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
}

static void
process_sched_fork_event(void *data,
			 struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, data);

	FILL_ARRAY(fork_event, parent_comm, event, data);
	FILL_FIELD(fork_event, parent_pid, event, data);
	FILL_ARRAY(fork_event, child_comm, event, data);
	FILL_FIELD(fork_event, child_pid, event, data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}

static void
process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);
}

static void
process_sched_migrate_task_event(void *data,
				 struct event *event,
				 int cpu __used,
				 u64 timestamp __used,
				 struct thread *thread __used)
{
	struct trace_migrate_task_event migrate_task_event;

	FILL_COMMON_FIELDS(migrate_task_event, event, data);

	FILL_ARRAY(migrate_task_event, comm, event, data);
	FILL_FIELD(migrate_task_event, pid, event, data);
	FILL_FIELD(migrate_task_event, prio, event, data);
	FILL_FIELD(migrate_task_event, cpu, event, data);

	if (trace_handler->migrate_task_event)
		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
}

static void
process_raw_event(event_t *raw_event __used, void *data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_migrate_task"))
		process_sched_migrate_task_event(data, event, cpu, timestamp, thread);
}

static int process_sample_event(event_t *event)
{
	struct sample_data data;
	struct thread *thread;

	if (!(sample_type & PERF_SAMPLE_RAW))
		return 0;

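	/*
	 * Clear the whole sample_data struct before parsing into it.
	 * Passing sizeof(data) here - rather than the size of some
	 * other object - is likely the memset-size correction that
	 * this commit's title refers to.
	 */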
	memset(&data, 0, sizeof(data));
	data.time = -1;
	data.cpu = -1;
	data.period = -1;

	event__parse_sample(event, sample_type, &data);

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		event->header.misc,
		data.pid, data.tid,
		(void *)(long)data.ip,
		(long long)data.period);

	thread = threads__findnew(data.pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
		return 0;

	process_raw_event(event, data.raw_data, data.cpu, data.time, thread);

	return 0;
}

static int process_lost_event(event_t *event __used)
{
	nr_lost_chunks++;
	nr_lost_events += event->lost.lost;

	return 0;
}

static int sample_type_check(u64 type)
{
	sample_type = type;

	if (!(sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr,
			"No trace sample to read. Did you call perf record "
			"without -R?");
		return -1;
	}

	return 0;
}

static struct perf_file_handler file_handler = {
	.process_sample_event	= process_sample_event,
	.process_comm_event	= event__process_comm,
	.process_lost_event	= process_lost_event,
	.sample_type_check	= sample_type_check,
};

static int read_events(void)
{
	register_idle_thread();
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, input_name, 0, 0,
				       &event__cwdlen, &event__cwd);
}

static void print_bad_events(void)
{
	if (nr_unordered_timestamps && nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}

static void __cmd_lat(void)
{
	struct rb_node *next;

	setup_pager();
	read_events();
	sort_lat();

	printf("\n -----------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms |\n");
	printf(" -----------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events();
	printf("\n");
}

static struct trace_sched_handler map_ops = {
	.wakeup_event		= NULL,
	.switch_event		= map_switch_event,
	.runtime_event		= NULL,
	.fork_event		= NULL,
};

static void __cmd_map(void)
{
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	read_events();
	print_bad_events();
}

static void __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	read_events();

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized:  %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();
}

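/*
 * Example session (illustrative; option spellings are the ones
 * defined below):
 *
 *	perf sched record          # trace scheduling system-wide
 *	perf sched latency -s max  # worst wakeup latencies first
 *	perf sched map             # per-CPU context-switch map
 *	perf sched replay -r 10    # re-run the recorded pattern 10 times
 */
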
static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|trace}",
	NULL
};

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_INTEGER('r', "repeat", &replay_repeat,
		    "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add("pid", &cmp_pid);
}

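/*
 * 'perf sched record' is a thin wrapper: it forwards to 'perf record'
 * with the scheduler tracepoints below pre-selected, plus any extra
 * arguments the user passed:
 */
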
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf trace' for now:
	 */
	if (!strcmp(argv[0], "trace"))
		return cmd_trace(argc, argv, prefix);

	symbol__init(0);
	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		__cmd_lat();
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
		setup_sorting();
		__cmd_map();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		__cmd_replay();
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}