#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/prctl.h>

#include <semaphore.h>
static char const *input_name = "perf.data";

static u64 sample_type;

static char default_sort_order[] = "avg, max, switch, runtime";
static char *sort_order = default_sort_order;

static int profile_cpu = -1;

#define PR_SET_NAME	15		/* Set process name */
#define MAX_CPUS	4096

static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;

#define COMM_LEN	20

#define MAX_PID		65536

static unsigned long nr_tasks;
struct sched_atom;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};
static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64 start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static u64 cpu_usage;
static u64 runavg_cpu_usage;
static u64 parent_cpu_usage;
static u64 runavg_parent_cpu_usage;

static unsigned long nr_runs;
static u64 sum_runtime;
static u64 sum_fluct;
static u64 run_avg;

static unsigned long replay_repeat = 10;
static unsigned long nr_timestamps;
static unsigned long nr_unordered_timestamps;
static unsigned long nr_state_machine_bugs;
static unsigned long nr_context_switch_bugs;
static unsigned long nr_events;
static unsigned long nr_lost_chunks;
static unsigned long nr_lost_events;
#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
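/*
 * Note: sched_out_state() below indexes this string directly with the
 * sched_switch tracepoint's prev_state value, so the character order
 * must match the task state values (R, S, D, T, t, Z, X).
 */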
enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE,
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_at;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
static struct rb_root atom_root, sorted_atom_root;

static u64 all_runtime;
static u64 all_count;
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}
static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}
static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}
static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}
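/*
 * Both calibration routines keep the minimum of ten trials, on the
 * assumption that the fastest run is the one least perturbed by noise.
 * burn_nsecs() compensates for run_measurement_overhead in its loop
 * condition, so replayed busy-loops come out close to the recorded
 * durations.
 */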
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}
static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}
static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * into it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}
static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}
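/*
 * A wakeup is modelled as a semaphore pairing: the wakee's pending
 * SLEEP atom gets a fresh wait_sem that its replay thread will
 * sem_wait() on, and the waker's WAKEUP atom carries the same
 * semaphore to sem_post(). Wakeups with no matching sleep
 * (targetless) or whose sleep already has a waker (multitarget) are
 * only counted, not replayed.
 */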
static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}
static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}
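/*
 * pid_to_task[] gives O(1) lookup by PID during trace parsing, while
 * tasks[] keeps a dense array for the create/wait loops of the replay
 * phase; both index the same task_desc objects.
 */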
static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}
static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}
static void
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
	int ret = 0;
	u64 now;
	long long delta;

	now = get_nsecs();
	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_MIGRATION:
			break;
		default:
			BUG_ON(1);
	}
}
static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}
static int self_open_counters(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

	if (fd < 0)
		die("Error: sys_perf_event_open() syscall returned "
		    "with %d (%s)\n", fd, strerror(errno));
	return fd;
}
static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	fd = self_open_counters();

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}
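/*
 * Replay thread lifecycle: each thread signals ready_for_work, blocks
 * on start_work_mutex until the parent releases it, replays its event
 * atoms while measuring its own task-clock counter, posts
 * work_done_sem, and then parks on work_done_wait_mutex until the next
 * iteration - the 'goto again' makes the whole cycle repeatable for
 * -r/--repeat.
 */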
static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}
static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}
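/*
 * The runavg_* accumulators are exponential moving averages with a
 * 9/10 decay, i.e. avg' = (avg*9 + sample)/10: each further run scales
 * an old sample's weight by 0.9, so after ten more runs it retains
 * only about 35% (0.9^10) of its original contribution.
 */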
static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}
static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}
#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)
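/*
 * Example (hypothetical destination struct 'ev'):
 *
 *	FILL_FIELD(ev, pid, event, data);
 *
 * expands to
 *
 *	ev.pid = (typeof(ev.pid)) raw_field_value(event, "pid", data);
 *
 * i.e. the field is looked up by name in the tracepoint's format
 * description and cast to the C type of the destination member.
 */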
struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_migrate_task_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 cpu;
};
struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);

	void (*migrate_task_event)(struct trace_migrate_task_event *,
				   struct event *,
				   int cpu,
				   u64 timestamp,
				   struct thread *thread);
};
static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}
static u64 cpu_last_switched[MAX_CPUS];
static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}
static void
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}
static struct trace_sched_handler replay_ops = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};
struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(cmp_pid);
static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}
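/*
 * thread_atoms_search() and thread_atoms_insert() both key atom_root
 * by cmp_pid; the user-selected sort_list is only applied later, when
 * sort_lat() re-inserts everything into sorted_atom_root, so search
 * and insert must always be called with matching sort lists.
 */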
static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
static void thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms)
		die("No memory");

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
}
static void
latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   int cpu __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
{
	/* should insert the newcomer */
}
static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}
static void
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom)
		die("No memory");

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
}
static void
add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}
static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}
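/*
 * The scheduling latency of one atom is sched_in_time - wake_up_time:
 * the time a runnable task spent waiting for a CPU. Atoms that appear
 * to have been scheduled in before their recorded wakeup are marked
 * THREAD_IGNORE rather than allowed to produce negative latencies.
 */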
static void
latency_switch_event(struct trace_switch_event *switch_event,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid);
	sched_in = threads__findnew(switch_event->next_pid);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		thread_atoms_insert(sched_out);
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events)
			die("out-event: Internal tree error");
	}
	add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		thread_atoms_insert(sched_in);
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events)
			die("in-event: Internal tree error");
		/*
		 * A task came in that we have not heard about yet,
		 * add in an initial atom in runnable state:
		 */
		add_sched_out_event(in_events, 'R', timestamp);
	}
	add_sched_in_event(in_events, timestamp);
}
static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct event *event __used,
		      int cpu,
		      u64 timestamp,
		      struct thread *this_thread __used)
{
	struct thread *thread = threads__findnew(runtime_event->pid);
	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms)
			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
}
static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return;

	wakee = threads__findnew(wakeup_event->pid);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms)
			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't
	 * make useless noise.
	 */
	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
}
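/*
 * State-machine sanity: a wakeup should normally hit a task that is
 * sleeping. Anything else is counted in nr_state_machine_bugs and
 * reported by print_bad_events(), except when filtering on a single
 * CPU, where missing events are expected.
 */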
static void
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
			   struct event *__event __used,
			   int cpu __used,
			   u64 timestamp,
			   struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (profile_cpu == -1)
		return;

	migrant = threads__findnew(migrate_task_event->pid);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(migrant);
		register_pid(migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
		if (!atoms)
			die("migration-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		nr_unordered_timestamps++;
}
static struct trace_sched_handler lat_ops = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
	.migrate_task_event	= latency_migrate_task_event,
};
static void output_lat_thread(struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
		(double)work_list->total_runtime / 1e6,
		work_list->nb_atoms, (double)avg / 1e6,
		(double)work_list->max_lat / 1e6,
		(double)work_list->max_lat_at / 1e9);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name		= "pid",
	.cmp		= pid_cmp,
};
static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;
	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name		= "avg",
	.cmp		= avg_cmp,
};
static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name		= "max",
	.cmp		= max_cmp,
};
static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name		= "switch",
	.cmp		= switch_cmp,
};
static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name		= "runtime",
	.cmp		= runtime_cmp,
};
static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
static LIST_HEAD(sort_list);
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}
static void setup_sorting(void);
static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}
static struct trace_sched_handler *trace_handler;
static void
process_sched_wakeup_event(void *data,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, data);

	FILL_ARRAY(wakeup_event, comm, event, data);
	FILL_FIELD(wakeup_event, pid, event, data);
	FILL_FIELD(wakeup_event, prio, event, data);
	FILL_FIELD(wakeup_event, success, event, data);
	FILL_FIELD(wakeup_event, cpu, event, data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}
/*
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
 */
static int max_cpu;

static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };
static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';
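/*
 * 'perf sched map' names tasks A0..Z9 in order of first appearance;
 * next_shortname1 advances first, so the two-character shortnames
 * cycle through 26*10 combinations before wrapping.
 */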
static void
map_switch_event(struct trace_switch_event *switch_event,
		 struct event *event __used,
		 int this_cpu,
		 u64 timestamp,
		 struct thread *thread __used)
{
	struct thread *sched_out, *sched_in;
	int new_shortname;
	u64 timestamp0;
	s64 delta;
	int cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid);
	sched_in = threads__findnew(switch_event->next_pid);

	curr_thread[this_cpu] = sched_in;

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1='A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2='0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);
			else
				printf(".  ");
		} else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	} else {
		printf("\n");
	}
}
static void
process_sched_switch_event(void *data,
			   struct event *event,
			   int this_cpu,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, data);

	FILL_ARRAY(switch_event, prev_comm, event, data);
	FILL_FIELD(switch_event, prev_pid, event, data);
	FILL_FIELD(switch_event, prev_prio, event, data);
	FILL_FIELD(switch_event, prev_state, event, data);
	FILL_ARRAY(switch_event, next_comm, event, data);
	FILL_FIELD(switch_event, next_pid, event, data);
	FILL_FIELD(switch_event, next_prio, event, data);

	if (curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;
	}
	if (trace_handler->switch_event)
		trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);

	curr_pid[this_cpu] = switch_event.next_pid;
}
static void
process_sched_runtime_event(void *data,
			    struct event *event,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, data);
	FILL_FIELD(runtime_event, pid, event, data);
	FILL_FIELD(runtime_event, runtime, event, data);
	FILL_FIELD(runtime_event, vruntime, event, data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
}
static void
process_sched_fork_event(void *data,
			 struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, data);

	FILL_ARRAY(fork_event, parent_comm, event, data);
	FILL_FIELD(fork_event, parent_pid, event, data);
	FILL_ARRAY(fork_event, child_comm, event, data);
	FILL_FIELD(fork_event, child_pid, event, data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}
static void
process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);
}
static void
process_sched_migrate_task_event(void *data,
				 struct event *event,
				 int cpu __used,
				 u64 timestamp __used,
				 struct thread *thread __used)
{
	struct trace_migrate_task_event migrate_task_event;

	FILL_COMMON_FIELDS(migrate_task_event, event, data);

	FILL_ARRAY(migrate_task_event, comm, event, data);
	FILL_FIELD(migrate_task_event, pid, event, data);
	FILL_FIELD(migrate_task_event, prio, event, data);
	FILL_FIELD(migrate_task_event, cpu, event, data);

	if (trace_handler->migrate_task_event)
		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
}
static void
process_raw_event(event_t *raw_event __used, void *data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_migrate_task"))
		process_sched_migrate_task_event(data, event, cpu, timestamp, thread);
}
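/*
 * Dispatch is by tracepoint name string; sched_wakeup and
 * sched_wakeup_new are funneled through the same handler, so a new
 * task's first wakeup is treated like any other wakeup.
 */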
static int process_sample_event(event_t *event,
				struct perf_session *session __used)
{
	struct sample_data data;
	struct thread *thread;

	if (!(sample_type & PERF_SAMPLE_RAW))
		return 0;

	memset(&data, 0, sizeof(data));
	data.time = -1;
	data.cpu = -1;
	data.period = -1;

	event__parse_sample(event, sample_type, &data);

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		    event->header.misc,
		    data.pid, data.tid,
		    (void *)(long)data.ip,
		    (long long)data.period);

	thread = threads__findnew(data.pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
		return 0;

	process_raw_event(event, data.raw_data, data.cpu, data.time, thread);

	return 0;
}
static int process_lost_event(event_t *event __used,
			      struct perf_session *session __used)
{
	nr_lost_chunks++;
	nr_lost_events += event->lost.lost;

	return 0;
}
static int sample_type_check(u64 type)
{
	sample_type = type;

	if (!(sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr,
			"No trace sample to read. Did you call perf record "
			"without -R?");
		return -1;
	}

	return 0;
}
static struct perf_event_ops event_ops = {
	.process_sample_event	= process_sample_event,
	.process_comm_event	= event__process_comm,
	.process_lost_event	= process_lost_event,
	.sample_type_check	= sample_type_check,
};
static int read_events(void)
{
	int err;
	struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);

	if (session == NULL)
		return -ENOMEM;

	register_idle_thread();

	err = perf_session__process_events(session, &event_ops, 0,
					   &event__cwdlen, &event__cwd);
	perf_session__delete(session);
	return err;
}
static void print_bad_events(void)
{
	if (nr_unordered_timestamps && nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}
static void __cmd_lat(void)
{
	struct rb_node *next;

	setup_pager();
	read_events();
	sort_lat();

	printf("\n ---------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at     |\n");
	printf(" ---------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events();
	printf("\n");
}
static struct trace_sched_handler map_ops = {
	.wakeup_event		= NULL,
	.switch_event		= map_switch_event,
	.runtime_event		= NULL,
	.fork_event		= NULL,
};
static void __cmd_map(void)
{
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	read_events();
	print_bad_events();
}
static void __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	read_events();

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();
}
static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|trace}",
	NULL
};
static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};
static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};
[] = {
1814 OPT_STRING('s', "sort", &sort_order
, "key[,key2...]",
1815 "sort by key(s): runtime, switch, avg, max"),
1816 OPT_BOOLEAN('v', "verbose", &verbose
,
1817 "be more verbose (show symbol address, etc)"),
1818 OPT_INTEGER('C', "CPU", &profile_cpu
,
1819 "CPU to profile on"),
1820 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace
,
1821 "dump raw trace in ASCII"),
static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};
[] = {
1831 OPT_INTEGER('r', "repeat", &replay_repeat
,
1832 "repeat the workload replay N times (-1: infinite)"),
1833 OPT_BOOLEAN('v', "verbose", &verbose
,
1834 "be more verbose (show symbol address, etc)"),
1835 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace
,
1836 "dump raw trace in ASCII"),
static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add("pid", &cmp_pid);
}
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
};
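/*
 * A minimal sketch of what the canned command line above amounts to
 * (assuming the default perf.data output):
 *
 *	perf record -a -R -c 1 \
 *		-e sched:sched_switch:r ... -e sched:sched_migrate_task:r
 *
 * i.e. system-wide (-a), raw-sample (-R) collection of the scheduler
 * tracepoints, recording every single event (-c 1).
 */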
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}
int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf trace' for now:
	 */
	if (!strcmp(argv[0], "trace"))
		return cmd_trace(argc, argv, prefix);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		__cmd_lat();
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
		setup_sorting();
		__cmd_map();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		__cmd_replay();
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}
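/*
 * Typical usage, per sched_usage above:
 *
 *	perf sched record sleep 10	# capture scheduler tracepoints
 *	perf sched latency		# per-task wakeup latency table
 *	perf sched map			# CPU occupancy map over time
 *	perf sched replay		# re-execute the recorded workload
 */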