/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);	\
	else					\
		printk(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}
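
/*
 * SPLIT_NS() expands into the two arguments of a "%Ld.%06ld" conversion
 * pair, printing a nanosecond value as milliseconds with six fractional
 * digits.  Example: x = 1234567890 (ns) prints as "1234.567890":
 *
 *	SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(x));
 */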
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#ifdef CONFIG_FAIR_GROUP_SCHED
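/*
 * Dump the scheduling statistics of @tg's sched_entity on @cpu.  The
 * root task group has no per-cpu entity; for it only the rq-wide
 * runnable average sums are printed.
 */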
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->runnable_avg_period);
		return;
	}

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.runnable_avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
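/*
 * Shared path buffer for task_group_path().  All users in this file run
 * under sched_debug_lock (taken in print_cpu()), so one static buffer
 * is enough.
 */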
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

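/*
 * Print one row of the "runnable tasks" table for @p: run state, comm,
 * pid, vruntime (the rbtree key), context-switch count and priority,
 * plus schedstats timings and the cgroup path when configured in.
 */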
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, p->pid,
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

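/*
 * Walk the global thread list under tasklist_lock and print every task
 * that is queued on @rq_cpu.
 */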
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, p) {
		if (!p->on_rq || task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}

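/*
 * Dump one cfs_rq: the vruntimes of the leftmost (MIN_vruntime) and
 * rightmost (max_vruntime) queued entities are sampled under rq->lock,
 * together with cpu0's min_vruntime so that "spread0" shows the drift
 * between this runqueue and cpu0.
 */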
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lld\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
			cfs_rq->blocked_load_avg);
	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
			(unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
			cfs_rq->tg_load_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
			cfs_rq->tg_runnable_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
			atomic_read(&cfs_rq->tg->runnable_avg));
#endif

	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

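/*
 * Dump one rt_rq: queued task count, throttle state and the rt_time /
 * rt_runtime bandwidth accounting.
 */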
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

extern __read_mostly int sched_clock_running;

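/*
 * Print everything known about one cpu's runqueue: clock and load
 * fields, schedstat counters, its cfs_rq and rt_rq and finally the
 * runnable tasks.  sched_debug_lock serializes the dump, which also
 * protects the shared group_path buffer used underneath.
 */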
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	P(curr->pid);
	PN(clock);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);

	rcu_read_lock();
	print_rq(m, rq, cpu);
	rcu_read_unlock();
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

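/* Indexed by sysctl_sched_tunable_scaling; keep in that order. */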
static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

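/*
 * Print the global header: kernel version, the three clocks (sampled
 * back to back with interrupts disabled so they can be compared),
 * jiffies and the scheduler sysctl tunables.
 */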
static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable);
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

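/*
 * seq_file ->show(): undo the +2 bias applied by sched_debug_start().
 * The header token (void *)1 decodes to cpu == -1; anything else is
 * the cpu number to print.
 */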
static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 *
 * For example, with cpus 0 and 2 online: *offset 0 returns the header
 * token (void *)1, *offset 1 returns (void *)2 (cpu 0), *offset 2 skips
 * the offline cpu 1 and returns (void *)4 (cpu 2), and the call after
 * that runs past the online mask and returns NULL to end the sequence.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

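/* Create the read-only /proc/sched_debug entry at boot. */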
static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

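/*
 * Backs /proc/<pid>/sched: prints one task's scheduling state, its
 * schedstats when configured in, and a clock-delta sample showing the
 * cost of two back-to-back cpu_clock() reads.
 */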
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------\n");
#define __P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-35s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}
}

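/*
 * Reset the task's schedstats so a fresh measurement interval can
 * begin; invoked from the /proc/<pid>/sched write handler.
 */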
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}