/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
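
/*
 * A NULL seq_file routes the output to the kernel log: the sysrq path
 * (sysrq_sched_debug_show() below) calls everything with m == NULL, while
 * the /proc/sched_debug path passes a real seq_file.
 */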

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
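
/*
 * SPLIT_NS() expands to two arguments for a "%Ld.%06ld" format. For example,
 * 3500000 ns prints as "3.500000": whole milliseconds, then the nanosecond
 * remainder zero-padded to six digits. Negative values are split
 * symmetrically via the sign checks above.
 */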

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->avg_period);
		return;
	}


	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.running_avg_sum);
	P(se->avg.avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.utilization_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];
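
/*
 * A single shared buffer; the printing paths that reach this helper
 * (print_cfs_rq(), print_rt_rq(), print_task()) all appear to run under
 * sched_debug_lock, which print_cpu() takes around the stats helpers.
 */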

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d", task_node(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
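	/*
	 * spread0: this runqueue's min_vruntime relative to CPU0's; a crude
	 * cross-CPU skew indicator, since vruntimes are per-runqueue values.
	 */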
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
			cfs_rq->blocked_load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "utilization_load_avg",
			cfs_rq->utilization_load_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
			cfs_rq->tg_load_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
			cfs_rq->tg_runnable_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
			atomic_read(&cfs_rq->tg->runnable_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
			cfs_rq->tg->cfs_bandwidth.timer_active);
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
	SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

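/*
 * rq members come in different widths; pick the printf format from sizeof()
 * so that both 32-bit and 64-bit fields print correctly.
 */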
#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
	P64(max_idle_balance_cost);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

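	/* Sample all three clocks back-to-back, with interrupts off. */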
	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
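	/* v comes from sched_debug_start(): (void *)1 is the header, n + 2 is cpu n */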
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);

}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
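/*
 * For example, with only CPUs 0 and 2 online:
 *   *offset == 0: return (void *)1                        (header)
 *   *offset == 1: cpumask_first() == 0,  *offset stays 1, return (void *)2 (cpu 0)
 *   *offset == 2: cpumask_next(0) == 2,  *offset set to 3, return (void *)4 (cpu 2)
 *   *offset == 4: cpumask_next(2) >= nr_cpu_ids, return NULL (EOF)
 * Rewriting *offset to cpu + 1 is what skips the hole at offline cpu 1.
 */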
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;
	int node, i;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));

	for_each_online_node(node) {
		for (i = 0; i < 2; i++) {
			unsigned long nr_faults = -1;
			int cpu_current, home_node;

			if (p->numa_faults)
				nr_faults = p->numa_faults[2*node + i];

			cpu_current = !i ? (task_node(p) == node) :
				      (pol && node_isset(node, pol->v.nodes));

			home_node = (p->numa_preferred_nid == node);

			SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
				   i, node, cpu_current, home_node, nr_faults);
		}
	}

	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

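	/*
	 * avg_atom: mean runtime between context switches;
	 * avg_per_cpu: mean runtime between cross-CPU migrations.
	 * -1 flags the case where there is nothing to divide by yet.
	 */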
	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.runnable_avg_sum);
	P(se.avg.running_avg_sum);
	P(se.avg.avg_period);
	P(se.avg.load_avg_contrib);
	P(se.avg.utilization_avg_contrib);
	P(se.avg.decay_count);
#endif
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

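	/* Two back-to-back cpu_clock() reads estimate the clock's own overhead. */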
	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}