/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */
9 #include <trace/workqueue.h>
10 #include <linux/list.h>
11 #include <linux/percpu.h>
12 #include "trace_stat.h"
/*
 * Per-thread statistics for one cpu workqueue thread.
 * One node lives on the per-cpu list headed by workqueue_global_stats.
 */
struct cpu_workqueue_stats {
	struct list_head	list;		/* link into the per-cpu list */
/* Useful to know if we print the cpu headers */
	bool			first_entry;	/* first workqueue registered on its cpu */
	int			cpu;		/* cpu this workqueue thread is bound to */
	pid_t			pid;		/* pid of the workqueue thread; lookup key */
/* Can be inserted from interrupt or user context, need to be atomic */
	atomic_t		inserted;	/* works queued on this workqueue */
/*
 * Don't need to be atomic, works are serialized in a single workqueue thread
 * on a single CPU.
 */
	unsigned int		executed;	/* works executed by this workqueue */
};
/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;	/* head of the cpu_workqueue_stats list */
	spinlock_t		lock;	/* protects the list; taken with irqs off */
};

/* Don't need a global lock because allocated before the workqueues, and
   accessed from one cpu */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);

/* Shorthand accessor for the per-cpu stat list of @cpu. */
#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	/* A workqueue thread runs on a single cpu: its first allowed cpu. */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	/*
	 * Can fire from interrupt context, hence the irqsave lock and the
	 * atomic counter bump below.
	 */
	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
							list) {
		/* Match the stat node by the workqueue thread's pid. */
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	/* Creation probe should have registered this thread already. */
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	/* A workqueue thread runs on a single cpu: its first allowed cpu. */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
							list) {
		if (node->pid == wq_thread->pid) {
			/*
			 * Plain increment: works are serialized in one
			 * workqueue thread, so no atomicity needed here.
			 */
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	WARN_ON(cpu < 0 || cpu >= num_possible_cpus());

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		/* Best-effort tracer: on OOM just skip tracking this thread. */
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}
	/* Record the comm so stat output can resolve pid -> name later. */
	tracing_record_cmdline(wq_thread);

	INIT_LIST_HEAD(&cws->list);
	cws->cpu = cpu;

	/* The pid is the key used by the other probes to find this node. */
	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	/* First workqueue on this cpu: remember it to print cpu headers. */
	if (list_empty(&workqueue_cpu_stat(cpu)->list))
		cws->first_entry = true;
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* Workqueue only execute on one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	/* _safe variant: we delete the matching node while iterating. */
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
							list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kfree(node);
			goto found;
		}
	}

	pr_debug("trace_workqueue: don't find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/*
 * Return the first stat entry on @cpu's list, or NULL if that cpu has no
 * tracked workqueue thread. Helper for the stat iterator below.
 */
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	unsigned long flags;
	struct cpu_workqueue_stats *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list))
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}
/*
 * tracer_stat .stat_start callback: find the first stat entry across all
 * possible cpus, scanning cpus in order until one has an entry.
 */
static void *workqueue_stat_start(void)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}
	/* No tracked workqueue thread on any cpu. */
	return NULL;
}
/*
 * tracer_stat .stat_next callback: advance from @prev to the next stat
 * entry, hopping to the next cpu's list when the current one is exhausted.
 */
static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	int cpu = prev_cws->cpu;
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		/*
		 * Drop the lock before probing other cpus: each cpu list
		 * has its own lock, taken inside workqueue_stat_start_cpu().
		 */
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		for (++cpu ; cpu < num_possible_cpus(); cpu++) {
			ret = workqueue_stat_start_cpu(cpu);
			if (ret)
				return ret;
		}
		return NULL;
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	/*
	 * NOTE(review): the next pointer is read after the unlock — relies on
	 * entries not being removed concurrently during a stat read; confirm.
	 */
	return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
			  list);
}
/*
 * tracer_stat .stat_show callback: print one row (cpu, inserted, executed,
 * thread name) for the entry @p.
 */
static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	unsigned long flags;
	int cpu = cws->cpu;

	seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
		   atomic_read(&cws->inserted),
		   cws->executed,
		   trace_find_cmdline(cws->pid));

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	/* Blank separator line before the first entry of each cpu. */
	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
		seq_printf(s, "\n");
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return 0;
}
/* tracer_stat .stat_headers callback: column headers for the stat file. */
static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU INSERTED EXECUTED NAME\n");
	seq_printf(s, "# | | | |\n\n");
	return 0;
}
/*
 * Stat tracer descriptor: wires the iterator and printer callbacks above
 * into the trace_stat framework under the name "workqueues".
 */
struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,
	.stat_next = workqueue_stat_next,
	.stat_show = workqueue_stat_show,
	.stat_headers = workqueue_stat_headers
};
/*
 * Register the stat tracer once the stat framework is up.
 * Returns 0 on success, 1 on registration failure (non-fatal: the kernel
 * keeps booting, only the stat file is missing).
 */
int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);
/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	/*
	 * Register the four probes; unwind in reverse order through the
	 * labels below if any registration fails.
	 */
	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	/* Initialize each possible cpu's stat list and its lock. */
	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");

	return 1;
}
early_initcall(trace_workqueue_early_init);