/*
 * lttng-statedump.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Changes:
 *	Eric Clement:			Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 *					Various updates
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fdtable.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>

#include "lttng-events.h"
#include "lttng-tracer.h"
#include "wrapper/irqdesc.h"
#include "wrapper/spinlock.h"
#include "wrapper/fdtable.h"
#include "wrapper/nsproxy.h"
#include "wrapper/irq.h"
#include "wrapper/tracepoint.h"
#include "wrapper/genhd.h"
#include "wrapper/file.h"

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
#include <linux/irq.h>
#endif

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
#define TRACE_INCLUDE_FILE lttng-statedump
#define LTTNG_INSTRUMENTATION
#include "instrumentation/events/lttng-module/lttng-statedump.h"

DEFINE_TRACE(lttng_statedump_block_device);
DEFINE_TRACE(lttng_statedump_end);
DEFINE_TRACE(lttng_statedump_interrupt);
DEFINE_TRACE(lttng_statedump_file_descriptor);
DEFINE_TRACE(lttng_statedump_start);
DEFINE_TRACE(lttng_statedump_process_state);
DEFINE_TRACE(lttng_statedump_network_interface);

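/*
 * Context shared between lttng_enumerate_task_fd() and the
 * lttng_dump_one_fd() callback while iterating on a task's file
 * descriptor table.
 */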
struct lttng_fd_ctx {
	char *page;
	struct lttng_session *session;
	struct task_struct *p;
	struct files_struct *files;
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

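/*
 * Enumerate each partition of every disk registered in the "block"
 * device class and emit one lttng_statedump_block_device event per
 * partition, recording its device number and name. Returns -ENOSYS
 * when the required kernel symbols cannot be resolved by the wrappers.
 */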
static
int lttng_enumerate_block_devices(struct lttng_session *session)
{
	struct class *ptr_block_class;
	struct device_type *ptr_disk_type;
	struct class_dev_iter iter;
	struct device *dev;

	ptr_block_class = wrapper_get_block_class();
	if (!ptr_block_class)
		return -ENOSYS;
	ptr_disk_type = wrapper_get_disk_type();
	if (!ptr_disk_type) {
		return -ENOSYS;
	}
	class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct disk_part_iter piter;
		struct gendisk *disk = dev_to_disk(dev);
		struct hd_struct *part;

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter))) {
			char name_buf[BDEVNAME_SIZE];
			char *p;

			p = wrapper_disk_name(disk, part->partno, name_buf);
			if (!p) {
				disk_part_iter_exit(&piter);
				class_dev_iter_exit(&iter);
				return -ENOSYS;
			}
			trace_lttng_statedump_block_device(session,
					part_devt(part), name_buf);
		}
		disk_part_iter_exit(&piter);
	}
	class_dev_iter_exit(&iter);
	return 0;
}

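/*
 * Enumerate the IPv4 addresses configured on each network interface of
 * the initial network namespace. Interfaces that are up are reported
 * once per address; interfaces that are down are reported with a NULL
 * address. Compiled out as a no-op when CONFIG_INET is not set.
 */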
#ifdef CONFIG_INET

static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */

#ifdef FD_ISSET /* For old kernels lacking close_on_exec() */
static inline bool lttng_close_on_exec(int fd, const struct fdtable *fdt)
{
	return FD_ISSET(fd, fdt->close_on_exec);
}
#else
static inline bool lttng_close_on_exec(int fd, const struct fdtable *fdt)
{
	return close_on_exec(fd, fdt);
}
#endif

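/*
 * Callback passed to lttng_iterate_fd(). Resolves the path backing the
 * file descriptor and emits a lttng_statedump_file_descriptor event
 * carrying the userspace-visible flags and file mode; falls back to the
 * dentry name when the full path cannot be resolved.
 */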
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd,
			dentry->d_name.name, flags, file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd, s,
		flags, file->f_mode);
end:
	return 0;
}

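/*
 * Dump the file descriptor table of a single task. Called with the RCU
 * read-side lock held by lttng_enumerate_file_descriptors(); takes the
 * task lock to access p->files safely.
 */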
static
void lttng_enumerate_task_fd(struct lttng_session *session,
		struct task_struct *p, char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .p = p };
	struct files_struct *files;

	task_lock(p);
	files = p->files;
	if (!files)
		goto end;
	ctx.files = files;
	lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
end:
	task_unlock(p);
}

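/*
 * Walk every process on the system and dump its open file descriptors.
 * A single free page is used as scratch buffer for d_path().
 */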
static
int lttng_enumerate_file_descriptors(struct lttng_session *session)
{
	struct task_struct *p;
	char *tmp;

	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* Enumerate active file descriptors */
	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_fd(session, p, tmp);
	rcu_read_unlock();
	free_page((unsigned long) tmp);
	return 0;
}

#if 0
/*
 * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

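/*
 * Emit one lttng_statedump_interrupt event per registered interrupt
 * handler (irqaction) of each interrupt descriptor. Only built when the
 * wrappers can walk the irq_desc array (CONFIG_LTTNG_HAS_LIST_IRQ);
 * otherwise this is a no-op returning 0.
 */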
#ifdef CONFIG_LTTNG_HAS_LIST_IRQ

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
#define irq_desc_get_chip(desc) get_irq_desc_chip(desc)
#endif

static
int lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		wrapper_desc_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		wrapper_desc_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
#undef irq_to_desc
}
#else
static inline
int lttng_list_interrupts(struct lttng_session *session)
{
	return 0;
}
#endif

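/*
 * Emit one lttng_statedump_process_state event per level of the PID
 * namespace hierarchy the task belongs to, from its own namespace up to
 * the initial namespace.
 */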
/*
 * Called with task lock held.
 */
static
void lttng_statedump_process_ns(struct lttng_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;

	/*
	 * Back and forth on locking strategy within Linux upstream for nsproxy.
	 * See Linux upstream commit 728dba3a39c66b3d8ac889ddbe38b5b1c264aec3
	 * "namespaces: Use task_lock and not rcu to protect nsproxy"
	 * for details.
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,0,11, 3,17,0,0))
	proxy = p->nsproxy;
#else
	rcu_read_lock();
	proxy = task_nsproxy(p);
#endif
	if (proxy) {
		pid_ns = lttng_get_proxy_pid_ns(proxy);
		do {
			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, pid_ns);
			pid_ns = pid_ns->parent;
		} while (pid_ns);
	} else {
		trace_lttng_statedump_process_state(session,
			p, type, mode, submode, status, NULL);
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,0,11, 3,17,0,0))
	/* (nothing) */
#else
	rcu_read_unlock();
#endif
}

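/*
 * Dump the state (running, waiting, zombie, ...) of every thread of
 * every process, classifying each as a user-space or kernel thread
 * based on the presence of an mm.
 */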
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process(g) {
		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of p->mm is to filter out kernel
			 * threads; the viewer will further sort out whether
			 * a user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	return 0;
}

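/*
 * Per-CPU work function scheduled by do_lttng_statedump(). Its only job
 * is to decrement the pending-CPU counter and wake up the waiter once
 * every online CPU has run it.
 */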
static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

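/*
 * Perform the complete state dump for a session: emit the start event,
 * enumerate processes, file descriptors, interrupts, network interfaces
 * and block devices, then schedule a work item on every online CPU and
 * wait for all of them, so each CPU is known to have passed through a
 * context that was not a trap, IRQ or softirq before the end event.
 */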
static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_file_descriptors(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 *	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);

static
int __init lttng_statedump_init(void)
{
	/*
	 * Allow module to load even if the fixup cannot be done. This
	 * will allow seamless transition when the underlying issue fix
	 * is merged into the Linux kernel, and when tracepoint.c
	 * "tracepoint_module_notify" is turned into a static function.
	 */
	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Statedump");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);