/*
 * Imported from the lttng-modules tree: src/lttng-statedump-impl.c
 * (state as of the "Drop support for kernels < 4.4 from LTTng tracer core"
 * change).
 */
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-statedump.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Changes:
 *	Eric Clement: Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 *	Various updates
 */
15
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/netlink.h>
19 #include <linux/inet.h>
20 #include <linux/ip.h>
21 #include <linux/kthread.h>
22 #include <linux/proc_fs.h>
23 #include <linux/file.h>
24 #include <linux/interrupt.h>
25 #include <linux/irq.h>
26 #include <linux/irqnr.h>
27 #include <linux/netdevice.h>
28 #include <linux/inetdevice.h>
29 #include <linux/mm.h>
30 #include <linux/swap.h>
31 #include <linux/wait.h>
32 #include <linux/mutex.h>
33 #include <linux/device.h>
34
35 #include <linux/blkdev.h>
36
37 #include <lttng/events.h>
38 #include <lttng/tracer.h>
39 #include <wrapper/cpu.h>
40 #include <wrapper/irqdesc.h>
41 #include <wrapper/fdtable.h>
42 #include <wrapper/tracepoint.h>
43 #include <wrapper/blkdev.h>
44 #include <wrapper/fdtable.h>
45 #include <wrapper/sched.h>
46
47 /* Define the tracepoints, but do not build the probes */
48 #define CREATE_TRACE_POINTS
49 #define TRACE_INCLUDE_PATH instrumentation/events
50 #define TRACE_INCLUDE_FILE lttng-statedump
51 #define LTTNG_INSTRUMENTATION
52 #include <instrumentation/events/lttng-statedump.h>
53
/*
 * Definitions of the statedump tracepoints (probes are built separately,
 * see CREATE_TRACE_POINTS above). Each trace_lttng_statedump_*() emitter
 * records one category of kernel state into @session.
 */

/* One event per block device / partition found at statedump time. */
LTTNG_DEFINE_TRACE(lttng_statedump_block_device,
	TP_PROTO(struct lttng_kernel_session *session,
		dev_t dev, const char *diskname),
	TP_ARGS(session, dev, diskname));

/* Marks completion of a statedump pass. */
LTTNG_DEFINE_TRACE(lttng_statedump_end,
	TP_PROTO(struct lttng_kernel_session *session),
	TP_ARGS(session));

/* One event per registered irqaction on each interrupt line. */
LTTNG_DEFINE_TRACE(lttng_statedump_interrupt,
	TP_PROTO(struct lttng_kernel_session *session,
		unsigned int irq, const char *chip_name,
		struct irqaction *action),
	TP_ARGS(session, irq, chip_name, action));

/* One event per open file descriptor of a process. */
LTTNG_DEFINE_TRACE(lttng_statedump_file_descriptor,
	TP_PROTO(struct lttng_kernel_session *session,
		struct files_struct *files,
		int fd, const char *filename,
		unsigned int flags, fmode_t fmode),
	TP_ARGS(session, files, fd, filename, flags, fmode));

/* Marks the beginning of a statedump pass. */
LTTNG_DEFINE_TRACE(lttng_statedump_start,
	TP_PROTO(struct lttng_kernel_session *session),
	TP_ARGS(session));

/* One event per task, describing its scheduling/execution state. */
LTTNG_DEFINE_TRACE(lttng_statedump_process_state,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		int type, int mode, int submode, int status,
		struct files_struct *files),
	TP_ARGS(session, p, type, mode, submode, status, files));

/* One event per (task, pid namespace) pair; pid namespaces are nested. */
LTTNG_DEFINE_TRACE(lttng_statedump_process_pid_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct pid_namespace *pid_ns),
	TP_ARGS(session, p, pid_ns));

/* cgroup namespaces only exist since kernel 4.6. */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,6,0))
LTTNG_DEFINE_TRACE(lttng_statedump_process_cgroup_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct cgroup_namespace *cgroup_ns),
	TP_ARGS(session, p, cgroup_ns));
#endif

LTTNG_DEFINE_TRACE(lttng_statedump_process_ipc_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct ipc_namespace *ipc_ns),
	TP_ARGS(session, p, ipc_ns));

/* struct mnt_namespace is only visible when its header is available. */
#ifndef LTTNG_MNT_NS_MISSING_HEADER
LTTNG_DEFINE_TRACE(lttng_statedump_process_mnt_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct mnt_namespace *mnt_ns),
	TP_ARGS(session, p, mnt_ns));
#endif

LTTNG_DEFINE_TRACE(lttng_statedump_process_net_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct net *net_ns),
	TP_ARGS(session, p, net_ns));

/* One event per (task, user namespace) pair; user namespaces are nested. */
LTTNG_DEFINE_TRACE(lttng_statedump_process_user_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct user_namespace *user_ns),
	TP_ARGS(session, p, user_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_process_uts_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct uts_namespace *uts_ns),
	TP_ARGS(session, p, uts_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_process_time_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct time_namespace *time_ns),
	TP_ARGS(session, p, time_ns));

/* One event per network interface, with its IPv4 address if any. */
LTTNG_DEFINE_TRACE(lttng_statedump_network_interface,
	TP_PROTO(struct lttng_kernel_session *session,
		struct net_device *dev, struct in_ifaddr *ifa),
	TP_ARGS(session, dev, ifa));

/* CPU topology dump is x86-only (struct cpuinfo_x86). */
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
LTTNG_DEFINE_TRACE(lttng_statedump_cpu_topology,
	TP_PROTO(struct lttng_kernel_session *session, struct cpuinfo_x86 *c),
	TP_ARGS(session, c));
#endif
149
/* Context handed to the per-fd iteration callback (lttng_dump_one_fd). */
struct lttng_fd_ctx {
	char *page;				/* scratch page used by d_path() */
	struct lttng_kernel_session *session;	/* session receiving the events */
	struct files_struct *files;		/* fd table being enumerated */
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];	/* one work item per possible CPU */
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);	/* waited on until all work items ran */
static atomic_t kernel_threads_to_run;		/* count of work items still pending */

/* Thread classification emitted in process_state events. */
enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

/* Execution mode emitted in process_state events. */
enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

/* Scheduling status emitted in process_state events. */
enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};
192
193
/*
 * Kernel-version compatibility layer for naming a disk partition and
 * retrieving its dev_t. The partition representation changed twice:
 * struct hd_struct (< 5.11), struct block_device without "%pg" (< 6.0),
 * and struct block_device with bdevname() removed (>= 6.0).
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,0,0))

#define LTTNG_PART_STRUCT_TYPE struct block_device

/*
 * Format the canonical name of @part into @name_buf (BDEVNAME_SIZE bytes).
 * Returns 0 on success, -ENOSYS on formatting error or truncation.
 * bdevname() is gone in 6.0+, hence the "%pg" printk specifier.
 * @disk is unused in this variant but kept for a uniform signature.
 */
static
int lttng_get_part_name(struct gendisk *disk, struct block_device *part, char *name_buf)
{
	int ret;

	ret = snprintf(name_buf, BDEVNAME_SIZE, "%pg", part);
	if (ret < 0 || ret >= BDEVNAME_SIZE)
		return -ENOSYS;

	return 0;
}

/* Return the device number of @part. */
static
dev_t lttng_get_part_devt(struct block_device *part)
{
	return part->bd_dev;
}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,11,0))

#define LTTNG_PART_STRUCT_TYPE struct block_device

/*
 * 5.11 .. 5.x variant: partitions are block devices and bdevname() is
 * still exported. Returns 0 on success, -ENOSYS on failure.
 */
static
int lttng_get_part_name(struct gendisk *disk, struct block_device *part, char *name_buf)
{
	const char *p;

	p = bdevname(part, name_buf);
	if (!p)
		return -ENOSYS;

	return 0;
}

/* Return the device number of @part. */
static
dev_t lttng_get_part_devt(struct block_device *part)
{
	return part->bd_dev;
}

#else

#define LTTNG_PART_STRUCT_TYPE struct hd_struct

/*
 * Pre-5.11 variant: partitions are struct hd_struct, which bdevname()
 * cannot take directly. Returns 0 on success, -ENOSYS on failure.
 */
static
int lttng_get_part_name(struct gendisk *disk, struct hd_struct *part, char *name_buf)
{
	const char *p;
	struct block_device bdev;

	/*
	 * Create a partial 'struct blockdevice' to use
	 * 'bdevname()' which is a simple wrapper over
	 * 'disk_name()' but has the honor to be EXPORT_SYMBOL.
	 */
	bdev.bd_disk = disk;
	bdev.bd_part = part;

	p = bdevname(&bdev, name_buf);
	if (!p)
		return -ENOSYS;

	return 0;
}

/* Return the device number of @part. */
static
dev_t lttng_get_part_devt(struct hd_struct *part)
{
	return part_devt(part);
}
#endif
269
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,12,0))
/*
 * Emit one lttng_statedump_block_device event for each partition of @disk,
 * including partition 0 (the whole-disk device).
 *
 * Kernels >= 5.12 keep partitions in an xarray on the gendisk, walked
 * under rcu_read_lock().
 *
 * Returns 0 on success, -ENOSYS if a partition name cannot be formatted.
 */
static
int lttng_statedump_each_block_device(struct lttng_kernel_session *session, struct gendisk *disk)
{
	struct block_device *part;
	unsigned long idx;
	int ret = 0;

	/* Include partition 0 */
	idx = 0;

	rcu_read_lock();
	xa_for_each(&disk->part_tbl, idx, part) {
		char name_buf[BDEVNAME_SIZE];

		/*
		 * Exclude non-partitions bdev and empty partitions.
		 * NOTE(review): the condition only skips zero-sized
		 * *partitions*; the whole-disk bdev at index 0 is always
		 * emitted — confirm this matches the comment's intent.
		 */
		if (bdev_is_partition(part) && !bdev_nr_sectors(part))
			continue;

		if (lttng_get_part_name(disk, part, name_buf) == -ENOSYS) {
			ret = -ENOSYS;
			goto end;
		}
		trace_lttng_statedump_block_device(session, lttng_get_part_devt(part),
				name_buf);
	}
end:
	rcu_read_unlock();
	return ret;
}
#else
/*
 * Pre-5.12 variant: walk partitions (including partition 0) with the
 * disk_part_iter API. Returns 0 on success, -ENOSYS if a partition name
 * cannot be formatted.
 */
static
int lttng_statedump_each_block_device(struct lttng_kernel_session *session, struct gendisk *disk)
{
	struct disk_part_iter piter;
	LTTNG_PART_STRUCT_TYPE *part;

	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);

	while ((part = disk_part_iter_next(&piter))) {
		char name_buf[BDEVNAME_SIZE];

		if (lttng_get_part_name(disk, part, name_buf) == -ENOSYS) {
			disk_part_iter_exit(&piter);
			return -ENOSYS;
		}
		trace_lttng_statedump_block_device(session, lttng_get_part_devt(part),
				name_buf);
	}
	disk_part_iter_exit(&piter);

	return 0;
}
#endif
324
/*
 * Enumerate all block devices of the system and emit one
 * lttng_statedump_block_device event per disk/partition.
 *
 * Returns 0 on success, -ENOSYS when the block class or disk device type
 * cannot be resolved (or when a partition name cannot be formatted).
 */
static
int lttng_enumerate_block_devices(struct lttng_kernel_session *session)
{
	struct class *ptr_block_class;
	struct device_type *ptr_disk_type;
	struct class_dev_iter iter;
	struct device *dev;
	int ret = 0;

	ptr_block_class = wrapper_get_block_class();
	if (!ptr_block_class) {
		ret = -ENOSYS;
		goto end;
	}
	ptr_disk_type = wrapper_get_disk_type();
	if (!ptr_disk_type) {
		ret = -ENOSYS;
		goto end;
	}
	class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct gendisk *disk = dev_to_disk(dev);

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & LTTNG_GENHD_FL_HIDDEN))
			continue;

		/*
		 * NOTE(review): ret is overwritten on each iteration, so a
		 * failure on one disk is lost if a later disk succeeds; only
		 * the last disk's status is reported to the caller.
		 */
		ret = lttng_statedump_each_block_device(session, disk);
	}
	class_dev_iter_exit(&iter);
end:
	return ret;
}
362
#ifdef CONFIG_INET

/*
 * Emit one lttng_statedump_network_interface event per IPv4 address of
 * @dev when the interface is up, or a single event with a NULL address
 * when it is down.
 *
 * NOTE(review): in_dev->ifa_list is walked while holding a reference on
 * in_dev (in_dev_get/in_dev_put); newer kernels annotate this list for
 * RCU — confirm the plain traversal is still acceptable on target
 * kernels.
 */
static
void lttng_enumerate_device(struct lttng_kernel_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

/*
 * Walk every net_device of the initial network namespace under
 * dev_base_lock and dump its addresses. Always returns 0.
 */
static
int lttng_enumerate_network_ip_interface(struct lttng_kernel_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
/* No-op stub when IPv4 support is not built into the kernel. */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_kernel_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */
407
/*
 * iterate_fd() callback: emit one lttng_statedump_file_descriptor event
 * for @fd/@file. @p is actually a struct lttng_fd_ctx (page, session,
 * files). Always returns 0 so iteration continues over all fds.
 *
 * When d_path() fails (e.g. unlinked or unreachable path), fall back to
 * the dentry name protected by d_lock.
 */
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 * NOTE(review): FMODE_NONOTIFY is an fmode_t bit masked out of
	 * f_flags here — confirm the bit layout still overlaps on target
	 * kernels.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session,
			ctx->files, fd, dentry->d_name.name, flags,
			file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session,
		ctx->files, fd, s, flags, file->f_mode);
end:
	return 0;
}
449
450 /* Called with task lock held. */
451 static
452 void lttng_enumerate_files(struct lttng_kernel_session *session,
453 struct files_struct *files,
454 char *tmp)
455 {
456 struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };
457
458 iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
459 }
460
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
/*
 * Emit one lttng_statedump_cpu_topology event per possible CPU
 * (x86 only: the event carries struct cpuinfo_x86). Always returns 0.
 */
static
int lttng_enumerate_cpu_topology(struct lttng_kernel_session *session)
{
	int cpu;
	const cpumask_t *cpumask = cpu_possible_mask;

	for (cpu = cpumask_first(cpumask); cpu < nr_cpu_ids;
			cpu = cpumask_next(cpu, cpumask)) {
		trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));
	}

	return 0;
}
#else
/* No-op stub on architectures without CPU topology instrumentation. */
static
int lttng_enumerate_cpu_topology(struct lttng_kernel_session *session)
{
	return 0;
}
#endif
482
/*
 * Compiled out (#if 0): per-task VM map enumeration, kept as a reference
 * for a future fix. See the FIXME below for why it cannot be enabled.
 */
#if 0
/*
 * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_kernel_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->f_path.dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_kernel_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif
530
/*
 * Emit one lttng_statedump_interrupt event per irqaction registered on
 * each interrupt line. Always returns 0.
 *
 * Each descriptor's action chain is walked with interrupts disabled and
 * desc->lock held, so the chain cannot change underneath us.
 */
static
int lttng_list_interrupts(struct lttng_kernel_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

	/*
	 * for_each_irq_desc() expands to irq_to_desc(), which is not
	 * exported to modules on all kernels; redirect it to the LTTng
	 * wrapper for the duration of this function.
	 */
#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		raw_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		raw_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
#undef irq_to_desc
}
557
/*
 * Statedump the task's namespaces using the proc filesystem inode number as
 * the unique identifier. The user and pid ns are nested and will be dumped
 * recursively.
 *
 * Called with task lock held.
 *
 * @type/@mode/@submode/@status are currently unused here; the namespace
 * events only carry the task and the namespace pointers.
 */
static
void lttng_statedump_process_ns(struct lttng_kernel_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;
	struct user_namespace *user_ns;

	/*
	 * The pid and user namespaces are special, they are nested and
	 * accessed with specific functions instead of the nsproxy struct
	 * like the other namespaces.
	 */
	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
		pid_ns = pid_ns ? pid_ns->parent : NULL;
	} while (pid_ns);


	user_ns = task_cred_xxx(p, user_ns);
	do {
		trace_lttng_statedump_process_user_ns(session, p, user_ns);
		/*
		 * trace_lttng_statedump_process_user_ns() internally
		 * checks whether user_ns is NULL. While this does not
		 * appear to be a possible return value for
		 * task_cred_xxx(), err on the safe side and check
		 * for NULL here as well to be consistent with the
		 * paranoid behavior of
		 * trace_lttng_statedump_process_user_ns().
		 */
		user_ns = user_ns ? user_ns->parent : NULL;
	} while (user_ns);

	/* The remaining namespaces hang off of p->nsproxy, if present. */
	proxy = p->nsproxy;

	if (proxy) {
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,6,0))
		trace_lttng_statedump_process_cgroup_ns(session, p, proxy->cgroup_ns);
#endif
		trace_lttng_statedump_process_ipc_ns(session, p, proxy->ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
		trace_lttng_statedump_process_mnt_ns(session, p, proxy->mnt_ns);
#endif
		trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
		trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) || \
		LTTNG_RHEL_KERNEL_RANGE(4,18,0,305,0,0, 4,19,0,0,0,0))
		/* Time namespaces exist since 5.6 (and some RHEL 4.18 kernels). */
		trace_lttng_statedump_process_time_ns(session, p, proxy->time_ns);
#endif
	}
}
622
/*
 * Walk every process and every thread of the system under RCU and emit,
 * for each: a process_state event, its namespace events, and (once per
 * distinct fd table) its open file descriptors.
 *
 * Returns 0 on success, -ENOMEM if the scratch page for path resolution
 * cannot be allocated.
 */
static
int lttng_enumerate_process_states(struct lttng_kernel_session *session)
{
	struct task_struct *g, *p;
	char *tmp;

	/* Scratch page handed to d_path() by lttng_enumerate_files(). */
	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	rcu_read_lock();
	for_each_process(g) {
		struct files_struct *prev_files = NULL;

		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;
			struct files_struct *files;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (lttng_task_is_running(p)) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (lttng_get_task_state(p) &
					(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of t->mm is to filter out kernel
			 * threads; Viewer will further filter out if a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			files = p->files;

			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, files);
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			/*
			 * As an optimisation for the common case, do not
			 * repeat information for the same files_struct in
			 * two consecutive threads. This is the common case
			 * for threads sharing the same fd table. RCU guarantees
			 * that the same files_struct pointer is not re-used
			 * throughout processes/threads iteration.
			 */
			if (files && files != prev_files) {
				lttng_enumerate_files(session, files, tmp);
				prev_files = files;
			}
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	free_page((unsigned long) tmp);

	return 0;
}
707
708 static
709 void lttng_statedump_work_func(struct work_struct *work)
710 {
711 if (atomic_dec_and_test(&kernel_threads_to_run))
712 /* If we are the last thread, wake up do_lttng_statedump */
713 wake_up(&statedump_wq);
714 }
715
/*
 * Perform a full state dump into @session: processes (with namespaces and
 * fds), interrupts, network interfaces, block devices and CPU topology,
 * bracketed by statedump_start/statedump_end events.
 *
 * Returns 0 on success or a negative errno from the first failing
 * enumeration step. A -ENOSYS from block device enumeration is reported
 * with a warning but does not abort the dump.
 */
static
int do_lttng_statedump(struct lttng_kernel_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 *	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		/* Non-fatal: trace continues without block device events. */
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}
	ret = lttng_enumerate_cpu_topology(session);
	if (ret)
		return ret;

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where is was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 * The cpus read lock keeps the online CPU set stable so the
	 * counter set below matches the number of work items scheduled.
	 */
	lttng_cpus_read_lock();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	lttng_cpus_read_unlock();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}
774
/*
 * Public entry point: run a complete state dump for @session.
 *
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_kernel_session *session)
{
	int ret;

	ret = do_lttng_statedump(session);
	return ret;
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);
783
static
int __init lttng_statedump_init(void)
{
	/*
	 * Nothing to set up at load time: state dumps are triggered on
	 * demand through lttng_statedump_start().
	 */
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
	/* Nothing allocated at init time, so nothing to tear down. */
}

module_exit(lttng_statedump_exit);
798
/* Module metadata; version is assembled from the LTTng build macros. */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("LTTng statedump provider");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);