9d708af9f751b59286292ff6a7edfa17735e7a08
[lttng-modules.git] / src / lttng-statedump-impl.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-statedump.c
4 *
5 * Linux Trace Toolkit Next Generation Kernel State Dump
6 *
7 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
8 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
9 *
10 * Changes:
11 * Eric Clement: Add listing of network IP interface
12 * 2006, 2007 Mathieu Desnoyers Fix kernel threads
13 * Various updates
14 */
15
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/netlink.h>
19 #include <linux/inet.h>
20 #include <linux/ip.h>
21 #include <linux/kthread.h>
22 #include <linux/proc_fs.h>
23 #include <linux/file.h>
24 #include <linux/interrupt.h>
25 #include <linux/irq.h>
26 #include <linux/irqnr.h>
27 #include <linux/netdevice.h>
28 #include <linux/inetdevice.h>
29 #include <linux/mm.h>
30 #include <linux/swap.h>
31 #include <linux/wait.h>
32 #include <linux/mutex.h>
33 #include <linux/device.h>
34
35 #include <linux/blkdev.h>
36
37 #include <lttng/events.h>
38 #include <lttng/tracer.h>
39 #include <wrapper/cpu.h>
40 #include <wrapper/irqdesc.h>
41 #include <wrapper/fdtable.h>
42 #include <wrapper/tracepoint.h>
43 #include <wrapper/blkdev.h>
44 #include <wrapper/fdtable.h>
45 #include <wrapper/sched.h>
46
47 /* Define the tracepoints, but do not build the probes */
48 #define CREATE_TRACE_POINTS
49 #define TRACE_INCLUDE_PATH instrumentation/events
50 #define TRACE_INCLUDE_FILE lttng-statedump
51 #define LTTNG_INSTRUMENTATION
52 #include <instrumentation/events/lttng-statedump.h>
53
/*
 * Define each statedump tracepoint used by this module. The probes are
 * built separately; CREATE_TRACE_POINTS above only emits the tracepoint
 * definitions themselves.
 */
LTTNG_DEFINE_TRACE(lttng_statedump_block_device,
	TP_PROTO(struct lttng_kernel_session *session,
		dev_t dev, const char *diskname),
	TP_ARGS(session, dev, diskname));

LTTNG_DEFINE_TRACE(lttng_statedump_end,
	TP_PROTO(struct lttng_kernel_session *session),
	TP_ARGS(session));

LTTNG_DEFINE_TRACE(lttng_statedump_interrupt,
	TP_PROTO(struct lttng_kernel_session *session,
		unsigned int irq, const char *chip_name,
		struct irqaction *action),
	TP_ARGS(session, irq, chip_name, action));

LTTNG_DEFINE_TRACE(lttng_statedump_file_descriptor,
	TP_PROTO(struct lttng_kernel_session *session,
		struct files_struct *files,
		int fd, const char *filename,
		unsigned int flags, fmode_t fmode),
	TP_ARGS(session, files, fd, filename, flags, fmode));

LTTNG_DEFINE_TRACE(lttng_statedump_start,
	TP_PROTO(struct lttng_kernel_session *session),
	TP_ARGS(session));

LTTNG_DEFINE_TRACE(lttng_statedump_process_state,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		int type, int mode, int submode, int status,
		struct files_struct *files),
	TP_ARGS(session, p, type, mode, submode, status, files));

LTTNG_DEFINE_TRACE(lttng_statedump_process_pid_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct pid_namespace *pid_ns),
	TP_ARGS(session, p, pid_ns));

/* cgroup namespaces only exist on kernels >= 4.6. */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,6,0))
LTTNG_DEFINE_TRACE(lttng_statedump_process_cgroup_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct cgroup_namespace *cgroup_ns),
	TP_ARGS(session, p, cgroup_ns));
#endif

LTTNG_DEFINE_TRACE(lttng_statedump_process_ipc_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct ipc_namespace *ipc_ns),
	TP_ARGS(session, p, ipc_ns));

#ifndef LTTNG_MNT_NS_MISSING_HEADER
LTTNG_DEFINE_TRACE(lttng_statedump_process_mnt_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct mnt_namespace *mnt_ns),
	TP_ARGS(session, p, mnt_ns));
#endif

LTTNG_DEFINE_TRACE(lttng_statedump_process_net_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct net *net_ns),
	TP_ARGS(session, p, net_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_process_user_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct user_namespace *user_ns),
	TP_ARGS(session, p, user_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_process_uts_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct uts_namespace *uts_ns),
	TP_ARGS(session, p, uts_ns));

/* struct time_namespace stays an incomplete type on kernels without it. */
LTTNG_DEFINE_TRACE(lttng_statedump_process_time_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct time_namespace *time_ns),
	TP_ARGS(session, p, time_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_network_interface,
	TP_PROTO(struct lttng_kernel_session *session,
		struct net_device *dev, struct in_ifaddr *ifa),
	TP_ARGS(session, dev, ifa));

/* CPU topology dump takes struct cpuinfo_x86 — x86-only facility. */
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
LTTNG_DEFINE_TRACE(lttng_statedump_cpu_topology,
	TP_PROTO(struct lttng_kernel_session *session, struct cpuinfo_x86 *c),
	TP_ARGS(session, c));
#endif
149
/* Context handed to the iterate_fd() callback for one fd-table dump. */
struct lttng_fd_ctx {
	char *page;				/* scratch page used by d_path() */
	struct lttng_kernel_session *session;	/* session receiving the events */
	struct files_struct *files;		/* fd table currently being dumped */
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];	/* one delayed work per CPU */
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);	/* woken when last worker runs */
static atomic_t kernel_threads_to_run;		/* count of workers still pending */
162
/* Thread classification reported in process_state events. */
enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

/* Execution mode reported in process_state events. */
enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

/* Execution submode reported in process_state events. */
enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

/* Scheduling/exit state reported in process_state events. */
enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};
192
193
/*
 * Kernel-version-dependent helpers to obtain a partition's name and dev_t.
 * Three generations of the block layer API are covered:
 * - >= 6.0:  bdevname() was removed; format through the %pg printk specifier;
 * - >= 5.11: partitions are represented by struct block_device;
 * - older:   partitions are represented by struct hd_struct.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,0,0))

#define LTTNG_PART_STRUCT_TYPE struct block_device

/*
 * Format the device name of 'part' into name_buf (BDEVNAME_SIZE bytes).
 * Returns 0 on success, -ENOSYS on formatting error or truncation.
 */
static
int lttng_get_part_name(struct gendisk *disk, struct block_device *part, char *name_buf)
{
	int ret;

	ret = snprintf(name_buf, BDEVNAME_SIZE, "%pg", part);
	if (ret < 0 || ret >= BDEVNAME_SIZE)
		return -ENOSYS;

	return 0;
}

/* Return the device number of the partition. */
static
dev_t lttng_get_part_devt(struct block_device *part)
{
	return part->bd_dev;
}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,11,0))

#define LTTNG_PART_STRUCT_TYPE struct block_device

/*
 * Same contract as above, using bdevname() which still exists on these
 * kernels and takes the struct block_device directly.
 */
static
int lttng_get_part_name(struct gendisk *disk, struct block_device *part, char *name_buf)
{
	const char *p;

	p = bdevname(part, name_buf);
	if (!p)
		return -ENOSYS;

	return 0;
}

static
dev_t lttng_get_part_devt(struct block_device *part)
{
	return part->bd_dev;
}

#else

#define LTTNG_PART_STRUCT_TYPE struct hd_struct

/*
 * Same contract as above for kernels where partitions are hd_struct.
 * Note: only bd_disk and bd_part of the on-stack bdev are initialized;
 * bdevname() only reads those fields on these kernels.
 */
static
int lttng_get_part_name(struct gendisk *disk, struct hd_struct *part, char *name_buf)
{
	const char *p;
	struct block_device bdev;

	/*
	 * Create a partial 'struct blockdevice' to use
	 * 'bdevname()' which is a simple wrapper over
	 * 'disk_name()' but has the honor to be EXPORT_SYMBOL.
	 */
	bdev.bd_disk = disk;
	bdev.bd_part = part;

	p = bdevname(&bdev, name_buf);
	if (!p)
		return -ENOSYS;

	return 0;
}

static
dev_t lttng_get_part_devt(struct hd_struct *part)
{
	return part_devt(part);
}
#endif
269
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,12,0))
/*
 * Emit one block_device statedump event for the whole-disk bdev and each
 * of its partitions (kernels >= 5.12 keep partitions in an xarray).
 * Returns 0 on success, -ENOSYS if a partition name cannot be formatted.
 */
static
int lttng_statedump_each_block_device(struct lttng_kernel_session *session, struct gendisk *disk)
{
	struct block_device *part;
	unsigned long idx;
	int ret = 0;

	/* Include partition 0 */
	idx = 0;

	rcu_read_lock();
	xa_for_each(&disk->part_tbl, idx, part) {
		char name_buf[BDEVNAME_SIZE];

		/*
		 * Skip empty partitions; the whole-disk bdev (not a
		 * partition) is always dumped.
		 */
		if (bdev_is_partition(part) && !bdev_nr_sectors(part))
			continue;

		if (lttng_get_part_name(disk, part, name_buf) == -ENOSYS) {
			ret = -ENOSYS;
			goto end;
		}
		trace_lttng_statedump_block_device(session, lttng_get_part_devt(part),
				name_buf);
	}
end:
	rcu_read_unlock();
	return ret;
}
#else
/*
 * Same as above for kernels < 5.12, which expose partitions through the
 * disk_part_iter API. DISK_PITER_INCL_PART0 includes the whole disk.
 */
static
int lttng_statedump_each_block_device(struct lttng_kernel_session *session, struct gendisk *disk)
{
	struct disk_part_iter piter;
	LTTNG_PART_STRUCT_TYPE *part;

	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);

	while ((part = disk_part_iter_next(&piter))) {
		char name_buf[BDEVNAME_SIZE];

		if (lttng_get_part_name(disk, part, name_buf) == -ENOSYS) {
			disk_part_iter_exit(&piter);
			return -ENOSYS;
		}
		trace_lttng_statedump_block_device(session, lttng_get_part_devt(part),
				name_buf);
	}
	disk_part_iter_exit(&piter);

	return 0;
}
#endif
324
/*
 * Walk every disk registered with the block class and dump it along with
 * its partitions. Returns 0 on success, -ENOSYS when the block class or
 * disk device type cannot be resolved, or the status of the last
 * enumerated disk.
 *
 * NOTE(review): 'ret' is overwritten on every loop iteration, so a
 * failure on one disk is masked if a later disk succeeds — confirm this
 * best-effort semantic is intended.
 */
static
int lttng_enumerate_block_devices(struct lttng_kernel_session *session)
{
	struct class *ptr_block_class;
	struct device_type *ptr_disk_type;
	struct class_dev_iter iter;
	struct device *dev;
	int ret = 0;

	ptr_block_class = wrapper_get_block_class();
	if (!ptr_block_class) {
		ret = -ENOSYS;
		goto end;
	}
	ptr_disk_type = wrapper_get_disk_type();
	if (!ptr_disk_type) {
		ret = -ENOSYS;
		goto end;
	}
	class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct gendisk *disk = dev_to_disk(dev);

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & LTTNG_GENHD_FL_HIDDEN))
			continue;

		ret = lttng_statedump_each_block_device(session, disk);
	}
	class_dev_iter_exit(&iter);
end:
	return ret;
}
362
363 #ifdef CONFIG_INET
364
365 static
366 void lttng_enumerate_device(struct lttng_kernel_session *session,
367 struct net_device *dev)
368 {
369 struct in_device *in_dev;
370 struct in_ifaddr *ifa;
371
372 if (dev->flags & IFF_UP) {
373 in_dev = in_dev_get(dev);
374 if (in_dev) {
375 for (ifa = in_dev->ifa_list; ifa != NULL;
376 ifa = ifa->ifa_next) {
377 trace_lttng_statedump_network_interface(
378 session, dev, ifa);
379 }
380 in_dev_put(in_dev);
381 }
382 } else {
383 trace_lttng_statedump_network_interface(
384 session, dev, NULL);
385 }
386 }
387
/*
 * Dump every network device of the initial network namespace while
 * holding dev_base_lock for read. Always returns 0.
 */
static
int lttng_enumerate_network_ip_interface(struct lttng_kernel_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
400 #else /* CONFIG_INET */
/* No-op stub when the kernel is built without CONFIG_INET. */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_kernel_session *session)
{
	return 0;
}
406 #endif /* CONFIG_INET */
407
/*
 * iterate_fd() callback: emit one file_descriptor statedump event for a
 * single open file. 'p' is the struct lttng_fd_ctx set up by
 * lttng_enumerate_files(). Always returns 0 so iteration continues.
 * Falls back to the dentry name when the full path does not fit in the
 * scratch page.
 */
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	/* Resolve the path into the caller-provided scratch page. */
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session,
			ctx->files, fd, dentry->d_name.name, flags,
			file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session,
		ctx->files, fd, s, flags, file->f_mode);
end:
	return 0;
}
449
450 /* Called with task lock held. */
451 static
452 void lttng_enumerate_files(struct lttng_kernel_session *session,
453 struct files_struct *files,
454 char *tmp)
455 {
456 struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };
457
458 iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
459 }
460
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
/*
 * Emit one cpu_topology statedump event per possible CPU.
 * Always returns 0.
 */
static
int lttng_enumerate_cpu_topology(struct lttng_kernel_session *session)
{
	int cpu;

	/* Iterates cpu_possible_mask, like the open-coded cpumask loop. */
	for_each_possible_cpu(cpu)
		trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));

	return 0;
}
#else
/* No-op on architectures without CPU topology statedump support. */
static
int lttng_enumerate_cpu_topology(struct lttng_kernel_session *session)
{
	return 0;
}
#endif
482
#if 0
/*
 * Intentionally disabled code, kept for reference until the locking
 * problem below is resolved.
 *
 * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_kernel_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->f_path.dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_kernel_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif
530
/*
 * Emit one interrupt statedump event per registered irqaction on every
 * IRQ descriptor. Always returns 0.
 */
static
int lttng_list_interrupts(struct lttng_kernel_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

	/*
	 * for_each_irq_desc() expands to irq_to_desc(); redirect it to the
	 * lttng wrapper for the duration of this function.
	 */
#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		/* Walk the action chain with irqs off, descriptor locked. */
		local_irq_save(flags);
		raw_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		raw_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
#undef irq_to_desc
}
557
/*
 * Statedump the task's namespaces using the proc filesystem inode number as
 * the unique identifier. The user and pid ns are nested and will be dumped
 * recursively.
 *
 * Called with task lock held.
 */
static
void lttng_statedump_process_ns(struct lttng_kernel_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;
	struct user_namespace *user_ns;

	/*
	 * The pid and user namespaces are special, they are nested and
	 * accessed with specific functions instead of the nsproxy struct
	 * like the other namespaces.
	 */
	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
		/* Walk up to the parent pid namespace, stop at the root. */
		pid_ns = pid_ns ? pid_ns->parent : NULL;
	} while (pid_ns);


	user_ns = task_cred_xxx(p, user_ns);
	do {
		trace_lttng_statedump_process_user_ns(session, p, user_ns);
		/*
		 * trace_lttng_statedump_process_user_ns() internally
		 * checks whether user_ns is NULL. While this does not
		 * appear to be a possible return value for
		 * task_cred_xxx(), err on the safe side and check
		 * for NULL here as well to be consistent with the
		 * paranoid behavior of
		 * trace_lttng_statedump_process_user_ns().
		 */
		user_ns = user_ns ? user_ns->parent : NULL;
	} while (user_ns);

	/*
	 * Back and forth on locking strategy within Linux upstream for nsproxy.
	 * See Linux upstream commit 728dba3a39c66b3d8ac889ddbe38b5b1c264aec3
	 * "namespaces: Use task_lock and not rcu to protect nsproxy"
	 * for details.
	 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
		LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
	/* Task lock (held by our caller) protects nsproxy on these kernels. */
	proxy = p->nsproxy;
#else
	rcu_read_lock();
	proxy = task_nsproxy(p);
#endif
	if (proxy) {
		/* Dump the remaining, non-nested namespaces via nsproxy. */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,6,0))
		trace_lttng_statedump_process_cgroup_ns(session, p, proxy->cgroup_ns);
#endif
		trace_lttng_statedump_process_ipc_ns(session, p, proxy->ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
		trace_lttng_statedump_process_mnt_ns(session, p, proxy->mnt_ns);
#endif
		trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
		trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) || \
		LTTNG_RHEL_KERNEL_RANGE(4,18,0,305,0,0, 4,19,0,0,0,0))
		trace_lttng_statedump_process_time_ns(session, p, proxy->time_ns);
#endif
	}
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
		LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
	/* (nothing) */
#else
	rcu_read_unlock();
#endif
}
643
/*
 * Walk every process and each of its threads, emitting process_state and
 * namespace events, plus (once per distinct fd table) the open file
 * descriptors. Returns 0 on success, -ENOMEM if the scratch page used
 * for path resolution cannot be allocated.
 */
static
int lttng_enumerate_process_states(struct lttng_kernel_session *session)
{
	struct task_struct *g, *p;
	char *tmp;

	/* Scratch page handed down to d_path() via lttng_enumerate_files(). */
	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	rcu_read_lock();
	for_each_process(g) {
		struct files_struct *prev_files = NULL;

		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;
			struct files_struct *files;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (lttng_task_is_running(p)) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (lttng_get_task_state(p) &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of t->mm is to filter out kernel
			 * threads; Viewer will further filter out if a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			files = p->files;

			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, files);
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			/*
			 * As an optimisation for the common case, do not
			 * repeat information for the same files_struct in
			 * two consecutive threads. This is the common case
			 * for threads sharing the same fd table. RCU guarantees
			 * that the same files_struct pointer is not re-used
			 * throughout processes/threads iteration.
			 */
			if (files && files != prev_files) {
				lttng_enumerate_files(session, files, tmp);
				prev_files = files;
			}
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	free_page((unsigned long) tmp);

	return 0;
}
728
/*
 * Per-CPU delayed work handler: decrement the pending-worker count and
 * wake up the waiter once the last worker has run.
 */
static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}
736
/*
 * Perform the full state dump for 'session': processes, interrupts,
 * network interfaces, block devices and CPU topology, bracketed by the
 * statedump_start/statedump_end events.
 *
 * Returns 0 on success or the first fatal error from an enumeration
 * step. A missing block-device enumeration facility (-ENOSYS) is only
 * warned about, not treated as fatal.
 */
static
int do_lttng_statedump(struct lttng_kernel_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 * 	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}
	ret = lttng_enumerate_cpu_topology(session);
	if (ret)
		return ret;

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where is was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	lttng_cpus_read_lock();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	lttng_cpus_read_unlock();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}
795
/*
 * Public entry point used to trigger a full state dump for 'session'.
 * Returns 0 on success or a negative errno from do_lttng_statedump().
 *
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_kernel_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);
804
/* Module init: best-effort tracepoint signature fixup; always succeeds. */
static
int __init lttng_statedump_init(void)
{
	/*
	 * Allow module to load even if the fixup cannot be done. This
	 * will allow seemless transition when the underlying issue fix
	 * is merged into the Linux kernel, and when tracepoint.c
	 * "tracepoint_module_notify" is turned into a static function.
	 */
	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	return 0;
}

module_init(lttng_statedump_init);
819
/* Module exit: nothing to tear down. */
static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);
826
827 MODULE_LICENSE("GPL and additional rights");
828 MODULE_AUTHOR("Jean-Hugues Deschenes");
829 MODULE_DESCRIPTION("LTTng statedump provider");
830 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
831 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
832 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
833 LTTNG_MODULES_EXTRAVERSION);
This page took 0.04937 seconds and 3 git commands to generate.