Fix: dev_base_lock removed in linux 6.9-rc1
src/lttng-statedump-impl.c (lttng-modules.git)
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-statedump.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Changes:
 *	Eric Clement: Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers: Fix kernel threads
 *	Various updates
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqnr.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>

#include <linux/blkdev.h>

#include <lttng/events.h>
#include <lttng/tracer.h>
#include <wrapper/cpu.h>
#include <wrapper/irqdesc.h>
#include <wrapper/fdtable.h>
#include <wrapper/tracepoint.h>
#include <wrapper/blkdev.h>
#include <wrapper/sched.h>

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH instrumentation/events
#define TRACE_INCLUDE_FILE lttng-statedump
#define LTTNG_INSTRUMENTATION
#include <instrumentation/events/lttng-statedump.h>

LTTNG_DEFINE_TRACE(lttng_statedump_block_device,
	TP_PROTO(struct lttng_kernel_session *session,
		dev_t dev, const char *diskname),
	TP_ARGS(session, dev, diskname));

LTTNG_DEFINE_TRACE(lttng_statedump_end,
	TP_PROTO(struct lttng_kernel_session *session),
	TP_ARGS(session));

LTTNG_DEFINE_TRACE(lttng_statedump_interrupt,
	TP_PROTO(struct lttng_kernel_session *session,
		unsigned int irq, const char *chip_name,
		struct irqaction *action),
	TP_ARGS(session, irq, chip_name, action));

LTTNG_DEFINE_TRACE(lttng_statedump_file_descriptor,
	TP_PROTO(struct lttng_kernel_session *session,
		struct files_struct *files,
		int fd, const char *filename,
		unsigned int flags, fmode_t fmode),
	TP_ARGS(session, files, fd, filename, flags, fmode));

LTTNG_DEFINE_TRACE(lttng_statedump_start,
	TP_PROTO(struct lttng_kernel_session *session),
	TP_ARGS(session));

LTTNG_DEFINE_TRACE(lttng_statedump_process_state,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		int type, int mode, int submode, int status,
		struct files_struct *files),
	TP_ARGS(session, p, type, mode, submode, status, files));

LTTNG_DEFINE_TRACE(lttng_statedump_process_pid_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct pid_namespace *pid_ns),
	TP_ARGS(session, p, pid_ns));

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,6,0))
LTTNG_DEFINE_TRACE(lttng_statedump_process_cgroup_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct cgroup_namespace *cgroup_ns),
	TP_ARGS(session, p, cgroup_ns));
#endif

LTTNG_DEFINE_TRACE(lttng_statedump_process_ipc_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct ipc_namespace *ipc_ns),
	TP_ARGS(session, p, ipc_ns));

#ifndef LTTNG_MNT_NS_MISSING_HEADER
LTTNG_DEFINE_TRACE(lttng_statedump_process_mnt_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct mnt_namespace *mnt_ns),
	TP_ARGS(session, p, mnt_ns));
#endif

LTTNG_DEFINE_TRACE(lttng_statedump_process_net_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct net *net_ns),
	TP_ARGS(session, p, net_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_process_user_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct user_namespace *user_ns),
	TP_ARGS(session, p, user_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_process_uts_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct uts_namespace *uts_ns),
	TP_ARGS(session, p, uts_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_process_time_ns,
	TP_PROTO(struct lttng_kernel_session *session,
		struct task_struct *p,
		struct time_namespace *time_ns),
	TP_ARGS(session, p, time_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_network_interface,
	TP_PROTO(struct lttng_kernel_session *session,
		struct net_device *dev, struct in_ifaddr *ifa),
	TP_ARGS(session, dev, ifa));

#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
LTTNG_DEFINE_TRACE(lttng_statedump_cpu_topology,
	TP_PROTO(struct lttng_kernel_session *session, struct cpuinfo_x86 *c),
	TP_ARGS(session, c));
#endif

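/*
 * Context passed to the file descriptor dump callback: a scratch page
 * for d_path(), the target session and the files_struct being walked.
 */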
struct lttng_fd_ctx {
	char *page;
	struct lttng_kernel_session *session;
	struct files_struct *files;
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

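/*
 * Partition accessors, selected by kernel version: >= 6.0 (and the
 * matching RHEL range) formats the partition name with the "%pg" printk
 * specifier, >= 5.11 calls bdevname() on a struct block_device, and
 * older kernels wrap the hd_struct in a temporary struct block_device
 * so bdevname() can still be used. All variants expose the partition
 * name and dev_t in a uniform way.
 */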
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,0,0) || \
	LTTNG_RHEL_KERNEL_RANGE(5,14,0,163,0,0, 5,15,0,0,0,0))

#define LTTNG_PART_STRUCT_TYPE struct block_device

static
int lttng_get_part_name(struct gendisk *disk, struct block_device *part, char *name_buf)
{
	int ret;

	ret = snprintf(name_buf, BDEVNAME_SIZE, "%pg", part);
	if (ret < 0 || ret >= BDEVNAME_SIZE)
		return -ENOSYS;

	return 0;
}

static
dev_t lttng_get_part_devt(struct block_device *part)
{
	return part->bd_dev;
}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,11,0))

#define LTTNG_PART_STRUCT_TYPE struct block_device

static
int lttng_get_part_name(struct gendisk *disk, struct block_device *part, char *name_buf)
{
	const char *p;

	p = bdevname(part, name_buf);
	if (!p)
		return -ENOSYS;

	return 0;
}

static
dev_t lttng_get_part_devt(struct block_device *part)
{
	return part->bd_dev;
}

#else

#define LTTNG_PART_STRUCT_TYPE struct hd_struct

static
int lttng_get_part_name(struct gendisk *disk, struct hd_struct *part, char *name_buf)
{
	const char *p;
	struct block_device bdev;

	/*
	 * Create a partial 'struct block_device' so we can use
	 * 'bdevname()', which is a simple wrapper over 'disk_name()'
	 * but has the honor to be EXPORT_SYMBOL.
	 */
	bdev.bd_disk = disk;
	bdev.bd_part = part;

	p = bdevname(&bdev, name_buf);
	if (!p)
		return -ENOSYS;

	return 0;
}

static
dev_t lttng_get_part_devt(struct hd_struct *part)
{
	return part_devt(part);
}
#endif

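/*
 * Emit one lttng_statedump_block_device event per partition of the
 * given disk, including partition 0 (the whole disk). On >= 5.12 the
 * partition table is an xarray walked under RCU; older kernels use the
 * disk_part_iter API.
 */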
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,12,0))
static
int lttng_statedump_each_block_device(struct lttng_kernel_session *session, struct gendisk *disk)
{
	struct block_device *part;
	unsigned long idx;
	int ret = 0;

	/* Include partition 0 */
	idx = 0;

	rcu_read_lock();
	xa_for_each(&disk->part_tbl, idx, part) {
		char name_buf[BDEVNAME_SIZE];

		/* Exclude non-partition bdevs and empty partitions. */
		if (bdev_is_partition(part) && !bdev_nr_sectors(part))
			continue;

		if (lttng_get_part_name(disk, part, name_buf) == -ENOSYS) {
			ret = -ENOSYS;
			goto end;
		}
		trace_lttng_statedump_block_device(session, lttng_get_part_devt(part),
				name_buf);
	}
end:
	rcu_read_unlock();
	return ret;
}
#else
static
int lttng_statedump_each_block_device(struct lttng_kernel_session *session, struct gendisk *disk)
{
	struct disk_part_iter piter;
	LTTNG_PART_STRUCT_TYPE *part;

	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);

	while ((part = disk_part_iter_next(&piter))) {
		char name_buf[BDEVNAME_SIZE];

		if (lttng_get_part_name(disk, part, name_buf) == -ENOSYS) {
			disk_part_iter_exit(&piter);
			return -ENOSYS;
		}
		trace_lttng_statedump_block_device(session, lttng_get_part_devt(part),
				name_buf);
	}
	disk_part_iter_exit(&piter);

	return 0;
}
#endif

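/*
 * Walk the block class device list and dump every disk that has a
 * non-zero capacity and is not hidden. Goes through wrapper helpers to
 * look up the block class and disk device type, which may not be
 * directly accessible to modules.
 */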
static
int lttng_enumerate_block_devices(struct lttng_kernel_session *session)
{
	struct class *ptr_block_class;
	struct device_type *ptr_disk_type;
	struct class_dev_iter iter;
	struct device *dev;
	int ret = 0;

	ptr_block_class = wrapper_get_block_class();
	if (!ptr_block_class) {
		ret = -ENOSYS;
		goto end;
	}
	ptr_disk_type = wrapper_get_disk_type();
	if (!ptr_disk_type) {
		ret = -ENOSYS;
		goto end;
	}
	class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct gendisk *disk = dev_to_disk(dev);

		/*
		 * Don't show empty devices or things that have been
		 * suppressed.
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & LTTNG_GENHD_FL_HIDDEN))
			continue;

		ret = lttng_statedump_each_block_device(session, disk);
	}
	class_dev_iter_exit(&iter);
end:
	return ret;
}

#ifdef CONFIG_INET

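/*
 * Dump one network interface: for interfaces that are up, one event per
 * IPv4 address; for interfaces that are down, a single event with a
 * NULL address.
 */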
static
void lttng_enumerate_device(struct lttng_kernel_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

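/*
 * dev_base_lock was removed in Linux 6.9-rc1, so on >= 6.9 the
 * interface list is walked under the RTNL mutex; older kernels keep
 * taking dev_base_lock for reading.
 */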
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,9,0))
static
int lttng_enumerate_network_ip_interface(struct lttng_kernel_session *session)
{
	struct net_device *dev;

	rtnl_lock();
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	rtnl_unlock();

	return 0;
}
#else
static
int lttng_enumerate_network_ip_interface(struct lttng_kernel_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,9,0)) */
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_kernel_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */

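/*
 * iterate_fd() callback: emit one lttng_statedump_file_descriptor event
 * per open file, with its resolved path (or the dentry name if d_path()
 * fails) and its userspace-visible flags.
 */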
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session,
			ctx->files, fd, dentry->d_name.name, flags,
			file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session,
		ctx->files, fd, s, flags, file->f_mode);
end:
	return 0;
}

/* Called with task lock held. */
static
void lttng_enumerate_files(struct lttng_kernel_session *session,
		struct files_struct *files,
		char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };

	iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
}

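/*
 * CPU topology dump, only wired up where
 * LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY is defined (the tracepoint takes an
 * x86 cpuinfo); a no-op stub is provided otherwise.
 */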
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
static
int lttng_enumerate_cpu_topology(struct lttng_kernel_session *session)
{
	int cpu;
	const cpumask_t *cpumask = cpu_possible_mask;

	for (cpu = cpumask_first(cpumask); cpu < nr_cpu_ids;
			cpu = cpumask_next(cpu, cpumask)) {
		trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));
	}

	return 0;
}
#else
static
int lttng_enumerate_cpu_topology(struct lttng_kernel_session *session)
{
	return 0;
}
#endif

#if 0
/*
 * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_kernel_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->f_path.dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_kernel_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

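/*
 * Dump every registered interrupt handler: one event per irqaction,
 * with the irq number and chip name, taken under the descriptor lock
 * with local interrupts disabled.
 */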
static
int lttng_list_interrupts(struct lttng_kernel_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		raw_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		raw_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
#undef irq_to_desc
}

/*
 * Statedump the task's namespaces using the proc filesystem inode number as
 * the unique identifier. The user and pid ns are nested and will be dumped
 * recursively.
 *
 * Called with task lock held.
 */
static
void lttng_statedump_process_ns(struct lttng_kernel_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;
	struct user_namespace *user_ns;

	/*
	 * The pid and user namespaces are special, they are nested and
	 * accessed with specific functions instead of the nsproxy struct
	 * like the other namespaces.
	 */
	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
		pid_ns = pid_ns ? pid_ns->parent : NULL;
	} while (pid_ns);

	user_ns = task_cred_xxx(p, user_ns);
	do {
		trace_lttng_statedump_process_user_ns(session, p, user_ns);
		/*
		 * trace_lttng_statedump_process_user_ns() internally
		 * checks whether user_ns is NULL. While this does not
		 * appear to be a possible return value for
		 * task_cred_xxx(), err on the safe side and check
		 * for NULL here as well to be consistent with the
		 * paranoid behavior of
		 * trace_lttng_statedump_process_user_ns().
		 */
		user_ns = user_ns ? user_ns->parent : NULL;
	} while (user_ns);

	proxy = p->nsproxy;

	if (proxy) {
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,6,0))
		trace_lttng_statedump_process_cgroup_ns(session, p, proxy->cgroup_ns);
#endif
		trace_lttng_statedump_process_ipc_ns(session, p, proxy->ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
		trace_lttng_statedump_process_mnt_ns(session, p, proxy->mnt_ns);
#endif
		trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
		trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,305,0,0, 4,19,0,0,0,0))
		trace_lttng_statedump_process_time_ns(session, p, proxy->time_ns);
#endif
	}
}

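/*
 * Walk every thread of every process under RCU and dump its state,
 * namespaces and (once per shared files_struct) its open file
 * descriptors. A single scratch page is reused for d_path().
 */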
static
int lttng_enumerate_process_states(struct lttng_kernel_session *session)
{
	struct task_struct *g, *p;
	char *tmp;

	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	rcu_read_lock();
	for_each_process(g) {
		struct files_struct *prev_files = NULL;

		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;
			struct files_struct *files;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (lttng_task_is_running(p)) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (lttng_get_task_state(p) &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Check p->mm to filter out kernel threads;
			 * the viewer will further determine whether a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			files = p->files;

			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, files);
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			/*
			 * As an optimisation for the common case, do not
			 * repeat information for the same files_struct in
			 * two consecutive threads. This is the common case
			 * for threads sharing the same fd table. RCU guarantees
			 * that the same files_struct pointer is not re-used
			 * throughout processes/threads iteration.
			 */
			if (files && files != prev_files) {
				lttng_enumerate_files(session, files, tmp);
				prev_files = files;
			}
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	free_page((unsigned long) tmp);

	return 0;
}

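/*
 * Per-CPU work item: the last one to complete wakes up
 * do_lttng_statedump(), which waits on statedump_wq.
 */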
static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

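/*
 * Perform the actual state dump for a session: processes, interrupts,
 * network interfaces, block devices and CPU topology, then synchronize
 * with a per-CPU work item before emitting lttng_statedump_end.
 */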
static
int do_lttng_statedump(struct lttng_kernel_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 *	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}
	ret = lttng_enumerate_cpu_topology(session);
	if (ret)
		return ret;

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work item on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was
	 * in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	lttng_cpus_read_lock();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	lttng_cpus_read_unlock();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_kernel_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);

static
int __init lttng_statedump_init(void)
{
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("LTTng statedump provider");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);