/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-statedump.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Changes:
 *	Eric Clement:			Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 *					Various updates
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fdtable.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>

#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/irqdesc.h>
#include <wrapper/spinlock.h>
#include <wrapper/fdtable.h>
#include <wrapper/irq.h>
#include <wrapper/tracepoint.h>
#include <wrapper/genhd.h>
#include <wrapper/file.h>
#include <wrapper/time.h>

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
#include <linux/irq.h>
#endif

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module
#define TRACE_INCLUDE_FILE lttng-statedump
#define LTTNG_INSTRUMENTATION
#include <instrumentation/events/lttng-module/lttng-statedump.h>

DEFINE_TRACE(lttng_statedump_block_device);
DEFINE_TRACE(lttng_statedump_end);
DEFINE_TRACE(lttng_statedump_interrupt);
DEFINE_TRACE(lttng_statedump_file_descriptor);
DEFINE_TRACE(lttng_statedump_start);
DEFINE_TRACE(lttng_statedump_process_state);
DEFINE_TRACE(lttng_statedump_network_interface);

struct lttng_fd_ctx {
	char *page;
	struct lttng_session *session;
	struct task_struct *p;
	struct files_struct *files;
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

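/*
 * Enumerate every block device and its partitions, emitting one
 * lttng_statedump_block_device event per partition. Returns -ENOSYS
 * when the block class, disk type or disk name cannot be resolved
 * through the wrappers.
 */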
static
int lttng_enumerate_block_devices(struct lttng_session *session)
{
	struct class *ptr_block_class;
	struct device_type *ptr_disk_type;
	struct class_dev_iter iter;
	struct device *dev;

	ptr_block_class = wrapper_get_block_class();
	if (!ptr_block_class)
		return -ENOSYS;
	ptr_disk_type = wrapper_get_disk_type();
	if (!ptr_disk_type) {
		return -ENOSYS;
	}
	class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct disk_part_iter piter;
		struct gendisk *disk = dev_to_disk(dev);
		struct hd_struct *part;

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter))) {
			char name_buf[BDEVNAME_SIZE];
			char *p;

			p = wrapper_disk_name(disk, part->partno, name_buf);
			if (!p) {
				disk_part_iter_exit(&piter);
				class_dev_iter_exit(&iter);
				return -ENOSYS;
			}
			trace_lttng_statedump_block_device(session,
					part_devt(part), name_buf);
		}
		disk_part_iter_exit(&piter);
	}
	class_dev_iter_exit(&iter);
	return 0;
}

#ifdef CONFIG_INET

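/*
 * Emit a lttng_statedump_network_interface event for each IPv4 address
 * of an interface that is up, or a single event with a NULL address
 * when the interface is down.
 */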
static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */

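/*
 * Callback invoked by lttng_iterate_fd() for each open file descriptor
 * of a task. Resolves the file path (falling back to the dentry name if
 * d_path() fails) and emits a lttng_statedump_file_descriptor event.
 */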
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd,
			dentry->d_name.name, flags, file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd, s,
		flags, file->f_mode);
end:
	return 0;
}

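/*
 * Walk the file descriptor table of a single task, with the task lock
 * held, dumping each entry through lttng_dump_one_fd().
 */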
static
void lttng_enumerate_task_fd(struct lttng_session *session,
		struct task_struct *p, char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .p = p };
	struct files_struct *files;

	task_lock(p);
	files = p->files;
	if (!files)
		goto end;
	ctx.files = files;
	lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
end:
	task_unlock(p);
}

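/*
 * Enumerate the open file descriptors of every process on the system.
 * A single free page is used as scratch space for d_path().
 */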
static
int lttng_enumerate_file_descriptors(struct lttng_session *session)
{
	struct task_struct *p;
	char *tmp;

	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* Enumerate active file descriptors */
	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_fd(session, p, tmp);
	rcu_read_unlock();
	free_page((unsigned long) tmp);
	return 0;
}

#if 0
/*
 * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
#define irq_desc_get_chip(desc) get_irq_desc_chip(desc)
#endif

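/*
 * Emit a lttng_statedump_interrupt event for each registered irqaction
 * of every interrupt descriptor, with the descriptor lock held and
 * local interrupts disabled.
 */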
static
int lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		wrapper_desc_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		wrapper_desc_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
#undef irq_to_desc
}
#else
static inline
int lttng_list_interrupts(struct lttng_session *session)
{
	return 0;
}
#endif

/*
 * Called with task lock held.
 */
static
void lttng_statedump_process_ns(struct lttng_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct pid_namespace *pid_ns;

	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_state(session,
			p, type, mode, submode, status, pid_ns);
		pid_ns = pid_ns->parent;
	} while (pid_ns);
}

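/*
 * Walk every thread of every process and record its status (running,
 * waiting, zombie, ...) and thread type (user or kernel thread), once
 * per PID namespace the task belongs to.
 */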
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process(g) {
		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of t->mm is to filter out kernel
			 * threads; Viewer will further filter out if a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	return 0;
}

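/*
 * Per-CPU work item: decrement the pending-thread count and wake up
 * do_lttng_statedump() once every CPU has run its work item.
 */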
static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

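/*
 * Perform the complete state dump for a session: processes, file
 * descriptors, interrupts, network interfaces and block devices, then
 * wait until a work item has run on every online CPU.
 */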
static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_file_descriptors(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 *	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was
	 * in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);

static
int __init lttng_statedump_init(void)
{
	/*
	 * Allow module to load even if the fixup cannot be done. This
	 * will allow seamless transition when the underlying issue fix
	 * is merged into the Linux kernel, and when tracepoint.c
	 * "tracepoint_module_notify" is turned into a static function.
	 */
	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("LTTng statedump provider");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);