Use exported symbol bdevname() instead of disk_name()
[lttng-modules.git] src/lttng-statedump-impl.c
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-statedump.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Changes:
 *	Eric Clement: Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 *	Various updates
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>

#include <lttng/events.h>
#include <lttng/tracer.h>
#include <wrapper/irqdesc.h>
#include <wrapper/fdtable.h>
#include <wrapper/namespace.h>
#include <wrapper/irq.h>
#include <wrapper/tracepoint.h>
#include <wrapper/genhd.h>
#include <wrapper/file.h>

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
#include <linux/irq.h>
#endif

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH instrumentation/events
#define TRACE_INCLUDE_FILE lttng-statedump
#define LTTNG_INSTRUMENTATION
#include <instrumentation/events/lttng-statedump.h>

DEFINE_TRACE(lttng_statedump_block_device);
DEFINE_TRACE(lttng_statedump_end);
DEFINE_TRACE(lttng_statedump_interrupt);
DEFINE_TRACE(lttng_statedump_file_descriptor);
DEFINE_TRACE(lttng_statedump_start);
DEFINE_TRACE(lttng_statedump_process_state);
DEFINE_TRACE(lttng_statedump_process_pid_ns);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
DEFINE_TRACE(lttng_statedump_process_cgroup_ns);
#endif
DEFINE_TRACE(lttng_statedump_process_ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
DEFINE_TRACE(lttng_statedump_process_mnt_ns);
#endif
DEFINE_TRACE(lttng_statedump_process_net_ns);
DEFINE_TRACE(lttng_statedump_process_user_ns);
DEFINE_TRACE(lttng_statedump_process_uts_ns);
DEFINE_TRACE(lttng_statedump_network_interface);
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
DEFINE_TRACE(lttng_statedump_cpu_topology);
#endif

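/*
 * Context handed to the file descriptor iteration callback: one page of
 * scratch memory for d_path(), plus the session and the files_struct being
 * dumped.
 */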
struct lttng_fd_ctx {
	char *page;
	struct lttng_session *session;
	struct files_struct *files;
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

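/*
 * Emit one lttng_statedump_block_device event per disk partition currently
 * known to the kernel, identified by device number and device name.
 */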
static
int lttng_enumerate_block_devices(struct lttng_session *session)
{
	struct class *ptr_block_class;
	struct device_type *ptr_disk_type;
	struct class_dev_iter iter;
	struct device *dev;

	ptr_block_class = wrapper_get_block_class();
	if (!ptr_block_class)
		return -ENOSYS;
	ptr_disk_type = wrapper_get_disk_type();
	if (!ptr_disk_type) {
		return -ENOSYS;
	}
	class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct disk_part_iter piter;
		struct gendisk *disk = dev_to_disk(dev);
		struct hd_struct *part;

		/*
		 * Don't show empty devices or things that have been
		 * suppressed.
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter))) {
			struct block_device bdev;
			char name_buf[BDEVNAME_SIZE];
			const char *p;

			/*
			 * Create a partial 'struct block_device' so we can
			 * use 'bdevname()', which is a simple wrapper around
			 * 'disk_name()' but has the advantage of being
			 * EXPORT_SYMBOL.
			 */
			bdev.bd_disk = disk;
			bdev.bd_part = part;

			p = bdevname(&bdev, name_buf);
			if (!p) {
				disk_part_iter_exit(&piter);
				class_dev_iter_exit(&iter);
				return -ENOSYS;
			}
			trace_lttng_statedump_block_device(session,
					part_devt(part), name_buf);
		}
		disk_part_iter_exit(&piter);
	}
	class_dev_iter_exit(&iter);
	return 0;
}

#ifdef CONFIG_INET

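/*
 * Emit one network_interface event per IPv4 address of an interface that is
 * up, or a single event with a NULL address if the interface is down.
 */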
static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */

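/*
 * Callback invoked by lttng_iterate_fd() for each open file descriptor:
 * resolve its path (or at least its dentry name) and emit a file_descriptor
 * event.
 */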
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session,
			ctx->files, fd, dentry->d_name.name, flags,
			file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session,
		ctx->files, fd, s, flags, file->f_mode);
end:
	return 0;
}

/* Called with task lock held. */
static
void lttng_enumerate_files(struct lttng_session *session,
		struct files_struct *files,
		char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };

	lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
}

#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
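/*
 * Emit one cpu_topology event per possible CPU, carrying that CPU's
 * cpu_data() entry.
 */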
static
int lttng_enumerate_cpu_topology(struct lttng_session *session)
{
	int cpu;
	const cpumask_t *cpumask = cpu_possible_mask;

	for (cpu = cpumask_first(cpumask); cpu < nr_cpu_ids;
			cpu = cpumask_next(cpu, cpumask)) {
		trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));
	}

	return 0;
}
#else
static
int lttng_enumerate_cpu_topology(struct lttng_session *session)
{
	return 0;
}
#endif

#if 0
/*
 * FIXME: we cannot take a mmap_sem while in an RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ

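/*
 * Emit one interrupt event per registered irqaction, walking every irq_desc
 * with the descriptor lock held and local interrupts disabled.
 */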
static
int lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		raw_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		raw_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
#undef irq_to_desc
}
#else
static inline
int lttng_list_interrupts(struct lttng_session *session)
{
	return 0;
}
#endif

/*
 * Statedump the task's namespaces using the proc filesystem inode number as
 * the unique identifier. The user and pid ns are nested and will be dumped
 * recursively.
 *
 * Called with task lock held.
 */
static
void lttng_statedump_process_ns(struct lttng_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;
	struct user_namespace *user_ns;

	/*
	 * The pid and user namespaces are special: they are nested and
	 * accessed with specific functions instead of through the nsproxy
	 * struct like the other namespaces.
	 */
	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
		pid_ns = pid_ns ? pid_ns->parent : NULL;
	} while (pid_ns);

	user_ns = task_cred_xxx(p, user_ns);
	do {
		trace_lttng_statedump_process_user_ns(session, p, user_ns);
		/*
		 * trace_lttng_statedump_process_user_ns() internally
		 * checks whether user_ns is NULL. While this does not
		 * appear to be a possible return value for
		 * task_cred_xxx(), err on the safe side and check
		 * for NULL here as well to be consistent with the
		 * paranoid behavior of
		 * trace_lttng_statedump_process_user_ns().
		 */
		user_ns = user_ns ? user_ns->lttng_user_ns_parent : NULL;
	} while (user_ns);

	/*
	 * Back and forth on locking strategy within Linux upstream for nsproxy.
	 * See Linux upstream commit 728dba3a39c66b3d8ac889ddbe38b5b1c264aec3
	 * "namespaces: Use task_lock and not rcu to protect nsproxy"
	 * for details.
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
		LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
	proxy = p->nsproxy;
#else
	rcu_read_lock();
	proxy = task_nsproxy(p);
#endif
	if (proxy) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
		trace_lttng_statedump_process_cgroup_ns(session, p, proxy->cgroup_ns);
#endif
		trace_lttng_statedump_process_ipc_ns(session, p, proxy->ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
		trace_lttng_statedump_process_mnt_ns(session, p, proxy->mnt_ns);
#endif
		trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
		trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
		LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
	/* (nothing) */
#else
	rcu_read_unlock();
#endif
}

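/*
 * Walk every thread of every process and emit its state and namespaces, plus
 * its open file descriptors for the first thread seen with a given
 * files_struct.
 */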
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;
	char *tmp;

	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	rcu_read_lock();
	for_each_process(g) {
		struct files_struct *prev_files = NULL;

		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;
			struct files_struct *files;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of p->mm is to filter out kernel
			 * threads; the viewer will further determine whether
			 * a user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			files = p->files;

			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, files);
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			/*
			 * As an optimisation for the common case, do not
			 * repeat information for the same files_struct in
			 * two consecutive threads. This is the common case
			 * for threads sharing the same fd table. RCU guarantees
			 * that the same files_struct pointer is not re-used
			 * throughout processes/threads iteration.
			 */
			if (files && files != prev_files) {
				lttng_enumerate_files(session, files, tmp);
				prev_files = files;
			}
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	free_page((unsigned long) tmp);

	return 0;
}

static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

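/*
 * Perform the complete state dump for a session: processes, interrupts,
 * network interfaces, block devices and CPU topology, then synchronize with
 * per-CPU work items before emitting the statedump_end event.
 */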
static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 *	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}
	ret = lttng_enumerate_cpu_topology(session);
	if (ret)
		return ret;

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);

static
int __init lttng_statedump_init(void)
{
	/*
	 * Allow the module to load even if the fixup cannot be done. This
	 * will allow a seamless transition when the underlying issue fix
	 * is merged into the Linux kernel, and when tracepoint.c
	 * "tracepoint_module_notify" is turned into a static function.
	 */
	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("LTTng statedump provider");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);