-/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
*
* lttng-statedump.c
*
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/fdtable.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/fdtable.h>
+#include <linux/irq.h>
+#include <linux/genhd.h>
#include <lttng-events.h>
#include <lttng-tracer.h>
-#include <wrapper/irqdesc.h>
-#include <wrapper/fdtable.h>
-#include <wrapper/namespace.h>
-#include <wrapper/irq.h>
-#include <wrapper/tracepoint.h>
-#include <wrapper/genhd.h>
-#include <wrapper/file.h>
-#include <wrapper/time.h>
-
-#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
-#include <linux/irq.h>
-#endif
/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
+/*
+ * Context handed to the per-fd callback (lttng_dump_one_fd) while
+ * walking one files_struct.
+ */
struct lttng_fd_ctx {
+	/* Scratch page owned by the caller (see lttng_enumerate_files). */
	char *page;
	struct lttng_session *session;
-	struct task_struct *p;
+	/* fd table being enumerated; replaces the old per-task pointer. */
	struct files_struct *files;
};
static
int lttng_enumerate_block_devices(struct lttng_session *session)
{
- struct class *ptr_block_class;
- struct device_type *ptr_disk_type;
struct class_dev_iter iter;
struct device *dev;
- ptr_block_class = wrapper_get_block_class();
- if (!ptr_block_class)
- return -ENOSYS;
- ptr_disk_type = wrapper_get_disk_type();
- if (!ptr_disk_type) {
- return -ENOSYS;
- }
- class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
+ class_dev_iter_init(&iter, gendisk_block_class(), NULL,
+ gendisk_device_type());
while ((dev = class_dev_iter_next(&iter))) {
struct disk_part_iter piter;
struct gendisk *disk = dev_to_disk(dev);
char name_buf[BDEVNAME_SIZE];
char *p;
- p = wrapper_disk_name(disk, part->partno, name_buf);
+ p = gendisk_name(disk, part->partno, name_buf);
if (!p) {
disk_part_iter_exit(&piter);
class_dev_iter_exit(&iter);
* the lock is taken, but we are not aware whether this is
* guaranteed or not, so play safe.
*/
- if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
+ if (fd < fdt->max_fds && close_on_exec(fd, fdt))
flags |= O_CLOEXEC;
if (IS_ERR(s)) {
struct dentry *dentry = file->f_path.dentry;
/* Make sure we give at least some info */
spin_lock(&dentry->d_lock);
- trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd,
- dentry->d_name.name, flags, file->f_mode);
+ trace_lttng_statedump_file_descriptor(ctx->session,
+ ctx->files, fd, dentry->d_name.name, flags,
+ file->f_mode);
spin_unlock(&dentry->d_lock);
goto end;
}
- trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd, s,
- flags, file->f_mode);
+ trace_lttng_statedump_file_descriptor(ctx->session,
+ ctx->files, fd, s, flags, file->f_mode);
end:
return 0;
}
+/* Called with task lock held. */
+/*
+ * Enumerate every file descriptor of @files into the statedump trace.
+ * @tmp is a free page owned by the caller, stashed in the context as
+ * scratch space for the per-fd callback.
+ * NOTE(review): assumes iterate_fd() performs its own fd-table
+ * locking under the caller-held task lock — confirm against fs/file.c.
+ */
static
-void lttng_enumerate_task_fd(struct lttng_session *session,
-		struct task_struct *p, char *tmp)
-{
-	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .p = p };
-	struct files_struct *files;
-
-	task_lock(p);
-	files = p->files;
-	if (!files)
-		goto end;
-	ctx.files = files;
-	lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
-end:
-	task_unlock(p);
-}
-
-static
-int lttng_enumerate_file_descriptors(struct lttng_session *session)
+void lttng_enumerate_files(struct lttng_session *session,
+		struct files_struct *files,
+		char *tmp)
{
-	struct task_struct *p;
-	char *tmp;
+	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };
-	tmp = (char *) __get_free_page(GFP_KERNEL);
-	if (!tmp)
-		return -ENOMEM;
-
-	/* Enumerate active file descriptors */
-	rcu_read_lock();
-	for_each_process(p)
-		lttng_enumerate_task_fd(session, p, tmp);
-	rcu_read_unlock();
-	free_page((unsigned long) tmp);
-	return 0;
+	iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
}
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
down_read(&mm->mmap_sem);
while (map) {
if (map->vm_file)
- ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
+ ino = map->vm_file->f_path.dentry->d_inode->i_ino;
else
ino = 0;
trace_lttng_statedump_vm_map(session, p, map, ino);
}
#endif
-#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
-
static
int lttng_list_interrupts(struct lttng_session *session)
{
unsigned long flags = 0;
struct irq_desc *desc;
-#define irq_to_desc wrapper_irq_to_desc
/* needs irq_desc */
for_each_irq_desc(irq, desc) {
struct irqaction *action;
local_irq_restore(flags);
}
return 0;
-#undef irq_to_desc
-}
-#else
-static inline
-int lttng_list_interrupts(struct lttng_session *session)
-{
- return 0;
}
-#endif
/*
* Statedump the task's namespaces using the proc filesystem inode number as
*/
pid_ns = task_active_pid_ns(p);
do {
- trace_lttng_statedump_process_state(session,
- p, type, mode, submode, status, pid_ns);
trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
- pid_ns = pid_ns->parent;
+ pid_ns = pid_ns ? pid_ns->parent : NULL;
} while (pid_ns);
user_ns = task_cred_xxx(p, user_ns);
do {
trace_lttng_statedump_process_user_ns(session, p, user_ns);
- user_ns = user_ns->lttng_user_ns_parent;
+ /*
+ * trace_lttng_statedump_process_user_ns() internally
+ * checks whether user_ns is NULL. While this does not
+ * appear to be a possible return value for
+ * task_cred_xxx(), err on the safe side and check
+ * for NULL here as well to be consistent with the
+ * paranoid behavior of
+ * trace_lttng_statedump_process_user_ns().
+ */
+ user_ns = user_ns ? user_ns->parent : NULL;
} while (user_ns);
/*
int lttng_enumerate_process_states(struct lttng_session *session)
{
struct task_struct *g, *p;
+ char *tmp;
+
+ tmp = (char *) __get_free_page(GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
rcu_read_lock();
for_each_process(g) {
+ struct files_struct *prev_files = NULL;
+
p = g;
do {
enum lttng_execution_mode mode =
LTTNG_UNKNOWN;
enum lttng_process_status status;
enum lttng_thread_type type;
+ struct files_struct *files;
task_lock(p);
if (p->exit_state == EXIT_ZOMBIE)
type = LTTNG_USER_THREAD;
else
type = LTTNG_KERNEL_THREAD;
+ files = p->files;
+
+ trace_lttng_statedump_process_state(session,
+ p, type, mode, submode, status, files);
lttng_statedump_process_ns(session,
p, type, mode, submode, status);
+ /*
+ * As an optimisation for the common case, do not
+ * repeat information for the same files_struct in
+ * two consecutive threads. This is the common case
+ * for threads sharing the same fd table. RCU guarantees
+ * that the same files_struct pointer is not re-used
+ * throughout processes/threads iteration.
+ */
+ if (files && files != prev_files) {
+ lttng_enumerate_files(session, files, tmp);
+ prev_files = files;
+ }
task_unlock(p);
} while_each_thread(g, p);
}
rcu_read_unlock();
+ free_page((unsigned long) tmp);
+
return 0;
}
trace_lttng_statedump_start(session);
ret = lttng_enumerate_process_states(session);
- if (ret)
- return ret;
- ret = lttng_enumerate_file_descriptors(session);
if (ret)
return ret;
/*
+/*
+ * Module init. Nothing left to set up: the tracepoint signature
+ * fixup wrapper call has been removed by this change.
+ */
static
int __init lttng_statedump_init(void)
{
-	/*
-	 * Allow module to load even if the fixup cannot be done. This
-	 * will allow seemless transition when the underlying issue fix
-	 * is merged into the Linux kernel, and when tracepoint.c
-	 * "tracepoint_module_notify" is turned into a static function.
-	 */
-	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	return 0;
}