#include "wrapper/irq.h"
#include "wrapper/tracepoint.h"
#include "wrapper/genhd.h"
+#include "wrapper/file.h"
+#include "wrapper/time.h"
#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
#include <linux/irq.h>
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
#define TRACE_INCLUDE_FILE lttng-statedump
+#define LTTNG_INSTRUMENTATION
#include "instrumentation/events/lttng-module/lttng-statedump.h"
DEFINE_TRACE(lttng_statedump_block_device);
char *page;
struct lttng_session *session;
struct task_struct *p;
- struct fdtable *fdt;
+ struct files_struct *files;
};
/*
const struct lttng_fd_ctx *ctx = p;
const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
unsigned int flags = file->f_flags;
+ struct fdtable *fdt;
/*
* We don't expose kernel internal flags, only userspace-visible
* flags.
*/
flags &= ~FMODE_NONOTIFY;
- if (test_bit(fd, ctx->fdt->close_on_exec))
+ fdt = files_fdtable(ctx->files);
+ /*
+	 * We need to check again here whether fd is within the fdt
+	 * max_fds range, because we might be looking at a different
+	 * fdtable than the one iterate_fd() used, if only RCU is
+	 * protecting the read. In reality, iterate_fd() holds
+	 * file_lock, which should ensure the fdt does not change while
+	 * the lock is taken, but we are not aware whether this is
+	 * guaranteed or not, so play safe.
+ */
+ if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
flags |= O_CLOEXEC;
if (IS_ERR(s)) {
struct dentry *dentry = file->f_path.dentry;
struct task_struct *p, char *tmp)
{
struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .p = p };
+ struct files_struct *files;
task_lock(p);
- ctx.fdt = files_fdtable(p->files);
- lttng_iterate_fd(p->files, 0, lttng_dump_one_fd, &ctx);
+ files = p->files;
+ if (!files)
+ goto end;
+ ctx.files = files;
+ lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
+end:
task_unlock(p);
}
down_read(&mm->mmap_sem);
while (map) {
if (map->vm_file)
- ino = map->vm_file->f_dentry->d_inode->i_ino;
+ ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
else
ino = 0;
trace_lttng_statedump_vm_map(session, p, map, ino);
}
#endif
+/*
+ * Called with task lock held.
+ */
static
void lttng_statedump_process_ns(struct lttng_session *session,
struct task_struct *p,
struct nsproxy *proxy;
struct pid_namespace *pid_ns;
+ /*
+ * Back and forth on locking strategy within Linux upstream for nsproxy.
+ * See Linux upstream commit 728dba3a39c66b3d8ac889ddbe38b5b1c264aec3
+ * "namespaces: Use task_lock and not rcu to protect nsproxy"
+ * for details.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
+ LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
+ LTTNG_UBUNTU_KERNEL_RANGE(3,16,0,11, 3,17,0,0))
+ proxy = p->nsproxy;
+#else
rcu_read_lock();
proxy = task_nsproxy(p);
+#endif
if (proxy) {
pid_ns = lttng_get_proxy_pid_ns(proxy);
do {
trace_lttng_statedump_process_state(session,
p, type, mode, submode, status, NULL);
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
+ LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
+ LTTNG_UBUNTU_KERNEL_RANGE(3,16,0,11, 3,17,0,0))
+ /* (nothing) */
+#else
rcu_read_unlock();
+#endif
}
static