ltt-relay-objs := ltt-events.o ltt-debugfs-abi.o \
ltt-probes.o ltt-core.o ltt-context.o \
lttng-context-pid.o lttng-context-comm.o \
- lttng-context-prio.o lttng-context-nice.o
+ lttng-context-prio.o lttng-context-nice.o \
+ wrapper/poll.o
ifneq ($(CONFIG_PERF_EVENTS),)
ltt-relay-objs += lttng-context-perf-counters.o
ringbuffer/ring_buffer_vfs.o \
ringbuffer/ring_buffer_splice.o \
ringbuffer/ring_buffer_mmap.o \
- prio_heap/lttng_prio_heap.o
+ prio_heap/lttng_prio_heap.o \
+ ../wrapper/poll.o \
+ ../wrapper/splice.o
return finalized;
}
+static inline
+int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
+{
+ return chan->finalized;
+}
+
+static inline
+int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
+{
+ return atomic_read(&chan->record_disabled);
+}
+
static inline
unsigned long lib_ring_buffer_get_read_data_size(
const struct lib_ring_buffer_config *config,
int cpu_hp_enable:1; /* Enable CPU hotplug notif. */
int hp_iter_enable:1; /* Enable hp iter notif. */
wait_queue_head_t read_wait; /* reader wait queue */
+ wait_queue_head_t hp_wait; /* CPU hotplug wait queue */
+ int finalized; /* Has channel been finalized */
struct channel_iter iter; /* Channel read-side iterator */
struct kref ref; /* Reference count */
};
case CPU_DOWN_FAILED_FROZEN:
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
+ wake_up_interruptible(&chan->hp_wait);
lib_ring_buffer_start_switch_timer(buf);
lib_ring_buffer_start_read_timer(buf);
return NOTIFY_OK;
chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
kref_init(&chan->ref);
init_waitqueue_head(&chan->read_wait);
+ init_waitqueue_head(&chan->hp_wait);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
ACCESS_ONCE(buf->finalized) = 1;
wake_up_interruptible(&buf->read_wait);
}
+ ACCESS_ONCE(chan->finalized) = 1;
+ wake_up_interruptible(&chan->hp_wait);
wake_up_interruptible(&chan->read_wait);
kref_put(&chan->ref, channel_release);
priv = chan->backend.priv;
struct lib_ring_buffer *buf = filp->private_data;
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = chan->backend.config;
- int finalized;
+ int finalized, disabled;
if (filp->f_mode & FMODE_READ) {
- poll_wait_set_exclusive(wait);
+ init_poll_funcptr(wait, wrapper_pollwait_exclusive);
poll_wait(filp, &buf->read_wait, wait);
finalized = lib_ring_buffer_is_finalized(config, buf);
+ disabled = lib_ring_buffer_channel_is_disabled(chan);
+
/*
* lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
* finalized load before offsets loads.
*/
WARN_ON(atomic_long_read(&buf->active_readers) != 1);
retry:
+ if (disabled)
+ return POLLERR;
+
if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
- subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
== 0) {
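The poll hunk above gives a stream reader three distinct outcomes: POLLERR when
the channel is disabled, POLLIN when a sub-buffer is ready, and POLLHUP once
the buffer is finalized and fully consumed. A minimal userspace sketch of a
consumer loop (assuming a stream fd obtained through the LTTng ABI;
read_subbuffer() is a hypothetical helper):

	#include <poll.h>

	void consume_stream(int stream_fd)
	{
		struct pollfd fds = { .fd = stream_fd, .events = POLLIN };

		for (;;) {
			if (poll(&fds, 1, -1) < 0)
				break;
			if (fds.revents & POLLERR)
				break; /* channel disabled */
			if (fds.revents & POLLIN)
				read_subbuffer(stream_fd); /* hypothetical */
			if (fds.revents & POLLHUP)
				break; /* finalized, nothing left to read */
		}
	}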
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = chan->backend.config;
+ if (lib_ring_buffer_channel_is_disabled(chan))
+ return -EIO;
+
switch (cmd) {
case RING_BUFFER_SNAPSHOT:
return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = chan->backend.config;
+ if (lib_ring_buffer_channel_is_disabled(chan))
+ return -EIO;
+
switch (cmd) {
case RING_BUFFER_SNAPSHOT:
return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
}
EXPORT_SYMBOL_GPL(lttng_append_context);
+/*
+ * Remove last context field. Only the most recently appended field can be
+ * removed; this is meant for error paths right after lttng_append_context().
+ */
+void lttng_remove_context_field(struct lttng_ctx **ctx_p,
+ struct lttng_ctx_field *field)
+{
+ struct lttng_ctx *ctx;
+
+ ctx = *ctx_p;
+ ctx->nr_fields--;
+ WARN_ON_ONCE(&ctx->fields[ctx->nr_fields] != field);
+ memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
+}
+EXPORT_SYMBOL_GPL(lttng_remove_context_field);
+
void lttng_destroy_context(struct lttng_ctx *ctx)
{
int i;
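Note that lttng_remove_context_field() above assumes the field being removed is
the one most recently returned by lttng_append_context(). A sketch of the
intended pairing in an error path (setup_field() is a hypothetical helper):

	struct lttng_ctx_field *field;

	field = lttng_append_context(ctx);
	if (!field)
		return -ENOMEM;
	if (setup_field(field)) { /* hypothetical per-field setup */
		lttng_remove_context_field(ctx, field);
		return -EINVAL;
	}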
#include <linux/slab.h>
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "wrapper/ringbuffer/vfs.h"
+#include "wrapper/poll.h"
#include "ltt-debugfs-abi.h"
#include "ltt-events.h"
#include "ltt-tracer.h"
}
}
-/* TODO: poll */
-#if 0
/**
* lttng_channel_poll - lttng stream addition/removal monitoring
*
unsigned int mask = 0;
if (file->f_mode & FMODE_READ) {
- poll_wait_set_exclusive(wait);
- poll_wait(file, &channel->notify_wait, wait);
+ init_poll_funcptr(wait, wrapper_pollwait_exclusive);
+ poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
+ wait);
- /* TODO: identify when the channel is being finalized. */
- if (finalized)
+ if (channel->ops->is_disabled(channel->chan))
+ return POLLERR;
+ if (channel->ops->is_finalized(channel->chan))
return POLLHUP;
else
return POLLIN | POLLRDNORM;
return mask;
}
-#endif //0
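With the hotplug wait queue exposed through get_hp_wait_queue(), a control
thread can monitor the channel file descriptor itself: CPU_ONLINE wakes
hp_wait, poll() returns POLLIN, and userspace re-scans the per-cpu streams. A
hedged sketch (how new stream fds are fetched is ABI-specific and omitted):

	#include <poll.h>

	void monitor_channel(int channel_fd)
	{
		struct pollfd fds = { .fd = channel_fd, .events = POLLIN };

		for (;;) {
			if (poll(&fds, 1, -1) < 0)
				break;
			if (fds.revents & POLLERR)
				break; /* channel disabled */
			if (fds.revents & POLLHUP)
				break; /* channel finalized */
			/* POLLIN: stream set may have changed; re-scan here */
		}
	}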
static
int lttng_channel_release(struct inode *inode, struct file *file)
static const struct file_operations lttng_channel_fops = {
.release = lttng_channel_release,
-/* TODO */
-#if 0
.poll = lttng_channel_poll,
-#endif //0
.unlocked_ioctl = lttng_channel_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = lttng_channel_ioctl,
if (!chan)
goto nomem;
chan->session = session;
- init_waitqueue_head(&chan->notify_wait);
chan->id = session->free_chan_id++;
/*
* Note: the channel creation op already writes into the packet
* we need to bail out after timeout or being
* interrupted.
*/
- waitret = wait_event_interruptible_timeout(*chan->ops->get_reader_wait_queue(chan),
+ waitret = wait_event_interruptible_timeout(*chan->ops->get_reader_wait_queue(chan->chan),
({
ret = chan->ops->event_reserve(&ctx, 0);
ret != -ENOBUFS || !ret;
union {
struct {
struct perf_event **e; /* per-cpu array */
- struct list_head head;
+ struct notifier_block nb;
+ int hp_enable;
struct perf_event_attr *attr;
} perf_counter;
} u;
* may change due to concurrent writes.
*/
size_t (*packet_avail_size)(struct channel *chan);
- wait_queue_head_t *(*get_reader_wait_queue)(struct ltt_channel *chan);
+ wait_queue_head_t *(*get_reader_wait_queue)(struct channel *chan);
+ wait_queue_head_t *(*get_hp_wait_queue)(struct channel *chan);
+ int (*is_finalized)(struct channel *chan);
+ int (*is_disabled)(struct channel *chan);
};
struct ltt_channel {
struct file *file; /* File associated to channel */
unsigned int free_event_id; /* Next event ID to allocate */
struct list_head list; /* Channel list */
- wait_queue_head_t notify_wait; /* Channel addition notif. waitqueue */
struct ltt_channel_ops *ops;
int header_type; /* 0: unset, 1: compact, 2: large */
int metadata_dumped:1;
int ltt_probes_init(void);
void ltt_probes_exit(void);
struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx);
+void lttng_remove_context_field(struct lttng_ctx **ctx,
+ struct lttng_ctx_field *field);
void lttng_destroy_context(struct lttng_ctx *ctx);
int lttng_add_pid_to_ctx(struct lttng_ctx **ctx);
int lttng_add_comm_to_ctx(struct lttng_ctx **ctx);
}
static
-wait_queue_head_t *ltt_get_reader_wait_queue(struct ltt_channel *chan)
+wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
{
- return &chan->chan->read_wait;
+ return &chan->read_wait;
+}
+
+static
+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
+{
+ return &chan->hp_wait;
+}
+
+static
+int ltt_is_finalized(struct channel *chan)
+{
+ return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int ltt_is_disabled(struct channel *chan)
+{
+ return lib_ring_buffer_channel_is_disabled(chan);
}
static struct ltt_transport ltt_relay_transport = {
.event_write = ltt_event_write,
.packet_avail_size = NULL, /* Would be racy anyway */
.get_reader_wait_queue = ltt_get_reader_wait_queue,
+ .get_hp_wait_queue = ltt_get_hp_wait_queue,
+ .is_finalized = ltt_is_finalized,
+ .is_disabled = ltt_is_disabled,
},
};
}
static
-wait_queue_head_t *ltt_get_reader_wait_queue(struct ltt_channel *chan)
+wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
{
- return &chan->chan->read_wait;
+ return &chan->read_wait;
+}
+
+static
+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
+{
+ return &chan->hp_wait;
+}
+
+static
+int ltt_is_finalized(struct channel *chan)
+{
+ return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int ltt_is_disabled(struct channel *chan)
+{
+ return lib_ring_buffer_channel_is_disabled(chan);
}
static struct ltt_transport ltt_relay_transport = {
.event_write = ltt_event_write,
.packet_avail_size = ltt_packet_avail_size,
.get_reader_wait_queue = ltt_get_reader_wait_queue,
+ .get_hp_wait_queue = ltt_get_hp_wait_queue,
+ .is_finalized = ltt_is_finalized,
+ .is_disabled = ltt_is_disabled,
},
};
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"
-/*
- * TODO: Add CPU hotplug support.
- */
-
-static DEFINE_MUTEX(perf_counter_mutex);
-static LIST_HEAD(perf_counter_contexts);
-
static
size_t perf_counter_get_size(size_t offset)
{
uint64_t value;
event = field->u.perf_counter.e[ctx->cpu];
- event->pmu->read(event);
- value = local64_read(&event->count);
+ if (likely(event)) {
+ event->pmu->read(event);
+ value = local64_read(&event->count);
+ } else {
+ /*
+		 * Perf does not support enabling a counter before its CPU is
+		 * brought up, so events (e.g. scheduler events) can arrive
+		 * before the counter is set up. Write an arbitrary 0 in that
+		 * case.
+ */
+ value = 0;
+ }
lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
chan->ops->event_write(ctx, &value, sizeof(value));
}
struct perf_event **events = field->u.perf_counter.e;
int cpu;
- mutex_lock(&perf_counter_mutex);
- list_del(&field->u.perf_counter.head);
+ get_online_cpus();
for_each_online_cpu(cpu)
perf_event_release_kernel(events[cpu]);
- mutex_unlock(&perf_counter_mutex);
+ put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_cpu_notifier(&field->u.perf_counter.nb);
+#endif
kfree(field->event_field.name);
kfree(field->u.perf_counter.attr);
kfree(events);
}
+#ifdef CONFIG_HOTPLUG_CPU
+
+/**
+ * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
+ * @nb: notifier block
+ * @action: hotplug action to take
+ * @hcpu: CPU number
+ *
+ * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
+ *
+ * Perf counters can only be set up once the CPU is online (CPU_UP_PREPARE
+ * appears to be too early).
+ */
+static
+int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
+ unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long) hcpu;
+ struct lttng_ctx_field *field =
+ container_of(nb, struct lttng_ctx_field, u.perf_counter.nb);
+ struct perf_event **events = field->u.perf_counter.e;
+ struct perf_event_attr *attr = field->u.perf_counter.attr;
+ struct perf_event *pevent;
+
+ if (!field->u.perf_counter.hp_enable)
+ return NOTIFY_OK;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ pevent = perf_event_create_kernel_counter(attr,
+ cpu, NULL, overflow_callback);
+		if (!pevent || IS_ERR(pevent))
+			return NOTIFY_BAD;
+ barrier(); /* Create perf counter before setting event */
+ events[cpu] = pevent;
+ break;
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ pevent = events[cpu];
+ events[cpu] = NULL;
+ barrier(); /* NULLify event before perf counter teardown */
+ perf_event_release_kernel(pevent);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+#endif
+
int lttng_add_perf_counter_to_ctx(uint32_t type,
uint64_t config,
const char *name,
attr->pinned = 1;
attr->disabled = 0;
- mutex_lock(&perf_counter_mutex);
-
- for_each_online_cpu(cpu) {
- events[cpu] = perf_event_create_kernel_counter(attr,
- cpu, NULL, overflow_callback);
- if (!events[cpu]) {
- ret = -EINVAL;
- goto name_alloc_error;
- }
- }
-
name_alloc = kstrdup(name, GFP_KERNEL);
if (!name_alloc) {
ret = -ENOMEM;
field = lttng_append_context(ctx);
if (!field) {
ret = -ENOMEM;
- goto error;
+ goto append_context_error;
+ }
+
+#ifdef CONFIG_HOTPLUG_CPU
+ field->u.perf_counter.nb.notifier_call =
+ lttng_perf_counter_cpu_hp_callback;
+ field->u.perf_counter.nb.priority = 0;
+ register_cpu_notifier(&field->u.perf_counter.nb);
+#endif
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ events[cpu] = perf_event_create_kernel_counter(attr,
+ cpu, NULL, overflow_callback);
+		if (!events[cpu] || IS_ERR(events[cpu])) {
+ ret = -EINVAL;
+ goto counter_error;
+ }
}
+ put_online_cpus();
+
field->destroy = lttng_destroy_perf_counter_field;
field->event_field.name = name_alloc;
field->record = perf_counter_record;
field->u.perf_counter.e = events;
field->u.perf_counter.attr = attr;
-
- list_add(&field->u.perf_counter.head, &perf_counter_contexts);
- mutex_unlock(&perf_counter_mutex);
+ field->u.perf_counter.hp_enable = 1;
wrapper_vmalloc_sync_all();
return 0;
-error:
- kfree(name_alloc);
-name_alloc_error:
+counter_error:
for_each_online_cpu(cpu) {
-		if (events[cpu])
+		if (events[cpu] && !IS_ERR(events[cpu]))
perf_event_release_kernel(events[cpu]);
}
- mutex_unlock(&perf_counter_mutex);
+ put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_cpu_notifier(&field->u.perf_counter.nb);
+#endif
+ lttng_remove_context_field(ctx, field);
+append_context_error:
+ kfree(name_alloc);
+name_alloc_error:
kfree(attr);
error_attr:
kfree(events);
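For reference, a hedged sketch of how a caller attaches a counter to a channel
context (in the real ABI this is driven by an ioctl on the channel fd; the
chan->ctx member is an assumption here):

	#include <linux/perf_event.h>

	/* sketch: add a CPU-cycles counter to a channel's context */
	static int add_cycles_context(struct ltt_channel *chan)
	{
		return lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
				PERF_COUNT_HW_CPU_CYCLES,
				"perf_cpu_cycles", &chan->ctx);
	}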
--- /dev/null
+/*
+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
+ *
+ * Exclusive variant of fs/select.c's __pollwait(). Uses KALLSYMS to look up
+ * the static helpers it needs (poll_get_entry, pollwake) when available;
+ * otherwise the kernel must export an exclusive pollwait to GPL modules.
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+
+#ifdef CONFIG_KALLSYMS
+
+#include <linux/kallsyms.h>
+#include <linux/poll.h>
+
+static
+struct poll_table_entry *(*poll_get_entry_sym)(struct poll_wqueues *p);
+static
+int (*pollwake_sym)(wait_queue_t *wait, unsigned mode, int sync, void *key);
+
+/*
+ * Mirrors __pollwait() in fs/select.c, except that the wait queue entry is
+ * added with add_wait_queue_exclusive(), so a wakeup wakes a single reader
+ * thread instead of all of them. poll_get_entry() and pollwake() are static
+ * in fs/select.c, hence the kallsyms lookups.
+ */
+void wrapper_pollwait_exclusive(struct file *filp,
+ wait_queue_head_t *wait_address,
+ poll_table *p)
+{
+ struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
+ struct poll_table_entry *entry;
+
+ if (!poll_get_entry_sym)
+ poll_get_entry_sym = (void *) kallsyms_lookup_name("poll_get_entry");
+ if (!poll_get_entry_sym) {
+ printk(KERN_WARNING "LTTng: poll_get_entry symbol lookup failed.\n");
+ return;
+ }
+ if (!pollwake_sym)
+ pollwake_sym = (void *) kallsyms_lookup_name("pollwake");
+ if (!pollwake_sym) {
+ printk(KERN_WARNING "LTTng: pollwake symbol lookup failed.\n");
+ return;
+ }
+ entry = poll_get_entry_sym(pwq);
+ if (!entry)
+ return;
+ get_file(filp);
+ entry->filp = filp;
+ entry->wait_address = wait_address;
+ entry->key = p->key;
+ init_waitqueue_func_entry(&entry->wait, pollwake_sym);
+ entry->wait.private = pwq;
+ add_wait_queue_exclusive(wait_address, &entry->wait);
+}
+
+#else
+
+#include <linux/poll.h>
+
+/*
+ * Without KALLSYMS, we need a kernel patched to export an exclusive pollwait
+ * (pollwait_exclusive) to GPL modules.
+ */
+void wrapper_pollwait_exclusive(struct file *filp,
+ wait_queue_head_t *wait_address,
+ poll_table *p)
+{
+ pollwait_exclusive(filp, wait_address, p);
+}
+
+#endif
+#ifndef _LTTNG_WRAPPER_POLL_H
+#define _LTTNG_WRAPPER_POLL_H
+
/*
- * wrapper/poll.h
+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
*
- * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * wrapper around poll's exclusive pollwait. Uses KALLSYMS to look up the
+ * static helpers it needs when available; otherwise the kernel must export an
+ * exclusive pollwait to GPL modules.
*
* Dual LGPL v2.1/GPL v2 license.
*/
-#ifndef CONFIG_LIB_RING_BUFFER
#include <linux/poll.h>
-#warning "poll_wait_set_exclusive() is defined as no-op. Will increase LTTng overhead. Please consider using the LTTng kernel tree for better results."
-
-/*
- * Will cause higher overhead when signalling all possible reader threads when a
- * buffer is ready to be consumed.
- */
-#define poll_wait_set_exclusive(poll_table)
+void wrapper_pollwait_exclusive(struct file *filp,
+ wait_queue_head_t *wait_address,
+ poll_table *p);
-#endif
+#endif /* _LTTNG_WRAPPER_POLL_H */
--- /dev/null
+/*
+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
+ *
+ * wrapper around splice_to_pipe. Uses KALLSYMS to get its address when
+ * available; otherwise the kernel must export this function to GPL modules.
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+
+#ifdef CONFIG_KALLSYMS
+
+#include <linux/kallsyms.h>
+#include <linux/fs.h>
+#include <linux/splice.h>
+
+static
+ssize_t (*splice_to_pipe_sym)(struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+
+ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd)
+{
+ if (!splice_to_pipe_sym)
+ splice_to_pipe_sym = (void *) kallsyms_lookup_name("splice_to_pipe");
+ if (splice_to_pipe_sym) {
+ return splice_to_pipe_sym(pipe, spd);
+ } else {
+ printk(KERN_WARNING "LTTng: splice_to_pipe symbol lookup failed.\n");
+ return -ENOSYS;
+ }
+}
+
+#else
+
+#include <linux/fs.h>
+#include <linux/splice.h>
+
+ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd)
+{
+ return splice_to_pipe(pipe, spd);
+}
+
+#endif
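A caller-side sketch of how a splice_read implementation hands prepared pages
to the wrapper (the page/partial arrays and pipe_buf_operations are assumed to
be filled in by the caller, which also holds a reference on each page):

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/splice.h>

	#include "wrapper/splice.h"

	/* release references on pages splice_to_pipe could not consume */
	static void demo_page_release(struct splice_pipe_desc *spd,
			unsigned int i)
	{
		put_page(spd->pages[i]);
	}

	/* sketch: forward prepared pages to a pipe via the wrapper */
	static ssize_t demo_splice_read(struct page **pages,
			struct partial_page *partial, int nr_pages,
			unsigned int flags, struct pipe_inode_info *pipe,
			const struct pipe_buf_operations *buf_ops)
	{
		struct splice_pipe_desc spd = {
			.pages = pages,
			.partial = partial,
			.nr_pages = nr_pages,
			.flags = flags,
			.ops = buf_ops,
			.spd_release = demo_page_release,
		};

		return wrapper_splice_to_pipe(pipe, &spd);
	}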
* Dual LGPL v2.1/GPL v2 license.
*/
-#ifdef CONFIG_KALLSYMS
-
-#include <linux/kallsyms.h>
-#include <linux/splice.h>
-
-static inline
-ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
- struct splice_pipe_desc *spd)
-{
- ssize_t (*splice_to_pipe_sym)(struct pipe_inode_info *pipe,
- struct splice_pipe_desc *spd);
-
- splice_to_pipe_sym = (void *) kallsyms_lookup_name("splice_to_pipe");
- if (splice_to_pipe_sym) {
- return splice_to_pipe_sym(pipe, spd);
- } else {
- printk(KERN_WARNING "LTTng: splice_to_pipe symbol lookup failed.\n");
- return -ENOSYS;
- }
-}
-#else
-
#include <linux/splice.h>
-static inline
ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
- struct splice_pipe_desc *spd)
-{
- return splice_to_pipe(pipe, spd);
-}
-#endif
+ struct splice_pipe_desc *spd);
#endif /* _LTT_WRAPPER_SPLICE_H */