struct lttng_kernel_event *event_param)
{
struct lttng_channel *channel = channel_file->private_data;
- struct lttng_event *event;
int event_fd, ret;
struct file *event_file;
+ void *priv;
event_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
switch (event_param->instrumentation) {
ret = PTR_ERR(event_file);
goto file_error;
}
- /*
- * We tolerate no failure path after event creation. It
- * will stay invariant for the rest of the session.
- */
- event = lttng_event_create(channel, event_param, NULL, NULL);
- WARN_ON_ONCE(!event);
- if (IS_ERR(event)) {
- ret = PTR_ERR(event);
- goto event_error;
+ if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT) {
+ struct lttng_enabler *enabler;
+
+ if (event_param->name[strlen(event_param->name) - 1] == '*') {
+ enabler = lttng_enabler_create(LTTNG_ENABLER_WILDCARD,
+ event_param, channel);
+ } else {
+ enabler = lttng_enabler_create(LTTNG_ENABLER_NAME,
+ event_param, channel);
+ }
+ priv = enabler;
+ } else {
+ struct lttng_event *event;
+
+ /*
+ * We tolerate no failure path after event creation. It
+ * will stay invariant for the rest of the session.
+ */
+ event = lttng_event_create(channel, event_param,
+ NULL, NULL,
+ event_param->instrumentation);
+ WARN_ON_ONCE(!event);
+ if (IS_ERR(event)) {
+ ret = PTR_ERR(event);
+ goto event_error;
+ }
+ priv = event;
}
- event_file->private_data = event;
+ event_file->private_data = priv;
fd_install(event_fd, event_file);
/* The event holds a reference on the channel */
atomic_long_inc(&channel_file->f_count);
event_fd = 0;
if (event_param->u.syscall.enable) {
ret = lttng_syscall_filter_enable(channel,
- event_param->name[0] == '\0' ?
+ !strcmp(event_param->name, "*") ?
NULL : event_param->name);
if (ret)
goto fd_error;
} else {
ret = lttng_syscall_filter_disable(channel,
- event_param->name[0] == '\0' ?
+ !strcmp(event_param->name, "*") ?
NULL : event_param->name);
if (ret)
goto fd_error;
static
long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct lttng_event *event = file->private_data;
+ struct lttng_event *event;
+ struct lttng_enabler *enabler;
+ enum lttng_event_type *evtype = file->private_data;
switch (cmd) {
case LTTNG_KERNEL_OLD_CONTEXT:
{
- struct lttng_kernel_context *ucontext_param;
- struct lttng_kernel_old_context *old_ucontext_param;
- int ret;
-
- ucontext_param = kmalloc(sizeof(struct lttng_kernel_context),
- GFP_KERNEL);
- if (!ucontext_param) {
- ret = -ENOMEM;
- goto old_ctx_end;
- }
- old_ucontext_param = kmalloc(sizeof(struct lttng_kernel_old_context),
- GFP_KERNEL);
- if (!old_ucontext_param) {
- ret = -ENOMEM;
- goto old_ctx_error_free_param;
- }
-
- if (copy_from_user(old_ucontext_param,
- (struct lttng_kernel_old_context __user *) arg,
- sizeof(struct lttng_kernel_old_context))) {
- ret = -EFAULT;
- goto old_ctx_error_free_old_param;
- }
- ucontext_param->ctx = old_ucontext_param->ctx;
- memcpy(ucontext_param->padding, old_ucontext_param->padding,
- sizeof(ucontext_param->padding));
- /* only type that uses the union */
- if (old_ucontext_param->ctx == LTTNG_KERNEL_CONTEXT_PERF_COUNTER) {
- ucontext_param->u.perf_counter.type =
- old_ucontext_param->u.perf_counter.type;
- ucontext_param->u.perf_counter.config =
- old_ucontext_param->u.perf_counter.config;
- memcpy(ucontext_param->u.perf_counter.name,
- old_ucontext_param->u.perf_counter.name,
- sizeof(ucontext_param->u.perf_counter.name));
- }
-
- ret = lttng_abi_add_context(file,
- ucontext_param,
- &event->ctx, event->chan->session);
-
-old_ctx_error_free_old_param:
- kfree(old_ucontext_param);
-old_ctx_error_free_param:
- kfree(ucontext_param);
-old_ctx_end:
- return ret;
+ /* Not implemented */
+ return -ENOSYS;
}
case LTTNG_KERNEL_CONTEXT:
{
- struct lttng_kernel_context ucontext_param;
-
- if (copy_from_user(&ucontext_param,
- (struct lttng_kernel_context __user *) arg,
- sizeof(ucontext_param)))
- return -EFAULT;
- return lttng_abi_add_context(file,
- &ucontext_param,
- &event->ctx, event->chan->session);
+ /* Not implemented */
+ return -ENOSYS;
}
case LTTNG_KERNEL_OLD_ENABLE:
case LTTNG_KERNEL_ENABLE:
- return lttng_event_enable(event);
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ event = file->private_data;
+ return lttng_event_enable(event);
+ case LTTNG_TYPE_ENABLER:
+ enabler = file->private_data;
+ return lttng_enabler_enable(enabler);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
case LTTNG_KERNEL_OLD_DISABLE:
case LTTNG_KERNEL_DISABLE:
- return lttng_event_disable(event);
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ event = file->private_data;
+ return lttng_event_disable(event);
+ case LTTNG_TYPE_ENABLER:
+ enabler = file->private_data;
+ return lttng_enabler_disable(enabler);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
default:
return -ENOIOCTLCMD;
}
+/*
+ * fops release for an event/enabler file descriptor.
+ * file->private_data holds either a struct lttng_event or a struct
+ * lttng_enabler; the code relies on the enum lttng_event_type evtype
+ * field being the first member of both structures so a plain pointer
+ * cast can discriminate them. Releases the reference taken on the
+ * owning channel file at creation time.
+ */
static
int lttng_event_release(struct inode *inode, struct file *file)
{
- struct lttng_event *event = file->private_data;
+ struct lttng_event *event;
+ struct lttng_enabler *enabler;
+ enum lttng_event_type *evtype = file->private_data;
+
+ /* private_data may be NULL if setup failed before fd_install. */
+ if (!evtype)
+ return 0;
+
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ event = file->private_data;
+ if (event)
+ fput(event->chan->file);
+ break;
+ case LTTNG_TYPE_ENABLER:
+ enabler = file->private_data;
+ if (enabler)
+ fput(enabler->chan->file);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
- if (event)
- fput(event->chan->file);
return 0;
}
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include "wrapper/file.h"
+#include <linux/jhash.h>
+
#include "wrapper/uuid.h"
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "wrapper/random.h"
static DEFINE_MUTEX(sessions_mutex);
static struct kmem_cache *event_cache;
+static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
+static void lttng_session_sync_enablers(struct lttng_session *session);
+static void lttng_enabler_destroy(struct lttng_enabler *enabler);
+
static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
+/*
+ * Take the global sessions_mutex, which now also protects the probe
+ * lists and enabler state.
+ */
+void lttng_lock_sessions(void)
+{
+ mutex_lock(&sessions_mutex);
+}
+
+/* Release the global sessions_mutex. */
+void lttng_unlock_sessions(void)
+{
+ mutex_unlock(&sessions_mutex);
+}
+
+/*
+ * Called with sessions lock held.
+ * Return 1 if at least one session in the global list is active,
+ * 0 otherwise.
+ */
+int lttng_session_active(void)
+{
+ struct lttng_session *iter;
+
+ list_for_each_entry(iter, &sessions, list) {
+ if (iter->active)
+ return 1;
+ }
+ return 0;
+}
+
struct lttng_session *lttng_session_create(void)
{
struct lttng_session *session;
struct lttng_metadata_cache *metadata_cache;
+ int i;
mutex_lock(&sessions_mutex);
session = kzalloc(sizeof(struct lttng_session), GFP_KERNEL);
INIT_LIST_HEAD(&metadata_cache->metadata_stream);
memcpy(&metadata_cache->uuid, &session->uuid,
sizeof(metadata_cache->uuid));
+ INIT_LIST_HEAD(&session->enablers_head);
+ for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
+ INIT_HLIST_HEAD(&session->events_ht.table[i]);
list_add(&session->list, &sessions);
mutex_unlock(&sessions_mutex);
return session;
struct lttng_channel *chan, *tmpchan;
struct lttng_event *event, *tmpevent;
struct lttng_metadata_stream *metadata_stream;
+ struct lttng_enabler *enabler, *tmpenabler;
int ret;
mutex_lock(&sessions_mutex);
WARN_ON(ret);
}
synchronize_trace(); /* Wait for in-flight events to complete */
+ list_for_each_entry_safe(enabler, tmpenabler,
+ &session->enablers_head, node)
+ lttng_enabler_destroy(enabler);
list_for_each_entry_safe(event, tmpevent, &session->events, list)
_lttng_event_destroy(event);
list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
goto end;
}
+ /* Set transient enabler state to "enabled" */
+ session->tstate = 1;
+
/*
* Snapshot the number of events per channel to know the type of header
* we need to use.
chan->header_type = 2; /* large */
}
+ /* We need to sync enablers with session before activation. */
+ lttng_session_sync_enablers(session);
+
ACCESS_ONCE(session->active) = 1;
ACCESS_ONCE(session->been_active) = 1;
ret = _lttng_session_metadata_statedump(session);
goto end;
}
ACCESS_ONCE(session->active) = 0;
+
+ /* Set transient enabler state to "disabled" */
+ session->tstate = 0;
+ lttng_session_sync_enablers(session);
end:
mutex_unlock(&sessions_mutex);
return ret;
+/*
+ * Enable a channel. Refused for metadata channels (-EPERM) and when
+ * already enabled (-EEXIST). Sets the channel transient state and
+ * syncs enablers BEFORE flipping the enabled flag, so events are in a
+ * consistent state when the channel becomes visible as enabled.
+ */
int lttng_channel_enable(struct lttng_channel *channel)
{
- int old;
+ int ret = 0;
- if (channel->channel_type == METADATA_CHANNEL)
- return -EPERM;
- old = xchg(&channel->enabled, 1);
- if (old)
- return -EEXIST;
- return 0;
+ mutex_lock(&sessions_mutex);
+ if (channel->channel_type == METADATA_CHANNEL) {
+ ret = -EPERM;
+ goto end;
+ }
+ if (channel->enabled) {
+ ret = -EEXIST;
+ goto end;
+ }
+ /* Set transient enabler state to "enabled" */
+ channel->tstate = 1;
+ lttng_session_sync_enablers(channel->session);
+ /* Set atomically the state to "enabled" */
+ ACCESS_ONCE(channel->enabled) = 1;
+end:
+ mutex_unlock(&sessions_mutex);
+ return ret;
}
+/*
+ * Disable a channel. Mirror of lttng_channel_enable(): the enabled
+ * flag is cleared FIRST, then the transient state is cleared and
+ * enablers are synced.
+ */
int lttng_channel_disable(struct lttng_channel *channel)
{
- int old;
+ int ret = 0;
- if (channel->channel_type == METADATA_CHANNEL)
- return -EPERM;
- old = xchg(&channel->enabled, 0);
- if (!old)
- return -EEXIST;
- return 0;
+ mutex_lock(&sessions_mutex);
+ if (channel->channel_type == METADATA_CHANNEL) {
+ ret = -EPERM;
+ goto end;
+ }
+ if (!channel->enabled) {
+ ret = -EEXIST;
+ goto end;
+ }
+ /* Set atomically the state to "disabled" */
+ ACCESS_ONCE(channel->enabled) = 0;
+ /* Set transient enabler state to "disabled" */
+ channel->tstate = 0;
+ lttng_session_sync_enablers(channel->session);
+end:
+ mutex_unlock(&sessions_mutex);
+ return ret;
}
+/*
+ * Enable a single event. Refused for metadata-channel events (-EPERM)
+ * and when already enabled (-EEXIST). Now serialized by the sessions
+ * mutex instead of using xchg(), and followed by an enabler sync.
+ */
int lttng_event_enable(struct lttng_event *event)
{
- int old;
+ int ret = 0;
- if (event->chan->channel_type == METADATA_CHANNEL)
- return -EPERM;
- old = xchg(&event->enabled, 1);
- if (old)
- return -EEXIST;
- return 0;
+ mutex_lock(&sessions_mutex);
+ if (event->chan->channel_type == METADATA_CHANNEL) {
+ ret = -EPERM;
+ goto end;
+ }
+ if (event->enabled) {
+ ret = -EEXIST;
+ goto end;
+ }
+ ACCESS_ONCE(event->enabled) = 1;
+ lttng_session_sync_enablers(event->chan->session);
+end:
+ mutex_unlock(&sessions_mutex);
+ return ret;
}
+/*
+ * Disable a single event. Mirror of lttng_event_enable(); held under
+ * the sessions mutex and followed by an enabler sync.
+ */
int lttng_event_disable(struct lttng_event *event)
{
- int old;
+ int ret = 0;
- if (event->chan->channel_type == METADATA_CHANNEL)
- return -EPERM;
- old = xchg(&event->enabled, 0);
- if (!old)
- return -EEXIST;
- return 0;
+ mutex_lock(&sessions_mutex);
+ if (event->chan->channel_type == METADATA_CHANNEL) {
+ ret = -EPERM;
+ goto end;
+ }
+ if (!event->enabled) {
+ ret = -EEXIST;
+ goto end;
+ }
+ ACCESS_ONCE(event->enabled) = 0;
+ lttng_session_sync_enablers(event->chan->session);
+end:
+ mutex_unlock(&sessions_mutex);
+ return ret;
}
static struct lttng_transport *lttng_transport_find(const char *name)
switch_timer_interval, read_timer_interval);
if (!chan->chan)
goto create_error;
+ chan->tstate = 1;
chan->enabled = 1;
chan->transport = transport;
chan->channel_type = channel_type;
/*
* Supports event creation while tracing session is active.
+ * Needs to be called with sessions mutex held.
*/
-struct lttng_event *lttng_event_create(struct lttng_channel *chan,
- struct lttng_kernel_event *event_param,
- void *filter,
- const struct lttng_event_desc *internal_desc)
+struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
+ struct lttng_kernel_event *event_param,
+ void *filter,
+ const struct lttng_event_desc *event_desc,
+ enum lttng_kernel_instrumentation itype)
{
+ struct lttng_session *session = chan->session;
struct lttng_event *event;
+ const char *event_name;
+ struct hlist_head *head;
+ size_t name_len;
+ uint32_t hash;
int ret;
- mutex_lock(&sessions_mutex);
if (chan->free_event_id == -1U) {
ret = -EMFILE;
goto full;
}
- /*
- * This is O(n^2) (for each event, the loop is called at event
- * creation). Might require a hash if we have lots of events.
- */
- list_for_each_entry(event, &chan->session->events, list) {
- if (!strcmp(event->desc->name, event_param->name)) {
- /*
- * Allow events with the same name to appear in
- * different channels.
- */
- if (event->chan == chan) {
- ret = -EEXIST;
- goto exist;
- }
+
+ switch (itype) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ event_name = event_desc->name;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ event_name = event_param->name;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ goto type_error;
+ }
+ name_len = strlen(event_name);
+ hash = jhash(event_name, name_len, 0);
+ head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
+ hlist_for_each_entry(event, head, hlist) {
+ WARN_ON_ONCE(!event->desc);
+ if (!strncmp(event->desc->name, event_name,
+ LTTNG_KERNEL_SYM_NAME_LEN - 1)
+ && chan == event->chan) {
+ ret = -EEXIST;
+ goto exist;
}
}
+
event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
if (!event) {
ret = -ENOMEM;
event->chan = chan;
event->filter = filter;
event->id = chan->free_event_id++;
- event->enabled = 1;
- event->instrumentation = event_param->instrumentation;
- /* Populate lttng_event structure before tracepoint registration. */
- smp_wmb();
- switch (event_param->instrumentation) {
+ event->instrumentation = itype;
+ event->evtype = LTTNG_TYPE_EVENT;
+ INIT_LIST_HEAD(&event->enablers_ref_head);
+
+ switch (itype) {
case LTTNG_KERNEL_TRACEPOINT:
- event->desc = lttng_event_get(event_param->name);
+ /* Event will be enabled by enabler sync. */
+ event->enabled = 0;
+ event->registered = 0;
+ event->desc = lttng_event_get(event_name);
if (!event->desc) {
ret = -ENOENT;
goto register_error;
}
- ret = lttng_wrapper_tracepoint_probe_register(event->desc->kname,
- event->desc->probe_callback,
- event);
- if (ret) {
- ret = -EINVAL;
- goto register_error;
- }
+ /* Populate lttng_event structure before event registration. */
+ smp_wmb();
break;
case LTTNG_KERNEL_KPROBE:
- ret = lttng_kprobes_register(event_param->name,
+ event->enabled = 1;
+ event->registered = 1;
+ /*
+ * Populate lttng_event structure before event
+ * registration.
+ */
+ smp_wmb();
+ ret = lttng_kprobes_register(event_name,
event_param->u.kprobe.symbol_name,
event_param->u.kprobe.offset,
event_param->u.kprobe.addr,
struct lttng_event *event_return;
/* kretprobe defines 2 events */
+ event->enabled = 1;
+ event->registered = 1;
event_return =
kmem_cache_zalloc(event_cache, GFP_KERNEL);
if (!event_return) {
event_return->filter = filter;
event_return->id = chan->free_event_id++;
event_return->enabled = 1;
- event_return->instrumentation = event_param->instrumentation;
+ event_return->registered = 1;
+ event_return->instrumentation = itype;
/*
* Populate lttng_event structure before kretprobe registration.
*/
smp_wmb();
- ret = lttng_kretprobes_register(event_param->name,
+ ret = lttng_kretprobes_register(event_name,
event_param->u.kretprobe.symbol_name,
event_param->u.kretprobe.offset,
event_param->u.kretprobe.addr,
break;
}
case LTTNG_KERNEL_FUNCTION:
- ret = lttng_ftrace_register(event_param->name,
+ event->enabled = 1;
+ event->registered = 1;
+ /*
+ * Populate lttng_event structure before event
+ * registration.
+ */
+ smp_wmb();
+ ret = lttng_ftrace_register(event_name,
event_param->u.ftrace.symbol_name,
event);
if (ret) {
WARN_ON_ONCE(!ret);
break;
case LTTNG_KERNEL_NOOP:
- event->desc = internal_desc;
+ event->enabled = 1;
+ event->registered = 0;
+ event->desc = event_desc;
if (!event->desc) {
ret = -EINVAL;
goto register_error;
if (ret) {
goto statedump_error;
}
+ hlist_add_head(&event->hlist, head);
list_add(&event->list, &chan->session->events);
mutex_unlock(&sessions_mutex);
return event;
kmem_cache_free(event_cache, event);
cache_error:
exist:
+type_error:
full:
- mutex_unlock(&sessions_mutex);
return ERR_PTR(ret);
}
+/*
+ * Public event-creation entry point: takes the sessions mutex and
+ * delegates to _lttng_event_create(). Returns the new event or an
+ * ERR_PTR() value on failure.
+ */
+struct lttng_event *lttng_event_create(struct lttng_channel *chan,
+ struct lttng_kernel_event *event_param,
+ void *filter,
+ const struct lttng_event_desc *event_desc,
+ enum lttng_kernel_instrumentation itype)
+{
+ struct lttng_event *event;
+
+ mutex_lock(&sessions_mutex);
+ event = _lttng_event_create(chan, event_param, filter, event_desc,
+ itype);
+ mutex_unlock(&sessions_mutex);
+ return event;
+}
+
+/*
+ * Only used for tracepoints for now.
+ * Register the tracepoint probe for @event; no-op when already
+ * registered. A registration failure is silently tolerated: the
+ * event simply remains unregistered.
+ */
+static
+void register_event(struct lttng_event *event)
+{
+ const struct lttng_event_desc *desc;
+ int ret;
+
+ WARN_ON_ONCE(event->instrumentation != LTTNG_KERNEL_TRACEPOINT);
+ if (event->registered)
+ return;
+ desc = event->desc;
+ ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
+ desc->probe_callback, event);
+ if (!ret)
+ event->registered = 1;
+}
+
/*
* Only used internally at session destruction.
*/
{
int ret = -EINVAL;
+ if (!event->registered)
+ return 0;
+
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
default:
WARN_ON_ONCE(1);
}
+ if (!ret)
+ event->registered = 0;
return ret;
}
return ret;
}
+/*
+ * Enabler management.
+ */
+static
+int lttng_desc_match_wildcard_enabler(const struct lttng_event_desc *desc,
+ struct lttng_enabler *enabler)
+{
+ WARN_ON_ONCE(enabler->type != LTTNG_ENABLER_WILDCARD);
+ /* Compare excluding final '*' */
+ /* A bare "*" pattern yields a zero-length compare: matches all. */
+ if (strncmp(desc->name, enabler->event_param.name,
+ strlen(enabler->event_param.name) - 1))
+ return 0;
+ return 1;
+}
+
+/* Exact (full string) name match for a LTTNG_ENABLER_NAME enabler. */
+static
+int lttng_desc_match_name_enabler(const struct lttng_event_desc *desc,
+ struct lttng_enabler *enabler)
+{
+ WARN_ON_ONCE(enabler->type != LTTNG_ENABLER_NAME);
+ if (strcmp(desc->name, enabler->event_param.name))
+ return 0;
+ return 1;
+}
+
+/*
+ * Dispatch to the wildcard or exact-name matcher. Returns 1 on
+ * match, 0 on mismatch.
+ * NOTE(review): the -EINVAL default is non-zero, so boolean callers
+ * (e.g. lttng_event_match_enabler) would treat an unknown enabler
+ * type as a match — confirm this is intended.
+ */
+static
+int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
+ struct lttng_enabler *enabler)
+{
+ switch (enabler->type) {
+ case LTTNG_ENABLER_WILDCARD:
+ return lttng_desc_match_wildcard_enabler(desc, enabler);
+ case LTTNG_ENABLER_NAME:
+ return lttng_desc_match_name_enabler(desc, enabler);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * An event matches an enabler when its descriptor name matches AND it
+ * belongs to the enabler's channel.
+ */
+static
+int lttng_event_match_enabler(struct lttng_event *event,
+ struct lttng_enabler *enabler)
+{
+ if (lttng_desc_match_enabler(event->desc, enabler)
+ && event->chan == enabler->chan)
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * Look up the backward reference from @event to @enabler.
+ * Returns the existing lttng_enabler_ref, or NULL if none.
+ */
+static
+struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
+ struct lttng_enabler *enabler)
+{
+ struct lttng_enabler_ref *enabler_ref;
+
+ list_for_each_entry(enabler_ref,
+ &event->enablers_ref_head, node) {
+ if (enabler_ref->ref == enabler)
+ return enabler_ref;
+ }
+ return NULL;
+}
+
+/*
+ * Create struct lttng_event if it is missing and present in the list of
+ * tracepoint probes.
+ * Should be called with sessions mutex held.
+ */
+static
+void lttng_create_event_if_missing(struct lttng_enabler *enabler)
+{
+ struct lttng_session *session = enabler->chan->session;
+ struct lttng_probe_desc *probe_desc;
+ const struct lttng_event_desc *desc;
+ int i;
+ struct list_head *probe_list;
+
+ probe_list = lttng_get_probe_list_head();
+ /*
+ * For each probe event, if we find that a probe event matches
+ * our enabler, create an associated lttng_event if not
+ * already present.
+ */
+ list_for_each_entry(probe_desc, probe_list, head) {
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ int found = 0;
+ struct hlist_head *head;
+ const char *event_name;
+ size_t name_len;
+ uint32_t hash;
+ struct lttng_event *event;
+
+ desc = probe_desc->event_desc[i];
+ if (!lttng_desc_match_enabler(desc, enabler))
+ continue;
+ event_name = desc->name;
+ name_len = strlen(event_name);
+
+ /*
+ * Check if already created.
+ */
+ hash = jhash(event_name, name_len, 0);
+ head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
+ hlist_for_each_entry(event, head, hlist) {
+ if (event->desc == desc
+ && event->chan == enabler->chan)
+ found = 1;
+ }
+ if (found)
+ continue;
+
+ /*
+ * We need to create an event for this
+ * event probe.
+ */
+ event = _lttng_event_create(enabler->chan,
+ NULL, NULL, desc,
+ LTTNG_KERNEL_TRACEPOINT);
+ /*
+ * _lttng_event_create() returns ERR_PTR() on
+ * failure, never NULL, so check with IS_ERR().
+ */
+ if (IS_ERR(event)) {
+ printk(KERN_INFO "Unable to create event %s\n",
+ probe_desc->event_desc[i]->name);
+ }
+ }
+ }
+}
+
+/*
+ * Create events associated with an enabler (if not already present),
+ * and add backward reference from the event to the enabler.
+ * Should be called with sessions mutex held.
+ * Returns 0, or -ENOMEM if a backward-reference allocation fails
+ * (refs added before the failure are kept, so a retry is safe).
+ */
+static
+int lttng_enabler_ref_events(struct lttng_enabler *enabler)
+{
+ struct lttng_session *session = enabler->chan->session;
+ struct lttng_event *event;
+
+ /* First ensure that probe events are created for this enabler. */
+ lttng_create_event_if_missing(enabler);
+
+ /* For each event matching enabler in session event list. */
+ list_for_each_entry(event, &session->events, list) {
+ struct lttng_enabler_ref *enabler_ref;
+
+ if (!lttng_event_match_enabler(event, enabler))
+ continue;
+ enabler_ref = lttng_event_enabler_ref(event, enabler);
+ if (!enabler_ref) {
+ /*
+ * If no backward ref, create it.
+ * Add backward ref from event to enabler.
+ */
+ enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
+ if (!enabler_ref)
+ return -ENOMEM;
+ enabler_ref->ref = enabler;
+ list_add(&enabler_ref->node,
+ &event->enablers_ref_head);
+ }
+
+ /* TODO: merge event context. */
+ }
+ return 0;
+}
+
+/*
+ * Called at module load: connect the probe on all enablers matching
+ * this event.
+ * Called with sessions lock held.
+ * Only active sessions are synced (lazy sync skips inactive ones).
+ */
+int lttng_fix_pending_events(void)
+{
+ struct lttng_session *session;
+
+ list_for_each_entry(session, &sessions, list)
+ lttng_session_lazy_sync_enablers(session);
+ return 0;
+}
+
+/*
+ * Allocate a new enabler (wildcard or exact-name), link it into the
+ * session's enabler list and trigger a lazy sync. The kernel event
+ * parameters are copied by value. Returns NULL on allocation failure.
+ */
+struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+ struct lttng_kernel_event *event_param,
+ struct lttng_channel *chan)
+{
+ struct lttng_enabler *enabler;
+
+ enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
+ if (!enabler)
+ return NULL;
+ enabler->type = type;
+ memcpy(&enabler->event_param, event_param,
+ sizeof(enabler->event_param));
+ enabler->chan = chan;
+ /* ctx left NULL */
+ enabler->enabled = 1;
+ enabler->evtype = LTTNG_TYPE_ENABLER;
+ mutex_lock(&sessions_mutex);
+ list_add(&enabler->node, &enabler->chan->session->enablers_head);
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ mutex_unlock(&sessions_mutex);
+ return enabler;
+}
+
+/* Mark the enabler enabled and re-sync its session. Always returns 0. */
+int lttng_enabler_enable(struct lttng_enabler *enabler)
+{
+ mutex_lock(&sessions_mutex);
+ enabler->enabled = 1;
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+/* Mark the enabler disabled and re-sync its session. Always returns 0. */
+int lttng_enabler_disable(struct lttng_enabler *enabler)
+{
+ mutex_lock(&sessions_mutex);
+ enabler->enabled = 0;
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+/* Context attachment on enablers is not implemented yet. */
+int lttng_enabler_attach_context(struct lttng_enabler *enabler,
+ struct lttng_kernel_context *context_param)
+{
+ return -ENOSYS;
+}
+
+/*
+ * Unlink and free an enabler. Called from session destroy with the
+ * sessions mutex held.
+ */
+static
+void lttng_enabler_destroy(struct lttng_enabler *enabler)
+{
+ /* Destroy contexts */
+ lttng_destroy_context(enabler->ctx);
+
+ list_del(&enabler->node);
+ kfree(enabler);
+}
+
+/*
+ * lttng_session_sync_enablers should be called just before starting a
+ * session.
+ * Should be called with sessions mutex held.
+ * NOTE(review): the -ENOMEM return of lttng_enabler_ref_events() is
+ * ignored here — confirm whether that is acceptable.
+ */
+static
+void lttng_session_sync_enablers(struct lttng_session *session)
+{
+ struct lttng_enabler *enabler;
+ struct lttng_event *event;
+
+ list_for_each_entry(enabler, &session->enablers_head, node)
+ lttng_enabler_ref_events(enabler);
+ /*
+ * For each event, if at least one of its enablers is enabled,
+ * and its channel and session transient states are enabled, we
+ * enable the event, else we disable it.
+ */
+ list_for_each_entry(event, &session->events, list) {
+ struct lttng_enabler_ref *enabler_ref;
+ int enabled = 0;
+
+ if (event->instrumentation == LTTNG_KERNEL_TRACEPOINT) {
+ /* Enable events */
+ list_for_each_entry(enabler_ref,
+ &event->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = 1;
+ break;
+ }
+ }
+ } else {
+ /* Not handled with lazy sync. */
+ continue;
+ }
+ /*
+ * Enabled state is based on union of enablers, with
+ * intersection of session and channel transient enable
+ * states.
+ */
+ enabled = enabled && session->tstate && event->chan->tstate;
+
+ ACCESS_ONCE(event->enabled) = enabled;
+ /*
+ * Sync tracepoint registration with event enabled
+ * state.
+ */
+ if (enabled) {
+ register_event(event);
+ } else {
+ _lttng_event_unregister(event);
+ }
+ }
+}
+
+/*
+ * Apply enablers to session events, adding events to session if need
+ * be. It is required after each modification applied to an active
+ * session, and right before session "start".
+ * "lazy" sync means we only sync if required.
+ * Should be called with sessions mutex held.
+ */
+static
+void lttng_session_lazy_sync_enablers(struct lttng_session *session)
+{
+ /* We can skip if session is not active */
+ if (!session->active)
+ return;
+ lttng_session_sync_enablers(session);
+}
+
/*
* Serialize at most one packet worth of metadata into a metadata
* channel.
#include "lttng-events.h"
-static LIST_HEAD(probe_list);
-static DEFINE_MUTEX(probe_mutex);
+/*
+ * probe list is protected by sessions lock.
+ */
+static LIST_HEAD(_probe_list);
+
+/*
+ * List of probes registered but not yet processed.
+ */
+static LIST_HEAD(lazy_probe_init);
+/*
+ * lazy_nesting counter ensures we don't trigger lazy probe registration
+ * fixup while we are performing the fixup. It is protected by the
+ * sessions lock.
+ */
+static int lazy_nesting;
+
+/*
+ * Called under sessions lock.
+ */
+/*
+ * Verify that every event name in @desc starts with the provider name
+ * prefix (compared up to LTTNG_KERNEL_SYM_NAME_LEN - 1 chars).
+ * Returns 1 when the invariant holds, 0 on any mismatch.
+ * Called under sessions lock.
+ */
static
-const struct lttng_event_desc *find_event(const char *name)
+int check_event_provider(struct lttng_probe_desc *desc)
{
- struct lttng_probe_desc *probe_desc;
int i;
+ size_t provider_name_len;
- list_for_each_entry(probe_desc, &probe_list, head) {
- for (i = 0; i < probe_desc->nr_events; i++) {
- if (!strcmp(probe_desc->event_desc[i]->name, name))
- return probe_desc->event_desc[i];
+ provider_name_len = strnlen(desc->provider,
+ LTTNG_KERNEL_SYM_NAME_LEN - 1);
+ for (i = 0; i < desc->nr_events; i++) {
+ if (strncmp(desc->event_desc[i]->name,
+ desc->provider,
+ provider_name_len))
+ return 0; /* provider mismatch */
+ }
+ return 1;
+}
+
+/*
+ * Called under sessions lock.
+ * Insert @desc into the main probe list, keeping the list sorted by
+ * struct lttng_probe_desc pointer address.
+ */
+static
+void lttng_lazy_probe_register(struct lttng_probe_desc *desc)
+{
+ struct lttng_probe_desc *iter;
+ struct list_head *probe_list;
+
+ /*
+ * Each provider enforces that every event name begins with the
+ * provider name. Check this in an assertion for extra
+ * carefulness. This ensures we cannot have duplicate event
+ * names across providers.
+ */
+ WARN_ON_ONCE(!check_event_provider(desc));
+
+ /*
+ * The provider ensures there are no duplicate event names.
+ * Duplicated TRACEPOINT_EVENT event names would generate a
+ * compile-time error due to duplicated symbol names.
+ */
+
+ /*
+ * We sort the providers by struct lttng_probe_desc pointer
+ * address.
+ */
+ probe_list = &_probe_list;
+ list_for_each_entry_reverse(iter, probe_list, head) {
+ BUG_ON(iter == desc); /* Should never be in the list twice */
+ if (iter < desc) {
+ /* We belong to the location right after iter. */
+ list_add(&desc->head, &iter->head);
+ goto desc_added;
}
}
+ /* We should be added at the head of the list */
+ list_add(&desc->head, probe_list);
+desc_added:
+ printk(KERN_DEBUG "just registered probe %s containing %u events\n",
+ desc->provider, desc->nr_events);
+}
+
+/*
+ * Called under sessions lock.
+ * Move every probe from the lazy-init list into the main probe list,
+ * then connect pending events. lazy_nesting guards against re-entering
+ * this fixup from lttng_get_probe_list_head() while it runs.
+ */
+static
+void fixup_lazy_probes(void)
+{
+ struct lttng_probe_desc *iter, *tmp;
+ int ret;
+
+ lazy_nesting++;
+ list_for_each_entry_safe(iter, tmp,
+ &lazy_probe_init, lazy_init_head) {
+ lttng_lazy_probe_register(iter);
+ iter->lazy = 0;
+ list_del(&iter->lazy_init_head);
+ }
+ ret = lttng_fix_pending_events();
+ WARN_ON_ONCE(ret);
+ lazy_nesting--;
+}
+
+/*
+ * Called under sessions lock.
+ * Accessor for the main probe list; flushes any pending lazy probe
+ * registrations first (unless already inside the fixup).
+ */
+struct list_head *lttng_get_probe_list_head(void)
+{
+ if (!lazy_nesting && !list_empty(&lazy_probe_init))
+ fixup_lazy_probes();
+ return &_probe_list;
+}
+
+/*
+ * Look a provider up by name in the probe list (triggering lazy fixup
+ * via lttng_get_probe_list_head()). Returns NULL when not found.
+ * Called under sessions lock.
+ */
+static
+const struct lttng_probe_desc *find_provider(const char *provider)
+{
+ struct lttng_probe_desc *iter;
+ struct list_head *probe_list;
+
+ probe_list = lttng_get_probe_list_head();
+ list_for_each_entry(iter, probe_list, head) {
+ if (!strcmp(iter->provider, provider))
+ return iter;
+ }
return NULL;
}
+/*
+ * Register a probe provider. Registration is deferred onto the lazy
+ * list unless a tracing session is already active, in which case the
+ * fixup runs immediately. Duplicate providers return -EEXIST.
+ */
int lttng_probe_register(struct lttng_probe_desc *desc)
{
int ret = 0;
- int i;
- mutex_lock(&probe_mutex);
+ lttng_lock_sessions();
+
/*
- * TODO: This is O(N^2). Turn into a hash table when probe registration
- * overhead becomes an issue.
+ * Check if the provider has already been registered.
*/
- for (i = 0; i < desc->nr_events; i++) {
- if (find_event(desc->event_desc[i]->name)) {
- ret = -EEXIST;
- goto end;
- }
+ if (find_provider(desc->provider)) {
+ ret = -EEXIST;
+ goto end;
}
- list_add(&desc->head, &probe_list);
+ list_add(&desc->lazy_init_head, &lazy_probe_init);
+ desc->lazy = 1;
+ printk(KERN_DEBUG "adding probe %s containing %u events to lazy registration list\n",
+ desc->provider, desc->nr_events);
+ /*
+ * If there is at least one active session, we need to register
+ * the probe immediately, since we cannot delay event
+ * registration because they are needed ASAP.
+ */
+ if (lttng_session_active())
+ fixup_lazy_probes();
end:
- mutex_unlock(&probe_mutex);
+ lttng_unlock_sessions();
return ret;
}
EXPORT_SYMBOL_GPL(lttng_probe_register);
+/*
+ * Unregister a probe provider: unlink it from whichever list it sits
+ * on (main list, or lazy list if its fixup never ran).
+ */
void lttng_probe_unregister(struct lttng_probe_desc *desc)
{
- mutex_lock(&probe_mutex);
- list_del(&desc->head);
- mutex_unlock(&probe_mutex);
+ lttng_lock_sessions();
+ if (!desc->lazy)
+ list_del(&desc->head);
+ else
+ list_del(&desc->lazy_init_head);
+ printk(KERN_DEBUG "just unregistered probe %s\n", desc->provider);
+ lttng_unlock_sessions();
}
EXPORT_SYMBOL_GPL(lttng_probe_unregister);
+/*
+ * TODO: this is O(nr_probes * nb_events), could be faster.
+ * Called with sessions lock held.
+ * Searches _probe_list directly (does not trigger the lazy fixup).
+ * Returns the matching event descriptor, or NULL.
+ */
+static
+const struct lttng_event_desc *find_event(const char *name)
+{
+ struct lttng_probe_desc *probe_desc;
+ int i;
+
+ list_for_each_entry(probe_desc, &_probe_list, head) {
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ if (!strcmp(probe_desc->event_desc[i]->name, name))
+ return probe_desc->event_desc[i];
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Called with sessions lock held.
+ */
const struct lttng_event_desc *lttng_event_get(const char *name)
{
const struct lttng_event_desc *event;
int ret;
- mutex_lock(&probe_mutex);
event = find_event(name);
- mutex_unlock(&probe_mutex);
if (!event)
return NULL;
ret = try_module_get(event->owner);
}
EXPORT_SYMBOL_GPL(lttng_event_get);
+/*
+ * Called with sessions lock held.
+ */
void lttng_event_put(const struct lttng_event_desc *event)
{
module_put(event->owner);
struct lttng_probe_desc *probe_desc;
int iter = 0, i;
- mutex_lock(&probe_mutex);
- list_for_each_entry(probe_desc, &probe_list, head) {
+ lttng_lock_sessions();
+ list_for_each_entry(probe_desc, &_probe_list, head) {
for (i = 0; i < probe_desc->nr_events; i++) {
if (iter++ >= *pos)
return (void *) probe_desc->event_desc[i];
int iter = 0, i;
(*ppos)++;
- list_for_each_entry(probe_desc, &probe_list, head) {
+ list_for_each_entry(probe_desc, &_probe_list, head) {
for (i = 0; i < probe_desc->nr_events; i++) {
if (iter++ >= *ppos)
return (void *) probe_desc->event_desc[i];
static
void tp_list_stop(struct seq_file *m, void *p)
{
- mutex_unlock(&probe_mutex);
+ lttng_unlock_sessions();
}
static