* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/*
+ * This page_alloc.h wrapper needs to be included before gfpflags.h because it
+ * overrides a function with a define.
+ */
+#include "wrapper/page_alloc.h"
+
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/anon_inodes.h>
#include "wrapper/file.h"
#include <linux/jhash.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
#include "wrapper/uuid.h"
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "lttng-events.h"
#include "lttng-tracer.h"
#include "lttng-abi-old.h"
+#include "wrapper/vzalloc.h"
#define METADATA_CACHE_DEFAULT_SIZE 4096
GFP_KERNEL);
if (!metadata_cache)
goto err_free_session;
- metadata_cache->data = kzalloc(METADATA_CACHE_DEFAULT_SIZE,
- GFP_KERNEL);
+ metadata_cache->data = lttng_vzalloc(METADATA_CACHE_DEFAULT_SIZE);
if (!metadata_cache->data)
goto err_free_cache;
metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
kref_init(&metadata_cache->refcount);
+ mutex_init(&metadata_cache->lock);
session->metadata_cache = metadata_cache;
INIT_LIST_HEAD(&metadata_cache->metadata_stream);
memcpy(&metadata_cache->uuid, &session->uuid,
{
struct lttng_metadata_cache *cache =
container_of(kref, struct lttng_metadata_cache, refcount);
- kfree(cache->data);
+ vfree(cache->data);
kfree(cache);
}
ret = -EEXIST;
goto end;
}
- ACCESS_ONCE(event->enabled) = 1;
- lttng_session_sync_enablers(event->chan->session);
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ ret = -EINVAL;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ ACCESS_ONCE(event->enabled) = 1;
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ ret = lttng_kretprobes_event_enable_state(event, 1);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ }
end:
mutex_unlock(&sessions_mutex);
return ret;
ret = -EEXIST;
goto end;
}
- ACCESS_ONCE(event->enabled) = 0;
- lttng_session_sync_enablers(event->chan->session);
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ ret = -EINVAL;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ ACCESS_ONCE(event->enabled) = 0;
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ ret = lttng_kretprobes_event_enable_state(event, 0);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ }
end:
mutex_unlock(&sessions_mutex);
return ret;
name_len = strlen(event_name);
hash = jhash(event_name, name_len, 0);
head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
- hlist_for_each_entry(event, head, hlist) {
+ lttng_hlist_for_each_entry(event, head, hlist) {
WARN_ON_ONCE(!event->desc);
if (!strncmp(event->desc->name, event_name,
LTTNG_KERNEL_SYM_NAME_LEN - 1)
event->id = chan->free_event_id++;
event->instrumentation = itype;
event->evtype = LTTNG_TYPE_EVENT;
+ INIT_LIST_HEAD(&event->bytecode_runtime_head);
INIT_LIST_HEAD(&event->enablers_ref_head);
switch (itype) {
smp_wmb();
break;
case LTTNG_KERNEL_KPROBE:
- event->enabled = 1;
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ event->enabled = 0;
event->registered = 1;
/*
* Populate lttng_event structure before event
struct lttng_event *event_return;
/* kretprobe defines 2 events */
- event->enabled = 1;
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ event->enabled = 0;
event->registered = 1;
event_return =
kmem_cache_zalloc(event_cache, GFP_KERNEL);
event_return->chan = chan;
event_return->filter = filter;
event_return->id = chan->free_event_id++;
- event_return->enabled = 1;
+ event_return->enabled = 0;
event_return->registered = 1;
event_return->instrumentation = itype;
/*
break;
}
case LTTNG_KERNEL_FUNCTION:
- event->enabled = 1;
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ event->enabled = 0;
event->registered = 1;
/*
* Populate lttng_event structure before event
break;
case LTTNG_KERNEL_NOOP:
case LTTNG_KERNEL_SYSCALL:
- event->enabled = 1;
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ event->enabled = 0;
event->registered = 0;
event->desc = event_desc;
if (!event->desc) {
}
hlist_add_head(&event->hlist, head);
list_add(&event->list, &chan->session->events);
- mutex_unlock(&sessions_mutex);
return event;
statedump_error:
void register_event(struct lttng_event *event)
{
const struct lttng_event_desc *desc;
- int ret;
+ int ret = -EINVAL;
- WARN_ON_ONCE(event->instrumentation != LTTNG_KERNEL_TRACEPOINT);
if (event->registered)
return;
+
desc = event->desc;
- ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
- desc->probe_callback, event);
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
+ desc->probe_callback,
+ event);
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ ret = lttng_syscall_filter_enable(event->chan,
+ desc->name);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ ret = 0;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
if (!ret)
event->registered = 1;
}
*/
int _lttng_event_unregister(struct lttng_event *event)
{
+ const struct lttng_event_desc *desc;
int ret = -EINVAL;
if (!event->registered)
return 0;
+ desc = event->desc;
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
event->desc->probe_callback,
event);
- if (ret)
- return ret;
break;
case LTTNG_KERNEL_KPROBE:
lttng_kprobes_unregister(event);
lttng_ftrace_unregister(event);
ret = 0;
break;
- case LTTNG_KERNEL_NOOP:
case LTTNG_KERNEL_SYSCALL:
+ ret = lttng_syscall_filter_disable(event->chan,
+ desc->name);
+ break;
+ case LTTNG_KERNEL_NOOP:
ret = 0;
break;
default:
ret = PTR_ERR(tracker_pids_list_file);
goto file_error;
}
+ if (atomic_long_add_unless(&session->file->f_count,
+ 1, INT_MAX) == INT_MAX) {
+ goto refcount_error;
+ }
ret = lttng_tracker_pids_list_fops.open(NULL, tracker_pids_list_file);
if (ret < 0)
goto open_error;
m = tracker_pids_list_file->private_data;
m->private = session;
fd_install(file_fd, tracker_pids_list_file);
- atomic_long_inc(&session->file->f_count);
return file_fd;
open_error:
+ atomic_long_dec(&session->file->f_count);
+refcount_error:
fput(tracker_pids_list_file);
file_error:
put_unused_fd(file_fd);
* Enabler management.
*/
static
-int lttng_desc_match_wildcard_enabler(const struct lttng_event_desc *desc,
- struct lttng_enabler *enabler)
+int lttng_match_enabler_wildcard(const char *desc_name,
+ const char *name)
{
- WARN_ON_ONCE(enabler->type != LTTNG_ENABLER_WILDCARD);
/* Compare excluding final '*' */
- if (strncmp(desc->name, enabler->event_param.name,
- strlen(enabler->event_param.name) - 1))
+ if (strncmp(desc_name, name, strlen(name) - 1))
return 0;
return 1;
}
static
-int lttng_desc_match_name_enabler(const struct lttng_event_desc *desc,
- struct lttng_enabler *enabler)
+int lttng_match_enabler_name(const char *desc_name,
+ const char *name)
{
- WARN_ON_ONCE(enabler->type != LTTNG_ENABLER_NAME);
- if (strcmp(desc->name, enabler->event_param.name))
+ if (strcmp(desc_name, name))
return 0;
return 1;
}
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
struct lttng_enabler *enabler)
{
+ const char *desc_name, *enabler_name;
+
+ enabler_name = enabler->event_param.name;
+ switch (enabler->event_param.instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ desc_name = desc->name;
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ desc_name = desc->name;
+ if (!strncmp(desc_name, "compat_", strlen("compat_")))
+ desc_name += strlen("compat_");
+ if (!strncmp(desc_name, "syscall_exit_",
+ strlen("syscall_exit_"))) {
+ desc_name += strlen("syscall_exit_");
+ } else if (!strncmp(desc_name, "syscall_entry_",
+ strlen("syscall_entry_"))) {
+ desc_name += strlen("syscall_entry_");
+ } else {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
switch (enabler->type) {
case LTTNG_ENABLER_WILDCARD:
- return lttng_desc_match_wildcard_enabler(desc, enabler);
+ return lttng_match_enabler_wildcard(desc_name, enabler_name);
case LTTNG_ENABLER_NAME:
- return lttng_desc_match_name_enabler(desc, enabler);
+ return lttng_match_enabler_name(desc_name, enabler_name);
default:
return -EINVAL;
}
*/
hash = jhash(event_name, name_len, 0);
head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
- hlist_for_each_entry(event, head, hlist) {
+ lttng_hlist_for_each_entry(event, head, hlist) {
if (event->desc == desc
&& event->chan == enabler->chan)
found = 1;
&event->enablers_ref_head);
}
+ /*
+ * Link filter bytecodes if not linked yet.
+ */
+ lttng_enabler_event_link_bytecode(event, enabler);
+
/* TODO: merge event context. */
}
return 0;
if (!enabler)
return NULL;
enabler->type = type;
+ INIT_LIST_HEAD(&enabler->filter_bytecode_head);
memcpy(&enabler->event_param, event_param,
sizeof(enabler->event_param));
enabler->chan = chan;
/* ctx left NULL */
- enabler->enabled = 1;
+ enabler->enabled = 0;
enabler->evtype = LTTNG_TYPE_ENABLER;
mutex_lock(&sessions_mutex);
list_add(&enabler->node, &enabler->chan->session->enablers_head);
return 0;
}
+int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+ struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+ struct lttng_filter_bytecode_node *bytecode_node;
+ uint32_t bytecode_len;
+ int ret;
+
+ ret = get_user(bytecode_len, &bytecode->len);
+ if (ret)
+ return ret;
+ bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
+ GFP_KERNEL);
+ if (!bytecode_node)
+ return -ENOMEM;
+ ret = copy_from_user(&bytecode_node->bc, bytecode,
+ sizeof(*bytecode) + bytecode_len);
+ if (ret)
+ goto error_free;
+ bytecode_node->enabler = enabler;
+ /* Enforce length based on allocated size */
+ bytecode_node->bc.len = bytecode_len;
+ list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ return 0;
+
+error_free:
+ kfree(bytecode_node);
+ return ret;
+}
+
int lttng_enabler_attach_context(struct lttng_enabler *enabler,
struct lttng_kernel_context *context_param)
{
static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
+ struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
+
+ /* Destroy filter bytecode */
+ list_for_each_entry_safe(filter_node, tmp_filter_node,
+ &enabler->filter_bytecode_head, node) {
+ kfree(filter_node);
+ }
+
/* Destroy contexts */
lttng_destroy_context(enabler->ctx);
*/
list_for_each_entry(event, &session->events, list) {
struct lttng_enabler_ref *enabler_ref;
- int enabled = 0;
+ struct lttng_bytecode_runtime *runtime;
+ int enabled = 0, has_enablers_without_bytecode = 0;
- if (event->instrumentation == LTTNG_KERNEL_TRACEPOINT) {
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
/* Enable events */
list_for_each_entry(enabler_ref,
&event->enablers_ref_head, node) {
break;
}
}
- } else {
+ break;
+ default:
/* Not handled with lazy sync. */
continue;
}
} else {
_lttng_event_unregister(event);
}
+
+	/* Check if the event has any enabled enablers without filter bytecode */
+ list_for_each_entry(enabler_ref,
+ &event->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_bytecode = 1;
+ break;
+ }
+ }
+ event->has_enablers_without_bytecode =
+ has_enablers_without_bytecode;
+
+ /* Enable filters */
+ list_for_each_entry(runtime,
+ &event->bytecode_runtime_head, node)
+ lttng_filter_sync_state(runtime);
}
}
/*
* Serialize at most one packet worth of metadata into a metadata
* channel.
- * We have exclusive access to our metadata buffer (protected by the
- * sessions_mutex), so we can do racy operations such as looking for
- * remaining space left in packet and write, since mutual exclusion
- * protects us from concurrent writes.
+ * We grab the metadata cache mutex to get exclusive access to our metadata
+ * buffer and to the metadata cache. Exclusive access to the metadata buffer
+ * allows us to do racy operations such as looking for remaining space left in
+ * packet and write, since mutual exclusion protects us from concurrent writes.
+ * Mutual exclusion on the metadata cache allows us to read the cache content
+ * without racing against reallocation of the cache by updates.
* Returns the number of bytes written in the channel, 0 if no data
* was written and a negative value on error.
*/
size_t len, reserve_len;
/*
- * Ensure we support mutiple get_next / put sequences followed
- * by put_next. The metadata stream lock internally protects
- * reading the metadata cache. It can indeed be read
- * concurrently by "get_next_subbuf" and "flush" operations on
- * the buffer invoked by different processes.
+ * Ensure we support multiple get_next / put sequences followed by
+ * put_next. The metadata cache lock protects reading the metadata
+ * cache. It can indeed be read concurrently by "get_next_subbuf" and
+ * "flush" operations on the buffer invoked by different processes.
+ * Moreover, since the metadata cache memory can be reallocated, we
+ * need to have exclusive access against updates even though we only
+ * read it.
*/
- mutex_lock(&stream->lock);
+ mutex_lock(&stream->metadata_cache->lock);
WARN_ON(stream->metadata_in < stream->metadata_out);
if (stream->metadata_in != stream->metadata_out)
goto end;
ret = reserve_len;
end:
- mutex_unlock(&stream->lock);
+ mutex_unlock(&stream->metadata_cache->lock);
return ret;
}
/*
* Write the metadata to the metadata cache.
* Must be called with sessions_mutex held.
+ * The metadata cache lock protects us from concurrent read access from a
+ * thread outputting metadata content to the ring buffer.
*/
int lttng_metadata_printf(struct lttng_session *session,
const char *fmt, ...)
return -ENOMEM;
len = strlen(str);
+ mutex_lock(&session->metadata_cache->lock);
if (session->metadata_cache->metadata_written + len >
session->metadata_cache->cache_alloc) {
char *tmp_cache_realloc;
tmp_cache_alloc_size = max_t(unsigned int,
session->metadata_cache->cache_alloc + len,
session->metadata_cache->cache_alloc << 1);
- tmp_cache_realloc = krealloc(session->metadata_cache->data,
- tmp_cache_alloc_size, GFP_KERNEL);
+ tmp_cache_realloc = lttng_vzalloc(tmp_cache_alloc_size);
if (!tmp_cache_realloc)
goto err;
+ if (session->metadata_cache->data) {
+ memcpy(tmp_cache_realloc,
+ session->metadata_cache->data,
+ session->metadata_cache->cache_alloc);
+ vfree(session->metadata_cache->data);
+ }
+
session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
session->metadata_cache->data = tmp_cache_realloc;
}
session->metadata_cache->metadata_written,
str, len);
session->metadata_cache->metadata_written += len;
+ mutex_unlock(&session->metadata_cache->lock);
kfree(str);
list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
return 0;
err:
+ mutex_unlock(&session->metadata_cache->lock);
kfree(str);
return -ENOMEM;
}
ret = wrapper_lttng_fixup_sig(THIS_MODULE);
if (ret)
return ret;
-
- ret = lttng_tracepoint_init();
+ ret = wrapper_get_pfnblock_flags_mask_init();
if (ret)
return ret;
+ ret = lttng_context_init();
+ if (ret)
+ return ret;
+ ret = lttng_tracepoint_init();
+ if (ret)
+ goto error_tp;
event_cache = KMEM_CACHE(lttng_event, 0);
if (!event_cache) {
ret = -ENOMEM;
kmem_cache_destroy(event_cache);
error_kmem:
lttng_tracepoint_exit();
+error_tp:
+ lttng_context_exit();
return ret;
}
lttng_session_destroy(session);
kmem_cache_destroy(event_cache);
lttng_tracepoint_exit();
+ lttng_context_exit();
}
module_exit(lttng_events_exit);