* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/*
+ * This page_alloc.h wrapper needs to be included before gfpflags.h because it
+ * overrides a function with a define.
+ */
+#include "wrapper/page_alloc.h"
+
#include <linux/module.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/utsname.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+
#include "wrapper/uuid.h"
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "wrapper/random.h"
#include "wrapper/tracepoint.h"
+#include "lttng-kernel-version.h"
#include "lttng-events.h"
#include "lttng-tracer.h"
#include "lttng-abi-old.h"
+#include "wrapper/vzalloc.h"
+#include "wrapper/ringbuffer/backend.h"
+#include "wrapper/ringbuffer/frontend.h"
#define METADATA_CACHE_DEFAULT_SIZE 4096
void synchronize_trace(void)
{
	synchronize_sched();
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/*
+	 * On PREEMPT_RT_FULL (config symbol used since 3.4), sched-RCU and
+	 * normal RCU are distinct domains: wait for both grace periods.
+	 */
+	synchronize_rcu();
+#endif
+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
struct lttng_session *lttng_session_create(void)
GFP_KERNEL);
if (!metadata_cache)
goto err_free_session;
- metadata_cache->data = kzalloc(METADATA_CACHE_DEFAULT_SIZE,
- GFP_KERNEL);
+ metadata_cache->data = lttng_vzalloc(METADATA_CACHE_DEFAULT_SIZE);
if (!metadata_cache->data)
goto err_free_cache;
metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
kref_init(&metadata_cache->refcount);
+ mutex_init(&metadata_cache->lock);
session->metadata_cache = metadata_cache;
INIT_LIST_HEAD(&metadata_cache->metadata_stream);
+ memcpy(&metadata_cache->uuid, &session->uuid,
+ sizeof(metadata_cache->uuid));
list_add(&session->list, &sessions);
mutex_unlock(&sessions_mutex);
return session;
{
struct lttng_metadata_cache *cache =
container_of(kref, struct lttng_metadata_cache, refcount);
- kfree(cache->data);
+ vfree(cache->data);
kfree(cache);
}
chan->header_type = 2; /* large */
}
+ /* Clear each stream's quiescent state. */
+ list_for_each_entry(chan, &session->chan, list) {
+ if (chan->channel_type != METADATA_CHANNEL)
+ lib_ring_buffer_clear_quiescent_channel(chan->chan);
+ }
+
ACCESS_ONCE(session->active) = 1;
ACCESS_ONCE(session->been_active) = 1;
ret = _lttng_session_metadata_statedump(session);
int lttng_session_disable(struct lttng_session *session)
{
int ret = 0;
+ struct lttng_channel *chan;
mutex_lock(&sessions_mutex);
if (!session->active) {
goto end;
}
ACCESS_ONCE(session->active) = 0;
+
+ /* Set each stream's quiescent state. */
+ list_for_each_entry(chan, &session->chan, list) {
+ if (chan->channel_type != METADATA_CHANNEL)
+ lib_ring_buffer_set_quiescent_channel(chan->chan);
+ }
+
end:
mutex_unlock(&sessions_mutex);
return ret;
int ret;
mutex_lock(&sessions_mutex);
- if (chan->free_event_id == -1U)
+ if (chan->free_event_id == -1U) {
+ ret = -EMFILE;
goto full;
+ }
/*
* This is O(n^2) (for each event, the loop is called at event
* creation). Might require a hash if we have lots of events.
*/
- list_for_each_entry(event, &chan->session->events, list)
- if (!strcmp(event->desc->name, event_param->name))
- goto exist;
+ list_for_each_entry(event, &chan->session->events, list) {
+ if (!strcmp(event->desc->name, event_param->name)) {
+ /*
+ * Allow events with the same name to appear in
+ * different channels.
+ */
+ if (event->chan == chan) {
+ ret = -EEXIST;
+ goto exist;
+ }
+ }
+ }
event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
- if (!event)
+ if (!event) {
+ ret = -ENOMEM;
goto cache_error;
+ }
event->chan = chan;
event->filter = filter;
event->id = chan->free_event_id++;
switch (event_param->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
event->desc = lttng_event_get(event_param->name);
- if (!event->desc)
+ if (!event->desc) {
+ ret = -ENOENT;
goto register_error;
- ret = kabi_2635_tracepoint_probe_register(event_param->name,
+ }
+ ret = lttng_wrapper_tracepoint_probe_register(event->desc->kname,
event->desc->probe_callback,
event);
- if (ret)
+ if (ret) {
+ ret = -EINVAL;
goto register_error;
+ }
break;
case LTTNG_KERNEL_KPROBE:
ret = lttng_kprobes_register(event_param->name,
event_param->u.kprobe.offset,
event_param->u.kprobe.addr,
event);
- if (ret)
+ if (ret) {
+ ret = -EINVAL;
goto register_error;
+ }
ret = try_module_get(event->desc->owner);
WARN_ON_ONCE(!ret);
break;
/* kretprobe defines 2 events */
event_return =
kmem_cache_zalloc(event_cache, GFP_KERNEL);
- if (!event_return)
+ if (!event_return) {
+ ret = -ENOMEM;
goto register_error;
+ }
event_return->chan = chan;
event_return->filter = filter;
event_return->id = chan->free_event_id++;
event, event_return);
if (ret) {
kmem_cache_free(event_cache, event_return);
+ ret = -EINVAL;
goto register_error;
}
/* Take 2 refs on the module: one per event. */
WARN_ON_ONCE(!ret);
ret = _lttng_event_metadata_statedump(chan->session, chan,
event_return);
+ WARN_ON_ONCE(ret > 0);
if (ret) {
kmem_cache_free(event_cache, event_return);
module_put(event->desc->owner);
ret = lttng_ftrace_register(event_param->name,
event_param->u.ftrace.symbol_name,
event);
- if (ret)
+ if (ret) {
goto register_error;
+ }
ret = try_module_get(event->desc->owner);
WARN_ON_ONCE(!ret);
break;
case LTTNG_KERNEL_NOOP:
event->desc = internal_desc;
- if (!event->desc)
+ if (!event->desc) {
+ ret = -EINVAL;
goto register_error;
+ }
break;
default:
WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ goto register_error;
}
ret = _lttng_event_metadata_statedump(chan->session, chan, event);
- if (ret)
+ WARN_ON_ONCE(ret > 0);
+ if (ret) {
goto statedump_error;
+ }
list_add(&event->list, &chan->session->events);
mutex_unlock(&sessions_mutex);
return event;
exist:
full:
mutex_unlock(&sessions_mutex);
- return NULL;
+ return ERR_PTR(ret);
}
/*
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
- ret = kabi_2635_tracepoint_probe_unregister(event->desc->name,
+ ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
event->desc->probe_callback,
event);
if (ret)
/*
* Serialize at most one packet worth of metadata into a metadata
* channel.
- * We have exclusive access to our metadata buffer (protected by the
- * sessions_mutex), so we can do racy operations such as looking for
- * remaining space left in packet and write, since mutual exclusion
- * protects us from concurrent writes.
+ * We grab the metadata cache mutex to get exclusive access to our metadata
+ * buffer and to the metadata cache. Exclusive access to the metadata buffer
+ * allows us to do racy operations such as looking for remaining space left in
+ * packet and write, since mutual exclusion protects us from concurrent writes.
+ * Mutual exclusion on the metadata cache allows us to read the cache content
+ * without racing against reallocation of the cache by updates.
+ * Returns the number of bytes written in the channel, 0 if no data
+ * was written and a negative value on error.
*/
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
struct channel *chan)
size_t len, reserve_len;
/*
- * Ensure we support mutiple get_next / put sequences followed
- * by put_next.
+	 * Ensure we support multiple get_next / put sequences followed by
+ * put_next. The metadata cache lock protects reading the metadata
+ * cache. It can indeed be read concurrently by "get_next_subbuf" and
+ * "flush" operations on the buffer invoked by different processes.
+ * Moreover, since the metadata cache memory can be reallocated, we
+ * need to have exclusive access against updates even though we only
+ * read it.
*/
+ mutex_lock(&stream->metadata_cache->lock);
WARN_ON(stream->metadata_in < stream->metadata_out);
if (stream->metadata_in != stream->metadata_out)
- return 0;
+ goto end;
len = stream->metadata_cache->metadata_written -
stream->metadata_in;
if (!len)
- return 0;
+ goto end;
reserve_len = min_t(size_t,
stream->transport->ops.packet_avail_size(chan),
len);
ret = reserve_len;
end:
+ mutex_unlock(&stream->metadata_cache->lock);
return ret;
}
/*
* Write the metadata to the metadata cache.
* Must be called with sessions_mutex held.
+ * The metadata cache lock protects us from concurrent read access from
+ * thread outputting metadata content to ring buffer.
*/
int lttng_metadata_printf(struct lttng_session *session,
const char *fmt, ...)
return -ENOMEM;
len = strlen(str);
+ mutex_lock(&session->metadata_cache->lock);
if (session->metadata_cache->metadata_written + len >
session->metadata_cache->cache_alloc) {
char *tmp_cache_realloc;
tmp_cache_alloc_size = max_t(unsigned int,
session->metadata_cache->cache_alloc + len,
session->metadata_cache->cache_alloc << 1);
- tmp_cache_realloc = krealloc(session->metadata_cache->data,
- tmp_cache_alloc_size, GFP_KERNEL);
+ tmp_cache_realloc = lttng_vzalloc(tmp_cache_alloc_size);
if (!tmp_cache_realloc)
goto err;
+ if (session->metadata_cache->data) {
+ memcpy(tmp_cache_realloc,
+ session->metadata_cache->data,
+ session->metadata_cache->cache_alloc);
+ vfree(session->metadata_cache->data);
+ }
+
session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
session->metadata_cache->data = tmp_cache_realloc;
}
session->metadata_cache->metadata_written,
str, len);
session->metadata_cache->metadata_written += len;
+ mutex_unlock(&session->metadata_cache->lock);
kfree(str);
list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
return 0;
err:
+ mutex_unlock(&session->metadata_cache->lock);
kfree(str);
return -ENOMEM;
}
ret = lttng_metadata_printf(session,
"event {\n"
- " name = %s;\n"
+ " name = \"%s\";\n"
" id = %u;\n"
" stream_id = %u;\n",
event->desc->name,
{
int ret;
+ ret = wrapper_lttng_fixup_sig(THIS_MODULE);
+ if (ret)
+ return ret;
+ ret = wrapper_get_pfnblock_flags_mask_init();
+ if (ret)
+ return ret;
+ ret = lttng_tracepoint_init();
+ if (ret)
+ return ret;
event_cache = KMEM_CACHE(lttng_event, 0);
- if (!event_cache)
- return -ENOMEM;
+ if (!event_cache) {
+ ret = -ENOMEM;
+ goto error_kmem;
+ }
ret = lttng_abi_init();
if (ret)
goto error_abi;
+ ret = lttng_logger_init();
+ if (ret)
+ goto error_logger;
return 0;
+
+error_logger:
+ lttng_abi_exit();
error_abi:
kmem_cache_destroy(event_cache);
+error_kmem:
+ lttng_tracepoint_exit();
return ret;
}
{
struct lttng_session *session, *tmpsession;
+ lttng_logger_exit();
lttng_abi_exit();
list_for_each_entry_safe(session, tmpsession, &sessions, list)
lttng_session_destroy(session);
kmem_cache_destroy(event_cache);
+ lttng_tracepoint_exit();
}
module_exit(lttng_events_exit);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng Events");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+ __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+ __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+ LTTNG_MODULES_EXTRAVERSION);