/*
* lttng-events.c
*
- * Copyright 2010-2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
* Holds LTTng per-session event registry.
*
- * Dual LGPL v2.1/GPL v2 license.
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/*
+ * This page_alloc.h wrapper needs to be included before gfpflags.h because it
+ * overrides a function with a define.
+ */
+#include "wrapper/page_alloc.h"
+
#include <linux/module.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
+#include <linux/utsname.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+
#include "wrapper/uuid.h"
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "wrapper/random.h"
+#include "wrapper/tracepoint.h"
+#include "lttng-kernel-version.h"
#include "lttng-events.h"
#include "lttng-tracer.h"
+#include "lttng-abi-old.h"
+#include "wrapper/vzalloc.h"
+#include "wrapper/ringbuffer/backend.h"
+#include "wrapper/ringbuffer/frontend.h"
+
+#define METADATA_CACHE_DEFAULT_SIZE 4096
static LIST_HEAD(sessions);
static LIST_HEAD(lttng_transport_list);
+/*
+ * Protect the sessions and metadata caches.
+ */
static DEFINE_MUTEX(sessions_mutex);
static struct kmem_cache *event_cache;
struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
+static
+void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
/*
 * Wait for all in-flight tracepoint probe callers to complete.
 * synchronize_sched() covers probes running with preemption disabled;
 * under PREEMPT_RT configurations (CONFIG_PREEMPT_RT, or
 * CONFIG_PREEMPT_RT_FULL on kernels >= 3.4) an RCU grace period is
 * additionally awaited — presumably because probes can run in
 * preemptible context on RT kernels (NOTE(review): rationale inferred,
 * confirm against RT tree semantics).
 */
void synchronize_trace(void)
{
synchronize_sched();
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+#ifdef CONFIG_PREEMPT_RT_FULL
+ synchronize_rcu();
+#endif
+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
synchronize_rcu();
#endif
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
/*
 * Create a tracing session and its backing metadata cache.
 * The cache buffer starts at METADATA_CACHE_DEFAULT_SIZE bytes, is
 * reference-counted via kref, protected by its own mutex, and copies
 * the session UUID.  The session is linked into the global 'sessions'
 * list under sessions_mutex.  Returns NULL on allocation failure,
 * unwinding via the goto cleanup chain below.
 */
struct lttng_session *lttng_session_create(void)
{
struct lttng_session *session;
+ struct lttng_metadata_cache *metadata_cache;
mutex_lock(&sessions_mutex);
session = kzalloc(sizeof(struct lttng_session), GFP_KERNEL);
if (!session)
- return NULL;
+ goto err;
INIT_LIST_HEAD(&session->chan);
INIT_LIST_HEAD(&session->events);
uuid_le_gen(&session->uuid);
+
/* Allocate the metadata cache; its data buffer can grow later. */
+ metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
+ GFP_KERNEL);
+ if (!metadata_cache)
+ goto err_free_session;
+ metadata_cache->data = lttng_vzalloc(METADATA_CACHE_DEFAULT_SIZE);
+ if (!metadata_cache->data)
+ goto err_free_cache;
+ metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
+ kref_init(&metadata_cache->refcount);
+ mutex_init(&metadata_cache->lock);
+ session->metadata_cache = metadata_cache;
+ INIT_LIST_HEAD(&metadata_cache->metadata_stream);
/* The cache carries the session UUID so streams can identify it. */
+ memcpy(&metadata_cache->uuid, &session->uuid,
+ sizeof(metadata_cache->uuid));
list_add(&session->list, &sessions);
mutex_unlock(&sessions_mutex);
return session;
+
+err_free_cache:
+ kfree(metadata_cache);
+err_free_session:
+ kfree(session);
+err:
+ mutex_unlock(&sessions_mutex);
+ return NULL;
+}
+
/*
 * kref release handler for the metadata cache: frees the vmalloc'd
 * data buffer and the cache structure itself.  Invoked through
 * kref_put() when the last reference (session or metadata stream)
 * is dropped.
 */
+void metadata_cache_destroy(struct kref *kref)
+{
+ struct lttng_metadata_cache *cache =
+ container_of(kref, struct lttng_metadata_cache, refcount);
+ vfree(cache->data);
+ kfree(cache);
}
void lttng_session_destroy(struct lttng_session *session)
{
struct lttng_channel *chan, *tmpchan;
struct lttng_event *event, *tmpevent;
+ struct lttng_metadata_stream *metadata_stream;
int ret;
mutex_lock(&sessions_mutex);
synchronize_trace(); /* Wait for in-flight events to complete */
list_for_each_entry_safe(event, tmpevent, &session->events, list)
_lttng_event_destroy(event);
- list_for_each_entry_safe(chan, tmpchan, &session->chan, list)
+ list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
+ BUG_ON(chan->channel_type == METADATA_CHANNEL);
_lttng_channel_destroy(chan);
+ }
+ list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
+ _lttng_metadata_channel_hangup(metadata_stream);
+ kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
list_del(&session->list);
mutex_unlock(&sessions_mutex);
kfree(session);
chan->header_type = 2; /* large */
}
+ /* Clear each stream's quiescent state. */
+ list_for_each_entry(chan, &session->chan, list) {
+ if (chan->channel_type != METADATA_CHANNEL)
+ lib_ring_buffer_clear_quiescent_channel(chan->chan);
+ }
+
ACCESS_ONCE(session->active) = 1;
ACCESS_ONCE(session->been_active) = 1;
ret = _lttng_session_metadata_statedump(session);
+ if (ret) {
+ ACCESS_ONCE(session->active) = 0;
+ goto end;
+ }
+ ret = lttng_statedump_start(session);
if (ret)
ACCESS_ONCE(session->active) = 0;
end:
int lttng_session_disable(struct lttng_session *session)
{
int ret = 0;
+ struct lttng_channel *chan;
mutex_lock(&sessions_mutex);
if (!session->active) {
goto end;
}
ACCESS_ONCE(session->active) = 0;
+
+ /* Set each stream's quiescent state. */
+ list_for_each_entry(chan, &session->chan, list) {
+ if (chan->channel_type != METADATA_CHANNEL)
+ lib_ring_buffer_set_quiescent_channel(chan->chan);
+ }
+
end:
mutex_unlock(&sessions_mutex);
return ret;
{
int old;
- if (channel == channel->session->metadata)
+ if (channel->channel_type == METADATA_CHANNEL)
return -EPERM;
old = xchg(&channel->enabled, 1);
if (old)
{
int old;
- if (channel == channel->session->metadata)
+ if (channel->channel_type == METADATA_CHANNEL)
return -EPERM;
old = xchg(&channel->enabled, 0);
if (!old)
{
int old;
- if (event->chan == event->chan->session->metadata)
+ if (event->chan->channel_type == METADATA_CHANNEL)
return -EPERM;
old = xchg(&event->enabled, 1);
if (old)
{
int old;
- if (event->chan == event->chan->session->metadata)
+ if (event->chan->channel_type == METADATA_CHANNEL)
return -EPERM;
old = xchg(&event->enabled, 0);
if (!old)
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
- unsigned int read_timer_interval)
+ unsigned int read_timer_interval,
+ enum channel_type channel_type)
{
struct lttng_channel *chan;
struct lttng_transport *transport = NULL;
mutex_lock(&sessions_mutex);
- if (session->been_active)
+ if (session->been_active && channel_type != METADATA_CHANNEL)
goto active; /* Refuse to add channel to active session */
transport = lttng_transport_find(transport_name);
if (!transport) {
goto nomem;
chan->session = session;
chan->id = session->free_chan_id++;
+ chan->ops = &transport->ops;
/*
* Note: the channel creation op already writes into the packet
* headers. Therefore the "chan" information used as input
* should be already accessible.
*/
- chan->chan = transport->ops.channel_create("[lttng]", chan, buf_addr,
- subbuf_size, num_subbuf, switch_timer_interval,
- read_timer_interval);
+ chan->chan = transport->ops.channel_create(transport_name,
+ chan, buf_addr, subbuf_size, num_subbuf,
+ switch_timer_interval, read_timer_interval);
if (!chan->chan)
goto create_error;
chan->enabled = 1;
- chan->ops = &transport->ops;
chan->transport = transport;
+ chan->channel_type = channel_type;
list_add(&chan->list, &session->chan);
mutex_unlock(&sessions_mutex);
return chan;
}
/*
- * Only used internally at session destruction.
+ * Only used internally at session destruction for per-cpu channels, and
+ * when metadata channel is released.
+ * Needs to be called with sessions mutex held.
*/
static
void _lttng_channel_destroy(struct lttng_channel *chan)
kfree(chan);
}
/*
 * Destroy a metadata channel when it is released.
 * Only valid for METADATA_CHANNEL channels (BUG otherwise).
 * Takes sessions_mutex around _lttng_channel_destroy() because that
 * mutex also guards the metadata cache shared with the session.
 */
+void lttng_metadata_channel_destroy(struct lttng_channel *chan)
+{
+ BUG_ON(chan->channel_type != METADATA_CHANNEL);
+
+ /* Protect the metadata cache with the sessions_mutex. */
+ mutex_lock(&sessions_mutex);
+ _lttng_channel_destroy(chan);
+ mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
+
/*
 * Hang up a metadata stream: mark it finalized and wake any reader
 * sleeping on read_wait so it can observe the end-of-stream condition.
 * Called at session destruction for each stream on the cache's
 * metadata_stream list.
 */
+static
+void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
+{
+ stream->finalized = 1;
+ wake_up_interruptible(&stream->read_wait);
+}
+
/*
* Supports event creation while tracing session is active.
*/
int ret;
mutex_lock(&sessions_mutex);
- if (chan->free_event_id == -1UL)
+ if (chan->free_event_id == -1U) {
+ ret = -EMFILE;
goto full;
+ }
/*
* This is O(n^2) (for each event, the loop is called at event
* creation). Might require a hash if we have lots of events.
*/
- list_for_each_entry(event, &chan->session->events, list)
- if (!strcmp(event->desc->name, event_param->name))
- goto exist;
+ list_for_each_entry(event, &chan->session->events, list) {
+ if (!strcmp(event->desc->name, event_param->name)) {
+ /*
+ * Allow events with the same name to appear in
+ * different channels.
+ */
+ if (event->chan == chan) {
+ ret = -EEXIST;
+ goto exist;
+ }
+ }
+ }
event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
- if (!event)
+ if (!event) {
+ ret = -ENOMEM;
goto cache_error;
+ }
event->chan = chan;
event->filter = filter;
event->id = chan->free_event_id++;
switch (event_param->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
event->desc = lttng_event_get(event_param->name);
- if (!event->desc)
+ if (!event->desc) {
+ ret = -ENOENT;
goto register_error;
- ret = tracepoint_probe_register(event_param->name,
+ }
+ ret = lttng_wrapper_tracepoint_probe_register(event->desc->kname,
event->desc->probe_callback,
event);
- if (ret)
+ if (ret) {
+ ret = -EINVAL;
goto register_error;
+ }
break;
case LTTNG_KERNEL_KPROBE:
ret = lttng_kprobes_register(event_param->name,
event_param->u.kprobe.offset,
event_param->u.kprobe.addr,
event);
- if (ret)
+ if (ret) {
+ ret = -EINVAL;
goto register_error;
+ }
ret = try_module_get(event->desc->owner);
WARN_ON_ONCE(!ret);
break;
/* kretprobe defines 2 events */
event_return =
kmem_cache_zalloc(event_cache, GFP_KERNEL);
- if (!event_return)
+ if (!event_return) {
+ ret = -ENOMEM;
goto register_error;
+ }
event_return->chan = chan;
event_return->filter = filter;
event_return->id = chan->free_event_id++;
event, event_return);
if (ret) {
kmem_cache_free(event_cache, event_return);
+ ret = -EINVAL;
goto register_error;
}
/* Take 2 refs on the module: one per event. */
WARN_ON_ONCE(!ret);
ret = _lttng_event_metadata_statedump(chan->session, chan,
event_return);
+ WARN_ON_ONCE(ret > 0);
if (ret) {
kmem_cache_free(event_cache, event_return);
module_put(event->desc->owner);
ret = lttng_ftrace_register(event_param->name,
event_param->u.ftrace.symbol_name,
event);
- if (ret)
+ if (ret) {
goto register_error;
+ }
ret = try_module_get(event->desc->owner);
WARN_ON_ONCE(!ret);
break;
case LTTNG_KERNEL_NOOP:
event->desc = internal_desc;
- if (!event->desc)
+ if (!event->desc) {
+ ret = -EINVAL;
goto register_error;
+ }
break;
default:
WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ goto register_error;
}
ret = _lttng_event_metadata_statedump(chan->session, chan, event);
- if (ret)
+ WARN_ON_ONCE(ret > 0);
+ if (ret) {
goto statedump_error;
+ }
list_add(&event->list, &chan->session->events);
mutex_unlock(&sessions_mutex);
return event;
exist:
full:
mutex_unlock(&sessions_mutex);
- return NULL;
+ return ERR_PTR(ret);
}
/*
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
- ret = tracepoint_probe_unregister(event->desc->name,
+ ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
event->desc->probe_callback,
event);
if (ret)
}
/*
- * We have exclusive access to our metadata buffer (protected by the
- * sessions_mutex), so we can do racy operations such as looking for
- * remaining space left in packet and write, since mutual exclusion
- * protects us from concurrent writes.
+ * Serialize at most one packet worth of metadata into a metadata
+ * channel.
+ * We grab the metadata cache mutex to get exclusive access to our metadata
+ * buffer and to the metadata cache. Exclusive access to the metadata buffer
+ * allows us to do racy operations such as looking for remaining space left in
+ * packet and write, since mutual exclusion protects us from concurrent writes.
+ * Mutual exclusion on the metadata cache allow us to read the cache content
+ * without racing against reallocation of the cache by updates.
+ * Returns the number of bytes written in the channel, 0 if no data
+ * was written and a negative value on error.
+ */
+int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
+ struct channel *chan)
+{
+ struct lib_ring_buffer_ctx ctx;
+ int ret = 0;
+ size_t len, reserve_len;
+
+ /*
+ * Ensure we support multiple get_next / put sequences followed by
+ * put_next. The metadata cache lock protects reading the metadata
+ * cache. It can indeed be read concurrently by "get_next_subbuf" and
+ * "flush" operations on the buffer invoked by different processes.
+ * Moreover, since the metadata cache memory can be reallocated, we
+ * need to have exclusive access against updates even though we only
+ * read it.
+ */
+ mutex_lock(&stream->metadata_cache->lock);
/* Nothing to do until previously pushed data has been consumed. */
+ WARN_ON(stream->metadata_in < stream->metadata_out);
+ if (stream->metadata_in != stream->metadata_out)
+ goto end;
+
/* Bytes of cached metadata not yet copied into this stream. */
+ len = stream->metadata_cache->metadata_written -
+ stream->metadata_in;
+ if (!len)
+ goto end;
/* Copy at most what fits in the channel's current packet. */
+ reserve_len = min_t(size_t,
+ stream->transport->ops.packet_avail_size(chan),
+ len);
+ lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
+ sizeof(char), -1);
+ /*
+ * If reservation failed, return an error to the caller.
+ */
+ ret = stream->transport->ops.event_reserve(&ctx, 0);
+ if (ret != 0) {
+ printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
+ goto end;
+ }
+ stream->transport->ops.event_write(&ctx,
+ stream->metadata_cache->data + stream->metadata_in,
+ reserve_len);
+ stream->transport->ops.event_commit(&ctx);
+ stream->metadata_in += reserve_len;
/* Success: report the number of bytes pushed to the channel. */
+ ret = reserve_len;
+
+end:
+ mutex_unlock(&stream->metadata_cache->lock);
+ return ret;
+}
+
+/*
+ * Write the metadata to the metadata cache.
+ * Must be called with sessions_mutex held.
+ * The metadata cache lock protects us from concurrent read access from
+ * thread outputting metadata content to ring buffer.
*/
int lttng_metadata_printf(struct lttng_session *session,
const char *fmt, ...)
{
- struct lib_ring_buffer_ctx ctx;
- struct lttng_channel *chan = session->metadata;
char *str;
- int ret = 0, waitret;
- size_t len, reserve_len, pos;
+ size_t len;
va_list ap;
+ struct lttng_metadata_stream *stream;
WARN_ON_ONCE(!ACCESS_ONCE(session->active));
return -ENOMEM;
len = strlen(str);
- pos = 0;
-
- for (pos = 0; pos < len; pos += reserve_len) {
- reserve_len = min_t(size_t,
- chan->ops->packet_avail_size(chan->chan),
- len - pos);
- lib_ring_buffer_ctx_init(&ctx, chan->chan, NULL, reserve_len,
- sizeof(char), -1);
- /*
- * We don't care about metadata buffer's records lost
- * count, because we always retry here. Report error if
- * we need to bail out after timeout or being
- * interrupted.
- */
- waitret = wait_event_interruptible_timeout(*chan->ops->get_writer_buf_wait_queue(chan->chan, -1),
- ({
- ret = chan->ops->event_reserve(&ctx, 0);
- ret != -ENOBUFS || !ret;
- }),
- msecs_to_jiffies(LTTNG_METADATA_TIMEOUT_MSEC));
- if (!waitret || waitret == -ERESTARTSYS || ret) {
- printk(KERN_WARNING "LTTng: Failure to write metadata to buffers (%s)\n",
- waitret == -ERESTARTSYS ? "interrupted" :
- (ret == -ENOBUFS ? "timeout" : "I/O error"));
- if (waitret == -ERESTARTSYS)
- ret = waitret;
- goto end;
+ mutex_lock(&session->metadata_cache->lock);
+ if (session->metadata_cache->metadata_written + len >
+ session->metadata_cache->cache_alloc) {
+ char *tmp_cache_realloc;
+ unsigned int tmp_cache_alloc_size;
+
+ tmp_cache_alloc_size = max_t(unsigned int,
+ session->metadata_cache->cache_alloc + len,
+ session->metadata_cache->cache_alloc << 1);
+ tmp_cache_realloc = lttng_vzalloc(tmp_cache_alloc_size);
+ if (!tmp_cache_realloc)
+ goto err;
+ if (session->metadata_cache->data) {
+ memcpy(tmp_cache_realloc,
+ session->metadata_cache->data,
+ session->metadata_cache->cache_alloc);
+ vfree(session->metadata_cache->data);
}
- chan->ops->event_write(&ctx, &str[pos], reserve_len);
- chan->ops->event_commit(&ctx);
+
+ session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
+ session->metadata_cache->data = tmp_cache_realloc;
}
-end:
+ memcpy(session->metadata_cache->data +
+ session->metadata_cache->metadata_written,
+ str, len);
+ session->metadata_cache->metadata_written += len;
+ mutex_unlock(&session->metadata_cache->lock);
kfree(str);
- return ret;
+
+ list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
+ wake_up_interruptible(&stream->read_wait);
+
+ return 0;
+
+err:
+ mutex_unlock(&session->metadata_cache->lock);
+ kfree(str);
+ return -ENOMEM;
}
+/*
+ * Must be called with sessions_mutex held.
+ */
static
int _lttng_field_statedump(struct lttng_session *session,
const struct lttng_event_field *field)
return ret;
}
+/*
+ * Must be called with sessions_mutex held.
+ */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
struct lttng_channel *chan,
if (event->metadata_dumped || !ACCESS_ONCE(session->active))
return 0;
- if (chan == session->metadata)
+ if (chan->channel_type == METADATA_CHANNEL)
return 0;
ret = lttng_metadata_printf(session,
"event {\n"
- " name = %s;\n"
+ " name = \"%s\";\n"
" id = %u;\n"
" stream_id = %u;\n",
event->desc->name,
}
+/*
+ * Must be called with sessions_mutex held.
+ */
static
int _lttng_channel_metadata_statedump(struct lttng_session *session,
struct lttng_channel *chan)
if (chan->metadata_dumped || !ACCESS_ONCE(session->active))
return 0;
- if (chan == session->metadata)
+
+ if (chan->channel_type == METADATA_CHANNEL)
return 0;
WARN_ON_ONCE(!chan->header_type);
return ret;
}
+/*
+ * Must be called with sessions_mutex held.
+ */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
"struct packet_context {\n"
" uint64_clock_monotonic_t timestamp_begin;\n"
" uint64_clock_monotonic_t timestamp_end;\n"
- " uint32_t events_discarded;\n"
- " uint32_t content_size;\n"
- " uint32_t packet_size;\n"
+ " uint64_t content_size;\n"
+ " uint64_t packet_size;\n"
+ " unsigned long events_discarded;\n"
" uint32_t cpu_id;\n"
"};\n\n"
);
* Large header:
* id: range: 0 - 65534.
* id 65535 is reserved to indicate an extended header.
+ *
+ * Must be called with sessions_mutex held.
*/
static
int _lttng_event_header_declare(struct lttng_session *session)
local_irq_restore(flags);
offset = (monotonic[0] + monotonic[1]) >> 1;
- realtime = rts.tv_sec * NSEC_PER_SEC;
+ realtime = (uint64_t) rts.tv_sec * NSEC_PER_SEC;
realtime += rts.tv_nsec;
offset = realtime - offset;
return offset;
/*
* Output metadata into this session's metadata buffers.
+ * Must be called with sessions_mutex held.
*/
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
return 0;
if (session->metadata_dumped)
goto skip_session;
- if (!session->metadata) {
- printk(KERN_WARNING "LTTng: attempt to start tracing, but metadata channel is not found. Operation abort.\n");
- return -EPERM;
- }
snprintf(uuid_s, sizeof(uuid_s),
"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
+ "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
"\n"
lttng_alignof(uint16_t) * CHAR_BIT,
lttng_alignof(uint32_t) * CHAR_BIT,
lttng_alignof(uint64_t) * CHAR_BIT,
- CTF_VERSION_MAJOR,
- CTF_VERSION_MINOR,
+ sizeof(unsigned long) * CHAR_BIT,
+ lttng_alignof(unsigned long) * CHAR_BIT,
+ CTF_SPEC_MAJOR,
+ CTF_SPEC_MINOR,
uuid_s,
#ifdef __BIG_ENDIAN
"be"
if (ret)
goto end;
+ ret = lttng_metadata_printf(session,
+ "env {\n"
+ " hostname = \"%s\";\n"
+ " domain = \"kernel\";\n"
+ " sysname = \"%s\";\n"
+ " kernel_release = \"%s\";\n"
+ " kernel_version = \"%s\";\n"
+ " tracer_name = \"lttng-modules\";\n"
+ " tracer_major = %d;\n"
+ " tracer_minor = %d;\n"
+ " tracer_patchlevel = %d;\n"
+ "};\n\n",
+ current->nsproxy->uts_ns->name.nodename,
+ utsname()->sysname,
+ utsname()->release,
+ utsname()->version,
+ LTTNG_MODULES_MAJOR_VERSION,
+ LTTNG_MODULES_MINOR_VERSION,
+ LTTNG_MODULES_PATCHLEVEL_VERSION
+ );
+ if (ret)
+ goto end;
+
ret = lttng_metadata_printf(session,
"clock {\n"
" name = %s;\n",
if (!trace_clock_uuid(clock_uuid_s)) {
ret = lttng_metadata_printf(session,
- " uuid = %s;\n",
+ " uuid = \"%s\";\n",
clock_uuid_s
);
if (ret)
{
int ret;
+ ret = wrapper_lttng_fixup_sig(THIS_MODULE);
+ if (ret)
+ return ret;
+ ret = wrapper_get_pfnblock_flags_mask_init();
+ if (ret)
+ return ret;
+ ret = lttng_tracepoint_init();
+ if (ret)
+ return ret;
event_cache = KMEM_CACHE(lttng_event, 0);
- if (!event_cache)
- return -ENOMEM;
+ if (!event_cache) {
+ ret = -ENOMEM;
+ goto error_kmem;
+ }
ret = lttng_abi_init();
if (ret)
goto error_abi;
+ ret = lttng_logger_init();
+ if (ret)
+ goto error_logger;
return 0;
+
+error_logger:
+ lttng_abi_exit();
error_abi:
kmem_cache_destroy(event_cache);
+error_kmem:
+ lttng_tracepoint_exit();
return ret;
}
{
struct lttng_session *session, *tmpsession;
+ lttng_logger_exit();
lttng_abi_exit();
list_for_each_entry_safe(session, tmpsession, &sessions, list)
lttng_session_destroy(session);
kmem_cache_destroy(event_cache);
+ lttng_tracepoint_exit();
}
module_exit(lttng_events_exit);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng Events");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+ __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+ __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+ LTTNG_MODULES_EXTRAVERSION);