* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/*
+ * This page_alloc.h wrapper needs to be included before gfpflags.h because it
+ * overrides a function with a define.
+ */
+#include "wrapper/page_alloc.h"
+
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/anon_inodes.h>
#include "wrapper/file.h"
#include <linux/jhash.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
#include "wrapper/uuid.h"
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "lttng-events.h"
#include "lttng-tracer.h"
#include "lttng-abi-old.h"
+#include "lttng-endian.h"
+#include "wrapper/vzalloc.h"
#define METADATA_CACHE_DEFAULT_SIZE 4096
GFP_KERNEL);
if (!metadata_cache)
goto err_free_session;
- metadata_cache->data = kzalloc(METADATA_CACHE_DEFAULT_SIZE,
- GFP_KERNEL);
+ metadata_cache->data = lttng_vzalloc(METADATA_CACHE_DEFAULT_SIZE);
if (!metadata_cache->data)
goto err_free_cache;
metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
kref_init(&metadata_cache->refcount);
+ mutex_init(&metadata_cache->lock);
session->metadata_cache = metadata_cache;
INIT_LIST_HEAD(&metadata_cache->metadata_stream);
memcpy(&metadata_cache->uuid, &session->uuid,
{
struct lttng_metadata_cache *cache =
container_of(kref, struct lttng_metadata_cache, refcount);
- kfree(cache->data);
+ vfree(cache->data);
kfree(cache);
}
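For reference, the release function above runs when the last reference on the cache is dropped; a usage sketch, with the release function name assumed for illustration:

/* Hypothetical call site: drop a reference; the cache is freed when
 * the refcount reaches zero. */
kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);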
ret = -EEXIST;
goto end;
}
- ACCESS_ONCE(event->enabled) = 1;
- lttng_session_sync_enablers(event->chan->session);
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
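+ /*
+ * Tracepoint and syscall events are presumably managed through
+ * their enablers rather than toggled individually, hence the
+ * removal of the per-event enabler sync above.
+ */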
+ ret = -EINVAL;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ ACCESS_ONCE(event->enabled) = 1;
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ ret = lttng_kretprobes_event_enable_state(event, 1);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ }
end:
mutex_unlock(&sessions_mutex);
return ret;
ret = -EEXIST;
goto end;
}
- ACCESS_ONCE(event->enabled) = 0;
- lttng_session_sync_enablers(event->chan->session);
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ ret = -EINVAL;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ ACCESS_ONCE(event->enabled) = 0;
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ ret = lttng_kretprobes_event_enable_state(event, 0);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ }
end:
mutex_unlock(&sessions_mutex);
return ret;
name_len = strlen(event_name);
hash = jhash(event_name, name_len, 0);
head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
- hlist_for_each_entry(event, head, hlist) {
+ lttng_hlist_for_each_entry(event, head, hlist) {
WARN_ON_ONCE(!event->desc);
if (!strncmp(event->desc->name, event_name,
LTTNG_KERNEL_SYM_NAME_LEN - 1)
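lttng_hlist_for_each_entry() papers over the kernel 3.9 API change that dropped the extra iterator argument from hlist_for_each_entry(). A sketch of such a compat macro, assuming a hlist_entry_safe()-style helper:

#include <linux/list.h>

#define lttng_hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
	})

#define lttng_hlist_for_each_entry(pos, head, member) \
	for (pos = lttng_hlist_entry_safe((head)->first, typeof(*(pos)), member); \
	     pos; \
	     pos = lttng_hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))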
smp_wmb();
break;
case LTTNG_KERNEL_KPROBE:
- event->enabled = 1;
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ event->enabled = 0;
event->registered = 1;
/*
* Populate lttng_event structure before event
struct lttng_event *event_return;
/* kretprobe defines 2 events */
- event->enabled = 1;
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ event->enabled = 0;
event->registered = 1;
event_return =
kmem_cache_zalloc(event_cache, GFP_KERNEL);
event_return->chan = chan;
event_return->filter = filter;
event_return->id = chan->free_event_id++;
- event_return->enabled = 1;
+ event_return->enabled = 0;
event_return->registered = 1;
event_return->instrumentation = itype;
/*
break;
}
case LTTNG_KERNEL_FUNCTION:
- event->enabled = 1;
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ event->enabled = 0;
event->registered = 1;
/*
* Populate lttng_event structure before event
break;
case LTTNG_KERNEL_NOOP:
case LTTNG_KERNEL_SYSCALL:
- event->enabled = 1;
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ event->enabled = 0;
event->registered = 0;
event->desc = event_desc;
if (!event->desc) {
}
hlist_add_head(&event->hlist, head);
list_add(&event->list, &chan->session->events);
- mutex_unlock(&sessions_mutex);
return event;
statedump_error:
ret = PTR_ERR(tracker_pids_list_file);
goto file_error;
}
+ if (!atomic_long_add_unless(&session->file->f_count,
+ 1, INT_MAX)) {
+ ret = -EOVERFLOW;
+ goto refcount_error;
+ }
ret = lttng_tracker_pids_list_fops.open(NULL, tracker_pids_list_file);
if (ret < 0)
goto open_error;
m = tracker_pids_list_file->private_data;
m->private = session;
fd_install(file_fd, tracker_pids_list_file);
- atomic_long_inc(&session->file->f_count);
return file_fd;
open_error:
+ atomic_long_dec(&session->file->f_count);
+refcount_error:
fput(tracker_pids_list_file);
file_error:
put_unused_fd(file_fd);
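For the saturation check above, recall the primitive's contract:

/*
 * atomic_long_add_unless(v, a, u) adds a to *v unless *v == u, and
 * returns nonzero only when the addition was performed. A saturated
 * refcount therefore shows up as a zero return value, not as a
 * return value equal to the bound.
 */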
*/
hash = jhash(event_name, name_len, 0);
head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
- hlist_for_each_entry(event, head, hlist) {
+ lttng_hlist_for_each_entry(event, head, hlist) {
if (event->desc == desc
&& event->chan == enabler->chan)
found = 1;
/*
* Serialize at most one packet worth of metadata into a metadata
* channel.
- * We have exclusive access to our metadata buffer (protected by the
- * sessions_mutex), so we can do racy operations such as looking for
- * remaining space left in packet and write, since mutual exclusion
- * protects us from concurrent writes.
+ * We grab the metadata cache mutex to get exclusive access to our metadata
+ * buffer and to the metadata cache. Exclusive access to the metadata buffer
+ * lets us perform racy operations such as checking the space remaining in
+ * the packet and writing to it, since mutual exclusion protects us from
+ * concurrent writes. Mutual exclusion on the metadata cache allows us to
+ * read the cache content without racing against reallocation of the cache
+ * by updates.
* Returns the number of bytes written in the channel, 0 if no data
* was written and a negative value on error.
*/
size_t len, reserve_len;
/*
- * Ensure we support mutiple get_next / put sequences followed
- * by put_next. The metadata stream lock internally protects
- * reading the metadata cache. It can indeed be read
- * concurrently by "get_next_subbuf" and "flush" operations on
- * the buffer invoked by different processes.
+ * Ensure we support multiple get_next / put sequences followed by
+ * put_next. The metadata cache lock protects reading the metadata
+ * cache. It can indeed be read concurrently by "get_next_subbuf" and
+ * "flush" operations on the buffer invoked by different processes.
+ * Moreover, since the metadata cache memory can be reallocated, we
+ * need to have exclusive access against updates even though we only
+ * read it.
*/
- mutex_lock(&stream->lock);
+ mutex_lock(&stream->metadata_cache->lock);
WARN_ON(stream->metadata_in < stream->metadata_out);
if (stream->metadata_in != stream->metadata_out)
goto end;
ret = reserve_len;
end:
- mutex_unlock(&stream->lock);
+ mutex_unlock(&stream->metadata_cache->lock);
return ret;
}
/*
* Write the metadata to the metadata cache.
* Must be called with sessions_mutex held.
+ * The metadata cache lock protects us from concurrent read access by the
+ * thread outputting metadata content to the ring buffer.
*/
int lttng_metadata_printf(struct lttng_session *session,
const char *fmt, ...)
return -ENOMEM;
len = strlen(str);
+ mutex_lock(&session->metadata_cache->lock);
if (session->metadata_cache->metadata_written + len >
session->metadata_cache->cache_alloc) {
char *tmp_cache_realloc;
tmp_cache_alloc_size = max_t(unsigned int,
session->metadata_cache->cache_alloc + len,
session->metadata_cache->cache_alloc << 1);
- tmp_cache_realloc = krealloc(session->metadata_cache->data,
- tmp_cache_alloc_size, GFP_KERNEL);
+ tmp_cache_realloc = lttng_vzalloc(tmp_cache_alloc_size);
if (!tmp_cache_realloc)
goto err;
+ if (session->metadata_cache->data) {
+ memcpy(tmp_cache_realloc,
+ session->metadata_cache->data,
+ session->metadata_cache->cache_alloc);
+ vfree(session->metadata_cache->data);
+ }
+
session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
session->metadata_cache->data = tmp_cache_realloc;
}
session->metadata_cache->metadata_written,
str, len);
session->metadata_cache->metadata_written += len;
+ mutex_unlock(&session->metadata_cache->lock);
kfree(str);
list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
return 0;
err:
+ mutex_unlock(&session->metadata_cache->lock);
kfree(str);
return -ENOMEM;
}
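A typical call site, for illustration (the format content shown here is assumed):

ret = lttng_metadata_printf(session,
	"trace {\n"
	"	major = %u;\n"
	"	minor = %u;\n"
	"};\n\n",
	CTF_SPEC_MAJOR, CTF_SPEC_MINOR);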
? "UTF8"
: "ASCII",
field->type.u.basic.integer.base,
-#ifdef __BIG_ENDIAN
+#if __BYTE_ORDER == __BIG_ENDIAN
field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
? "UTF8"
: "ASCII",
elem_type->u.basic.integer.base,
-#ifdef __BIG_ENDIAN
+#if __BYTE_ORDER == __BIG_ENDIAN
elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
? "UTF8"
: "ASCII"),
length_type->u.basic.integer.base,
-#ifdef __BIG_ENDIAN
+#if __BYTE_ORDER == __BIG_ENDIAN
length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
? "UTF8"
: "ASCII"),
elem_type->u.basic.integer.base,
-#ifdef __BIG_ENDIAN
+#if __BYTE_ORDER == __BIG_ENDIAN
elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
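The #if form requires __BYTE_ORDER to be defined in kernel context, which is why lttng-endian.h is now included. A sketch of what such a header provides, assuming the kernel's asm/byteorder.h defines exactly one of __BIG_ENDIAN / __LITTLE_ENDIAN:

#ifdef __KERNEL__
# include <asm/byteorder.h>
# ifdef __BIG_ENDIAN
#  define __BYTE_ORDER	__BIG_ENDIAN
# elif defined(__LITTLE_ENDIAN)
#  define __BYTE_ORDER	__LITTLE_ENDIAN
# else
#  error "unknown endianness"
# endif
#else /* !__KERNEL__ */
# include <endian.h>	/* userland provides __BYTE_ORDER directly */
#endif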
* taken at start of trace.
* Yes, this is only an approximation. Yes, we can (and will) do better
* in future versions.
+ * Return 0 if the offset is negative: this can happen when the system
+ * sets the REALTIME clock to 0 after boot.
*/
static
uint64_t measure_clock_offset(void)
{
- uint64_t offset, monotonic[2], realtime;
+ uint64_t monotonic_avg, monotonic[2], realtime;
+ int64_t offset;
struct timespec rts = { 0, 0 };
unsigned long flags;
monotonic[1] = trace_clock_read64();
local_irq_restore(flags);
- offset = (monotonic[0] + monotonic[1]) >> 1;
+ monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
realtime = (uint64_t) rts.tv_sec * NSEC_PER_SEC;
realtime += rts.tv_nsec;
- offset = realtime - offset;
+ offset = (int64_t) realtime - monotonic_avg;
+ if (offset < 0)
+ return 0;
return offset;
}
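A quick numeric check of the arithmetic above, with illustrative values:

/*
 * Example: monotonic reads of 1000 ns and 1002 ns around a realtime
 * read of 1500000000 ns give monotonic_avg = 1001 and
 * offset = 1500000000 - 1001 > 0. If REALTIME was reset to 0 after
 * boot, realtime < monotonic_avg, the offset goes negative, and 0 is
 * returned instead.
 */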
CTF_SPEC_MAJOR,
CTF_SPEC_MINOR,
uuid_s,
-#ifdef __BIG_ENDIAN
+#if __BYTE_ORDER == __BIG_ENDIAN
"be"
#else
"le"
ret = wrapper_lttng_fixup_sig(THIS_MODULE);
if (ret)
return ret;
-
+ ret = wrapper_get_pfnblock_flags_mask_init();
+ if (ret)
+ return ret;
+ ret = wrapper_get_pageblock_flags_mask_init();
+ if (ret)
+ return ret;
ret = lttng_context_init();
if (ret)
return ret;
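Such wrapper init functions typically resolve a non-exported kernel symbol once at module load; a minimal sketch under that assumption:

#include <linux/kallsyms.h>
#include <linux/mm_types.h>

static unsigned long (*get_pfnblock_flags_mask_sym)(struct page *page,
		unsigned long pfn, unsigned long end_bitidx,
		unsigned long mask);

int wrapper_get_pfnblock_flags_mask_init(void)
{
	/* Resolve the symbol via kallsyms; fail init if it is absent. */
	get_pfnblock_flags_mask_sym = (void *)
		kallsyms_lookup_name("get_pfnblock_flags_mask");
	if (!get_pfnblock_flags_mask_sym)
		return -EINVAL;
	return 0;
}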