X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lttng-events.c;h=1f2863945a74f977fcc38b2635020064b80f84aa;hb=241ae9a8fb62c3ce467d244e280062c24e73eb7a;hp=ea22a35c6a317c5a81500f3752a250fdd197ee73;hpb=f127e61ee231d002fb9a7803643a157e06f6d2e2;p=lttng-modules.git

diff --git a/lttng-events.c b/lttng-events.c
index ea22a35c..1f286394 100644
--- a/lttng-events.c
+++ b/lttng-events.c
@@ -20,6 +20,12 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+/*
+ * This page_alloc.h wrapper needs to be included before gfpflags.h because it
+ * overrides a function with a define.
+ */
+#include "wrapper/page_alloc.h"
+
 #include
 #include
 #include
@@ -30,18 +36,21 @@
 #include
 #include
 #include
-#include "wrapper/file.h"
 #include
-
-#include "wrapper/uuid.h"
-#include "wrapper/vmalloc.h"	/* for wrapper_vmalloc_sync_all() */
-#include "wrapper/random.h"
-#include "wrapper/tracepoint.h"
-#include "wrapper/list.h"
-#include "lttng-kernel-version.h"
-#include "lttng-events.h"
-#include "lttng-tracer.h"
-#include "lttng-abi-old.h"
+#include
+#include
+
+#include
+#include	/* for wrapper_vmalloc_sync_all() */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
 
 #define METADATA_CACHE_DEFAULT_SIZE 4096
 
@@ -125,12 +134,12 @@ struct lttng_session *lttng_session_create(void)
 			GFP_KERNEL);
 	if (!metadata_cache)
 		goto err_free_session;
-	metadata_cache->data = kzalloc(METADATA_CACHE_DEFAULT_SIZE,
-			GFP_KERNEL);
+	metadata_cache->data = lttng_vzalloc(METADATA_CACHE_DEFAULT_SIZE);
 	if (!metadata_cache->data)
 		goto err_free_cache;
 	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
 	kref_init(&metadata_cache->refcount);
+	mutex_init(&metadata_cache->lock);
 	session->metadata_cache = metadata_cache;
 	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
 	memcpy(&metadata_cache->uuid, &session->uuid,
@@ -155,7 +164,7 @@ void metadata_cache_destroy(struct kref *kref)
 {
 	struct lttng_metadata_cache *cache =
 		container_of(kref, struct lttng_metadata_cache, refcount);
-	kfree(cache->data);
+	vfree(cache->data);
 	kfree(cache);
 }
 
@@ -261,6 +270,48 @@ end:
 	return ret;
 }
 
+int lttng_session_metadata_regenerate(struct lttng_session *session)
+{
+	int ret = 0;
+	struct lttng_channel *chan;
+	struct lttng_event *event;
+	struct lttng_metadata_cache *cache = session->metadata_cache;
+	struct lttng_metadata_stream *stream;
+
+	mutex_lock(&sessions_mutex);
+	if (!session->active) {
+		ret = -EBUSY;
+		goto end;
+	}
+
+	mutex_lock(&cache->lock);
+	memset(cache->data, 0, cache->cache_alloc);
+	cache->metadata_written = 0;
+	cache->version++;
+	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
+		stream->metadata_out = 0;
+		stream->metadata_in = 0;
+	}
+	mutex_unlock(&cache->lock);
+
+	session->metadata_dumped = 0;
+	list_for_each_entry(chan, &session->chan, list) {
+		chan->metadata_dumped = 0;
+	}
+
+	list_for_each_entry(event, &session->events, list) {
+		event->metadata_dumped = 0;
+	}
+
+	ret = _lttng_session_metadata_statedump(session);
+
+end:
+	mutex_unlock(&sessions_mutex);
+	return ret;
+}
+
+
 int lttng_channel_enable(struct lttng_channel *channel)
 {
 	int ret = 0;
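The new lttng_session_metadata_regenerate() zeroes the cache, resets every
metadata stream's in/out positions and bumps cache->version;
lttng_metadata_output_channel() (changed further down in this patch) notices
the version bump and re-emits the whole cache from offset 0. A minimal
userspace sketch of that version/offset handshake follows; the structs and
helpers are simplified stand-ins, not the kernel types.

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the kernel-side metadata cache and stream. */
struct cache { char data[64]; unsigned long written; unsigned long version; };
struct stream { unsigned long in, out, version; };

/* Producer side: append metadata text to the cache. */
static void cache_append(struct cache *c, const char *s)
{
	size_t len = strlen(s);

	memcpy(c->data + c->written, s, len);
	c->written += len;
}

/* Regeneration: wipe the cache, reset stream offsets, bump the version. */
static void cache_regenerate(struct cache *c, struct stream *st)
{
	memset(c->data, 0, sizeof(c->data));
	c->written = 0;
	c->version++;
	st->in = st->out = 0;
}

/* Consumer side: adopt a new version, then drain whatever is pending. */
static void stream_output(struct cache *c, struct stream *st)
{
	if (st->version != c->version)
		st->version = c->version;
	printf("version %lu: %lu pending bytes\n", c->version, c->written - st->in);
	st->in = st->out = c->written;
}

int main(void)
{
	struct cache c = { .version = 0 };
	struct stream st = { 0, 0, 0 };

	cache_append(&c, "metadata v0");
	stream_output(&c, &st);		/* drains the initial dump */
	cache_regenerate(&c, &st);
	cache_append(&c, "metadata v1");
	stream_output(&c, &st);		/* restarts from offset 0, new version */
	return 0;
}
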
@@ -320,8 +371,23 @@ int lttng_event_enable(struct lttng_event *event)
 		ret = -EEXIST;
 		goto end;
 	}
-	ACCESS_ONCE(event->enabled) = 1;
-	lttng_session_sync_enablers(event->chan->session);
+	switch (event->instrumentation) {
+	case LTTNG_KERNEL_TRACEPOINT:
+	case LTTNG_KERNEL_SYSCALL:
+		ret = -EINVAL;
+		break;
+	case LTTNG_KERNEL_KPROBE:
+	case LTTNG_KERNEL_FUNCTION:
+	case LTTNG_KERNEL_NOOP:
+		ACCESS_ONCE(event->enabled) = 1;
+		break;
+	case LTTNG_KERNEL_KRETPROBE:
+		ret = lttng_kretprobes_event_enable_state(event, 1);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		ret = -EINVAL;
+	}
 end:
 	mutex_unlock(&sessions_mutex);
 	return ret;
@@ -340,8 +406,23 @@ int lttng_event_disable(struct lttng_event *event)
 		ret = -EEXIST;
 		goto end;
 	}
-	ACCESS_ONCE(event->enabled) = 0;
-	lttng_session_sync_enablers(event->chan->session);
+	switch (event->instrumentation) {
+	case LTTNG_KERNEL_TRACEPOINT:
+	case LTTNG_KERNEL_SYSCALL:
+		ret = -EINVAL;
+		break;
+	case LTTNG_KERNEL_KPROBE:
+	case LTTNG_KERNEL_FUNCTION:
+	case LTTNG_KERNEL_NOOP:
+		ACCESS_ONCE(event->enabled) = 0;
+		break;
+	case LTTNG_KERNEL_KRETPROBE:
+		ret = lttng_kretprobes_event_enable_state(event, 0);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		ret = -EINVAL;
+	}
 end:
 	mutex_unlock(&sessions_mutex);
 	return ret;
@@ -492,7 +573,7 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
 	name_len = strlen(event_name);
 	hash = jhash(event_name, name_len, 0);
 	head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
-	hlist_for_each_entry(event, head, hlist) {
+	lttng_hlist_for_each_entry(event, head, hlist) {
 		WARN_ON_ONCE(!event->desc);
 		if (!strncmp(event->desc->name, event_name,
 					LTTNG_KERNEL_SYM_NAME_LEN - 1)
@@ -529,7 +610,11 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
 		smp_wmb();
 		break;
 	case LTTNG_KERNEL_KPROBE:
-		event->enabled = 1;
+		/*
+		 * Needs to be explicitly enabled after creation, since
+		 * we may want to apply filters.
+		 */
+		event->enabled = 0;
 		event->registered = 1;
 		/*
 		 * Populate lttng_event structure before event
@@ -553,7 +638,11 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
 		struct lttng_event *event_return;
 
 		/* kretprobe defines 2 events */
-		event->enabled = 1;
+		/*
+		 * Needs to be explicitly enabled after creation, since
+		 * we may want to apply filters.
+		 */
+		event->enabled = 0;
 		event->registered = 1;
 		event_return =
 			kmem_cache_zalloc(event_cache, GFP_KERNEL);
@@ -564,7 +653,7 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
 		event_return->chan = chan;
 		event_return->filter = filter;
 		event_return->id = chan->free_event_id++;
-		event_return->enabled = 1;
+		event_return->enabled = 0;
 		event_return->registered = 1;
 		event_return->instrumentation = itype;
 		/*
@@ -599,7 +688,11 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
 		break;
 	}
 	case LTTNG_KERNEL_FUNCTION:
-		event->enabled = 1;
+		/*
+		 * Needs to be explicitly enabled after creation, since
+		 * we may want to apply filters.
+		 */
+		event->enabled = 0;
 		event->registered = 1;
 		/*
 		 * Populate lttng_event structure before event
@@ -617,7 +710,11 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
 		break;
 	case LTTNG_KERNEL_NOOP:
 	case LTTNG_KERNEL_SYSCALL:
-		event->enabled = 1;
+		/*
+		 * Needs to be explicitly enabled after creation, since
+		 * we may want to apply filters.
+		 */
+		event->enabled = 0;
 		event->registered = 0;
 		event->desc = event_desc;
 		if (!event->desc) {
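With this change every non-tracepoint instrumentation type starts life
disabled so that filters and other configuration can be attached before the
first hit is recorded; enabling becomes the separate, explicit step handled by
the switch in lttng_event_enable() above. A standalone sketch of that
create-disabled-then-publish pattern, using C11 atomics where the kernel code
uses ACCESS_ONCE (all names below are illustrative, not the module's API):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative event: created disabled, configured, then published. */
struct example_event {
	atomic_int enabled;	/* read on the hot path, written on the control path */
	const char *filter;	/* stand-in for attached filter bytecode */
};

/* Hot path: bail out cheaply while the event is still being configured. */
static void event_hit(struct example_event *ev, const char *payload)
{
	if (!atomic_load_explicit(&ev->enabled, memory_order_acquire))
		return;
	printf("recorded: %s\n", payload);
}

int main(void)
{
	struct example_event ev = { .filter = NULL };	/* enabled starts at 0 */

	event_hit(&ev, "dropped while disabled");	/* nothing recorded yet */
	ev.filter = "len > 16";				/* configure before enabling */
	atomic_store_explicit(&ev.enabled, 1, memory_order_release);
	event_hit(&ev, "recorded after enable");
	return 0;
}
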
@@ -637,7 +734,6 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
 	}
 	hlist_add_head(&event->hlist, head);
 	list_add(&event->list, &chan->session->events);
-	mutex_unlock(&sessions_mutex);
 	return event;
 
 statedump_error:
@@ -989,17 +1085,22 @@ int lttng_session_list_tracker_pids(struct lttng_session *session)
 		ret = PTR_ERR(tracker_pids_list_file);
 		goto file_error;
 	}
+	if (atomic_long_add_unless(&session->file->f_count,
+			1, INT_MAX) == INT_MAX) {
+		goto refcount_error;
+	}
 	ret = lttng_tracker_pids_list_fops.open(NULL, tracker_pids_list_file);
 	if (ret < 0)
 		goto open_error;
 	m = tracker_pids_list_file->private_data;
 	m->private = session;
 	fd_install(file_fd, tracker_pids_list_file);
-	atomic_long_inc(&session->file->f_count);
 
 	return file_fd;
 
 open_error:
+	atomic_long_dec(&session->file->f_count);
+refcount_error:
 	fput(tracker_pids_list_file);
 file_error:
 	put_unused_fd(file_fd);
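The PID-tracker listing now takes its extra reference on session->file with
atomic_long_add_unless(), so the count is never pushed past INT_MAX, and the
new refcount_error/open_error labels unwind it on failure. The same "take a
reference unless the counter is saturated" guard in a standalone form
(hypothetical helper name, C11 atomics instead of the kernel's atomic_long_t):

#include <stdatomic.h>
#include <stdbool.h>
#include <limits.h>
#include <stdio.h>

/* Add 1 to *count unless it already equals limit; false means saturated. */
static bool ref_get_unless_saturated(atomic_long *count, long limit)
{
	long old = atomic_load(count);

	while (old != limit) {
		if (atomic_compare_exchange_weak(count, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* would overflow: refuse the reference */
}

int main(void)
{
	atomic_long refcount = 1;

	if (!ref_get_unless_saturated(&refcount, INT_MAX)) {
		fprintf(stderr, "refcount saturated, bailing out\n");
		return 1;
	}
	printf("refcount is now %ld\n", atomic_load(&refcount));
	atomic_fetch_sub(&refcount, 1);	/* error path: drop the reference again */
	return 0;
}
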
@@ -1131,7 +1232,7 @@ void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
 		 */
 		hash = jhash(event_name, name_len, 0);
 		head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
-		hlist_for_each_entry(event, head, hlist) {
+		lttng_hlist_for_each_entry(event, head, hlist) {
 			if (event->desc == desc
 					&& event->chan == enabler->chan)
 				found = 1;
@@ -1257,7 +1358,7 @@ struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
 		sizeof(enabler->event_param));
 	enabler->chan = chan;
 	/* ctx left NULL */
-	enabler->enabled = 1;
+	enabler->enabled = 0;
 	enabler->evtype = LTTNG_TYPE_ENABLER;
 	mutex_lock(&sessions_mutex);
 	list_add(&enabler->node, &enabler->chan->session->enablers_head);
@@ -1433,10 +1534,12 @@ void lttng_session_lazy_sync_enablers(struct lttng_session *session)
 /*
  * Serialize at most one packet worth of metadata into a metadata
  * channel.
- * We have exclusive access to our metadata buffer (protected by the
- * sessions_mutex), so we can do racy operations such as looking for
- * remaining space left in packet and write, since mutual exclusion
- * protects us from concurrent writes.
+ * We grab the metadata cache mutex to get exclusive access to our metadata
+ * buffer and to the metadata cache. Exclusive access to the metadata buffer
+ * allows us to do racy operations such as looking for remaining space left in
+ * packet and write, since mutual exclusion protects us from concurrent writes.
+ * Mutual exclusion on the metadata cache allows us to read the cache content
+ * without racing against reallocation of the cache by updates.
 * Returns the number of bytes written in the channel, 0 if no data
 * was written and a negative value on error.
 */
@@ -1448,17 +1551,23 @@ int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
 	size_t len, reserve_len;
 
 	/*
-	 * Ensure we support mutiple get_next / put sequences followed
-	 * by put_next. The metadata stream lock internally protects
-	 * reading the metadata cache. It can indeed be read
-	 * concurrently by "get_next_subbuf" and "flush" operations on
-	 * the buffer invoked by different processes.
+	 * Ensure we support multiple get_next / put sequences followed by
+	 * put_next. The metadata cache lock protects reading the metadata
+	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
+	 * "flush" operations on the buffer invoked by different processes.
+	 * Moreover, since the metadata cache memory can be reallocated, we
+	 * need to have exclusive access against updates even though we only
+	 * read it.
	 */
-	mutex_lock(&stream->lock);
+	mutex_lock(&stream->metadata_cache->lock);
 	WARN_ON(stream->metadata_in < stream->metadata_out);
 	if (stream->metadata_in != stream->metadata_out)
 		goto end;
+	/* Metadata regenerated, change the version. */
+	if (stream->metadata_cache->version != stream->version)
+		stream->version = stream->metadata_cache->version;
+
 	len = stream->metadata_cache->metadata_written -
 		stream->metadata_in;
 	if (!len)
@@ -1484,13 +1593,15 @@ int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
 	ret = reserve_len;
 
 end:
-	mutex_unlock(&stream->lock);
+	mutex_unlock(&stream->metadata_cache->lock);
 	return ret;
 }
 
 /*
  * Write the metadata to the metadata cache.
  * Must be called with sessions_mutex held.
+ * The metadata cache lock protects us from concurrent read access from
+ * the thread outputting metadata content to the ring buffer.
  */
 int lttng_metadata_printf(struct lttng_session *session,
 		const char *fmt, ...)
@@ -1509,6 +1620,7 @@ int lttng_metadata_printf(struct lttng_session *session,
 		return -ENOMEM;
 
 	len = strlen(str);
+	mutex_lock(&session->metadata_cache->lock);
 	if (session->metadata_cache->metadata_written + len >
 			session->metadata_cache->cache_alloc) {
 		char *tmp_cache_realloc;
@@ -1517,10 +1629,16 @@ int lttng_metadata_printf(struct lttng_session *session,
 		tmp_cache_alloc_size = max_t(unsigned int,
 				session->metadata_cache->cache_alloc + len,
 				session->metadata_cache->cache_alloc << 1);
-		tmp_cache_realloc = krealloc(session->metadata_cache->data,
-				tmp_cache_alloc_size, GFP_KERNEL);
+		tmp_cache_realloc = lttng_vzalloc(tmp_cache_alloc_size);
 		if (!tmp_cache_realloc)
 			goto err;
+		if (session->metadata_cache->data) {
+			memcpy(tmp_cache_realloc,
+				session->metadata_cache->data,
+				session->metadata_cache->cache_alloc);
+			vfree(session->metadata_cache->data);
+		}
+
 		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
 		session->metadata_cache->data = tmp_cache_realloc;
 	}
@@ -1528,6 +1646,7 @@ int lttng_metadata_printf(struct lttng_session *session,
 			session->metadata_cache->metadata_written,
 			str, len);
 	session->metadata_cache->metadata_written += len;
+	mutex_unlock(&session->metadata_cache->lock);
 	kfree(str);
 
 	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
@@ -1536,6 +1655,7 @@ int lttng_metadata_printf(struct lttng_session *session,
 	return 0;
 
 err:
+	mutex_unlock(&session->metadata_cache->lock);
 	kfree(str);
 	return -ENOMEM;
 }
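Because the metadata cache is now allocated with lttng_vzalloc(), it can no
longer be grown with krealloc(); lttng_metadata_printf() instead allocates the
larger buffer, copies the old contents and vfree()s the old buffer, all under
the new cache lock. A userspace sketch of that grow-and-copy step with the
same growth policy (malloc-family calls stand in for the vmalloc-based
allocators):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct growable_cache {
	char *data;
	size_t cache_alloc;	/* current capacity */
	size_t written;		/* bytes used */
};

/* Make room for len more bytes: new buffer, copy, free the old one. */
static int cache_reserve(struct growable_cache *c, size_t len)
{
	size_t new_alloc;
	char *tmp;

	if (c->written + len <= c->cache_alloc)
		return 0;
	/* Same policy as the patch: max(cache_alloc + len, cache_alloc * 2). */
	new_alloc = c->cache_alloc + len;
	if (c->cache_alloc * 2 > new_alloc)
		new_alloc = c->cache_alloc * 2;
	tmp = calloc(1, new_alloc);
	if (!tmp)
		return -1;
	if (c->data) {
		memcpy(tmp, c->data, c->cache_alloc);
		free(c->data);
	}
	c->cache_alloc = new_alloc;
	c->data = tmp;
	return 0;
}

int main(void)
{
	struct growable_cache c = { NULL, 0, 0 };
	const char *text = "event { name = foo; };\n";

	if (cache_reserve(&c, strlen(text)))
		return 1;
	memcpy(c.data + c.written, text, strlen(text));
	c.written += strlen(text);
	printf("capacity %zu, used %zu\n", c.cache_alloc, c.written);
	free(c.data);
	return 0;
}
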
@@ -1580,6 +1700,14 @@ int _lttng_field_statedump(struct lttng_session *session,
 		const struct lttng_basic_type *elem_type;
 
 		elem_type = &field->type.u.array.elem_type;
+		if (field->type.u.array.elem_alignment) {
+			ret = lttng_metadata_printf(session,
+			"	struct { } align(%u) _%s_padding;\n",
+					field->type.u.array.elem_alignment * CHAR_BIT,
+					field->name);
+			if (ret)
+				return ret;
+		}
 		ret = lttng_metadata_printf(session,
 			"		integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
 			elem_type->u.basic.integer.size,
@@ -1626,6 +1754,14 @@ int _lttng_field_statedump(struct lttng_session *session,
 		if (ret)
 			return ret;
 
+		if (field->type.u.sequence.elem_alignment) {
+			ret = lttng_metadata_printf(session,
+			"	struct { } align(%u) _%s_padding;\n",
+					field->type.u.sequence.elem_alignment * CHAR_BIT,
+					field->name);
+			if (ret)
+				return ret;
+		}
 		ret = lttng_metadata_printf(session,
 			"		integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
 			elem_type->u.basic.integer.size,
@@ -1830,6 +1966,7 @@ int _lttng_stream_packet_context_declare(struct lttng_session *session)
 		"	uint64_clock_monotonic_t timestamp_end;\n"
 		"	uint64_t content_size;\n"
 		"	uint64_t packet_size;\n"
+		"	uint64_t packet_seq_num;\n"
 		"	unsigned long events_discarded;\n"
 		"	uint32_t cpu_id;\n"
 		"};\n\n"
@@ -1886,11 +2023,15 @@ int _lttng_event_header_declare(struct lttng_session *session)
  * taken at start of trace.
  * Yes, this is only an approximation. Yes, we can (and will) do better
  * in future versions.
+ * This function may return a negative offset. It may happen if the
+ * system sets the REALTIME clock to 0 after boot.
  */
 static
-uint64_t measure_clock_offset(void)
+int64_t measure_clock_offset(void)
 {
-	uint64_t offset, monotonic[2], realtime;
+	uint64_t monotonic_avg, monotonic[2], realtime;
+	uint64_t tcf = trace_clock_freq();
+	int64_t offset;
 	struct timespec rts = { 0, 0 };
 	unsigned long flags;
@@ -1901,10 +2042,17 @@ uint64_t measure_clock_offset(void)
 	monotonic[1] = trace_clock_read64();
 	local_irq_restore(flags);
 
-	offset = (monotonic[0] + monotonic[1]) >> 1;
-	realtime = (uint64_t) rts.tv_sec * NSEC_PER_SEC;
-	realtime += rts.tv_nsec;
-	offset = realtime - offset;
+	monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
+	realtime = (uint64_t) rts.tv_sec * tcf;
+	if (tcf == NSEC_PER_SEC) {
+		realtime += rts.tv_nsec;
+	} else {
+		uint64_t n = rts.tv_nsec * tcf;
+
+		do_div(n, NSEC_PER_SEC);
+		realtime += n;
+	}
+	offset = (int64_t) realtime - monotonic_avg;
 	return offset;
 }
 
@@ -1951,6 +2099,7 @@ int _lttng_session_metadata_statedump(struct lttng_session *session)
 		"		uint32_t magic;\n"
 		"		uint8_t uuid[16];\n"
 		"		uint32_t stream_id;\n"
+		"		uint64_t stream_instance_id;\n"
 		"	};\n"
 		"};\n\n",
 		lttng_alignof(uint8_t) * CHAR_BIT,
@@ -1996,8 +2145,8 @@ int _lttng_session_metadata_statedump(struct lttng_session *session)
 
 	ret = lttng_metadata_printf(session,
 		"clock {\n"
-		"	name = %s;\n",
-		"monotonic"
+		"	name = \"%s\";\n",
+		trace_clock_name()
 		);
 	if (ret)
 		goto end;
@@ -2012,13 +2161,14 @@ int _lttng_session_metadata_statedump(struct lttng_session *session)
 	}
 
 	ret = lttng_metadata_printf(session,
-		"	description = \"Monotonic Clock\";\n"
+		"	description = \"%s\";\n"
 		"	freq = %llu; /* Frequency, in Hz */\n"
 		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
-		"	offset = %llu;\n"
+		"	offset = %lld;\n"
 		"};\n\n",
+		trace_clock_description(),
 		(unsigned long long) trace_clock_freq(),
-		(unsigned long long) measure_clock_offset()
+		(long long) measure_clock_offset()
 		);
 	if (ret)
 		goto end;
@@ -2026,20 +2176,23 @@ int _lttng_session_metadata_statedump(struct lttng_session *session)
 	ret = lttng_metadata_printf(session,
 		"typealias integer {\n"
 		"	size = 27; align = 1; signed = false;\n"
-		"	map = clock.monotonic.value;\n"
+		"	map = clock.%s.value;\n"
 		"} := uint27_clock_monotonic_t;\n"
 		"\n"
 		"typealias integer {\n"
 		"	size = 32; align = %u; signed = false;\n"
-		"	map = clock.monotonic.value;\n"
+		"	map = clock.%s.value;\n"
 		"} := uint32_clock_monotonic_t;\n"
 		"\n"
 		"typealias integer {\n"
 		"	size = 64; align = %u; signed = false;\n"
-		"	map = clock.monotonic.value;\n"
+		"	map = clock.%s.value;\n"
 		"} := uint64_clock_monotonic_t;\n\n",
+		trace_clock_name(),
 		lttng_alignof(uint32_t) * CHAR_BIT,
-		lttng_alignof(uint64_t) * CHAR_BIT
+		trace_clock_name(),
+		lttng_alignof(uint64_t) * CHAR_BIT,
+		trace_clock_name()
 		);
 	if (ret)
 		goto end;
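measure_clock_offset() now reports the REALTIME-to-trace-clock offset in
trace-clock ticks rather than nanoseconds: realtime is rebuilt as
tv_sec * freq plus tv_nsec scaled by freq / NSEC_PER_SEC, and the result is
signed because CLOCK_REALTIME may legitimately sit behind the trace clock. A
standalone version of that arithmetic with made-up sample values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL

/* Offset of the realtime clock from the trace clock, in trace-clock ticks. */
static int64_t clock_offset_ticks(uint64_t rt_sec, uint64_t rt_nsec,
		uint64_t monotonic_avg_ticks, uint64_t freq)
{
	uint64_t realtime = rt_sec * freq;

	if (freq == NSEC_PER_SEC)
		realtime += rt_nsec;	/* 1 GHz clock: nanoseconds are ticks */
	else
		realtime += rt_nsec * freq / NSEC_PER_SEC;	/* scale ns to ticks */
	return (int64_t) realtime - (int64_t) monotonic_avg_ticks;
}

int main(void)
{
	/* Hypothetical readings: realtime 1000000000.5 s, trace clock at 12345 ticks. */
	int64_t off_ghz = clock_offset_ticks(1000000000ULL, 500000000ULL,
			12345ULL, NSEC_PER_SEC);
	/* Same instant measured against a slower 32.768 kHz trace clock. */
	int64_t off_slow = clock_offset_ticks(1000000000ULL, 500000000ULL,
			12345ULL, 32768ULL);

	printf("offset, 1 GHz trace clock:      %" PRId64 " ticks\n", off_ghz);
	printf("offset, 32.768 kHz trace clock: %" PRId64 " ticks\n", off_slow);
	return 0;
}
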
@@ -2115,7 +2268,12 @@ static int __init lttng_events_init(void)
 	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
 	if (ret)
 		return ret;
-
+	ret = wrapper_get_pfnblock_flags_mask_init();
+	if (ret)
+		return ret;
+	ret = wrapper_get_pageblock_flags_mask_init();
+	if (ret)
+		return ret;
 	ret = lttng_context_init();
 	if (ret)
 		return ret;
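The two new wrapper init calls run before any tracing can start. The actual
code lives in wrapper/page_alloc.h and is not part of this diff; a plausible
reading is that, like other lttng-modules wrappers, they resolve non-exported
page-allocator symbols once at module load and refuse to load the module if
the lookup fails. A rough, illustrative sketch of that resolve-at-init idea,
assuming a kernel where kallsyms_lookup_name() is still usable from modules:

#include <linux/kallsyms.h>
#include <linux/module.h>

/* Signature of get_pfnblock_flags_mask() in kernels of that era (assumption). */
typedef unsigned long (*pfnblock_flags_mask_fn)(struct page *page,
		unsigned long pfn, unsigned long end_bitidx, unsigned long mask);

static pfnblock_flags_mask_fn resolved_pfnblock_flags_mask;

/* Resolve the non-exported symbol once; fail the module load if it is missing. */
static int __init page_alloc_wrapper_example_init(void)
{
	resolved_pfnblock_flags_mask = (pfnblock_flags_mask_fn)
		kallsyms_lookup_name("get_pfnblock_flags_mask");
	if (!resolved_pfnblock_flags_mask)
		return -ENOENT;
	return 0;
}
module_init(page_alloc_wrapper_example_init);

static void __exit page_alloc_wrapper_example_exit(void)
{
}
module_exit(page_alloc_wrapper_example_exit);

MODULE_LICENSE("GPL");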