X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lttng-events.c;h=9b3d766915fd4dcd78f564b1681497eccccf5b26;hb=9a0aa3b1fd7f693fc14b29cce9eb07cd275b0347;hp=2820a0e4d1f9045017dd772d08b7f9325be85cc9;hpb=7a88f8b50696dd71e80c08661159caf8e119bf51;p=lttng-modules.git

diff --git a/lttng-events.c b/lttng-events.c
index 2820a0e4..9b3d7669 100644
--- a/lttng-events.c
+++ b/lttng-events.c
@@ -102,6 +102,7 @@ struct lttng_session *lttng_session_create(void)
 		goto err_free_cache;
 	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
 	kref_init(&metadata_cache->refcount);
+	mutex_init(&metadata_cache->lock);
 	session->metadata_cache = metadata_cache;
 	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
 	memcpy(&metadata_cache->uuid, &session->uuid,
@@ -595,10 +596,12 @@ void _lttng_event_destroy(struct lttng_event *event)
 /*
  * Serialize at most one packet worth of metadata into a metadata
  * channel.
- * We have exclusive access to our metadata buffer (protected by the
- * sessions_mutex), so we can do racy operations such as looking for
- * remaining space left in packet and write, since mutual exclusion
- * protects us from concurrent writes.
+ * We grab the metadata cache mutex to get exclusive access to our metadata
+ * buffer and to the metadata cache. Exclusive access to the metadata buffer
+ * allows us to do racy operations such as looking for remaining space left in
+ * packet and write, since mutual exclusion protects us from concurrent writes.
+ * Mutual exclusion on the metadata cache allow us to read the cache content
+ * without racing against reallocation of the cache by updates.
  * Returns the number of bytes written in the channel, 0 if no data
  * was written and a negative value on error.
  */
@@ -610,13 +613,15 @@ int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
 	size_t len, reserve_len;

 	/*
-	 * Ensure we support mutiple get_next / put sequences followed
-	 * by put_next. The metadata stream lock internally protects
-	 * reading the metadata cache. It can indeed be read
-	 * concurrently by "get_next_subbuf" and "flush" operations on
-	 * the buffer invoked by different processes.
+	 * Ensure we support mutiple get_next / put sequences followed by
+	 * put_next. The metadata cache lock protects reading the metadata
+	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
+	 * "flush" operations on the buffer invoked by different processes.
+	 * Moreover, since the metadata cache memory can be reallocated, we
+	 * need to have exclusive access against updates even though we only
+	 * read it.
 	 */
-	mutex_lock(&stream->lock);
+	mutex_lock(&stream->metadata_cache->lock);
 	WARN_ON(stream->metadata_in < stream->metadata_out);
 	if (stream->metadata_in != stream->metadata_out)
 		goto end;
@@ -646,13 +651,15 @@ int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,

 	ret = reserve_len;
 end:
-	mutex_unlock(&stream->lock);
+	mutex_unlock(&stream->metadata_cache->lock);
 	return ret;
 }

 /*
  * Write the metadata to the metadata cache.
  * Must be called with sessions_mutex held.
+ * The metadata cache lock protects us from concurrent read access from
+ * thread outputting metadata content to ring buffer.
  */
 int lttng_metadata_printf(struct lttng_session *session,
 			  const char *fmt, ...)
@@ -671,6 +678,7 @@ int lttng_metadata_printf(struct lttng_session *session,
 		return -ENOMEM;

 	len = strlen(str);
+	mutex_lock(&session->metadata_cache->lock);
 	if (session->metadata_cache->metadata_written + len >
 			session->metadata_cache->cache_alloc) {
 		char *tmp_cache_realloc;
@@ -690,6 +698,7 @@ int lttng_metadata_printf(struct lttng_session *session,
 		session->metadata_cache->metadata_written,
 		str, len);
 	session->metadata_cache->metadata_written += len;
+	mutex_unlock(&session->metadata_cache->lock);
 	kfree(str);

 	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
@@ -698,6 +707,7 @@ int lttng_metadata_printf(struct lttng_session *session,
 	return 0;

 err:
+	mutex_unlock(&session->metadata_cache->lock);
 	kfree(str);
 	return -ENOMEM;
 }
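The locking scheme the patch introduces can be summarized as follows: lttng_metadata_printf() may reallocate (and therefore free and move) the cache buffer while appending to it, and lttng_metadata_output_channel() copies from that buffer into the ring buffer, so both paths now take the same metadata_cache->lock and the reader can never dereference memory a concurrent append has just freed. The sketch below mirrors that pattern in plain userspace C with pthreads as an illustration only; the names (struct metadata_cache, cache_append, cache_read) and sizes are assumptions for the example and are not the lttng-modules API.

/*
 * Minimal userspace sketch, assuming illustrative names, of the locking
 * pattern added by this patch: one mutex serializes cache growth
 * (realloc) and cache reads, so a reader never uses a pointer the
 * writer just freed by reallocating.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct metadata_cache {
	char *data;
	size_t alloc;		/* allocated size of data */
	size_t written;		/* bytes appended so far */
	pthread_mutex_t lock;	/* counterpart of metadata_cache->lock */
};

/* Writer side: appends to the cache, growing it if needed. */
static int cache_append(struct metadata_cache *c, const char *buf, size_t len)
{
	int ret = 0;

	pthread_mutex_lock(&c->lock);
	if (c->written + len > c->alloc) {
		size_t new_alloc = c->alloc ? c->alloc : 4096;
		char *tmp;

		while (c->written + len > new_alloc)
			new_alloc *= 2;
		tmp = realloc(c->data, new_alloc);	/* may move the buffer */
		if (!tmp) {
			ret = -1;
			goto unlock;
		}
		c->data = tmp;
		c->alloc = new_alloc;
	}
	memcpy(c->data + c->written, buf, len);
	c->written += len;
unlock:
	pthread_mutex_unlock(&c->lock);
	return ret;
}

/*
 * Reader side: copies cache content out, e.g. toward a ring buffer.
 * Holding the same lock guarantees c->data is not reallocated (and
 * freed) underneath the copy.
 */
static size_t cache_read(struct metadata_cache *c, size_t offset,
			 char *dst, size_t max_len)
{
	size_t len;

	pthread_mutex_lock(&c->lock);
	len = c->written > offset ? c->written - offset : 0;
	if (len > max_len)
		len = max_len;
	memcpy(dst, c->data + offset, len);
	pthread_mutex_unlock(&c->lock);
	return len;
}

Before this change the output path only took the per-stream lock, which does not exclude the writer's reallocation of the cache; taking the cache-wide lock on both the append and output paths closes that use-after-free window.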