* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/*
+ * This page_alloc.h wrapper needs to be included before gfpflags.h because it
+ * overrides a function with a define.
+ */
+#include "wrapper/page_alloc.h"
+
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/anon_inodes.h>
#include "wrapper/file.h"
#include <linux/jhash.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
#include "wrapper/uuid.h"
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
GFP_KERNEL);
if (!metadata_cache)
goto err_free_session;
- metadata_cache->data = kzalloc(METADATA_CACHE_DEFAULT_SIZE,
- GFP_KERNEL);
+ metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
if (!metadata_cache->data)
goto err_free_cache;
metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
kref_init(&metadata_cache->refcount);
+ mutex_init(&metadata_cache->lock);
session->metadata_cache = metadata_cache;
INIT_LIST_HEAD(&metadata_cache->metadata_stream);
memcpy(&metadata_cache->uuid, &session->uuid,
{
struct lttng_metadata_cache *cache =
container_of(kref, struct lttng_metadata_cache, refcount);
- kfree(cache->data);
+ vfree(cache->data);
kfree(cache);
}
name_len = strlen(event_name);
hash = jhash(event_name, name_len, 0);
head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
- hlist_for_each_entry(event, head, hlist) {
+ lttng_hlist_for_each_entry(event, head, hlist) {
WARN_ON_ONCE(!event->desc);
if (!strncmp(event->desc->name, event_name,
LTTNG_KERNEL_SYM_NAME_LEN - 1)
}
hlist_add_head(&event->hlist, head);
list_add(&event->list, &chan->session->events);
- mutex_unlock(&sessions_mutex);
return event;
statedump_error:
*/
hash = jhash(event_name, name_len, 0);
head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
- hlist_for_each_entry(event, head, hlist) {
+ lttng_hlist_for_each_entry(event, head, hlist) {
if (event->desc == desc
&& event->chan == enabler->chan)
found = 1;
sizeof(enabler->event_param));
enabler->chan = chan;
/* ctx left NULL */
- enabler->enabled = 1;
+ enabler->enabled = 0;
enabler->evtype = LTTNG_TYPE_ENABLER;
mutex_lock(&sessions_mutex);
list_add(&enabler->node, &enabler->chan->session->enablers_head);
/*
* Serialize at most one packet worth of metadata into a metadata
* channel.
- * We have exclusive access to our metadata buffer (protected by the
- * sessions_mutex), so we can do racy operations such as looking for
- * remaining space left in packet and write, since mutual exclusion
- * protects us from concurrent writes.
+ * We grab the metadata cache mutex to get exclusive access to our metadata
+ * buffer and to the metadata cache. Exclusive access to the metadata buffer
+ * allows us to do racy operations such as looking for remaining space left in
+ * packet and write, since mutual exclusion protects us from concurrent writes.
+ * Mutual exclusion on the metadata cache allows us to read the cache content
+ * without racing against reallocation of the cache by updates.
* Returns the number of bytes written in the channel, 0 if no data
* was written and a negative value on error.
*/
size_t len, reserve_len;
/*
- * Ensure we support mutiple get_next / put sequences followed
- * by put_next. The metadata stream lock internally protects
- * reading the metadata cache. It can indeed be read
- * concurrently by "get_next_subbuf" and "flush" operations on
- * the buffer invoked by different processes.
+	 * Ensure we support multiple get_next / put sequences followed by
+ * put_next. The metadata cache lock protects reading the metadata
+ * cache. It can indeed be read concurrently by "get_next_subbuf" and
+ * "flush" operations on the buffer invoked by different processes.
+ * Moreover, since the metadata cache memory can be reallocated, we
+ * need to have exclusive access against updates even though we only
+ * read it.
*/
- mutex_lock(&stream->lock);
+ mutex_lock(&stream->metadata_cache->lock);
WARN_ON(stream->metadata_in < stream->metadata_out);
if (stream->metadata_in != stream->metadata_out)
goto end;
ret = reserve_len;
end:
- mutex_unlock(&stream->lock);
+ mutex_unlock(&stream->metadata_cache->lock);
return ret;
}
/*
* Write the metadata to the metadata cache.
* Must be called with sessions_mutex held.
+ * The metadata cache lock protects us from concurrent read access from
+ * threads outputting metadata content to the ring buffer.
*/
int lttng_metadata_printf(struct lttng_session *session,
const char *fmt, ...)
return -ENOMEM;
len = strlen(str);
+ mutex_lock(&session->metadata_cache->lock);
if (session->metadata_cache->metadata_written + len >
session->metadata_cache->cache_alloc) {
char *tmp_cache_realloc;
tmp_cache_alloc_size = max_t(unsigned int,
session->metadata_cache->cache_alloc + len,
session->metadata_cache->cache_alloc << 1);
- tmp_cache_realloc = krealloc(session->metadata_cache->data,
- tmp_cache_alloc_size, GFP_KERNEL);
+ tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
if (!tmp_cache_realloc)
goto err;
+ if (session->metadata_cache->data) {
+ memcpy(tmp_cache_realloc,
+ session->metadata_cache->data,
+ session->metadata_cache->cache_alloc);
+ vfree(session->metadata_cache->data);
+ }
+
session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
session->metadata_cache->data = tmp_cache_realloc;
}
session->metadata_cache->metadata_written,
str, len);
session->metadata_cache->metadata_written += len;
+ mutex_unlock(&session->metadata_cache->lock);
kfree(str);
list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
return 0;
err:
+ mutex_unlock(&session->metadata_cache->lock);
kfree(str);
return -ENOMEM;
}
" uint64_clock_monotonic_t timestamp_end;\n"
" uint64_t content_size;\n"
" uint64_t packet_size;\n"
+ " uint64_t packet_seq_num;\n"
" unsigned long events_discarded;\n"
" uint32_t cpu_id;\n"
"};\n\n"
ret = wrapper_lttng_fixup_sig(THIS_MODULE);
if (ret)
return ret;
-
+ ret = wrapper_get_pfnblock_flags_mask_init();
+ if (ret)
+ return ret;
ret = lttng_context_init();
if (ret)
return ret;