#include <helper.h>
#include <lttng/ust-ctl.h>
#include <ust-comm.h>
+#include <lttng/ust-dynamic-type.h>
+#include <lttng/ust-context-provider.h>
#include "error.h"
#include "compat.h"
#include "lttng-ust-uuid.h"
#include "tracepoint-internal.h"
#include "lttng-tracer.h"
#include "lttng-tracer-core.h"
+#include "lttng-ust-statedump.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
#include "jhash.h"
/*
- * The sessions mutex is the centralized mutex across UST tracing
- * control and probe registration. All operations within this file are
- * called by the communication thread, under ust_lock protection.
+ * All operations within this file are called by the communication
+ * thread, under ust_lock protection.
*/
-static pthread_mutex_t sessions_mutex = PTHREAD_MUTEX_INITIALIZER;
-void ust_lock(void)
-{
- pthread_mutex_lock(&sessions_mutex);
-}
+static CDS_LIST_HEAD(sessions);
-void ust_unlock(void)
+struct cds_list_head *_lttng_get_sessions(void)
{
- pthread_mutex_unlock(&sessions_mutex);
+ return &sessions;
}
-static CDS_LIST_HEAD(sessions);
-
static void _lttng_event_destroy(struct lttng_event *event);
+static void _lttng_enum_destroy(struct lttng_enum *_enum);
static
void lttng_session_lazy_sync_enablers(struct lttng_session *session);
enum lttng_ust_loglevel_type req_type,
int req_loglevel)
{
- if (req_type == LTTNG_UST_LOGLEVEL_ALL)
- return 1;
if (!has_loglevel)
loglevel = TRACE_DEFAULT;
switch (req_type) {
case LTTNG_UST_LOGLEVEL_RANGE:
- if (loglevel <= req_loglevel || req_loglevel == -1)
+ if (loglevel <= req_loglevel
+ || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
return 1;
else
return 0;
case LTTNG_UST_LOGLEVEL_SINGLE:
- if (loglevel == req_loglevel || req_loglevel == -1)
+ if (loglevel == req_loglevel
+ || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
return 1;
else
return 0;
case LTTNG_UST_LOGLEVEL_ALL:
default:
- return 1;
+ if (loglevel <= TRACE_DEBUG)
+ return 1;
+ else
+ return 0;
}
}
session = zmalloc(sizeof(struct lttng_session));
if (!session)
return NULL;
+ if (lttng_session_context_init(&session->ctx)) {
+ free(session);
+ return NULL;
+ }
CDS_INIT_LIST_HEAD(&session->chan_head);
CDS_INIT_LIST_HEAD(&session->events_head);
+ CDS_INIT_LIST_HEAD(&session->enums_head);
CDS_INIT_LIST_HEAD(&session->enablers_head);
for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
+ for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
+ CDS_INIT_HLIST_HEAD(&session->enums_ht.table[i]);
cds_list_add(&session->node, &sessions);
return session;
}
{
struct lttng_channel *chan, *tmpchan;
struct lttng_event *event, *tmpevent;
+ struct lttng_enum *_enum, *tmp_enum;
struct lttng_enabler *enabler, *tmpenabler;
CMM_ACCESS_ONCE(session->active) = 0;
cds_list_for_each_entry_safe(event, tmpevent,
&session->events_head, node)
_lttng_event_destroy(event);
+ cds_list_for_each_entry_safe(_enum, tmp_enum,
+ &session->enums_head, node)
+ _lttng_enum_destroy(_enum);
cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
_lttng_channel_unmap(chan);
cds_list_del(&session->node);
+ lttng_destroy_context(session->ctx);
free(session);
}
+/*
+ * Register the enumeration described by @desc with the session daemon
+ * and cache it in @session's enum hash table and enum list.
+ * Return 0 on success, -EEXIST if an enum with the same name is already
+ * part of this session, -ENOMEM on allocation failure, or a negative
+ * error code from the notify-socket lookup or sessiond registration.
+ */
+static
+int lttng_enum_create(const struct lttng_enum_desc *desc,
+ struct lttng_session *session)
+{
+ const char *enum_name = desc->name;
+ struct lttng_enum *_enum;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ int ret = 0;
+ size_t name_len = strlen(enum_name);
+ uint32_t hash;
+ int notify_socket;
+
+ /*
+ * Hash uses the full name length, while the duplicate check below
+ * compares at most LTTNG_UST_SYM_NAME_LEN - 1 bytes; names are
+ * presumably bounded by LTTNG_UST_SYM_NAME_LEN — TODO confirm.
+ */
+ hash = jhash(enum_name, name_len, 0);
+ head = &session->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
+ cds_hlist_for_each_entry(_enum, node, head, hlist) {
+ assert(_enum->desc);
+ if (!strncmp(_enum->desc->name, desc->name,
+ LTTNG_UST_SYM_NAME_LEN - 1)) {
+ ret = -EEXIST;
+ goto exist;
+ }
+ }
+
+ notify_socket = lttng_get_notify_socket(session->owner);
+ if (notify_socket < 0) {
+ ret = notify_socket;
+ goto socket_error;
+ }
+
+ _enum = zmalloc(sizeof(*_enum));
+ if (!_enum) {
+ ret = -ENOMEM;
+ goto cache_error;
+ }
+ _enum->session = session;
+ _enum->desc = desc;
+
+ /* The session daemon assigns the enum id, returned in _enum->id. */
+ ret = ustcomm_register_enum(notify_socket,
+ session->objd,
+ enum_name,
+ desc->nr_entries,
+ desc->entries,
+ &_enum->id);
+ if (ret < 0) {
+ DBG("Error (%d) registering enumeration to sessiond", ret);
+ goto sessiond_register_error;
+ }
+ /* Publish in both the session list and the lookup hash table. */
+ cds_list_add(&_enum->node, &session->enums_head);
+ cds_hlist_add_head(&_enum->hlist, head);
+ return 0;
+
+sessiond_register_error:
+ free(_enum);
+cache_error:
+socket_error:
+exist:
+ return ret;
+}
+
+/*
+ * If @type carries an enumeration (directly, or via the tag of a
+ * dynamic type), make sure that enumeration is registered with
+ * @session. An already-registered enum (-EEXIST) is not an error.
+ * Return 0 on success, negative error code otherwise.
+ */
+static
+int lttng_create_enum_check(const struct lttng_type *type,
+ struct lttng_session *session)
+{
+ switch (type->atype) {
+ case atype_enum:
+ {
+ const struct lttng_enum_desc *enum_desc;
+ int ret;
+
+ enum_desc = type->u.basic.enumeration.desc;
+ ret = lttng_enum_create(enum_desc, session);
+ if (ret && ret != -EEXIST) {
+ DBG("Unable to create enum error: (%d)", ret);
+ return ret;
+ }
+ break;
+ }
+ case atype_dynamic:
+ {
+ const struct lttng_event_field *tag_field_generic;
+ const struct lttng_enum_desc *enum_desc;
+ int ret;
+
+ /* Dynamic types are tagged by a generic enum field. */
+ tag_field_generic = lttng_ust_dynamic_type_tag_field();
+ enum_desc = tag_field_generic->type.u.basic.enumeration.desc;
+ ret = lttng_enum_create(enum_desc, session);
+ if (ret && ret != -EEXIST) {
+ DBG("Unable to create enum error: (%d)", ret);
+ return ret;
+ }
+ break;
+ }
+ default:
+ /* TODO: nested types when they become supported. */
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Register with @session every enumeration referenced by the
+ * @nr_fields event fields in @event_fields.
+ * Return 0 on success, negative error code on first failure.
+ */
+static
+int lttng_create_all_event_enums(size_t nr_fields,
+ const struct lttng_event_field *event_fields,
+ struct lttng_session *session)
+{
+ size_t i;
+ int ret;
+
+ /* For each field, ensure enum is part of the session. */
+ for (i = 0; i < nr_fields; i++) {
+ const struct lttng_type *type = &event_fields[i].type;
+
+ ret = lttng_create_enum_check(type, session);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Register with @session every enumeration referenced by the
+ * @nr_fields context fields in @ctx_fields. Context-field counterpart
+ * of lttng_create_all_event_enums().
+ * Return 0 on success, negative error code on first failure.
+ */
+static
+int lttng_create_all_ctx_enums(size_t nr_fields,
+ const struct lttng_ctx_field *ctx_fields,
+ struct lttng_session *session)
+{
+ size_t i;
+ int ret;
+
+ /* For each field, ensure enum is part of the session. */
+ for (i = 0; i < nr_fields; i++) {
+ const struct lttng_type *type = &ctx_fields[i].event_field.type;
+
+ ret = lttng_create_enum_check(type, session);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Ensure that a state-dump will be performed for this session at the end
+ * of the current handle_message().
+ */
+int lttng_session_statedump(struct lttng_session *session)
+{
+ /*
+ * Only mark the dump as pending here; the actual dump is
+ * performed later (the pending flag is consumed by
+ * lttng_handle_pending_statedump()).
+ */
+ session->statedump_pending = 1;
+ lttng_ust_sockinfo_session_enabled(session->owner);
+ return 0;
+}
+
int lttng_session_enable(struct lttng_session *session)
{
int ret = 0;
/* Set transient enabler state to "enabled" */
session->tstate = 1;
- /* We need to sync enablers with session before activation. */
- lttng_session_sync_enablers(session);
/*
* Snapshot the number of events per channel to know the type of header
if (ctx) {
nr_fields = ctx->nr_fields;
fields = ctx->fields;
+ ret = lttng_create_all_ctx_enums(nr_fields, fields,
+ session);
+ if (ret < 0) {
+ DBG("Error (%d) adding enum to session", ret);
+ return ret;
+ }
}
ret = ustcomm_register_channel(notify_socket,
+ session,
session->objd,
chan->objd,
nr_fields,
}
}
+ /* We need to sync enablers with session before activation. */
+ lttng_session_sync_enablers(session);
+
/* Set atomically the state to "active" */
CMM_ACCESS_ONCE(session->active) = 1;
CMM_ACCESS_ONCE(session->been_active) = 1;
+
+ ret = lttng_session_statedump(session);
+ if (ret)
+ return ret;
end:
return ret;
}
goto socket_error;
}
+ ret = lttng_create_all_event_enums(desc->nr_fields, desc->fields,
+ session);
+ if (ret < 0) {
+ DBG("Error (%d) adding enum to session", ret);
+ goto create_enum_error;
+ }
+
/*
* Check if loglevel match. Refuse to connect event if not.
*/
/* Fetch event ID from sessiond */
ret = ustcomm_register_event(notify_socket,
+ session,
session->objd,
chan->objd,
event_name,
sessiond_register_error:
free(event);
cache_error:
+create_enum_error:
socket_error:
exist:
return ret;
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
struct lttng_enabler *enabler)
{
+ struct lttng_ust_excluder_node *excluder;
+
+ /* If event matches with an excluder, return 'does not match' */
+ cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
+ int count;
+
+ for (count = 0; count < excluder->excluder.count; count++) {
+ int found, len;
+ char *excluder_name;
+
+ excluder_name = (char *) (excluder->excluder.names)
+ + count * LTTNG_UST_SYM_NAME_LEN;
+ len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
+ if (len > 0 && excluder_name[len - 1] == '*') {
+ found = !strncmp(desc->name, excluder_name,
+ len - 1);
+ } else {
+ found = !strncmp(desc->name, excluder_name,
+ LTTNG_UST_SYM_NAME_LEN - 1);
+ }
+ if (found) {
+ return 0;
+ }
+ }
+ }
switch (enabler->type) {
case LTTNG_ENABLER_WILDCARD:
return lttng_desc_match_wildcard_enabler(desc, enabler);
return 0;
}
+/*
+ * For each session of the owner thread, execute pending statedump.
+ * Only dump state for the sessions owned by the caller thread, because
+ * we don't keep ust_lock across the entire iteration.
+ */
+void lttng_handle_pending_statedump(void *owner)
+{
+ struct lttng_session *session;
+
+ /* Execute state dump */
+ do_lttng_ust_statedump(owner);
+
+ /* Clear pending state dump */
+ if (ust_lock()) {
+ /*
+ * NOTE(review): a nonzero ust_lock() return presumably
+ * means tracing should stop (e.g. process exiting);
+ * ust_unlock() must still run — TODO confirm against the
+ * ust_lock() contract.
+ */
+ goto end;
+ }
+ cds_list_for_each_entry(session, &sessions, node) {
+ if (session->owner != owner)
+ continue;
+ if (!session->statedump_pending)
+ continue;
+ session->statedump_pending = 0;
+ }
+end:
+ ust_unlock();
+ return;
+}
+
/*
* Only used internally at session destruction.
*/
free(event);
}
+/*
+ * Unlink @_enum from the session's enum list and free it.
+ * NOTE(review): the enum is not removed from the session's enums_ht
+ * hash chain here — presumably only called at session destruction,
+ * where the hash table dies with the session — TODO confirm.
+ */
+static
+void _lttng_enum_destroy(struct lttng_enum *_enum)
+{
+ cds_list_del(&_enum->node);
+ free(_enum);
+}
+
void lttng_ust_events_exit(void)
{
struct lttng_session *session, *tmpsession;
return NULL;
enabler->type = type;
CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
+ CDS_INIT_LIST_HEAD(&enabler->excluder_head);
memcpy(&enabler->event_param, event_param,
sizeof(enabler->event_param));
enabler->chan = chan;
/* ctx left NULL */
- enabler->enabled = 1;
+ enabler->enabled = 0;
cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
lttng_session_lazy_sync_enablers(enabler->chan->session);
return enabler;
return 0;
}
+/*
+ * Attach an event-name exclusion to @enabler. Takes ownership of
+ * @excluder (freed in lttng_enabler_destroy()), then lazily re-syncs
+ * the session's enablers so the exclusion takes effect.
+ * Always returns 0.
+ */
+int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
+ struct lttng_ust_excluder_node *excluder)
+{
+ excluder->enabler = enabler;
+ cds_list_add_tail(&excluder->node, &enabler->excluder_head);
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ return 0;
+}
+
int lttng_attach_context(struct lttng_ust_context *context_param,
+ union ust_args *uargs,
struct lttng_ctx **ctx, struct lttng_session *session)
{
/*
switch (context_param->ctx) {
case LTTNG_UST_CONTEXT_PTHREAD_ID:
return lttng_add_pthread_id_to_ctx(ctx);
+ case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
+ {
+ struct lttng_ust_perf_counter_ctx *perf_ctx_param;
+
+ perf_ctx_param = &context_param->u.perf_counter;
+ return lttng_add_perf_counter_to_ctx(
+ perf_ctx_param->type,
+ perf_ctx_param->config,
+ perf_ctx_param->name,
+ ctx);
+ }
case LTTNG_UST_CONTEXT_VTID:
return lttng_add_vtid_to_ctx(ctx);
case LTTNG_UST_CONTEXT_VPID:
return lttng_add_procname_to_ctx(ctx);
case LTTNG_UST_CONTEXT_IP:
return lttng_add_ip_to_ctx(ctx);
+ case LTTNG_UST_CONTEXT_CPU_ID:
+ return lttng_add_cpu_id_to_ctx(ctx);
+ case LTTNG_UST_CONTEXT_APP_CONTEXT:
+ return lttng_ust_add_app_context_to_ctx_rcu(uargs->app_context.ctxname,
+ ctx);
default:
return -EINVAL;
}
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
+ struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
/* Destroy filter bytecode */
cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
free(filter_node);
}
+ /* Destroy excluders */
+ cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
+ &enabler->excluder_head, node) {
+ free(excluder_node);
+ }
+
/* Destroy contexts */
lttng_destroy_context(enabler->ctx);
return;
lttng_session_sync_enablers(session);
}
+
+/*
+ * Update all sessions with the given app context.
+ * Called with ust lock held.
+ * This is invoked when an application context gets loaded/unloaded. It
+ * ensures the context callbacks are in sync with the application
+ * context (either app context callbacks, or dummy callbacks).
+ */
+void lttng_ust_context_set_session_provider(const char *name,
+ size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
+ void (*record)(struct lttng_ctx_field *field,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_channel *chan),
+ void (*get_value)(struct lttng_ctx_field *field,
+ struct lttng_ctx_value *value))
+{
+ struct lttng_session *session;
+
+ /*
+ * Walk every session, channel and event context, swapping in the
+ * new provider callbacks for @name. A failure would leave tracing
+ * callbacks inconsistent with the application context, so abort()
+ * rather than continue with stale callbacks.
+ */
+ cds_list_for_each_entry(session, &sessions, node) {
+ struct lttng_channel *chan;
+ struct lttng_event *event;
+ int ret;
+
+ ret = lttng_ust_context_set_provider_rcu(&session->ctx,
+ name, get_size, record, get_value);
+ if (ret)
+ abort();
+ cds_list_for_each_entry(chan, &session->chan_head, node) {
+ ret = lttng_ust_context_set_provider_rcu(&chan->ctx,
+ name, get_size, record, get_value);
+ if (ret)
+ abort();
+ }
+ cds_list_for_each_entry(event, &session->events_head, node) {
+ ret = lttng_ust_context_set_provider_rcu(&event->ctx,
+ name, get_size, record, get_value);
+ if (ret)
+ abort();
+ }
+ }
+}