#include "tracepoint-internal.h"
#include "lttng-tracer.h"
#include "lttng-tracer-core.h"
-#include "lttng-ust-baddr.h"
+#include "lttng-ust-statedump.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
#include "jhash.h"
/*
- * The sessions mutex is the centralized mutex across UST tracing
- * control and probe registration. All operations within this file are
- * called by the communication thread, under ust_lock protection.
+ * All operations within this file are called by the communication
+ * thread, under ust_lock protection.
*/
-static pthread_mutex_t sessions_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-void ust_lock(void)
-{
- pthread_mutex_lock(&sessions_mutex);
-}
-
-void ust_unlock(void)
-{
- pthread_mutex_unlock(&sessions_mutex);
-}
static CDS_LIST_HEAD(sessions);
}
static void _lttng_event_destroy(struct lttng_event *event);
+static void _lttng_enum_destroy(struct lttng_enum *_enum);
static
void lttng_session_lazy_sync_enablers(struct lttng_session *session);
enum lttng_ust_loglevel_type req_type,
int req_loglevel)
{
- if (req_type == LTTNG_UST_LOGLEVEL_ALL)
- return 1;
if (!has_loglevel)
loglevel = TRACE_DEFAULT;
switch (req_type) {
case LTTNG_UST_LOGLEVEL_RANGE:
- if (loglevel <= req_loglevel || req_loglevel == -1)
+ if (loglevel <= req_loglevel
+ || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
return 1;
else
return 0;
case LTTNG_UST_LOGLEVEL_SINGLE:
- if (loglevel == req_loglevel || req_loglevel == -1)
+ if (loglevel == req_loglevel
+ || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
return 1;
else
return 0;
case LTTNG_UST_LOGLEVEL_ALL:
default:
- return 1;
+ if (loglevel <= TRACE_DEBUG)
+ return 1;
+ else
+ return 0;
}
}
return NULL;
CDS_INIT_LIST_HEAD(&session->chan_head);
CDS_INIT_LIST_HEAD(&session->events_head);
+ CDS_INIT_LIST_HEAD(&session->enums_head);
CDS_INIT_LIST_HEAD(&session->enablers_head);
for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
+ for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
+ CDS_INIT_HLIST_HEAD(&session->enums_ht.table[i]);
cds_list_add(&session->node, &sessions);
return session;
}
{
struct lttng_channel *chan, *tmpchan;
struct lttng_event *event, *tmpevent;
+ struct lttng_enum *_enum, *tmp_enum;
struct lttng_enabler *enabler, *tmpenabler;
CMM_ACCESS_ONCE(session->active) = 0;
cds_list_for_each_entry_safe(event, tmpevent,
&session->events_head, node)
_lttng_event_destroy(event);
+ cds_list_for_each_entry_safe(_enum, tmp_enum,
+ &session->enums_head, node)
+ _lttng_enum_destroy(_enum);
cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
_lttng_channel_unmap(chan);
cds_list_del(&session->node);
/* Set transient enabler state to "enabled" */
session->tstate = 1;
- /* We need to sync enablers with session before activation. */
- lttng_session_sync_enablers(session);
/*
* Snapshot the number of events per channel to know the type of header
}
}
+ /* We need to sync enablers with session before activation. */
+ lttng_session_sync_enablers(session);
+
/* Set atomically the state to "active" */
CMM_ACCESS_ONCE(session->active) = 1;
CMM_ACCESS_ONCE(session->been_active) = 1;
return ret;
}
+/*
+ * Register an enumeration with the session daemon and cache it in the
+ * session's enum hash table and list. Returns 0 on success, -EEXIST if
+ * an enum with the same name is already part of this session, or a
+ * negative error code on socket, allocation or registration failure.
+ * NOTE(review): per the file header, callers run on the communication
+ * thread under ust_lock protection — confirm for any new caller.
+ */
+static
+int lttng_enum_create(const struct lttng_enum_desc *desc,
+		struct lttng_session *session)
+{
+	const char *enum_name = desc->name;
+	struct lttng_enum *_enum;
+	struct cds_hlist_head *head;
+	struct cds_hlist_node *node;
+	int ret = 0;
+	size_t name_len = strlen(enum_name);
+	uint32_t hash;
+	int notify_socket;
+
+	/* Refuse duplicates: look the name up in the session's enum hash table. */
+	hash = jhash(enum_name, name_len, 0);
+	head = &session->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
+	cds_hlist_for_each_entry(_enum, node, head, hlist) {
+		assert(_enum->desc);
+		if (!strncmp(_enum->desc->name, desc->name,
+				LTTNG_UST_SYM_NAME_LEN - 1)) {
+			ret = -EEXIST;
+			goto exist;
+		}
+	}
+
+	notify_socket = lttng_get_notify_socket(session->owner);
+	if (notify_socket < 0) {
+		ret = notify_socket;
+		goto socket_error;
+	}
+
+	_enum = zmalloc(sizeof(*_enum));
+	if (!_enum) {
+		ret = -ENOMEM;
+		goto cache_error;
+	}
+	_enum->session = session;
+	_enum->desc = desc;
+
+	/* Ask sessiond to assign an id; filled into _enum->id on success. */
+	ret = ustcomm_register_enum(notify_socket,
+			session->objd,
+			enum_name,
+			desc->nr_entries,
+			desc->entries,
+			&_enum->id);
+	if (ret < 0) {
+		DBG("Error (%d) registering enumeration to sessiond", ret);
+		goto sessiond_register_error;
+	}
+	/* Publish in both the flat list (teardown) and the hash table (lookup). */
+	cds_list_add(&_enum->node, &session->enums_head);
+	cds_hlist_add_head(&_enum->hlist, head);
+	return 0;
+
+sessiond_register_error:
+	free(_enum);
+cache_error:
+socket_error:
+exist:
+	return ret;
+}
+
+/*
+ * Walk an event's field descriptors and make sure every enumeration type
+ * used by a field is registered with the session (via lttng_enum_create).
+ * An already-registered enum (-EEXIST) is not an error. Returns 0 on
+ * success, or the first non-EEXIST negative error code encountered.
+ */
+static
+int lttng_event_create_all_enums(const struct lttng_event_desc *desc,
+		struct lttng_session *session)
+{
+	unsigned int nr_fields, i;
+	const struct lttng_event_field *fields;
+
+	/* For each field, ensure enum is part of the session. */
+	nr_fields = desc->nr_fields;
+	fields = desc->fields;
+	for (i = 0; i < nr_fields; i++) {
+		const struct lttng_type *type = &fields[i].type;
+
+		switch (type->atype) {
+		case atype_enum:
+		{
+			const struct lttng_enum_desc *enum_desc;
+			int ret;
+
+			enum_desc = type->u.basic.enumeration.desc;
+			ret = lttng_enum_create(enum_desc, session);
+			/* -EEXIST means the enum is already in the session: fine. */
+			if (ret && ret != -EEXIST) {
+				DBG("Unable to create enum error: (%d)", ret);
+				return ret;
+			}
+			break;
+		}
+		default:
+			/* TODO: nested types when they become supported. */
+			continue;
+		}
+	}
+	return 0;
+}
+
/*
* Supports event creation while tracing session is active.
*/
goto socket_error;
}
+ ret = lttng_event_create_all_enums(desc, session);
+ if (ret < 0) {
+ DBG("Error (%d) adding enum to session", ret);
+ goto create_enum_error;
+ }
+
/*
* Check if loglevel match. Refuse to connect event if not.
*/
/* Fetch event ID from sessiond */
ret = ustcomm_register_event(notify_socket,
+ session,
session->objd,
chan->objd,
event_name,
sessiond_register_error:
free(event);
cache_error:
+create_enum_error:
socket_error:
exist:
return ret;
* Only dump state for the sessions owned by the caller thread, because
* we don't keep ust_lock across the entire iteration.
*/
-int lttng_handle_pending_statedump(void *owner)
+void lttng_handle_pending_statedump(void *owner)
{
struct lttng_session *session;
/* Execute state dump */
- lttng_ust_baddr_statedump(owner);
+ do_lttng_ust_statedump(owner);
/* Clear pending state dump */
- ust_lock();
+ if (ust_lock()) {
+ goto end;
+ }
cds_list_for_each_entry(session, &sessions, node) {
if (session->owner != owner)
continue;
continue;
session->statedump_pending = 0;
}
+end:
ust_unlock();
- return 0;
+ return;
}
/*
free(event);
}
+/*
+ * Unlink an enum from the session's flat enum list and free it.
+ * NOTE(review): the entry is not removed from session->enums_ht here —
+ * presumably this is only reached on session teardown where the whole
+ * table is discarded; verify before calling it anywhere else.
+ */
+static
+void _lttng_enum_destroy(struct lttng_enum *_enum)
+{
+	cds_list_del(&_enum->node);
+	free(_enum);
+}
+
void lttng_ust_events_exit(void)
{
struct lttng_session *session, *tmpsession;
sizeof(enabler->event_param));
enabler->chan = chan;
/* ctx left NULL */
- enabler->enabled = 1;
+ enabler->enabled = 0;
cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
lttng_session_lazy_sync_enablers(enabler->chan->session);
return enabler;
switch (context_param->ctx) {
case LTTNG_UST_CONTEXT_PTHREAD_ID:
return lttng_add_pthread_id_to_ctx(ctx);
+ case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
+ {
+ struct lttng_ust_perf_counter_ctx *perf_ctx_param;
+
+ perf_ctx_param = &context_param->u.perf_counter;
+ return lttng_add_perf_counter_to_ctx(
+ perf_ctx_param->type,
+ perf_ctx_param->config,
+ perf_ctx_param->name,
+ ctx);
+ }
case LTTNG_UST_CONTEXT_VTID:
return lttng_add_vtid_to_ctx(ctx);
case LTTNG_UST_CONTEXT_VPID:
return lttng_add_procname_to_ctx(ctx);
case LTTNG_UST_CONTEXT_IP:
return lttng_add_ip_to_ctx(ctx);
+ case LTTNG_UST_CONTEXT_CPU_ID:
+ return lttng_add_cpu_id_to_ctx(ctx);
default:
return -EINVAL;
}