#include "buffer-registry.hpp"
#include "channel.hpp"
#include "cmd.hpp"
+#include "consumer-output.hpp"
#include "consumer.hpp"
#include "event-notifier-error-accounting.hpp"
#include "event.hpp"
#include "lttng-syscall.hpp"
#include "notification-thread-commands.hpp"
#include "notification-thread.hpp"
-#include "rotate.hpp"
#include "rotation-thread.hpp"
#include "session.hpp"
#include "timer.hpp"
#include <lttng/event-internal.hpp>
#include <lttng/event-rule/event-rule-internal.hpp>
#include <lttng/event-rule/event-rule.h>
+#include <lttng/kernel.h>
#include <lttng/location-internal.hpp>
#include <lttng/lttng-error.h>
#include <lttng/rotate-internal.hpp>
static struct cmd_completion_handler *current_completion_handler;
static int validate_ust_event_name(const char *);
-static int cmd_enable_event_internal(struct ltt_session *session,
+static int cmd_enable_event_internal(ltt_session::locked_ref& session,
const struct lttng_domain *domain,
char *channel_name,
struct lttng_event *event,
struct lttng_bytecode *filter,
struct lttng_event_exclusion *exclusion,
int wpipe);
-static enum lttng_error_code cmd_enable_channel_internal(struct ltt_session *session,
+static enum lttng_error_code cmd_enable_channel_internal(ltt_session::locked_ref& session,
const struct lttng_domain *domain,
const struct lttng_channel *_attr,
int wpipe);
* Create a session path used by list_lttng_sessions for the case that the
* session consumer is on the network.
*/
-static int build_network_session_path(char *dst, size_t size, struct ltt_session *session)
+static int
+build_network_session_path(char *dst, size_t size, const ltt_session::locked_ref& session)
{
int ret, kdata_port, udata_port;
struct lttng_uri *kuri = nullptr, *uuri = nullptr, *uri = nullptr;
char tmp_uurl[PATH_MAX], tmp_urls[PATH_MAX];
- LTTNG_ASSERT(session);
LTTNG_ASSERT(dst);
memset(tmp_urls, 0, sizeof(tmp_urls));
* Get run-time attributes if the session has been started (discarded events,
* lost packets).
*/
-static int get_kernel_runtime_stats(struct ltt_session *session,
+static int get_kernel_runtime_stats(const ltt_session::locked_ref& session,
struct ltt_kernel_channel *kchan,
uint64_t *discarded_events,
uint64_t *lost_packets)
* Get run-time attributes if the session has been started (discarded events,
* lost packets).
*/
-static int get_ust_runtime_stats(struct ltt_session *session,
+static int get_ust_runtime_stats(const ltt_session::locked_ref& session,
struct ltt_ust_channel *uchan,
uint64_t *discarded_events,
uint64_t *lost_packets)
enum lttng_error_code ret_code;
int ret = 0;
unsigned int local_nb_events = 0;
- struct agent_event *event;
- struct lttng_ht_iter iter;
unsigned long agent_event_count;
assert(agt);
local_nb_events = (unsigned int) agent_event_count;
- {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (agt->events->ht, &iter.iter, event, node.node) {
- struct lttng_event *tmp_event = lttng_event_create();
+ for (auto *event :
+ lttng::urcu::lfht_iteration_adapter<agent_event,
+ decltype(agent_event::node),
+ &agent_event::node>(*agt->events->ht)) {
+ struct lttng_event *tmp_event = lttng_event_create();
- if (!tmp_event) {
- ret_code = LTTNG_ERR_NOMEM;
- goto error;
- }
+ if (!tmp_event) {
+ ret_code = LTTNG_ERR_NOMEM;
+ goto error;
+ }
- if (lttng_strncpy(tmp_event->name, event->name, sizeof(tmp_event->name))) {
- lttng_event_destroy(tmp_event);
- ret_code = LTTNG_ERR_FATAL;
- goto error;
- }
+ if (lttng_strncpy(tmp_event->name, event->name, sizeof(tmp_event->name))) {
+ lttng_event_destroy(tmp_event);
+ ret_code = LTTNG_ERR_FATAL;
+ goto error;
+ }
- tmp_event->name[sizeof(tmp_event->name) - 1] = '\0';
- tmp_event->enabled = !!event->enabled_count;
- tmp_event->loglevel = event->loglevel_value;
- tmp_event->loglevel_type = event->loglevel_type;
+ tmp_event->name[sizeof(tmp_event->name) - 1] = '\0';
+ tmp_event->enabled = !!event->enabled_count;
+ tmp_event->loglevel = event->loglevel_value;
+ tmp_event->loglevel_type = event->loglevel_type;
- ret = lttng_event_serialize(tmp_event,
- 0,
- nullptr,
- event->filter_expression,
- 0,
- nullptr,
- reply_payload);
- lttng_event_destroy(tmp_event);
- if (ret) {
- ret_code = LTTNG_ERR_FATAL;
- goto error;
- }
+ ret = lttng_event_serialize(
+ tmp_event, 0, nullptr, event->filter_expression, 0, nullptr, reply_payload);
+ lttng_event_destroy(tmp_event);
+ if (ret) {
+ ret_code = LTTNG_ERR_FATAL;
+ goto error;
}
}
end:
struct lttng_ht_iter iter;
struct lttng_ht_node_str *node;
struct ltt_ust_channel *uchan;
- struct ltt_ust_event *uevent;
unsigned long channel_event_count;
unsigned int local_nb_events = 0;
DBG("Listing UST global events for channel %s", channel_name);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
lttng_ht_lookup(ust_global->channels, (void *) channel_name, &iter);
- node = lttng_ht_iter_get_node_str(&iter);
+ node = lttng_ht_iter_get_node<lttng_ht_node_str>(&iter);
if (node == nullptr) {
ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND;
goto error;
}
- uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);
+ uchan = lttng::utils::container_of(node, <t_ust_channel::node);
channel_event_count = lttng_ht_get_count(uchan->events);
if (channel_event_count == 0) {
DBG3("Listing UST global %d events", *nb_events);
- cds_lfht_for_each_entry (uchan->events->ht, &iter.iter, uevent, node.node) {
+ for (auto *uevent :
+ lttng::urcu::lfht_iteration_adapter<ltt_ust_event,
+ decltype(ltt_ust_event::node),
+ <t_ust_event::node>(*uchan->events->ht)) {
struct lttng_event *tmp_event = nullptr;
if (uevent->internal) {
tmp_event->exclusion = 1;
}
+ std::vector<const char *> exclusion_names;
+ if (uevent->exclusion) {
+ for (int i = 0; i < uevent->exclusion->count; i++) {
+ exclusion_names.emplace_back(
+ LTTNG_EVENT_EXCLUSION_NAME_AT(uevent->exclusion, i));
+ }
+ }
+
/*
* We do not care about the filter bytecode and the fd from the
* userspace_probe_location.
*/
ret = lttng_event_serialize(tmp_event,
- uevent->exclusion ? uevent->exclusion->count : 0,
- uevent->exclusion ? (char **) uevent->exclusion->names :
- nullptr,
+ exclusion_names.size(),
+ exclusion_names.size() ? exclusion_names.data() :
+ nullptr,
uevent->filter_expression,
0,
nullptr,
{
enum lttng_error_code ret_code;
int ret;
- struct ltt_kernel_event *event;
struct ltt_kernel_channel *kchan;
assert(reply_payload);
}
/* Kernel channels */
- cds_list_for_each_entry (event, &kchan->events_list.head, list) {
+ for (auto event :
+ lttng::urcu::list_iteration_adapter<ltt_kernel_event, <t_kernel_event::list>(
+ kchan->events_list.head)) {
struct lttng_event *tmp_event = lttng_event_create();
if (!tmp_event) {
 * Add URI to the consumer output object. Set the correct path depending on the
* domain adding the default trace directory.
*/
-static enum lttng_error_code add_uri_to_consumer(const struct ltt_session *session,
+static enum lttng_error_code add_uri_to_consumer(const ltt_session::locked_ref& session,
struct consumer_output *consumer,
struct lttng_uri *uri,
enum lttng_domain_type domain)
static int init_kernel_tracing(struct ltt_kernel_session *session)
{
int ret = 0;
- struct lttng_ht_iter iter;
- struct consumer_socket *socket;
LTTNG_ASSERT(session);
if (session->consumer_fds_sent == 0 && session->consumer != nullptr) {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (
- session->consumer->socks->ht, &iter.iter, socket, node.node) {
+ for (auto *socket :
+ lttng::urcu::lfht_iteration_adapter<consumer_socket,
+ decltype(consumer_socket::node),
+ &consumer_socket::node>(
+ *session->consumer->socks->ht)) {
pthread_mutex_lock(socket->lock);
ret = kernel_consumer_send_session(socket, session);
pthread_mutex_unlock(socket->lock);
* the relayd and send them to the right domain consumer. Consumer type MUST be
* network.
*/
-int cmd_setup_relayd(struct ltt_session *session)
+int cmd_setup_relayd(const ltt_session::locked_ref& session)
{
int ret = LTTNG_OK;
struct ltt_ust_session *usess;
struct ltt_kernel_session *ksess;
- struct consumer_socket *socket;
- struct lttng_ht_iter iter;
LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
- LTTNG_ASSERT(session);
-
usess = session->ust_session;
ksess = session->kernel_session;
DBG("Setting relayd for session %s", session->name);
if (session->current_trace_chunk) {
- enum lttng_trace_chunk_status status = lttng_trace_chunk_get_id(
+ const lttng_trace_chunk_status status = lttng_trace_chunk_get_id(
session->current_trace_chunk, ¤t_chunk_id.value);
if (status == LTTNG_TRACE_CHUNK_STATUS_OK) {
if (usess && usess->consumer && usess->consumer->type == CONSUMER_DST_NET &&
usess->consumer->enabled) {
/* For each consumer socket, send relayd sockets */
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (
- usess->consumer->socks->ht, &iter.iter, socket, node.node) {
+ for (auto *socket :
+ lttng::urcu::lfht_iteration_adapter<consumer_socket,
+ decltype(consumer_socket::node),
+ &consumer_socket::node>(
+ *usess->consumer->socks->ht)) {
pthread_mutex_lock(socket->lock);
ret = send_consumer_relayd_sockets(
session->id,
if (ksess && ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET &&
ksess->consumer->enabled) {
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (
- ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
+ for (auto *socket :
+ lttng::urcu::lfht_iteration_adapter<consumer_socket,
+ decltype(consumer_socket::node),
+ &consumer_socket::node>(
+ *ksess->consumer->socks->ht)) {
pthread_mutex_lock(socket->lock);
ret = send_consumer_relayd_sockets(
session->id,
int start_kernel_session(struct ltt_kernel_session *ksess)
{
int ret;
- struct ltt_kernel_channel *kchan;
/* Open kernel metadata */
if (ksess->metadata == nullptr && ksess->output_traces) {
}
/* For each channel */
- cds_list_for_each_entry (kchan, &ksess->channel_list.head, list) {
+ for (auto kchan :
+ lttng::urcu::list_iteration_adapter<ltt_kernel_channel, <t_kernel_channel::list>(
+ ksess->channel_list.head)) {
if (kchan->stream_count == 0) {
ret = kernel_open_channel_stream(kchan);
if (ret < 0) {
int stop_kernel_session(struct ltt_kernel_session *ksess)
{
- struct ltt_kernel_channel *kchan;
bool error_occurred = false;
int ret;
}
/* Flush all buffers after stopping */
- cds_list_for_each_entry (kchan, &ksess->channel_list.head, list) {
+ for (auto kchan :
+ lttng::urcu::list_iteration_adapter<ltt_kernel_channel, <t_kernel_channel::list>(
+ ksess->channel_list.head)) {
ret = kernel_flush_buffer(kchan);
if (ret < 0) {
ERR("Kernel flush buffer error");
/*
* Command LTTNG_DISABLE_CHANNEL processed by the client thread.
*/
-int cmd_disable_channel(struct ltt_session *session,
+int cmd_disable_channel(const ltt_session::locked_ref& session,
enum lttng_domain_type domain,
char *channel_name)
{
usess = session->ust_session;
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
switch (domain) {
case LTTNG_DOMAIN_KERNEL:
*
* The wpipe arguments is used as a notifier for the kernel thread.
*/
-int cmd_enable_channel(struct command_ctx *cmd_ctx, int sock, int wpipe)
+int cmd_enable_channel(command_ctx *cmd_ctx, ltt_session::locked_ref& session, int sock, int wpipe)
{
int ret;
size_t channel_len;
goto end;
}
- ret = cmd_enable_channel_internal(cmd_ctx->session, &command_domain, channel, wpipe);
+ ret = cmd_enable_channel_internal(session, &command_domain, channel, wpipe);
end:
lttng_dynamic_buffer_reset(&channel_buffer);
return ret;
}
-static enum lttng_error_code cmd_enable_channel_internal(struct ltt_session *session,
+static enum lttng_error_code cmd_enable_channel_internal(ltt_session::locked_ref& session,
const struct lttng_domain *domain,
const struct lttng_channel *_attr,
int wpipe)
size_t len;
struct lttng_channel *attr = nullptr;
- LTTNG_ASSERT(session);
LTTNG_ASSERT(_attr);
LTTNG_ASSERT(domain);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
attr = lttng_channel_copy(_attr);
if (!attr) {
break;
case LTTNG_DOMAIN_JUL:
case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_LOG4J2:
case LTTNG_DOMAIN_PYTHON:
if (!agent_tracing_is_enabled()) {
DBG("Attempted to enable a channel in an agent domain but the agent thread is not running");
case LTTNG_DOMAIN_UST:
case LTTNG_DOMAIN_JUL:
case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_LOG4J2:
case LTTNG_DOMAIN_PYTHON:
{
struct ltt_ust_channel *uchan;
ret_code = LTTNG_ERR_INVALID_CHANNEL_NAME;
goto error;
}
+ } else if (domain->type == LTTNG_DOMAIN_LOG4J2) {
+ if (strncmp(attr->name,
+ DEFAULT_LOG4J2_CHANNEL_NAME,
+ LTTNG_SYMBOL_NAME_LEN - 1) != 0) {
+ ret_code = LTTNG_ERR_INVALID_CHANNEL_NAME;
+ goto error;
+ }
} else if (domain->type == LTTNG_DOMAIN_PYTHON) {
if (strncmp(attr->name,
DEFAULT_PYTHON_CHANNEL_NAME,
}
enum lttng_error_code
-cmd_process_attr_tracker_get_tracking_policy(struct ltt_session *session,
+cmd_process_attr_tracker_get_tracking_policy(const ltt_session::locked_ref& session,
enum lttng_domain_type domain,
enum lttng_process_attr process_attr,
enum lttng_tracking_policy *policy)
}
enum lttng_error_code
-cmd_process_attr_tracker_set_tracking_policy(struct ltt_session *session,
+cmd_process_attr_tracker_set_tracking_policy(const ltt_session::locked_ref& session,
enum lttng_domain_type domain,
enum lttng_process_attr process_attr,
enum lttng_tracking_policy policy)
}
enum lttng_error_code
-cmd_process_attr_tracker_inclusion_set_add_value(struct ltt_session *session,
+cmd_process_attr_tracker_inclusion_set_add_value(const ltt_session::locked_ref& session,
enum lttng_domain_type domain,
enum lttng_process_attr process_attr,
const struct process_attr_value *value)
}
enum lttng_error_code
-cmd_process_attr_tracker_inclusion_set_remove_value(struct ltt_session *session,
+cmd_process_attr_tracker_inclusion_set_remove_value(const ltt_session::locked_ref& session,
enum lttng_domain_type domain,
enum lttng_process_attr process_attr,
const struct process_attr_value *value)
}
enum lttng_error_code
-cmd_process_attr_tracker_get_inclusion_set(struct ltt_session *session,
+cmd_process_attr_tracker_get_inclusion_set(const ltt_session::locked_ref& session,
enum lttng_domain_type domain,
enum lttng_process_attr process_attr,
struct lttng_process_attr_values **values)
* Command LTTNG_DISABLE_EVENT processed by the client thread.
*/
int cmd_disable_event(struct command_ctx *cmd_ctx,
+ ltt_session::locked_ref& locked_session,
struct lttng_event *event,
char *filter_expression,
struct lttng_bytecode *bytecode,
struct lttng_event_exclusion *exclusion)
{
int ret;
+ const ltt_session& session = *locked_session;
const char *event_name;
- const struct ltt_session *session = cmd_ctx->session;
const char *channel_name = cmd_ctx->lsm.u.disable.channel_name;
const enum lttng_domain_type domain = cmd_ctx->lsm.domain.type;
event_name = event->name;
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
/* Error out on unhandled search criteria */
if (event->loglevel_type || event->loglevel != -1 || event->enabled || event->pid ||
struct ltt_kernel_channel *kchan;
struct ltt_kernel_session *ksess;
- ksess = session->kernel_session;
+ ksess = session.kernel_session;
/*
* If a non-default channel has been created in the
struct ltt_ust_channel *uchan;
struct ltt_ust_session *usess;
- usess = session->ust_session;
+ usess = session.ust_session;
if (validate_ust_event_name(event_name)) {
ret = LTTNG_ERR_INVALID_EVENT_NAME;
break;
}
case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_LOG4J2:
case LTTNG_DOMAIN_JUL:
case LTTNG_DOMAIN_PYTHON:
{
struct agent *agt;
- struct ltt_ust_session *usess = session->ust_session;
+ struct ltt_ust_session *usess = session.ust_session;
LTTNG_ASSERT(usess);
* Command LTTNG_ADD_CONTEXT processed by the client thread.
*/
int cmd_add_context(struct command_ctx *cmd_ctx,
+ ltt_session::locked_ref& locked_session,
const struct lttng_event_context *event_context,
int kwpipe)
{
int ret, chan_kern_created = 0, chan_ust_created = 0;
const enum lttng_domain_type domain = cmd_ctx->lsm.domain.type;
- const struct ltt_session *session = cmd_ctx->session;
+ const struct ltt_session& session = *locked_session;
const char *channel_name = cmd_ctx->lsm.u.context.channel_name;
/*
* some point in time before. The tracer does not allow it and would
* result in a corrupted trace.
*/
- if (cmd_ctx->session->has_been_started) {
+ if (session.has_been_started) {
ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
goto end;
}
switch (domain) {
case LTTNG_DOMAIN_KERNEL:
- LTTNG_ASSERT(session->kernel_session);
+ LTTNG_ASSERT(session.kernel_session);
- if (session->kernel_session->channel_count == 0) {
+ if (session.kernel_session->channel_count == 0) {
/* Create default channel */
- ret = channel_kernel_create(session->kernel_session, nullptr, kwpipe);
+ ret = channel_kernel_create(session.kernel_session, nullptr, kwpipe);
if (ret != LTTNG_OK) {
goto error;
}
chan_kern_created = 1;
}
/* Add kernel context to kernel tracer */
- ret = context_kernel_add(session->kernel_session, event_context, channel_name);
+ ret = context_kernel_add(session.kernel_session, event_context, channel_name);
if (ret != LTTNG_OK) {
goto error;
}
break;
case LTTNG_DOMAIN_JUL:
case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_LOG4J2:
{
/*
* Validate channel name.
strcmp(channel_name, DEFAULT_LOG4J_CHANNEL_NAME) != 0) {
ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
goto error;
+ } else if (domain == LTTNG_DOMAIN_LOG4J2 && *channel_name &&
+ strcmp(channel_name, DEFAULT_LOG4J2_CHANNEL_NAME) != 0) {
+ ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+ goto error;
}
}
/* fall through */
case LTTNG_DOMAIN_UST:
{
- struct ltt_ust_session *usess = session->ust_session;
+ struct ltt_ust_session *usess = session.ust_session;
unsigned int chan_count;
LTTNG_ASSERT(usess);
error:
if (chan_kern_created) {
struct ltt_kernel_channel *kchan = trace_kernel_get_channel_by_name(
- DEFAULT_CHANNEL_NAME, session->kernel_session);
+ DEFAULT_CHANNEL_NAME, session.kernel_session);
/* Created previously, this should NOT fail. */
LTTNG_ASSERT(kchan);
kernel_destroy_channel(kchan);
if (chan_ust_created) {
struct ltt_ust_channel *uchan = trace_ust_find_channel_by_name(
- session->ust_session->domain_global.channels, DEFAULT_CHANNEL_NAME);
+ session.ust_session->domain_global.channels, DEFAULT_CHANNEL_NAME);
/* Created previously, this should NOT fail. */
LTTNG_ASSERT(uchan);
/* Remove from the channel list of the session. */
- trace_ust_delete_channel(session->ust_session->domain_global.channels, uchan);
+ trace_ust_delete_channel(session.ust_session->domain_global.channels, uchan);
trace_ust_destroy_channel(uchan);
}
end:
*/
if (name_starts_with(name, DEFAULT_JUL_EVENT_COMPONENT) ||
name_starts_with(name, DEFAULT_LOG4J_EVENT_COMPONENT) ||
+ name_starts_with(name, DEFAULT_LOG4J2_EVENT_COMPONENT) ||
name_starts_with(name, DEFAULT_PYTHON_EVENT_COMPONENT)) {
ret = -1;
}
* be hidden from clients. Such events are used in the agent implementation to
 * enable the events through which all "agent" events are funneled.
*/
-static int _cmd_enable_event(struct ltt_session *session,
+static int _cmd_enable_event(ltt_session::locked_ref& locked_session,
const struct lttng_domain *domain,
char *channel_name,
struct lttng_event *event,
{
int ret = 0, channel_created = 0;
struct lttng_channel *attr = nullptr;
+ const ltt_session& session = *locked_session;
- LTTNG_ASSERT(session);
LTTNG_ASSERT(event);
LTTNG_ASSERT(channel_name);
}
}
- DBG("Enable event command for event \'%s\'", event->name);
-
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
switch (domain->type) {
case LTTNG_DOMAIN_KERNEL:
 * session, explicitly require that -c chan_name needs
* to be provided.
*/
- if (session->kernel_session->has_non_default_channel && channel_name[0] == '\0') {
+ if (session.kernel_session->has_non_default_channel && channel_name[0] == '\0') {
ret = LTTNG_ERR_NEED_CHANNEL_NAME;
goto error;
}
- kchan = trace_kernel_get_channel_by_name(channel_name, session->kernel_session);
+ kchan = trace_kernel_get_channel_by_name(channel_name, session.kernel_session);
if (kchan == nullptr) {
attr = channel_new_default_attr(LTTNG_DOMAIN_KERNEL, LTTNG_BUFFER_GLOBAL);
if (attr == nullptr) {
goto error;
}
- ret = cmd_enable_channel_internal(session, domain, attr, wpipe);
+ ret = cmd_enable_channel_internal(locked_session, domain, attr, wpipe);
if (ret != LTTNG_OK) {
goto error;
}
}
/* Get the newly created kernel channel pointer */
- kchan = trace_kernel_get_channel_by_name(channel_name, session->kernel_session);
+ kchan = trace_kernel_get_channel_by_name(channel_name, session.kernel_session);
if (kchan == nullptr) {
/* This should not happen... */
ret = LTTNG_ERR_FATAL;
case LTTNG_DOMAIN_UST:
{
struct ltt_ust_channel *uchan;
- struct ltt_ust_session *usess = session->ust_session;
+ struct ltt_ust_session *usess = session.ust_session;
LTTNG_ASSERT(usess);
goto error;
}
- ret = cmd_enable_channel_internal(session, domain, attr, wpipe);
+ ret = cmd_enable_channel_internal(locked_session, domain, attr, wpipe);
if (ret != LTTNG_OK) {
goto error;
}
break;
}
case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_LOG4J2:
case LTTNG_DOMAIN_JUL:
case LTTNG_DOMAIN_PYTHON:
{
struct agent *agt;
struct lttng_event uevent;
struct lttng_domain tmp_dom;
- struct ltt_ust_session *usess = session->ust_session;
+ struct ltt_ust_session *usess = session.ust_session;
LTTNG_ASSERT(usess);
memset(&uevent, 0, sizeof(uevent));
uevent.type = LTTNG_EVENT_TRACEPOINT;
uevent.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
+ uevent.loglevel = -1;
default_event_name = event_get_default_agent_ust_name(domain->type);
if (!default_event_name) {
ret = LTTNG_ERR_FATAL;
case LTTNG_DOMAIN_LOG4J:
default_chan_name = DEFAULT_LOG4J_CHANNEL_NAME;
break;
+ case LTTNG_DOMAIN_LOG4J2:
+ default_chan_name = DEFAULT_LOG4J2_CHANNEL_NAME;
+ break;
case LTTNG_DOMAIN_JUL:
default_chan_name = DEFAULT_JUL_CHANNEL_NAME;
break;
}
}
- ret = cmd_enable_event_internal(session,
+ ret = cmd_enable_event_internal(locked_session,
&tmp_dom,
(char *) default_chan_name,
&uevent,
* We own filter, exclusion, and filter_expression.
*/
int cmd_enable_event(struct command_ctx *cmd_ctx,
+ ltt_session::locked_ref& locked_session,
struct lttng_event *event,
char *filter_expression,
struct lttng_event_exclusion *exclusion,
* - bytecode,
* - exclusion
*/
- ret = _cmd_enable_event(cmd_ctx->session,
+ ret = _cmd_enable_event(locked_session,
&command_domain,
cmd_ctx->lsm.u.enable.channel_name,
event,
* never be made visible to clients and are immune to checks such as
* reserved names.
*/
-static int cmd_enable_event_internal(struct ltt_session *session,
+static int cmd_enable_event_internal(ltt_session::locked_ref& locked_session,
const struct lttng_domain *domain,
char *channel_name,
struct lttng_event *event,
struct lttng_event_exclusion *exclusion,
int wpipe)
{
- return _cmd_enable_event(session,
+ return _cmd_enable_event(locked_session,
domain,
channel_name,
event,
}
break;
case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_LOG4J2:
case LTTNG_DOMAIN_JUL:
case LTTNG_DOMAIN_PYTHON:
nb_events = agent_list_events(&events, domain);
/*
* Command LTTNG_START_TRACE processed by the client thread.
- *
- * Called with session mutex held.
*/
-int cmd_start_trace(struct ltt_session *session)
+int cmd_start_trace(const ltt_session::locked_ref& session)
{
enum lttng_error_code ret;
unsigned long nb_chan = 0;
const bool session_rotated_after_last_stop = session->rotated_after_last_stop;
const bool session_cleared_after_last_stop = session->cleared_after_last_stop;
- LTTNG_ASSERT(session);
-
/* Ease our life a bit ;) */
ksession = session->kernel_session;
usess = session->ust_session;
/* Flag session that trace should start automatically */
if (usess) {
- int int_ret = ust_app_start_trace_all(usess);
+ const int int_ret = ust_app_start_trace_all(usess);
if (int_ret < 0) {
ret = LTTNG_ERR_UST_START_FAIL;
session->rotated_after_last_stop = false;
if (session->rotate_timer_period && !session->rotation_schedule_timer_enabled) {
- int int_ret = timer_session_rotation_schedule_timer_start(
+ const int int_ret = timer_session_rotation_schedule_timer_start(
session, session->rotate_timer_period);
if (int_ret < 0) {
/*
* Command LTTNG_STOP_TRACE processed by the client thread.
*/
-int cmd_stop_trace(struct ltt_session *session)
+int cmd_stop_trace(const ltt_session::locked_ref& session)
{
int ret;
struct ltt_kernel_session *ksession;
struct ltt_ust_session *usess;
- LTTNG_ASSERT(session);
-
DBG("Begin stop session \"%s\" (id %" PRIu64 ")", session->name, session->id);
/* Short cut */
ksession = session->kernel_session;
* Set the base_path of the session only if subdir of a control uris is set.
* Return LTTNG_OK on success, otherwise LTTNG_ERR_*.
*/
-static int
-set_session_base_path_from_uris(struct ltt_session *session, size_t nb_uri, struct lttng_uri *uris)
+static int set_session_base_path_from_uris(const ltt_session::locked_ref& session,
+ size_t nb_uri,
+ struct lttng_uri *uris)
{
int ret;
size_t i;
/*
* Command LTTNG_SET_CONSUMER_URI processed by the client thread.
*/
-int cmd_set_consumer_uri(struct ltt_session *session, size_t nb_uri, struct lttng_uri *uris)
+int cmd_set_consumer_uri(const ltt_session::locked_ref& session,
+ size_t nb_uri,
+ struct lttng_uri *uris)
{
int ret, i;
struct ltt_kernel_session *ksess = session->kernel_session;
struct ltt_ust_session *usess = session->ust_session;
- LTTNG_ASSERT(session);
LTTNG_ASSERT(uris);
LTTNG_ASSERT(nb_uri > 0);
}
static enum lttng_error_code
-set_session_output_from_descriptor(struct ltt_session *session,
+set_session_output_from_descriptor(const ltt_session::locked_ref& session,
const struct lttng_session_descriptor *descriptor)
{
int ret;
enum lttng_error_code ret_code = LTTNG_OK;
- enum lttng_session_descriptor_type session_type =
+ const lttng_session_descriptor_type session_type =
lttng_session_descriptor_get_type(descriptor);
- enum lttng_session_descriptor_output_type output_type =
+ const lttng_session_descriptor_output_type output_type =
lttng_session_descriptor_get_output_type(descriptor);
struct lttng_uri uris[2] = {};
size_t uri_count = 0;
struct ltt_session *new_session = nullptr;
enum lttng_session_descriptor_status descriptor_status;
- session_lock_list();
+ const auto list_lock = lttng::sessiond::lock_session_list();
if (home_path) {
if (*home_path != '/') {
ERR("Home path provided by client is not absolute");
/* Announce the session's destruction to the notification thread when it is destroyed. */
ret = session_add_destroy_notifier(
- new_session,
- [](const struct ltt_session *session, void *user_data __attribute__((unused))) {
+ [new_session]() {
+ session_get(new_session);
+ new_session->lock();
+ return ltt_session::make_locked_ref(*new_session);
+ }(),
+ [](const ltt_session::locked_ref& session,
+ void *user_data __attribute__((unused))) {
(void) notification_thread_command_remove_session(
the_notification_thread_handle, session->id);
},
break;
}
- ret_code = set_session_output_from_descriptor(new_session, descriptor);
+ ret_code = set_session_output_from_descriptor(
+ [new_session]() {
+ session_get(new_session);
+ new_session->lock();
+ return ltt_session::make_locked_ref(*new_session);
+ }(),
+ descriptor);
if (ret_code != LTTNG_OK) {
goto end;
}
/* Release the global reference on error. */
session_destroy(new_session);
}
- session_unlock_list();
+
return ret_code;
}
return ret_code;
}
-static void cmd_destroy_session_reply(const struct ltt_session *session, void *_reply_context)
+static void cmd_destroy_session_reply(const ltt_session::locked_ref& session, void *_reply_context)
{
int ret;
ssize_t comm_ret;
*
* Called with session lock held.
*/
-int cmd_destroy_session(struct ltt_session *session,
- struct notification_thread_handle *notification_thread_handle,
- int *sock_fd)
+int cmd_destroy_session(const ltt_session::locked_ref& session, int *sock_fd)
{
int ret;
enum lttng_error_code destruction_last_error = LTTNG_OK;
reply_context->reply_sock_fd = *sock_fd;
}
- /* Safety net */
- LTTNG_ASSERT(session);
-
DBG("Begin destroy session %s (id %" PRIu64 ")", session->name, session->id);
if (session->active) {
DBG("Session \"%s\" is active, attempting to stop it before destroying it",
}
if (session->rotate_size) {
- unsubscribe_session_consumed_size_rotation(session, notification_thread_handle);
+ try {
+ the_rotation_thread_handle->unsubscribe_session_consumed_size_rotation(
+ *session);
+ } catch (const std::exception& e) {
+ /* Continue the destruction of the session anyway. */
+ ERR("Failed to unsubscribe rotation thread notification channel from consumed size condition during session destruction: %s",
+ e.what());
+ }
+
session->rotate_size = 0;
}
* still holds a reference to the session, thus delaying its destruction
* _at least_ up to the point when that reference is released.
*/
- session_destroy(session);
+ session_destroy(&session.get());
if (reply_context) {
reply_context->destruction_status = destruction_last_error;
ret = session_add_destroy_notifier(
/*
* Command LTTNG_REGISTER_CONSUMER processed by the client thread.
*/
-int cmd_register_consumer(struct ltt_session *session,
+int cmd_register_consumer(const ltt_session::locked_ref& session,
enum lttng_domain_type domain,
const char *sock_path,
struct consumer_data *cdata)
int ret, sock;
struct consumer_socket *socket = nullptr;
- LTTNG_ASSERT(session);
LTTNG_ASSERT(cdata);
LTTNG_ASSERT(sock_path);
pthread_mutex_init(socket->lock, nullptr);
socket->registered = 1;
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
consumer_add_socket(socket, ksess->consumer);
pthread_mutex_lock(&cdata->pid_mutex);
/*
* Command LTTNG_LIST_DOMAINS processed by the client thread.
*/
-ssize_t cmd_list_domains(struct ltt_session *session, struct lttng_domain **domains)
+ssize_t cmd_list_domains(const ltt_session::locked_ref& session, struct lttng_domain **domains)
{
int ret, index = 0;
ssize_t nb_dom = 0;
- struct agent *agt;
- struct lttng_ht_iter iter;
if (session->kernel_session != nullptr) {
DBG3("Listing domains found kernel domain");
DBG3("Listing domains found UST global domain");
nb_dom++;
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (
- session->ust_session->agents->ht, &iter.iter, agt, node.node) {
+ for (auto *agt :
+ lttng::urcu::lfht_iteration_adapter<agent, decltype(agent::node), &agent::node>(
+ *session->ust_session->agents->ht)) {
if (agt->being_used) {
nb_dom++;
}
index++;
{
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (
- session->ust_session->agents->ht, &iter.iter, agt, node.node) {
+ for (auto *agt : lttng::urcu::lfht_iteration_adapter<agent,
+ decltype(agent::node),
+ &agent::node>(
+ *session->ust_session->agents->ht)) {
if (agt->being_used) {
(*domains)[index].type = agt->domain;
(*domains)[index].buf_type =
* Command LTTNG_LIST_CHANNELS processed by the client thread.
*/
enum lttng_error_code cmd_list_channels(enum lttng_domain_type domain,
- struct ltt_session *session,
+ const ltt_session::locked_ref& session,
struct lttng_payload *payload)
{
int ret = 0;
size_t cmd_header_offset;
enum lttng_error_code ret_code;
- assert(session);
- assert(payload);
+ LTTNG_ASSERT(payload);
DBG("Listing channels for session %s", session->name);
case LTTNG_DOMAIN_KERNEL:
{
/* Kernel channels */
- struct ltt_kernel_channel *kchan;
if (session->kernel_session != nullptr) {
- cds_list_for_each_entry (
- kchan, &session->kernel_session->channel_list.head, list) {
+ for (auto kchan :
+ lttng::urcu::list_iteration_adapter<ltt_kernel_channel,
+ <t_kernel_channel::list>(
+ session->kernel_session->channel_list.head)) {
uint64_t discarded_events, lost_packets;
struct lttng_channel_extended *extended;
}
case LTTNG_DOMAIN_UST:
{
- struct lttng_ht_iter iter;
- struct ltt_ust_channel *uchan;
-
- {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (session->ust_session->domain_global.channels->ht,
- &iter.iter,
- uchan,
- node.node) {
- uint64_t discarded_events = 0, lost_packets = 0;
- struct lttng_channel *channel = nullptr;
- struct lttng_channel_extended *extended;
-
- channel = trace_ust_channel_to_lttng_channel(uchan);
- if (!channel) {
- ret_code = LTTNG_ERR_NOMEM;
- goto end;
- }
+		for (auto *uchan :
+		     lttng::urcu::lfht_iteration_adapter<ltt_ust_channel,
+							 decltype(ltt_ust_channel::node),
+							 &ltt_ust_channel::node>(
+			     *session->ust_session->domain_global.channels->ht)) {
+ uint64_t discarded_events = 0, lost_packets = 0;
+ struct lttng_channel *channel = nullptr;
+ struct lttng_channel_extended *extended;
+
+ channel = trace_ust_channel_to_lttng_channel(uchan);
+ if (!channel) {
+ ret_code = LTTNG_ERR_NOMEM;
+ goto end;
+ }
- extended = (struct lttng_channel_extended *)
- channel->attr.extended.ptr;
+ extended = (struct lttng_channel_extended *) channel->attr.extended.ptr;
- ret = get_ust_runtime_stats(
- session, uchan, &discarded_events, &lost_packets);
- if (ret < 0) {
- lttng_channel_destroy(channel);
- ret_code = LTTNG_ERR_UNK;
- goto end;
- }
-
- extended->discarded_events = discarded_events;
- extended->lost_packets = lost_packets;
+ ret = get_ust_runtime_stats(
+ session, uchan, &discarded_events, &lost_packets);
+ if (ret < 0) {
+ lttng_channel_destroy(channel);
+ ret_code = LTTNG_ERR_UNK;
+ goto end;
+ }
- ret = lttng_channel_serialize(channel, &payload->buffer);
- if (ret) {
- ERR("Failed to serialize lttng_channel: channel name = '%s'",
- channel->name);
- lttng_channel_destroy(channel);
- ret_code = LTTNG_ERR_UNK;
- goto end;
- }
+ extended->discarded_events = discarded_events;
+ extended->lost_packets = lost_packets;
+ ret = lttng_channel_serialize(channel, &payload->buffer);
+ if (ret) {
+ ERR("Failed to serialize lttng_channel: channel name = '%s'",
+ channel->name);
lttng_channel_destroy(channel);
- i++;
+ ret_code = LTTNG_ERR_UNK;
+ goto end;
}
+
+ lttng_channel_destroy(channel);
+ i++;
}
break;
* Command LTTNG_LIST_EVENTS processed by the client thread.
*/
enum lttng_error_code cmd_list_events(enum lttng_domain_type domain,
- struct ltt_session *session,
+ const ltt_session::locked_ref& session,
char *channel_name,
struct lttng_payload *reply_payload)
{
break;
}
case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_LOG4J2:
case LTTNG_DOMAIN_JUL:
case LTTNG_DOMAIN_PYTHON:
if (session->ust_session) {
- struct lttng_ht_iter iter;
- struct agent *agt;
-
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (
- session->ust_session->agents->ht, &iter.iter, agt, node.node) {
+ for (auto *agt : lttng::urcu::lfht_iteration_adapter<agent,
+ decltype(agent::node),
+ &agent::node>(
+ *session->ust_session->agents->ht)) {
if (agt->domain == domain) {
ret_code = list_lttng_agent_events(
agt, reply_payload, &nb_events);
* Using the session list, filled a lttng_session array to send back to the
* client for session listing.
*
- * The session list lock MUST be acquired before calling this function. Use
- * session_lock_list() and session_unlock_list().
+ * The session list lock MUST be acquired before calling this function.
*/
void cmd_list_lttng_sessions(struct lttng_session *sessions,
size_t session_count,
{
int ret;
unsigned int i = 0;
- struct ltt_session *session;
struct ltt_session_list *list = session_get_list();
struct lttng_session_extended *extended = (typeof(extended)) (&sessions[session_count]);
* Iterate over session list and append data after the control struct in
* the buffer.
*/
- cds_list_for_each_entry (session, &list->head, list) {
- if (!session_get(session)) {
- continue;
- }
+	for (auto raw_session_ptr :
+	     lttng::urcu::list_iteration_adapter<ltt_session, &ltt_session::list>(list->head)) {
+ auto session = [raw_session_ptr]() {
+ session_get(raw_session_ptr);
+ raw_session_ptr->lock();
+ return ltt_session::make_locked_ref(*raw_session_ptr);
+ }();
+
/*
* Only list the sessions the user can control.
*/
if (!session_access_ok(session, uid) || session->destroyed) {
- session_put(session);
continue;
}
}
if (ret < 0) {
PERROR("snprintf session path");
- session_put(session);
continue;
}
extended[i].creation_time.value = (uint64_t) session->creation_time;
extended[i].creation_time.is_set = 1;
i++;
- session_put(session);
}
}
+/*
+ * Command LTTCOMM_SESSIOND_COMMAND_KERNEL_TRACER_STATUS
+ */
+enum lttng_error_code cmd_kernel_tracer_status(enum lttng_kernel_tracer_status *status)
+{
+ if (status == nullptr) {
+ return LTTNG_ERR_INVALID;
+ }
+
+ *status = get_kernel_tracer_status();
+ return LTTNG_OK;
+}
+
/*
* Command LTTNG_DATA_PENDING returning 0 if the data is NOT pending meaning
* ready for trace analysis (or any kind of reader) or else 1 for pending data.
*/
-int cmd_data_pending(struct ltt_session *session)
+int cmd_data_pending(const ltt_session::locked_ref& session)
{
int ret;
struct ltt_kernel_session *ksess = session->kernel_session;
struct ltt_ust_session *usess = session->ust_session;
- LTTNG_ASSERT(session);
-
DBG("Data pending for session %s", session->name);
/* Session MUST be stopped to ask for data availability. */
*
* Return LTTNG_OK on success or else a LTTNG_ERR code.
*/
-int cmd_snapshot_add_output(struct ltt_session *session,
+int cmd_snapshot_add_output(const ltt_session::locked_ref& session,
const struct lttng_snapshot_output *output,
uint32_t *id)
{
int ret;
struct snapshot_output *new_output;
- LTTNG_ASSERT(session);
LTTNG_ASSERT(output);
DBG("Cmd snapshot add output for session %s", session->name);
*
* Return LTTNG_OK on success or else a LTTNG_ERR code.
*/
-int cmd_snapshot_del_output(struct ltt_session *session, const struct lttng_snapshot_output *output)
+int cmd_snapshot_del_output(const ltt_session::locked_ref& session,
+ const struct lttng_snapshot_output *output)
{
int ret;
struct snapshot_output *sout = nullptr;
- LTTNG_ASSERT(session);
LTTNG_ASSERT(output);
- lttng::urcu::read_lock_guard read_lock;
+ const lttng::urcu::read_lock_guard read_lock;
/*
* Permission denied to create an output if the session is not
*
* Return the size of the newly allocated outputs or a negative LTTNG_ERR code.
*/
-ssize_t cmd_snapshot_list_outputs(struct ltt_session *session,
+ssize_t cmd_snapshot_list_outputs(const ltt_session::locked_ref& session,
struct lttng_snapshot_output **outputs)
{
int ret, idx = 0;
struct lttng_snapshot_output *list = nullptr;
- struct lttng_ht_iter iter;
- struct snapshot_output *output;
- LTTNG_ASSERT(session);
LTTNG_ASSERT(outputs);
DBG("Cmd snapshot list outputs for session %s", session->name);
}
/* Copy list from session to the new list object. */
- {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (
- session->snapshot.output_ht->ht, &iter.iter, output, node.node) {
- LTTNG_ASSERT(output->consumer);
- list[idx].id = output->id;
- list[idx].max_size = output->max_size;
- if (lttng_strncpy(list[idx].name, output->name, sizeof(list[idx].name))) {
+ for (auto *output : lttng::urcu::lfht_iteration_adapter<snapshot_output,
+ decltype(snapshot_output::node),
+ &snapshot_output::node>(
+ *session->snapshot.output_ht->ht)) {
+ LTTNG_ASSERT(output->consumer);
+ list[idx].id = output->id;
+ list[idx].max_size = output->max_size;
+ if (lttng_strncpy(list[idx].name, output->name, sizeof(list[idx].name))) {
+ ret = -LTTNG_ERR_INVALID;
+ goto error;
+ }
+
+ if (output->consumer->type == CONSUMER_DST_LOCAL) {
+ if (lttng_strncpy(list[idx].ctrl_url,
+ output->consumer->dst.session_root_path,
+ sizeof(list[idx].ctrl_url))) {
ret = -LTTNG_ERR_INVALID;
goto error;
}
-
- if (output->consumer->type == CONSUMER_DST_LOCAL) {
- if (lttng_strncpy(list[idx].ctrl_url,
- output->consumer->dst.session_root_path,
- sizeof(list[idx].ctrl_url))) {
- ret = -LTTNG_ERR_INVALID;
- goto error;
- }
- } else {
- /* Control URI. */
- ret = uri_to_str_url(&output->consumer->dst.net.control,
- list[idx].ctrl_url,
- sizeof(list[idx].ctrl_url));
- if (ret < 0) {
- ret = -LTTNG_ERR_NOMEM;
- goto error;
- }
-
- /* Data URI. */
- ret = uri_to_str_url(&output->consumer->dst.net.data,
- list[idx].data_url,
- sizeof(list[idx].data_url));
- if (ret < 0) {
- ret = -LTTNG_ERR_NOMEM;
- goto error;
- }
+ } else {
+ /* Control URI. */
+ ret = uri_to_str_url(&output->consumer->dst.net.control,
+ list[idx].ctrl_url,
+ sizeof(list[idx].ctrl_url));
+ if (ret < 0) {
+ ret = -LTTNG_ERR_NOMEM;
+ goto error;
}
- idx++;
+ /* Data URI. */
+ ret = uri_to_str_url(&output->consumer->dst.net.data,
+ list[idx].data_url,
+ sizeof(list[idx].data_url));
+ if (ret < 0) {
+ ret = -LTTNG_ERR_NOMEM;
+ goto error;
+ }
}
+
+ idx++;
}
*outputs = list;
*
* Return 0 if the metadata can be generated, a LTTNG_ERR code otherwise.
*/
-static int check_regenerate_metadata_support(struct ltt_session *session)
+static int check_regenerate_metadata_support(const ltt_session::locked_ref& session)
{
int ret;
- LTTNG_ASSERT(session);
-
if (session->live_timer != 0) {
ret = LTTNG_ERR_LIVE_SESSION;
goto end;
*
* Return LTTNG_OK on success or else a LTTNG_ERR code.
*/
-int cmd_regenerate_metadata(struct ltt_session *session)
+int cmd_regenerate_metadata(const ltt_session::locked_ref& session)
{
int ret;
- LTTNG_ASSERT(session);
-
ret = check_regenerate_metadata_support(session);
if (ret) {
goto end;
*
* Return LTTNG_OK on success or else a LTTNG_ERR code.
*/
-int cmd_regenerate_statedump(struct ltt_session *session)
+int cmd_regenerate_statedump(const ltt_session::locked_ref& session)
{
int ret;
- LTTNG_ASSERT(session);
-
if (!session->active) {
ret = LTTNG_ERR_SESSION_NOT_STARTED;
goto end;
trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
- session_lock_list();
+ const auto list_lock = lttng::sessiond::lock_session_list();
switch (trigger_domain) {
case LTTNG_DOMAIN_KERNEL:
{
(int) trigger_owner,
ret_code);
}
+
+ return ret_code;
}
break;
}
break;
case LTTNG_DOMAIN_JUL:
case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_LOG4J2:
case LTTNG_DOMAIN_PYTHON:
{
/* Agent domains. */
agt = agent_create(trigger_domain);
if (!agt) {
ret_code = LTTNG_ERR_NOMEM;
- goto end_unlock_session_list;
+ return ret_code;
}
agent_add(agt, the_trigger_agents_ht_by_domain);
ret_code = (lttng_error_code) trigger_agent_enable(trigger, agt);
if (ret_code != LTTNG_OK) {
- goto end_unlock_session_list;
+ return ret_code;
}
break;
abort();
}
- ret_code = LTTNG_OK;
-end_unlock_session_list:
- session_unlock_list();
- return ret_code;
+ return LTTNG_OK;
}
-enum lttng_error_code cmd_register_trigger(const struct lttng_credentials *cmd_creds,
- struct lttng_trigger *trigger,
- bool is_trigger_anonymous,
- struct notification_thread_handle *notification_thread,
- struct lttng_trigger **return_trigger)
+lttng::ctl::trigger cmd_register_trigger(const struct lttng_credentials *cmd_creds,
+ struct lttng_trigger *trigger,
+ bool is_trigger_anonymous,
+ struct notification_thread_handle *notification_thread)
{
enum lttng_error_code ret_code;
const char *trigger_name;
*/
if (!lttng_credentials_is_equal_uid(lttng_trigger_get_credentials(trigger), cmd_creds)) {
if (lttng_credentials_get_uid(cmd_creds) != 0) {
- ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
- trigger_name,
- (int) trigger_owner,
- (int) lttng_credentials_get_uid(cmd_creds));
- ret_code = LTTNG_ERR_INVALID_TRIGGER;
- goto end;
+ LTTNG_THROW_CTL(
+ fmt::format(
+ "Trigger credentials do not match the command credentials: trigger_name = `{}`, trigger_owner_uid={}, command_creds_uid={}",
+ trigger_name,
+ trigger_owner,
+ lttng_credentials_get_uid(cmd_creds)),
+ LTTNG_ERR_INVALID_TRIGGER);
}
}
*/
ret_code = lttng_trigger_generate_bytecode(trigger, cmd_creds);
if (ret_code != LTTNG_OK) {
- ERR("Failed to generate bytecode of trigger: trigger name = '%s', trigger owner uid = %d, error code = %d",
- trigger_name,
- (int) trigger_owner,
- ret_code);
- goto end;
+ LTTNG_THROW_CTL(
+ fmt::format(
+ "Failed to generate bytecode of trigger: trigger_name=`{}`, trigger_owner_uid={}",
+ trigger_name,
+ trigger_owner),
+ ret_code);
}
/*
ret_code = notification_thread_command_register_trigger(
notification_thread, trigger, is_trigger_anonymous);
if (ret_code != LTTNG_OK) {
- DBG("Failed to register trigger to notification thread: trigger name = '%s', trigger owner uid = %d, error code = %d",
- trigger_name,
- (int) trigger_owner,
- ret_code);
- goto end;
+ LTTNG_THROW_CTL(
+ fmt::format(
+ "Failed to register trigger to notification thread: trigger_name=`{}`, trigger_owner_uid={}",
+ trigger_name,
+ trigger_owner),
+ ret_code);
}
trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
ret_code = synchronize_tracer_notifier_register(
notification_thread, trigger, cmd_creds);
if (ret_code != LTTNG_OK) {
- ERR("Error registering tracer notifier: %s", lttng_strerror(-ret_code));
- goto end;
+ LTTNG_THROW_CTL("Failed to register tracer notifier", ret_code);
}
}
* reference to the trigger so the caller doesn't have to care if those
* are distinct instances or not.
*/
- if (ret_code == LTTNG_OK) {
- lttng_trigger_get(trigger);
- *return_trigger = trigger;
- /* Ownership of trigger was transferred to caller. */
- trigger = nullptr;
- }
-end:
- return ret_code;
+ LTTNG_ASSERT(ret_code == LTTNG_OK);
+ lttng_trigger_get(trigger);
+ return lttng::ctl::trigger(trigger);
}
static enum lttng_error_code
LTTNG_ASSERT(lttng_condition_get_type(condition) ==
LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
- session_lock_list();
+ const auto list_lock = lttng::sessiond::lock_session_list();
switch (trigger_domain) {
case LTTNG_DOMAIN_KERNEL:
ret_code = kernel_unregister_event_notifier(trigger);
if (ret_code != LTTNG_OK) {
- goto end_unlock_session_list;
+ return ret_code;
}
break;
break;
case LTTNG_DOMAIN_JUL:
case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_LOG4J2:
case LTTNG_DOMAIN_PYTHON:
{
/* Agent domains. */
LTTNG_ASSERT(agt);
ret_code = (lttng_error_code) trigger_agent_disable(trigger, agt);
if (ret_code != LTTNG_OK) {
- goto end_unlock_session_list;
+ return ret_code;
}
break;
abort();
}
- ret_code = LTTNG_OK;
-
-end_unlock_session_list:
- session_unlock_list();
- return ret_code;
+ return LTTNG_OK;
}
enum lttng_error_code cmd_unregister_trigger(const struct lttng_credentials *cmd_creds,
* Return LTTNG_OK on success or a LTTNG_ERR code.
*/
static enum lttng_error_code set_relayd_for_snapshot(struct consumer_output *output,
- const struct ltt_session *session)
+ const ltt_session::locked_ref& session)
{
enum lttng_error_code status = LTTNG_OK;
- struct lttng_ht_iter iter;
- struct consumer_socket *socket;
LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
const char *base_path;
LTTNG_ASSERT(output);
- LTTNG_ASSERT(session);
DBG2("Set relayd object from snapshot output");
if (session->current_trace_chunk) {
- enum lttng_trace_chunk_status chunk_status = lttng_trace_chunk_get_id(
+		const lttng_trace_chunk_status chunk_status = lttng_trace_chunk_get_id(
			session->current_trace_chunk, &current_chunk_id.value);
if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK) {
* For each consumer socket, create and send the relayd object of the
* snapshot output.
*/
- {
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (output->socks->ht, &iter.iter, socket, node.node) {
- pthread_mutex_lock(socket->lock);
- status = send_consumer_relayd_sockets(
- session->id,
- output,
- socket,
- session->name,
- session->hostname,
- base_path,
- session->live_timer,
-				current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
- session->creation_time,
- session->name_contains_creation_time);
- pthread_mutex_unlock(socket->lock);
- if (status != LTTNG_OK) {
- goto error;
- }
+ for (auto *socket :
+ lttng::urcu::lfht_iteration_adapter<consumer_socket,
+ decltype(consumer_socket::node),
+ &consumer_socket::node>(*output->socks->ht)) {
+ pthread_mutex_lock(socket->lock);
+ status = send_consumer_relayd_sockets(
+ session->id,
+ output,
+ socket,
+ session->name,
+ session->hostname,
+ base_path,
+ session->live_timer,
+			current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
+ session->creation_time,
+ session->name_contains_creation_time);
+ pthread_mutex_unlock(socket->lock);
+ if (status != LTTNG_OK) {
+ goto error;
}
}
*/
static enum lttng_error_code record_kernel_snapshot(struct ltt_kernel_session *ksess,
const struct consumer_output *output,
- const struct ltt_session *session,
uint64_t nb_packets_per_stream)
{
enum lttng_error_code status;
LTTNG_ASSERT(ksess);
LTTNG_ASSERT(output);
- LTTNG_ASSERT(session);
status = kernel_snapshot_record(ksess, output, nb_packets_per_stream);
return status;
*/
static enum lttng_error_code record_ust_snapshot(struct ltt_ust_session *usess,
const struct consumer_output *output,
- const struct ltt_session *session,
uint64_t nb_packets_per_stream)
{
enum lttng_error_code status;
LTTNG_ASSERT(usess);
LTTNG_ASSERT(output);
- LTTNG_ASSERT(session);
status = ust_app_snapshot_record(usess, output, nb_packets_per_stream);
return status;
}
-static uint64_t get_session_size_one_more_packet_per_stream(const struct ltt_session *session,
+static uint64_t get_session_size_one_more_packet_per_stream(const ltt_session::locked_ref& session,
uint64_t cur_nr_packets)
{
uint64_t tot_size = 0;
if (session->kernel_session) {
- struct ltt_kernel_channel *chan;
- const struct ltt_kernel_session *ksess = session->kernel_session;
+ struct ltt_kernel_session *ksess = session->kernel_session;
- cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
+		for (auto chan : lttng::urcu::list_iteration_adapter<ltt_kernel_channel,
+								     &ltt_kernel_channel::list>(
+			     ksess->channel_list.head)) {
if (cur_nr_packets >= chan->channel->attr.num_subbuf) {
/*
* Don't take channel into account if we
* an approximation: for instance, applications could appear/disappear
* in between this call and actually grabbing data.
*/
-static int64_t get_session_nb_packets_per_stream(const struct ltt_session *session,
+static int64_t get_session_nb_packets_per_stream(const ltt_session::locked_ref& session,
uint64_t max_size)
{
int64_t size_left;
return cur_nb_packets;
}
-static enum lttng_error_code snapshot_record(struct ltt_session *session,
+static enum lttng_error_code snapshot_record(const ltt_session::locked_ref& session,
const struct snapshot_output *snapshot_output)
{
int64_t nb_packets_per_stream;
if (session->kernel_session) {
ret_code = record_kernel_snapshot(session->kernel_session,
snapshot_kernel_consumer_output,
- session,
nb_packets_per_stream);
if (ret_code != LTTNG_OK) {
goto error_close_trace_chunk;
}
if (session->ust_session) {
- ret_code = record_ust_snapshot(session->ust_session,
- snapshot_ust_consumer_output,
- session,
- nb_packets_per_stream);
+ ret_code = record_ust_snapshot(
+ session->ust_session, snapshot_ust_consumer_output, nb_packets_per_stream);
if (ret_code != LTTNG_OK) {
goto error_close_trace_chunk;
}
*
* Return LTTNG_OK on success or else a LTTNG_ERR code.
*/
-int cmd_snapshot_record(struct ltt_session *session,
+int cmd_snapshot_record(const ltt_session::locked_ref& session,
const struct lttng_snapshot_output *output,
int wait __attribute__((unused)))
{
char datetime[16];
struct snapshot_output *tmp_output = nullptr;
- LTTNG_ASSERT(session);
LTTNG_ASSERT(output);
DBG("Cmd snapshot record for session %s", session->name);
}
snapshot_success = 1;
} else {
- struct snapshot_output *sout;
- struct lttng_ht_iter iter;
-
- lttng::urcu::read_lock_guard read_lock;
-
- cds_lfht_for_each_entry (
- session->snapshot.output_ht->ht, &iter.iter, sout, node.node) {
+ for (auto *sout :
+ lttng::urcu::lfht_iteration_adapter<snapshot_output,
+ decltype(snapshot_output::node),
+ &snapshot_output::node>(
+ *session->snapshot.output_ht->ht)) {
struct snapshot_output output_copy;
/*
/*
* Command LTTNG_SET_SESSION_SHM_PATH processed by the client thread.
*/
-int cmd_set_session_shm_path(struct ltt_session *session, const char *shm_path)
+int cmd_set_session_shm_path(const ltt_session::locked_ref& session, const char *shm_path)
{
- /* Safety net */
- LTTNG_ASSERT(session);
-
/*
* Can only set shm path before session is started.
*/
return LTTNG_ERR_SESSION_STARTED;
}
- strncpy(session->shm_path, shm_path, sizeof(session->shm_path));
- session->shm_path[sizeof(session->shm_path) - 1] = '\0';
-
- return LTTNG_OK;
+ /* Report an error if shm_path is too long or not null-terminated. */
+ const auto copy_ret = lttng_strncpy(session->shm_path, shm_path, sizeof(session->shm_path));
+ return copy_ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID;
}
/*
*
* Returns LTTNG_OK on success or else a negative LTTng error code.
*/
-int cmd_rotate_session(struct ltt_session *session,
+int cmd_rotate_session(const ltt_session::locked_ref& session,
struct lttng_rotate_session_return *rotate_return,
bool quiet_rotation,
enum lttng_trace_chunk_command_type command)
bool failed_to_rotate = false;
enum lttng_error_code rotation_fail_code = LTTNG_OK;
- LTTNG_ASSERT(session);
-
if (!session->has_been_started) {
cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
goto end;
*
* Return LTTNG_OK on success or else an LTTNG_ERR code.
*/
-int cmd_rotate_get_info(struct ltt_session *session,
+int cmd_rotate_get_info(const ltt_session::locked_ref& session,
struct lttng_rotation_get_info_return *info_return,
uint64_t rotation_id)
{
*
* Return LTTNG_OK on success or else a positive LTTNG_ERR code.
*/
-int cmd_rotation_set_schedule(struct ltt_session *session,
+int cmd_rotation_set_schedule(const ltt_session::locked_ref& session,
bool activate,
enum lttng_rotation_schedule_type schedule_type,
- uint64_t new_value,
- struct notification_thread_handle *notification_thread_handle)
+ uint64_t new_value)
{
int ret;
uint64_t *parameter_value;
- LTTNG_ASSERT(session);
-
DBG("Cmd rotate set schedule session %s", session->name);
if (session->live_timer || !session->output_traces) {
break;
case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
if (activate) {
- ret = subscribe_session_consumed_size_rotation(
- session, new_value, notification_thread_handle);
- if (ret) {
- ERR("Failed to enable consumed-size notification in ROTATION_SET_SCHEDULE command");
+ try {
+ the_rotation_thread_handle->subscribe_session_consumed_size_rotation(
+ *session, new_value);
+ } catch (const std::exception& e) {
+ ERR("Failed to enable consumed-size notification in ROTATION_SET_SCHEDULE command: %s",
+ e.what());
ret = LTTNG_ERR_UNK;
goto end;
}
} else {
- ret = unsubscribe_session_consumed_size_rotation(
- session, notification_thread_handle);
- if (ret) {
- ERR("Failed to disable consumed-size notification in ROTATION_SET_SCHEDULE command");
+ try {
+ the_rotation_thread_handle
+ ->unsubscribe_session_consumed_size_rotation(*session);
+ } catch (const std::exception& e) {
+ ERR("Failed to disable consumed-size notification in ROTATION_SET_SCHEDULE command: %s",
+ e.what());
ret = LTTNG_ERR_UNK;
goto end;
}