Clean-up: sessiond: move registry_session free functions under class

diff --git a/src/bin/lttng-sessiond/ust-app.cpp b/src/bin/lttng-sessiond/ust-app.cpp
index a848d474c9ba5c697d42b12f6ce0e2f0abae9025..16848191d6f6d8f8b2bde23e207d3a3f47b8824c 100644
--- a/src/bin/lttng-sessiond/ust-app.cpp
+++ b/src/bin/lttng-sessiond/ust-app.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 EfficiOS Inc.
  * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
  *
  * SPDX-License-Identifier: GPL-2.0-only
@@ -7,10 +7,48 @@
  */
 
 #define _LGPL_SOURCE
+
+#include "buffer-registry.hpp"
+#include "condition-internal.hpp"
+#include "event-notifier-error-accounting.hpp"
+#include "event.hpp"
+#include "fd-limit.hpp"
+#include "field.hpp"
+#include "health-sessiond.hpp"
+#include "lttng-sessiond.hpp"
+#include "lttng-ust-ctl.hpp"
+#include "lttng-ust-error.hpp"
+#include "notification-thread-commands.hpp"
+#include "rotate.hpp"
+#include "session.hpp"
+#include "ust-app.hpp"
+#include "ust-consumer.hpp"
+#include "ust-field-convert.hpp"
+#include "utils.hpp"
+
+#include <common/bytecode/bytecode.hpp>
+#include <common/common.hpp>
+#include <common/compat/errno.hpp>
+#include <common/exception.hpp>
+#include <common/format.hpp>
+#include <common/hashtable/utils.hpp>
+#include <common/make-unique.hpp>
+#include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
+
+#include <lttng/condition/condition.h>
+#include <lttng/condition/event-rule-matches-internal.hpp>
+#include <lttng/condition/event-rule-matches.h>
+#include <lttng/event-rule/event-rule-internal.hpp>
+#include <lttng/event-rule/event-rule.h>
+#include <lttng/event-rule/user-tracepoint.h>
+#include <lttng/trigger/trigger-internal.hpp>
+
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
 #include <pthread.h>
+#include <signal.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <urcu/compiler.h>
-#include <signal.h>
-
-#include <common/bytecode/bytecode.h>
-#include <common/compat/errno.h>
-#include <common/common.h>
-#include <common/hashtable/utils.h>
-#include <lttng/event-rule/event-rule.h>
-#include <lttng/event-rule/event-rule-internal.h>
-#include <lttng/event-rule/user-tracepoint.h>
-#include <lttng/condition/condition.h>
-#include <lttng/condition/event-rule-matches-internal.h>
-#include <lttng/condition/event-rule-matches.h>
-#include <lttng/trigger/trigger-internal.h>
-#include <common/sessiond-comm/sessiond-comm.h>
-
-#include "buffer-registry.h"
-#include "condition-internal.h"
-#include "fd-limit.h"
-#include "health-sessiond.h"
-#include "ust-app.h"
-#include "ust-consumer.h"
-#include "lttng-ust-ctl.h"
-#include "lttng-ust-error.h"
-#include "utils.h"
-#include "session.h"
-#include "lttng-sessiond.h"
-#include "notification-thread-commands.h"
-#include "rotate.h"
-#include "event.h"
-#include "event-notifier-error-accounting.h"
+#include <vector>
 
+namespace lsu = lttng::sessiond::ust;
+namespace lst = lttng::sessiond::trace;
 
 struct lttng_ht *ust_app_ht;
 struct lttng_ht *ust_app_ht_by_sock;
@@ -66,6 +77,63 @@ static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
 static uint64_t _next_session_id;
 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
 
+namespace {
+
+/*
+ * Return the session registry according to the buffer type of the given
+ * session.
+ *
+ * A registry per UID object MUST exist before calling this function or else
+ * it LTTNG_ASSERT()s if not found. RCU read side lock must be acquired.
+ */
+static lsu::registry_session *get_session_registry(
+               const struct ust_app_session *ua_sess)
+{
+       lsu::registry_session *registry = NULL;
+
+       LTTNG_ASSERT(ua_sess);
+
+       switch (ua_sess->buffer_type) {
+       case LTTNG_BUFFER_PER_PID:
+       {
+               struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
+               if (!reg_pid) {
+                       goto error;
+               }
+               registry = reg_pid->registry->reg.ust;
+               break;
+       }
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
+                               ua_sess->tracing_id, ua_sess->bits_per_long,
+                               lttng_credentials_get_uid(&ua_sess->real_credentials));
+               if (!reg_uid) {
+                       goto error;
+               }
+               registry = reg_uid->registry->reg.ust;
+               break;
+       }
+       default:
+               abort();
+       };
+
+error:
+       return registry;
+}
+
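+/*
+ * Lock the registry of the given UST app session, if any, and return it
+ * wrapped in a locked_ptr which releases the lock when reset or destroyed.
+ */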
+lsu::registry_session::locked_ptr
+get_locked_session_registry(const struct ust_app_session *ua_sess)
+{
+       auto session = get_session_registry(ua_sess);
+       if (session) {
+               pthread_mutex_lock(&session->_lock);
+       }
+
+       return lsu::registry_session::locked_ptr{session};
+}
+} /* namespace */
+
 /*
  * Return the incremented value of next_channel_key.
  */
@@ -233,49 +301,6 @@ static void close_notify_sock_rcu(struct rcu_head *head)
        free(obj);
 }
 
-/*
- * Return the session registry according to the buffer type of the given
- * session.
- *
- * A registry per UID object MUST exists before calling this function or else
- * it LTTNG_ASSERT() if not found. RCU read side lock must be acquired.
- */
-static struct ust_registry_session *get_session_registry(
-               struct ust_app_session *ua_sess)
-{
-       struct ust_registry_session *registry = NULL;
-
-       LTTNG_ASSERT(ua_sess);
-
-       switch (ua_sess->buffer_type) {
-       case LTTNG_BUFFER_PER_PID:
-       {
-               struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
-               if (!reg_pid) {
-                       goto error;
-               }
-               registry = reg_pid->registry->reg.ust;
-               break;
-       }
-       case LTTNG_BUFFER_PER_UID:
-       {
-               struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
-                               ua_sess->tracing_id, ua_sess->bits_per_long,
-                               lttng_credentials_get_uid(&ua_sess->real_credentials));
-               if (!reg_uid) {
-                       goto error;
-               }
-               registry = reg_uid->registry->reg.ust;
-               break;
-       }
-       default:
-               abort();
-       };
-
-error:
-       return registry;
-}
-
 /*
  * Delete ust context safely. RCU read lock must be held before calling
  * this function.
@@ -287,6 +312,7 @@ void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
        int ret;
 
        LTTNG_ASSERT(ua_ctx);
+       ASSERT_RCU_READ_LOCKED();
 
        if (ua_ctx->obj) {
                pthread_mutex_lock(&app->sock_lock);
@@ -321,6 +347,7 @@ void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
        int ret;
 
        LTTNG_ASSERT(ua_event);
+       ASSERT_RCU_READ_LOCKED();
 
        free(ua_event->filter);
        if (ua_event->exclusion != NULL)
@@ -443,25 +470,20 @@ void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
                struct ust_app *app)
 {
        LTTNG_ASSERT(stream);
+       ASSERT_RCU_READ_LOCKED();
 
        (void) release_ust_app_stream(sock, stream, app);
        free(stream);
 }
 
-/*
- * We need to execute ht_destroy outside of RCU read-side critical
- * section and outside of call_rcu thread, so we postpone its execution
- * using ht_cleanup_push. It is simpler than to change the semantic of
- * the many callers of delete_ust_app_session().
- */
 static
 void delete_ust_app_channel_rcu(struct rcu_head *head)
 {
        struct ust_app_channel *ua_chan =
                caa_container_of(head, struct ust_app_channel, rcu_head);
 
-       ht_cleanup_push(ua_chan->ctx);
-       ht_cleanup_push(ua_chan->events);
+       lttng_ht_destroy(ua_chan->ctx);
+       lttng_ht_destroy(ua_chan->events);
        free(ua_chan);
 }
 
@@ -537,18 +559,19 @@ end:
  *
  * The session list lock must be held by the caller.
  */
-static
-void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
-               struct ust_app *app)
+static void delete_ust_app_channel(int sock,
+               struct ust_app_channel *ua_chan,
+               struct ust_app *app,
+               const lsu::registry_session::locked_ptr& locked_registry)
 {
        int ret;
        struct lttng_ht_iter iter;
        struct ust_app_event *ua_event;
        struct ust_app_ctx *ua_ctx;
        struct ust_app_stream *stream, *stmp;
-       struct ust_registry_session *registry;
 
        LTTNG_ASSERT(ua_chan);
+       ASSERT_RCU_READ_LOCKED();
 
        DBG3("UST app deleting channel %s", ua_chan->name);
 
@@ -576,11 +599,14 @@ void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
 
        if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
                /* Wipe and free registry from session registry. */
-               registry = get_session_registry(ua_chan->session);
-               if (registry) {
-                       ust_registry_channel_del_free(registry, ua_chan->key,
-                               sock >= 0);
+               if (locked_registry) {
+                       try {
+                               locked_registry->remove_channel(ua_chan->key, sock >= 0);
+                       } catch (const std::exception &ex) {
+                               DBG("Could not find channel for removal: %s", ex.what());
+                       }
                }
+
                /*
                 * A negative socket can be used by the caller when
                 * cleaning-up a ua_chan in an error path. Skip the
@@ -650,7 +676,7 @@ int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data
 /*
  * Push metadata to consumer socket.
  *
- * RCU read-side lock must be held to guarantee existance of socket.
+ * RCU read-side lock must be held to guarantee existence of socket.
  * Must be called with the ust app session lock held.
  * Must be called with the registry lock held.
  *
@@ -659,8 +685,9 @@ int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data
  * but it can be caused by recoverable errors (e.g. the application has
  * terminated concurrently).
  */
-ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
-               struct consumer_socket *socket, int send_zero_data)
+ssize_t ust_app_push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
+               struct consumer_socket *socket,
+               int send_zero_data)
 {
        int ret;
        char *metadata_str = NULL;
@@ -668,10 +695,11 @@ ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
        ssize_t ret_val;
        uint64_t metadata_key, metadata_version;
 
-       LTTNG_ASSERT(registry);
+       LTTNG_ASSERT(locked_registry);
        LTTNG_ASSERT(socket);
+       ASSERT_RCU_READ_LOCKED();
 
-       metadata_key = registry->metadata_key;
+       metadata_key = locked_registry->_metadata_key;
 
        /*
         * Means that no metadata was assigned to the session. This can
@@ -681,13 +709,13 @@ ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
                return 0;
        }
 
-       offset = registry->metadata_len_sent;
-       len = registry->metadata_len - registry->metadata_len_sent;
-       new_metadata_len_sent = registry->metadata_len;
-       metadata_version = registry->metadata_version;
+       offset = locked_registry->_metadata_len_sent;
+       len = locked_registry->_metadata_len - locked_registry->_metadata_len_sent;
+       new_metadata_len_sent = locked_registry->_metadata_len;
+       metadata_version = locked_registry->_metadata_version;
        if (len == 0) {
                DBG3("No metadata to push for metadata key %" PRIu64,
-                               registry->metadata_key);
+                               locked_registry->_metadata_key);
                ret_val = len;
                if (send_zero_data) {
                        DBG("No metadata to push");
@@ -697,17 +725,17 @@ ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
        }
 
        /* Allocate only what we have to send. */
-       metadata_str = (char *) zmalloc(len);
+       metadata_str = calloc<char>(len);
        if (!metadata_str) {
                PERROR("zmalloc ust app metadata string");
                ret_val = -ENOMEM;
                goto error;
        }
        /* Copy what we haven't sent out. */
-       memcpy(metadata_str, registry->metadata + offset, len);
+       memcpy(metadata_str, locked_registry->_metadata + offset, len);
 
 push_data:
-       pthread_mutex_unlock(&registry->lock);
+       pthread_mutex_unlock(&locked_registry->_lock);
        /*
         * We need to unlock the registry while we push metadata to
         * break a circular dependency between the consumerd metadata
@@ -722,7 +750,7 @@ push_data:
         */
        ret = consumer_push_metadata(socket, metadata_key,
                        metadata_str, len, offset, metadata_version);
-       pthread_mutex_lock(&registry->lock);
+       pthread_mutex_lock(&locked_registry->_lock);
        if (ret < 0) {
                /*
                 * There is an acceptable race here between the registry
@@ -759,8 +787,8 @@ push_data:
                 * largest metadata_len_sent value of the concurrent
                 * send.
                 */
-               registry->metadata_len_sent =
-                       std::max(registry->metadata_len_sent,
+               locked_registry->_metadata_len_sent =
+                       std::max(locked_registry->_metadata_len_sent,
                                new_metadata_len_sent);
        }
        free(metadata_str);
@@ -776,7 +804,7 @@ error:
                 * the metadata cache has been destroyed on the
                 * consumer.
                 */
-               registry->metadata_closed = 1;
+               locked_registry->_metadata_closed = true;
        }
 error_push:
        free(metadata_str);
@@ -789,7 +817,7 @@ error_push:
  * socket to send the metadata is retrieved from consumer, if sock
  * is not NULL we use it to send the metadata.
  * RCU read-side lock must be held while calling this function,
- * therefore ensuring existance of registry. It also ensures existance
+ * therefore ensuring existence of registry. It also ensures existence
  * of socket throughout this function.
  *
  * Return 0 on success else a negative error.
@@ -797,40 +825,38 @@ error_push:
  * but it can be caused by recoverable errors (e.g. the application has
  * terminated concurrently).
  */
-static int push_metadata(struct ust_registry_session *registry,
+static int push_metadata(const lsu::registry_session::locked_ptr& locked_registry,
                struct consumer_output *consumer)
 {
        int ret_val;
        ssize_t ret;
        struct consumer_socket *socket;
 
-       LTTNG_ASSERT(registry);
+       LTTNG_ASSERT(locked_registry);
        LTTNG_ASSERT(consumer);
+       ASSERT_RCU_READ_LOCKED();
 
-       pthread_mutex_lock(&registry->lock);
-       if (registry->metadata_closed) {
+       if (locked_registry->_metadata_closed) {
                ret_val = -EPIPE;
                goto error;
        }
 
        /* Get consumer socket to use to push the metadata.*/
-       socket = consumer_find_socket_by_bitness(registry->bits_per_long,
+       socket = consumer_find_socket_by_bitness(locked_registry->abi.bits_per_long,
                        consumer);
        if (!socket) {
                ret_val = -1;
                goto error;
        }
 
-       ret = ust_app_push_metadata(registry, socket, 0);
+       ret = ust_app_push_metadata(locked_registry, socket, 0);
        if (ret < 0) {
                ret_val = ret;
                goto error;
        }
-       pthread_mutex_unlock(&registry->lock);
        return 0;
 
 error:
-       pthread_mutex_unlock(&registry->lock);
        return ret_val;
 }
 
@@ -846,39 +872,17 @@ error:
  *
  * Return 0 on success else a negative value.
  */
-static int close_metadata(struct ust_registry_session *registry,
+static int close_metadata(uint64_t metadata_key, unsigned int consumer_bitness,
                struct consumer_output *consumer)
 {
        int ret;
        struct consumer_socket *socket;
-       uint64_t metadata_key;
-       bool registry_was_already_closed;
+       lttng::urcu::read_lock_guard read_lock_guard;
 
-       LTTNG_ASSERT(registry);
        LTTNG_ASSERT(consumer);
 
-       rcu_read_lock();
-
-       pthread_mutex_lock(&registry->lock);
-       metadata_key = registry->metadata_key;
-       registry_was_already_closed = registry->metadata_closed;
-       if (metadata_key != 0) {
-               /*
-                * Metadata closed. Even on error this means that the consumer
-                * is not responding or not found so either way a second close
-                * should NOT be emit for this registry.
-                */
-               registry->metadata_closed = 1;
-       }
-       pthread_mutex_unlock(&registry->lock);
-
-       if (metadata_key == 0 || registry_was_already_closed) {
-               ret = 0;
-               goto end;
-       }
-
-       /* Get consumer socket to use to push the metadata.*/
-       socket = consumer_find_socket_by_bitness(registry->bits_per_long,
+       /* Get consumer socket to use to push the metadata. */
+       socket = consumer_find_socket_by_bitness(consumer_bitness,
                        consumer);
        if (!socket) {
                ret = -1;
@@ -891,23 +895,16 @@ static int close_metadata(struct ust_registry_session *registry,
        }
 
 end:
-       rcu_read_unlock();
        return ret;
 }
 
-/*
- * We need to execute ht_destroy outside of RCU read-side critical
- * section and outside of call_rcu thread, so we postpone its execution
- * using ht_cleanup_push. It is simpler than to change the semantic of
- * the many callers of delete_ust_app_session().
- */
 static
 void delete_ust_app_session_rcu(struct rcu_head *head)
 {
        struct ust_app_session *ua_sess =
                caa_container_of(head, struct ust_app_session, rcu_head);
 
-       ht_cleanup_push(ua_sess->channels);
+       lttng_ht_destroy(ua_sess->channels);
        free(ua_sess);
 }
 
@@ -924,21 +921,30 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
        int ret;
        struct lttng_ht_iter iter;
        struct ust_app_channel *ua_chan;
-       struct ust_registry_session *registry;
 
        LTTNG_ASSERT(ua_sess);
+       ASSERT_RCU_READ_LOCKED();
 
        pthread_mutex_lock(&ua_sess->lock);
 
        LTTNG_ASSERT(!ua_sess->deleted);
        ua_sess->deleted = true;
 
-       registry = get_session_registry(ua_sess);
+       auto locked_registry = get_locked_session_registry(ua_sess);
        /* Registry can be null on error path during initialization. */
-       if (registry) {
+       if (locked_registry) {
                /* Push metadata for application before freeing the application. */
-               (void) push_metadata(registry, ua_sess->consumer);
+               (void) push_metadata(locked_registry, ua_sess->consumer);
+       }
+
+       cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
+                       node.node) {
+               ret = lttng_ht_del(ua_sess->channels, &iter);
+               LTTNG_ASSERT(!ret);
+               delete_ust_app_channel(sock, ua_chan, app, locked_registry);
+       }
 
+       if (locked_registry) {
                /*
                 * Don't ask to close metadata for global per UID buffers. Close
                 * metadata only on destroy trace session in this case. Also, the
@@ -946,16 +952,17 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
                 * close so don't send a close command if closed.
                 */
                if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
-                       /* And ask to close it for this session registry. */
-                       (void) close_metadata(registry, ua_sess->consumer);
-               }
-       }
+                       const auto metadata_key = locked_registry->_metadata_key;
+                       const auto consumer_bitness = locked_registry->abi.bits_per_long;
 
-       cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
-                       node.node) {
-               ret = lttng_ht_del(ua_sess->channels, &iter);
-               LTTNG_ASSERT(!ret);
-               delete_ust_app_channel(sock, ua_chan, app);
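+                       /*
+                        * Flag the metadata as closed under the registry lock so a
+                        * second close command is never emitted for this registry.
+                        */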
+                       if (!locked_registry->_metadata_closed && metadata_key != 0) {
+                               locked_registry->_metadata_closed = true;
+                       }
+
+                       /* Release lock before communication, see comments in close_metadata(). */
+                       locked_registry.reset();
+                       (void) close_metadata(metadata_key, consumer_bitness, ua_sess->consumer);
+               }
        }
 
        /* In case of per PID, the registry is kept in the session. */
@@ -1004,8 +1011,6 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
 /*
  * Delete a traceable application structure from the global list. Never call
  * this function outside of a call_rcu call.
- *
- * RCU read side lock should _NOT_ be held when calling this function.
  */
 static
 void delete_ust_app(struct ust_app *app)
@@ -1047,10 +1052,10 @@ void delete_ust_app(struct ust_app *app)
 
        rcu_read_unlock();
 
-       ht_cleanup_push(app->sessions);
-       ht_cleanup_push(app->ust_sessions_objd);
-       ht_cleanup_push(app->ust_objd);
-       ht_cleanup_push(app->token_to_event_notifier_rule_ht);
+       lttng_ht_destroy(app->sessions);
+       lttng_ht_destroy(app->ust_sessions_objd);
+       lttng_ht_destroy(app->ust_objd);
+       lttng_ht_destroy(app->token_to_event_notifier_rule_ht);
 
        /*
         * This could be NULL if the event notifier setup failed (e.g the app
@@ -1167,7 +1172,7 @@ struct ust_app_session *alloc_ust_app_session(void)
        struct ust_app_session *ua_sess;
 
        /* Init most of the default value by allocating and zeroing */
-       ua_sess = (ust_app_session *) zmalloc(sizeof(struct ust_app_session));
+       ua_sess = zmalloc<ust_app_session>();
        if (ua_sess == NULL) {
                PERROR("malloc");
                goto error_free;
@@ -1195,7 +1200,7 @@ struct ust_app_channel *alloc_ust_app_channel(const char *name,
        struct ust_app_channel *ua_chan;
 
        /* Init most of the default value by allocating and zeroing */
-       ua_chan = (ust_app_channel *) zmalloc(sizeof(struct ust_app_channel));
+       ua_chan = zmalloc<ust_app_channel>();
        if (ua_chan == NULL) {
                PERROR("malloc");
                goto error;
@@ -1247,7 +1252,7 @@ struct ust_app_stream *ust_app_alloc_stream(void)
 {
        struct ust_app_stream *stream = NULL;
 
-       stream = (ust_app_stream *) zmalloc(sizeof(*stream));
+       stream = zmalloc<ust_app_stream>();
        if (stream == NULL) {
                PERROR("zmalloc ust app stream");
                goto error;
@@ -1270,7 +1275,7 @@ struct ust_app_event *alloc_ust_app_event(char *name,
        struct ust_app_event *ua_event;
 
        /* Init most of the default value by allocating and zeroing */
-       ua_event = (ust_app_event *) zmalloc(sizeof(struct ust_app_event));
+       ua_event = zmalloc<ust_app_event>();
        if (ua_event == NULL) {
                PERROR("Failed to allocate ust_app_event structure");
                goto error;
@@ -1307,7 +1312,7 @@ static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
        struct lttng_condition *condition = NULL;
        const struct lttng_event_rule *event_rule = NULL;
 
-       ua_event_notifier_rule = (ust_app_event_notifier_rule *) zmalloc(sizeof(struct ust_app_event_notifier_rule));
+       ua_event_notifier_rule = zmalloc<ust_app_event_notifier_rule>();
        if (ua_event_notifier_rule == NULL) {
                PERROR("Failed to allocate ust_app_event_notifier_rule structure");
                goto error;
@@ -1367,7 +1372,7 @@ struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
 {
        struct ust_app_ctx *ua_ctx;
 
-       ua_ctx = (ust_app_ctx *) zmalloc(sizeof(struct ust_app_ctx));
+       ua_ctx = zmalloc<ust_app_ctx>();
        if (ua_ctx == NULL) {
                goto error;
        }
@@ -1410,7 +1415,7 @@ static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_byt
        struct lttng_ust_abi_filter_bytecode *filter = NULL;
 
        /* Copy filter bytecode. */
-       filter = (lttng_ust_abi_filter_bytecode *) zmalloc(sizeof(*filter) + orig_f->len);
+       filter = zmalloc<lttng_ust_abi_filter_bytecode>(sizeof(*filter) + orig_f->len);
        if (!filter) {
                PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
                goto error;
@@ -1434,7 +1439,7 @@ create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
        struct lttng_ust_abi_capture_bytecode *capture = NULL;
 
        /* Copy capture bytecode. */
-       capture = (lttng_ust_abi_capture_bytecode *) zmalloc(sizeof(*capture) + orig_f->len);
+       capture = zmalloc<lttng_ust_abi_capture_bytecode>(sizeof(*capture) + orig_f->len);
        if (!capture) {
                PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
                goto error;
@@ -1456,6 +1461,8 @@ struct ust_app *ust_app_find_by_sock(int sock)
        struct lttng_ht_node_ulong *node;
        struct lttng_ht_iter iter;
 
+       ASSERT_RCU_READ_LOCKED();
+
        lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
        node = lttng_ht_iter_get_node_ulong(&iter);
        if (node == NULL) {
@@ -1478,6 +1485,8 @@ static struct ust_app *find_app_by_notify_sock(int sock)
        struct lttng_ht_node_ulong *node;
        struct lttng_ht_iter iter;
 
+       ASSERT_RCU_READ_LOCKED();
+
        lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
                        &iter);
        node = lttng_ht_iter_get_node_ulong(&iter);
@@ -1546,6 +1555,7 @@ static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
        struct ust_app_event_notifier_rule *event_notifier_rule = NULL;
 
        LTTNG_ASSERT(ht);
+       ASSERT_RCU_READ_LOCKED();
 
        lttng_ht_lookup(ht, &token, &iter);
        node = lttng_ht_iter_get_node_u64(&iter);
@@ -1713,7 +1723,7 @@ struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
        size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
                LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;
 
-       ust_exclusion = (lttng_ust_abi_event_exclusion *) zmalloc(exclusion_alloc_size);
+       ust_exclusion = zmalloc<lttng_ust_abi_event_exclusion>(exclusion_alloc_size);
        if (!ust_exclusion) {
                PERROR("malloc");
                goto end;
@@ -1986,8 +1996,6 @@ static int send_channel_pid_to_ust(struct ust_app *app,
                cds_list_del(&stream->list);
                delete_ust_app_stream(-1, stream, app);
        }
-       /* Flag the channel that it is sent to the application. */
-       ua_chan->is_sent = 1;
 
 error:
        health_code_update();
@@ -2000,7 +2008,7 @@ error:
  * Should be called with session mutex held.
  */
 static
-int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
+int create_ust_event(struct ust_app *app,
                struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
 {
        int ret = 0;
@@ -2151,7 +2159,7 @@ static int init_ust_event_notifier_from_event_rule(
 
        event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
        ret = lttng_strncpy(event_notifier->event.name, pattern,
-                       LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+                       sizeof(event_notifier->event.name));
        if (ret) {
                ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
                                pattern);
@@ -2232,7 +2240,7 @@ static int create_ust_event_notifier(struct ust_app *app,
 
        ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
 
-       DBG2("UST app event notifier %s created successfully: app = '%s': pid = %d), object = %p",
+       DBG2("UST app event notifier %s created successfully: app = '%s': pid = %d, object = %p",
                        event_notifier.event.name, app->name, app->pid,
                        ua_event_notifier_rule->obj);
 
@@ -2333,7 +2341,7 @@ static void shadow_copy_event(struct ust_app_event *ua_event,
        if (uevent->exclusion) {
                exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
                                LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
-               ua_event->exclusion = (lttng_event_exclusion *) zmalloc(exclusion_alloc_size);
+               ua_event->exclusion = zmalloc<lttng_event_exclusion>(exclusion_alloc_size);
                if (ua_event->exclusion == NULL) {
                        PERROR("malloc");
                } else {
@@ -2401,7 +2409,7 @@ static void shadow_copy_session(struct ust_app_session *ua_sess,
        LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
        LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
        ua_sess->buffer_type = usess->buffer_type;
-       ua_sess->bits_per_long = app->bits_per_long;
+       ua_sess->bits_per_long = app->abi.bits_per_long;
 
        /* There is only one consumer object per session possible. */
        consumer_output_get(usess->consumer);
@@ -2422,7 +2430,7 @@ static void shadow_copy_session(struct ust_app_session *ua_sess,
                ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
                                DEFAULT_UST_TRACE_UID_PATH,
                                lttng_credentials_get_uid(&ua_sess->real_credentials),
-                               app->bits_per_long);
+                               app->abi.bits_per_long);
                break;
        default:
                abort();
@@ -2450,7 +2458,7 @@ static void shadow_copy_session(struct ust_app_session *ua_sess,
                case LTTNG_BUFFER_PER_UID:
                        ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
                                        "/" DEFAULT_UST_TRACE_UID_PATH,
-                                       app->uid, app->bits_per_long);
+                                       app->uid, app->abi.bits_per_long);
                        break;
                default:
                        abort();
@@ -2538,17 +2546,13 @@ static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
        }
 
        /* Initialize registry. */
-       ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
-                       app->bits_per_long, app->uint8_t_alignment,
-                       app->uint16_t_alignment, app->uint32_t_alignment,
-                       app->uint64_t_alignment, app->long_alignment,
-                       app->byte_order, app->version.major, app->version.minor,
-                       reg_pid->root_shm_path, reg_pid->shm_path,
+       reg_pid->registry->reg.ust = ust_registry_session_per_pid_create(app, app->abi,
+                       app->version.major, app->version.minor, reg_pid->root_shm_path,
+                       reg_pid->shm_path,
                        lttng_credentials_get_uid(&ua_sess->effective_credentials),
                        lttng_credentials_get_gid(&ua_sess->effective_credentials),
-                       ua_sess->tracing_id,
-                       app->uid);
-       if (ret < 0) {
+                       ua_sess->tracing_id);
+       if (!reg_pid->registry->reg.ust) {
                /*
                 * reg_pid->registry->reg.ust is NULL upon error, so we need to
                 * destroy the buffer registry, because it is always expected
@@ -2591,15 +2595,15 @@ static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
 
        rcu_read_lock();
 
-       reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
+       reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
        if (!reg_uid) {
                /*
                 * This is the create channel path meaning that if there is NO
                 * registry available, we have to create one for this session.
                 */
-               ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
-                               LTTNG_DOMAIN_UST, &reg_uid,
-                               ua_sess->root_shm_path, ua_sess->shm_path);
+               ret = buffer_reg_uid_create(usess->id, app->abi.bits_per_long, app->uid,
+                               LTTNG_DOMAIN_UST, &reg_uid, ua_sess->root_shm_path,
+                               ua_sess->shm_path);
                if (ret < 0) {
                        goto error;
                }
@@ -2608,15 +2612,10 @@ static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
        }
 
        /* Initialize registry. */
-       ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
-                       app->bits_per_long, app->uint8_t_alignment,
-                       app->uint16_t_alignment, app->uint32_t_alignment,
-                       app->uint64_t_alignment, app->long_alignment,
-                       app->byte_order, app->version.major,
-                       app->version.minor, reg_uid->root_shm_path,
-                       reg_uid->shm_path, usess->uid, usess->gid,
-                       ua_sess->tracing_id, app->uid);
-       if (ret < 0) {
+       reg_uid->registry->reg.ust = ust_registry_session_per_uid_create(app->abi,
+                       app->version.major, app->version.minor, reg_uid->root_shm_path,
+                       reg_uid->shm_path, usess->uid, usess->gid, ua_sess->tracing_id, app->uid);
+       if (!reg_uid->registry->reg.ust) {
                /*
                 * reg_uid->registry->reg.ust is NULL upon error, so we need to
                 * destroy the buffer registry, because it is always expected
@@ -2626,6 +2625,7 @@ static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
                buffer_reg_uid_destroy(reg_uid, NULL);
                goto error;
        }
+
        /* Add node to teardown list of the session. */
        cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
 
@@ -2823,6 +2823,7 @@ struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
 
        LTTNG_ASSERT(uctx);
        LTTNG_ASSERT(ht);
+       ASSERT_RCU_READ_LOCKED();
 
        /* Lookup using the lttng_ust_context_type and a custom match fct. */
        cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
@@ -2851,6 +2852,8 @@ int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
        int ret = 0;
        struct ust_app_ctx *ua_ctx;
 
+       ASSERT_RCU_READ_LOCKED();
+
        DBG2("UST app adding context to channel %s", ua_chan->name);
 
        ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
@@ -2885,8 +2888,8 @@ error:
  * Called with UST app session lock held.
  */
 static
-int enable_ust_app_event(struct ust_app_session *ua_sess,
-               struct ust_app_event *ua_event, struct ust_app *app)
+int enable_ust_app_event(struct ust_app_event *ua_event,
+               struct ust_app *app)
 {
        int ret;
 
@@ -2904,8 +2907,8 @@ error:
 /*
  * Disable on the tracer side a ust app event for the session and channel.
  */
-static int disable_ust_app_event(struct ust_app_session *ua_sess,
-               struct ust_app_event *ua_event, struct ust_app *app)
+static int disable_ust_app_event(struct ust_app_event *ua_event,
+               struct ust_app *app)
 {
        int ret;
 
@@ -2952,6 +2955,8 @@ static int enable_ust_app_channel(struct ust_app_session *ua_sess,
        struct lttng_ht_node_str *ua_chan_node;
        struct ust_app_channel *ua_chan;
 
+       ASSERT_RCU_READ_LOCKED();
+
        lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
        ua_chan_node = lttng_ht_iter_get_node_str(&iter);
        if (ua_chan_node == NULL) {
@@ -2980,8 +2985,7 @@ error:
  */
 static int do_consumer_create_channel(struct ltt_ust_session *usess,
                struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
-               int bitness, struct ust_registry_session *registry,
-               uint64_t trace_archive_id)
+               int bitness, lsu::registry_session *registry)
 {
        int ret;
        unsigned int nb_fd = 0;
@@ -3216,11 +3220,14 @@ static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
        buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
 
        /* Create and add a channel registry to session. */
-       ret = ust_registry_channel_add(reg_sess->reg.ust,
-                       ua_chan->tracing_channel_id);
-       if (ret < 0) {
+       try {
+               reg_sess->reg.ust->add_channel(ua_chan->tracing_channel_id);
+       } catch (const std::exception& ex) {
+               ERR("Failed to add a channel registry to userspace registry session: %s", ex.what());
+               ret = -1;
                goto error;
        }
+
        buffer_reg_channel_add(reg_sess, buf_reg_chan);
 
        if (regp) {
@@ -3317,7 +3324,7 @@ static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
        /* Send all streams to application. */
        pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
        cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
-               struct ust_app_stream stream;
+               struct ust_app_stream stream = {};
 
                ret = duplicate_stream_object(reg_stream, &stream);
                if (ret < 0) {
@@ -3334,8 +3341,8 @@ static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
                                 * Treat this the same way as an application
                                 * that is exiting.
                                 */
-                               WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
-                                               app->pid, stream.name,
+                               WARN("Communication with application %d timed out on send_stream for stream of channel \"%s\" of session \"%" PRIu64 "\".",
+                                               app->pid,
                                                ua_chan->name,
                                                ua_sess->tracing_id);
                                ret = -ENOTCONN;
@@ -3350,7 +3357,6 @@ static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
                 */
                (void) release_ust_app_stream(-1, &stream, app);
        }
-       ua_chan->is_sent = 1;
 
 error_stream_unlock:
        pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
@@ -3375,16 +3381,16 @@ static int create_channel_per_uid(struct ust_app *app,
        struct buffer_reg_channel *buf_reg_chan;
        struct ltt_session *session = NULL;
        enum lttng_error_code notification_ret;
-       struct ust_registry_channel *ust_reg_chan;
 
        LTTNG_ASSERT(app);
        LTTNG_ASSERT(usess);
        LTTNG_ASSERT(ua_sess);
        LTTNG_ASSERT(ua_chan);
+       ASSERT_RCU_READ_LOCKED();
 
        DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
 
-       reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
+       reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
        /*
         * The session creation handles the creation of this global registry
         * object. If none can be find, there is a code flow problem or a
@@ -3408,16 +3414,15 @@ static int create_channel_per_uid(struct ust_app *app,
 
        session = session_find_by_id(ua_sess->tracing_id);
        LTTNG_ASSERT(session);
-       LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
-       LTTNG_ASSERT(session_trylock_list());
+       ASSERT_LOCKED(session->lock);
+       ASSERT_SESSION_LIST_LOCKED();
 
        /*
         * Create the buffers on the consumer side. This call populates the
         * ust app channel object with all streams and data object.
         */
        ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
-                       app->bits_per_long, reg_uid->registry->reg.ust,
-                       session->most_recent_chunk_id.value);
+                       app->abi.bits_per_long, reg_uid->registry->reg.ust);
        if (ret < 0) {
                ERR("Error creating UST channel \"%s\" on the consumer daemon",
                                ua_chan->name);
@@ -3426,8 +3431,12 @@ static int create_channel_per_uid(struct ust_app *app,
                 * Let's remove the previously created buffer registry channel so
                 * it's not visible anymore in the session registry.
                 */
-               ust_registry_channel_del_free(reg_uid->registry->reg.ust,
-                               ua_chan->tracing_channel_id, false);
+               auto locked_registry = reg_uid->registry->reg.ust->lock();
+               try {
+                       locked_registry->remove_channel(ua_chan->tracing_channel_id, false);
+               } catch (const std::exception &ex) {
+                       DBG("Could not find channel for removal: %s", ex.what());
+               }
                buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
                buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
                goto error;
@@ -3443,15 +3452,14 @@ static int create_channel_per_uid(struct ust_app *app,
                goto error;
        }
 
-       /* Notify the notification subsystem of the channel's creation. */
-       pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
-       ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
-                       ua_chan->tracing_channel_id);
-       LTTNG_ASSERT(ust_reg_chan);
-       ust_reg_chan->consumer_key = ua_chan->key;
-       ust_reg_chan = NULL;
-       pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
+       {
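+               /*
+                * Record the channel's consumer key in the registry; the registry
+                * lock is held only for the duration of this scope.
+                */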
+               auto locked_registry = reg_uid->registry->reg.ust->lock();
+               auto& ust_reg_chan = locked_registry->get_channel(ua_chan->tracing_channel_id);
+
+               ust_reg_chan._consumer_key = ua_chan->key;
+       }
 
+       /* Notify the notification subsystem of the channel's creation. */
        notification_ret = notification_thread_command_add_channel(
                        the_notification_thread_handle, session->name,
                        lttng_credentials_get_uid(
@@ -3496,11 +3504,10 @@ static int create_channel_per_pid(struct ust_app *app,
                struct ust_app_channel *ua_chan)
 {
        int ret;
-       struct ust_registry_session *registry;
+       lsu::registry_session *registry;
        enum lttng_error_code cmd_ret;
        struct ltt_session *session = NULL;
        uint64_t chan_reg_key;
-       struct ust_registry_channel *ust_reg_chan;
 
        LTTNG_ASSERT(app);
        LTTNG_ASSERT(usess);
@@ -3516,23 +3523,23 @@ static int create_channel_per_pid(struct ust_app *app,
        LTTNG_ASSERT(registry);
 
        /* Create and add a new channel registry to session. */
-       ret = ust_registry_channel_add(registry, ua_chan->key);
-       if (ret < 0) {
-               ERR("Error creating the UST channel \"%s\" registry instance",
-                       ua_chan->name);
+       try {
+               registry->add_channel(ua_chan->key);
+       } catch (const std::exception& ex) {
+               ERR("Error creating the UST channel \"%s\" registry instance: %s", ua_chan->name,
+                               ex.what());
+               ret = -1;
                goto error;
        }
 
        session = session_find_by_id(ua_sess->tracing_id);
        LTTNG_ASSERT(session);
-
-       LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
-       LTTNG_ASSERT(session_trylock_list());
+       ASSERT_LOCKED(session->lock);
+       ASSERT_SESSION_LIST_LOCKED();
 
        /* Create and get channel on the consumer side. */
        ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
-                       app->bits_per_long, registry,
-                       session->most_recent_chunk_id.value);
+                       app->abi.bits_per_long, registry);
        if (ret < 0) {
                ERR("Error creating UST channel \"%s\" on the consumer daemon",
                        ua_chan->name);
@@ -3548,11 +3555,12 @@ static int create_channel_per_pid(struct ust_app *app,
        }
 
        chan_reg_key = ua_chan->key;
-       pthread_mutex_lock(&registry->lock);
-       ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
-       LTTNG_ASSERT(ust_reg_chan);
-       ust_reg_chan->consumer_key = ua_chan->key;
-       pthread_mutex_unlock(&registry->lock);
+       {
+               auto locked_registry = registry->lock();
+
+               auto& ust_reg_chan = locked_registry->get_channel(chan_reg_key);
+               ust_reg_chan._consumer_key = ua_chan->key;
+       }
 
        cmd_ret = notification_thread_command_add_channel(
                        the_notification_thread_handle, session->name,
@@ -3570,7 +3578,12 @@ static int create_channel_per_pid(struct ust_app *app,
 
 error_remove_from_registry:
        if (ret) {
-               ust_registry_channel_del_free(registry, ua_chan->key, false);
+               try {
+                       auto locked_registry = registry->lock();
+                       locked_registry->remove_channel(ua_chan->key, false);
+               } catch (const std::exception& ex) {
+                       DBG("Could not find channel for removal: %s", ex.what());
+               }
        }
 error:
        rcu_read_unlock();
@@ -3601,6 +3614,7 @@ static int ust_app_channel_send(struct ust_app *app,
        LTTNG_ASSERT(usess->active);
        LTTNG_ASSERT(ua_sess);
        LTTNG_ASSERT(ua_chan);
+       ASSERT_RCU_READ_LOCKED();
 
        /* Handle buffer type before sending the channel to the application. */
        switch (usess->buffer_type) {
@@ -3651,7 +3665,8 @@ error:
  */
 static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
                struct ltt_ust_channel *uchan,
-               enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
+               enum lttng_ust_abi_chan_type type,
+               struct ltt_ust_session *usess __attribute__((unused)),
                struct ust_app_channel **ua_chanp)
 {
        int ret = 0;
@@ -3659,6 +3674,8 @@ static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
        struct lttng_ht_node_str *ua_chan_node;
        struct ust_app_channel *ua_chan;
 
+       ASSERT_RCU_READ_LOCKED();
+
        /* Lookup channel in the ust app session */
        lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
        ua_chan_node = lttng_ht_iter_get_node_str(&iter);
@@ -3699,13 +3716,15 @@ error:
  * Called with ust app session mutex held.
  */
 static
-int create_ust_app_event(struct ust_app_session *ua_sess,
-               struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
+int create_ust_app_event(struct ust_app_channel *ua_chan,
+               struct ltt_ust_event *uevent,
                struct ust_app *app)
 {
        int ret = 0;
        struct ust_app_event *ua_event;
 
+       ASSERT_RCU_READ_LOCKED();
+
        ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
        if (ua_event == NULL) {
                /* Only failure mode of alloc_ust_app_event(). */
@@ -3715,7 +3734,7 @@ int create_ust_app_event(struct ust_app_session *ua_sess,
        shadow_copy_event(ua_event, uevent);
 
        /* Create it on the tracer side */
-       ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
+       ret = create_ust_event(app, ua_chan, ua_event);
        if (ret < 0) {
                /*
                 * Not found previously means that it does not exist on the
@@ -3760,6 +3779,8 @@ int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
        int ret = 0;
        struct ust_app_event_notifier_rule *ua_event_notifier_rule;
 
+       ASSERT_RCU_READ_LOCKED();
+
        ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
        if (ua_event_notifier_rule == NULL) {
                ret = -ENOMEM;
@@ -3788,7 +3809,7 @@ int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
        lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
                        &ua_event_notifier_rule->node);
 
-       DBG2("UST app create token event rule completed: app = '%s', pid = %d), token = %" PRIu64,
+       DBG2("UST app create token event rule completed: app = '%s', pid = %d, token = %" PRIu64,
                        app->name, app->pid, lttng_trigger_get_tracer_token(trigger));
 
        goto end;
@@ -3811,21 +3832,19 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
        int ret = 0;
        struct ust_app_channel *metadata;
        struct consumer_socket *socket;
-       struct ust_registry_session *registry;
        struct ltt_session *session = NULL;
 
        LTTNG_ASSERT(ua_sess);
        LTTNG_ASSERT(app);
        LTTNG_ASSERT(consumer);
+       ASSERT_RCU_READ_LOCKED();
 
-       registry = get_session_registry(ua_sess);
+       auto locked_registry = get_locked_session_registry(ua_sess);
        /* The UST app session is held registry shall not be null. */
-       LTTNG_ASSERT(registry);
-
-       pthread_mutex_lock(&registry->lock);
+       LTTNG_ASSERT(locked_registry);
 
        /* Metadata already exists for this registry or it was closed previously */
-       if (registry->metadata_key || registry->metadata_closed) {
+       if (locked_registry->_metadata_key || locked_registry->_metadata_closed) {
                ret = 0;
                goto error;
        }
@@ -3848,7 +3867,7 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
        }
 
        /* Get the right consumer socket for the application. */
-       socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
+       socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, consumer);
        if (!socket) {
                ret = -EINVAL;
                goto error_consumer;
@@ -3860,13 +3879,12 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
         * consumer requesting the metadata and the ask_channel call on our side
         * did not returned yet.
         */
-       registry->metadata_key = metadata->key;
+       locked_registry->_metadata_key = metadata->key;
 
        session = session_find_by_id(ua_sess->tracing_id);
        LTTNG_ASSERT(session);
-
-       LTTNG_ASSERT(pthread_mutex_trylock(&session->lock));
-       LTTNG_ASSERT(session_trylock_list());
+       ASSERT_LOCKED(session->lock);
+       ASSERT_SESSION_LIST_LOCKED();
 
        /*
         * Ask the metadata channel creation to the consumer. The metadata object
@@ -3875,10 +3893,10 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
         * consumer.
         */
        ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
-                       registry, session->current_trace_chunk);
+                       locked_registry.get(), session->current_trace_chunk);
        if (ret < 0) {
                /* Nullify the metadata key so we don't try to close it later on. */
-               registry->metadata_key = 0;
+               locked_registry->_metadata_key = 0;
                goto error_consumer;
        }
 
@@ -3891,7 +3909,7 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
        ret = consumer_setup_metadata(socket, metadata->key);
        if (ret < 0) {
                /* Nullify the metadata key so we don't try to close it later on. */
-               registry->metadata_key = 0;
+               locked_registry->_metadata_key = 0;
                goto error_consumer;
        }
 
@@ -3900,9 +3918,8 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
 
 error_consumer:
        lttng_fd_put(LTTNG_FD_APPS, 1);
-       delete_ust_app_channel(-1, metadata, app);
+       delete_ust_app_channel(-1, metadata, app, locked_registry);
 error:
-       pthread_mutex_unlock(&registry->lock);
        if (session) {
                session_put(session);
        }
@@ -3983,7 +4000,7 @@ struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
                goto error;
        }
 
-       lta = (ust_app *) zmalloc(sizeof(struct ust_app));
+       lta = zmalloc<ust_app>();
        if (lta == NULL) {
                PERROR("malloc");
                goto error_free_pipe;
@@ -3995,13 +4012,17 @@ struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
        lta->uid = msg->uid;
        lta->gid = msg->gid;
 
-       lta->bits_per_long = msg->bits_per_long;
-       lta->uint8_t_alignment = msg->uint8_t_alignment;
-       lta->uint16_t_alignment = msg->uint16_t_alignment;
-       lta->uint32_t_alignment = msg->uint32_t_alignment;
-       lta->uint64_t_alignment = msg->uint64_t_alignment;
-       lta->long_alignment = msg->long_alignment;
-       lta->byte_order = msg->byte_order;
+       lta->abi = {
+               .bits_per_long = msg->bits_per_long,
+               .long_alignment = msg->long_alignment,
+               .uint8_t_alignment = msg->uint8_t_alignment,
+               .uint16_t_alignment = msg->uint16_t_alignment,
+               .uint32_t_alignment = msg->uint32_t_alignment,
+               .uint64_t_alignment = msg->uint64_t_alignment,
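+               /* Map the byte order advertised at registration to the trace ABI enum. */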
+               .byte_order = msg->byte_order == LITTLE_ENDIAN ?
+                               lttng::sessiond::trace::byte_order::LITTLE_ENDIAN_ :
+                                       lttng::sessiond::trace::byte_order::BIG_ENDIAN_,
+       };
 
        lta->v_major = msg->major;
        lta->v_minor = msg->minor;
@@ -4260,8 +4281,6 @@ void ust_app_unregister(int sock)
         */
        cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
                        node.node) {
-               struct ust_registry_session *registry;
-
                ret = lttng_ht_del(lta->sessions, &iter);
                if (ret) {
                        /* The session was already removed so scheduled for teardown. */
@@ -4294,10 +4313,10 @@ void ust_app_unregister(int sock)
                 * The close metadata below nullifies the metadata pointer in the
                 * session so the delete session will NOT push/close a second time.
                 */
-               registry = get_session_registry(ua_sess);
-               if (registry) {
+               auto locked_registry = get_locked_session_registry(ua_sess);
+               if (locked_registry) {
                        /* Push metadata for application before freeing the application. */
-                       (void) push_metadata(registry, ua_sess->consumer);
+                       (void) push_metadata(locked_registry, ua_sess->consumer);
 
                        /*
                         * Don't ask to close metadata for global per UID buffers. Close
@@ -4306,8 +4325,18 @@ void ust_app_unregister(int sock)
                         * close so don't send a close command if closed.
                         */
                        if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
-                               /* And ask to close it for this session registry. */
-                               (void) close_metadata(registry, ua_sess->consumer);
+                               const auto metadata_key = locked_registry->_metadata_key;
+                               const auto consumer_bitness = locked_registry->abi.bits_per_long;
+
+                               if (!locked_registry->_metadata_closed && metadata_key != 0) {
+                                       locked_registry->_metadata_closed = true;
+                               }
+
+                               /* Release lock before communication, see comments in close_metadata(). */
+                               locked_registry.reset();
+                               (void) close_metadata(metadata_key, consumer_bitness, ua_sess->consumer);
+                       } else {
+                               locked_registry.reset();
                        }
                }
                cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
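
In this teardown path the registry is now accessed through a locked proxy: the metadata key and consumer bitness are captured and the closed flag is set while the lock is held, then the proxy is reset() so the lock is released before close_metadata() communicates with the consumer. A minimal sketch of that ordering, with illustrative names (the actual locked type is whatever get_locked_session_registry() returns in this series):

        #include <cstdint>
        #include <memory>
        #include <mutex>

        namespace sketch {

        struct registry {
                std::mutex mutex;
                std::uint64_t metadata_key = 0;
                bool metadata_closed = false;
        };

        /* A "locked registry" proxy: a unique_ptr whose deleter releases the lock. */
        using locked_registry = std::unique_ptr<registry, void (*)(registry *)>;

        inline locked_registry lock(registry& reg)
        {
                reg.mutex.lock();
                return locked_registry(&reg, [](registry *r) { r->mutex.unlock(); });
        }

        inline void close_registry_metadata(registry& reg)
        {
                auto locked = lock(reg);
                const auto key = locked->metadata_key;

                if (!locked->metadata_closed && key != 0) {
                        locked->metadata_closed = true;
                }

                /* Release the lock before any blocking consumer communication. */
                locked.reset();

                /* ... send the metadata close command for `key` to the consumer ... */
                (void) key;
        }

        } /* namespace sketch */
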
@@ -4359,7 +4388,7 @@ int ust_app_list_events(struct lttng_event **events)
        struct lttng_event *tmp_event;
 
        nbmem = UST_APP_EVENT_LIST_SIZE;
-       tmp_event = (lttng_event *) zmalloc(nbmem * sizeof(struct lttng_event));
+       tmp_event = calloc<lttng_event>(nbmem);
        if (tmp_event == NULL) {
                PERROR("zmalloc ust app events");
                ret = -ENOMEM;
@@ -4494,7 +4523,7 @@ int ust_app_list_event_fields(struct lttng_event_field **fields)
        struct lttng_event_field *tmp_event;
 
        nbmem = UST_APP_EVENT_LIST_SIZE;
-       tmp_event = (lttng_event_field *) zmalloc(nbmem * sizeof(struct lttng_event_field));
+       tmp_event = calloc<lttng_event_field>(nbmem);
        if (tmp_event == NULL) {
                PERROR("zmalloc ust app event fields");
                ret = -ENOMEM;
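
Both listing functions above now allocate their result arrays through a typed helper rather than a casted zmalloc(). A sketch of what such a helper boils down to; the name below is illustrative, the real helper comes from the tree's common headers:

        #include <cstdlib>

        /*
         * Zero-initialized, typed allocation for plain C structs; the caller
         * still checks for NULL and eventually free()s the array, as in the
         * code above.
         */
        template <typename EventType>
        EventType *typed_calloc(std::size_t count)
        {
                return static_cast<EventType *>(std::calloc(count, sizeof(EventType)));
        }

        /* Usage: struct lttng_event *events = typed_calloc<struct lttng_event>(nbmem); */
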
@@ -4618,8 +4647,6 @@ error:
 
 /*
  * Free and clean all traceable apps of the global list.
- *
- * Should _NOT_ be called with RCU read-side lock held.
  */
 void ust_app_clean_list(void)
 {
@@ -4666,13 +4693,13 @@ void ust_app_clean_list(void)
 
        /* Destroy is done only when the ht is empty */
        if (ust_app_ht) {
-               ht_cleanup_push(ust_app_ht);
+               lttng_ht_destroy(ust_app_ht);
        }
        if (ust_app_ht_by_sock) {
-               ht_cleanup_push(ust_app_ht_by_sock);
+               lttng_ht_destroy(ust_app_ht_by_sock);
        }
        if (ust_app_ht_by_notify_sock) {
-               ht_cleanup_push(ust_app_ht_by_notify_sock);
+               lttng_ht_destroy(ust_app_ht_by_notify_sock);
        }
 }
 
@@ -4850,7 +4877,7 @@ int ust_app_disable_event_glb(struct ltt_ust_session *usess,
                        continue;
                }
 
-               ret = disable_ust_app_event(ua_sess, ua_event, app);
+               ret = disable_ust_app_event(ua_event, app);
                if (ret < 0) {
                        /* XXX: Report error someday... */
                        continue;
@@ -5009,7 +5036,7 @@ int ust_app_enable_event_glb(struct ltt_ust_session *usess,
                        goto next_app;
                }
 
-               ret = enable_ust_app_event(ua_sess, ua_event, app);
+               ret = enable_ust_app_event(ua_event, app);
                if (ret < 0) {
                        pthread_mutex_unlock(&ua_sess->lock);
                        goto error;
@@ -5073,7 +5100,7 @@ int ust_app_create_event_glb(struct ltt_ust_session *usess,
 
                ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
 
-               ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
+               ret = create_ust_app_event(ua_chan, uevent, app);
                pthread_mutex_unlock(&ua_sess->lock);
                if (ret < 0) {
                        if (ret != -LTTNG_UST_ERR_EXIST) {
@@ -5204,7 +5231,6 @@ int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
 {
        int ret = 0;
        struct ust_app_session *ua_sess;
-       struct ust_registry_session *registry;
 
        DBG("Stopping tracing for ust app pid %d", app->pid);
 
@@ -5268,26 +5294,28 @@ int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
        pthread_mutex_unlock(&app->sock_lock);
        if (ret < 0) {
                if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d)",
+                       DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d",
                                        app->pid, app->sock);
                } else if (ret == -EAGAIN) {
-                       WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d)",
+                       WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d",
                                        app->pid, app->sock);
                } else {
-                       ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d)",
+                       ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d",
                                        ret, app->pid, app->sock);
                }
        }
 
        health_code_update();
 
-       registry = get_session_registry(ua_sess);
+       {
+               auto locked_registry = get_locked_session_registry(ua_sess);
 
-       /* The UST app session is held registry shall not be null. */
-       LTTNG_ASSERT(registry);
+               /* The UST app session is held; the registry shall not be null. */
+               LTTNG_ASSERT(locked_registry);
 
-       /* Push metadata for application before freeing the application. */
-       (void) push_metadata(registry, ua_sess->consumer);
+               /* Push metadata for application before freeing the application. */
+               (void) push_metadata(locked_registry, ua_sess->consumer);
+       }
 
 end_unlock:
        pthread_mutex_unlock(&ua_sess->lock);
@@ -5329,7 +5357,7 @@ int ust_app_flush_app_session(struct ust_app *app,
        health_code_update();
 
        /* Flushing buffers */
-       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+       socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
                        ua_sess->consumer);
 
        /* Flush buffers and push metadata. */
@@ -5386,7 +5414,7 @@ int ust_app_flush_session(struct ltt_ust_session *usess)
 
                /* Flush all per UID buffers associated to that session. */
                cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
-                       struct ust_registry_session *ust_session_reg;
+                       lsu::registry_session *ust_session_reg;
                        struct buffer_reg_channel *buf_reg_chan;
                        struct consumer_socket *socket;
 
@@ -5410,7 +5438,8 @@ int ust_app_flush_session(struct ltt_ust_session *usess)
 
                        ust_session_reg = reg->registry->reg.ust;
                        /* Push metadata. */
-                       (void) push_metadata(ust_session_reg, usess->consumer);
+                       auto locked_registry = ust_session_reg->lock();
+                       (void) push_metadata(locked_registry, usess->consumer);
                }
                break;
        }
@@ -5465,11 +5494,11 @@ int ust_app_clear_quiescent_app_session(struct ust_app *app,
 
        health_code_update();
 
-       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+       socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
                        ua_sess->consumer);
        if (!socket) {
                ERR("Failed to find consumer (%" PRIu32 ") socket",
-                               app->bits_per_long);
+                               app->abi.bits_per_long);
                ret = -1;
                goto end_unlock;
        }
@@ -5626,13 +5655,13 @@ static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
        pthread_mutex_unlock(&app->sock_lock);
        if (ret < 0) {
                if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                       DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d)",
+                       DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d",
                                        app->pid, app->sock);
                } else if (ret == -EAGAIN) {
-                       WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d)",
+                       WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d",
                                        app->pid, app->sock);
                } else {
-                       ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d)",
+                       ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d",
                                        ret, app->pid, app->sock);
                }
        }
@@ -5769,7 +5798,7 @@ end:
 
 static
 int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
-               struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
+               struct ltt_ust_event *uevent,
                struct ust_app *app)
 {
        int ret = 0;
@@ -5778,15 +5807,15 @@ int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
        ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
                uevent->filter, uevent->attr.loglevel, uevent->exclusion);
        if (!ua_event) {
-               ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
+               ret = create_ust_app_event(ua_chan, uevent, app);
                if (ret < 0) {
                        goto end;
                }
        } else {
                if (ua_event->enabled != uevent->enabled) {
                        ret = uevent->enabled ?
-                               enable_ust_app_event(ua_sess, ua_event, app) :
-                               disable_ust_app_event(ua_sess, ua_event, app);
+                               enable_ust_app_event(ua_event, app) :
+                               disable_ust_app_event(ua_event, app);
                }
        }
 
@@ -5806,6 +5835,8 @@ void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
        struct ust_app_event_notifier_rule *event_notifier_rule;
        unsigned int count, i;
 
+       ASSERT_RCU_READ_LOCKED();
+
        if (!ust_app_supports_notifiers(app)) {
                goto end;
        }
@@ -5957,6 +5988,7 @@ void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
        LTTNG_ASSERT(usess);
        LTTNG_ASSERT(ua_sess);
        LTTNG_ASSERT(app);
+       ASSERT_RCU_READ_LOCKED();
 
        cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
                        uchan, node.node) {
@@ -5986,7 +6018,7 @@ void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
                cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
                                node.node) {
                        ret = ust_app_channel_synchronize_event(ua_chan,
-                               uevent, ua_sess, app);
+                               uevent, app);
                        if (ret) {
                                goto end;
                        }
@@ -6086,6 +6118,7 @@ void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
 {
        LTTNG_ASSERT(usess);
        LTTNG_ASSERT(usess->active);
+       ASSERT_RCU_READ_LOCKED();
 
        DBG2("UST app global update for app sock %d for session id %" PRIu64,
                        app->sock, usess->id);
@@ -6120,7 +6153,9 @@ void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
  */
 void ust_app_global_update_event_notifier_rules(struct ust_app *app)
 {
-       DBG2("UST application global event notifier rules update: app = '%s', pid = %d)",
+       ASSERT_RCU_READ_LOCKED();
+
+       DBG2("UST application global event notifier rules update: app = '%s', pid = %d",
                        app->name, app->pid);
 
        if (!app->compatible || !ust_app_supports_notifiers(app)) {
@@ -6128,7 +6163,7 @@ void ust_app_global_update_event_notifier_rules(struct ust_app *app)
        }
 
        if (app->event_notifier_group.object == NULL) {
-               WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s' pid = %d)",
+               WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s', pid = %d",
                                app->name, app->pid);
                return;
        }
@@ -6282,6 +6317,7 @@ static struct ust_app_session *find_session_by_objd(struct ust_app *app,
        struct ust_app_session *ua_sess = NULL;
 
        LTTNG_ASSERT(app);
+       ASSERT_RCU_READ_LOCKED();
 
        lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
        node = lttng_ht_iter_get_node_ulong(&iter);
@@ -6309,6 +6345,7 @@ static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
        struct ust_app_channel *ua_chan = NULL;
 
        LTTNG_ASSERT(app);
+       ASSERT_RCU_READ_LOCKED();
 
        lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
        node = lttng_ht_iter_get_node_ulong(&iter);
@@ -6331,47 +6368,45 @@ error:
  *
  * On success 0 is returned else a negative value.
  */
-static int reply_ust_register_channel(int sock, int cobjd,
-               size_t nr_fields, struct lttng_ust_ctl_field *fields)
+static int handle_app_register_channel_notification(int sock,
+               int cobjd,
+               struct lttng_ust_ctl_field *raw_context_fields,
+               size_t context_field_count)
 {
        int ret, ret_code = 0;
        uint32_t chan_id;
        uint64_t chan_reg_key;
-       enum lttng_ust_ctl_channel_header type;
        struct ust_app *app;
        struct ust_app_channel *ua_chan;
        struct ust_app_session *ua_sess;
-       struct ust_registry_session *registry;
-       struct ust_registry_channel *ust_reg_chan;
+       auto ust_ctl_context_fields = lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::free>(
+                       raw_context_fields);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock_guard;
 
        /* Lookup application. If not found, there is a code flow error. */
        app = find_app_by_notify_sock(sock);
        if (!app) {
                DBG("Application socket %d is being torn down. Abort event notify",
                                sock);
-               ret = -1;
-               goto error_rcu_unlock;
+               return -1;
        }
 
        /* Lookup channel by UST object descriptor. */
        ua_chan = find_channel_by_objd(app, cobjd);
        if (!ua_chan) {
                DBG("Application channel is being torn down. Abort event notify");
-               ret = 0;
-               goto error_rcu_unlock;
+               return 0;
        }
 
        LTTNG_ASSERT(ua_chan->session);
        ua_sess = ua_chan->session;
 
        /* Get right session registry depending on the session buffer type. */
-       registry = get_session_registry(ua_sess);
-       if (!registry) {
+       auto locked_registry_session = get_locked_session_registry(ua_sess);
+       if (!locked_registry_session) {
                DBG("Application session is being torn down. Abort event notify");
-               ret = 0;
-               goto error_rcu_unlock;
+               return 0;
        };
 
        /* Depending on the buffer type, a different channel key is used. */
@@ -6381,32 +6416,46 @@ static int reply_ust_register_channel(int sock, int cobjd,
                chan_reg_key = ua_chan->key;
        }
 
-       pthread_mutex_lock(&registry->lock);
+       auto& ust_reg_chan = locked_registry_session->get_channel(chan_reg_key);
+
+       /* Channel id is set during the object creation. */
+       chan_id = ust_reg_chan.id;
 
-       ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
-       LTTNG_ASSERT(ust_reg_chan);
+       /*
+        * The application returns the typing information of the channel's
+        * context fields. In per-PID buffering mode, this is the first and only
+        * time we get this information. It is our chance to finalize the
+        * initialization of the channel and serialize its layout's description
+        * to the trace's metadata.
+        *
+        * However, in per-UID buffering mode, every application will provide
+        * this information (redundantly). The first time will allow us to
+        * complete the initialization. The following times, we simply validate
+        * that all apps provide the same typing for the context fields as a
+        * sanity check.
+        */
+       lst::type::cuptr context_fields = lttng::make_unique<lst::structure_type>(0,
+                       lsu::create_trace_fields_from_ust_ctl_fields(*locked_registry_session,
+                                       ust_ctl_context_fields.get(), context_field_count));
 
-       if (!ust_reg_chan->register_done) {
+       if (!ust_reg_chan.is_registered()) {
+               ust_reg_chan.set_context(std::move(context_fields));
+       } else {
                /*
-                * TODO: eventually use the registry event count for
-                * this channel to better guess header type for per-pid
-                * buffers.
+                * Validate that the context fields match between the
+                * registry and the newly-registering application.
                 */
-               type = LTTNG_UST_CTL_CHANNEL_HEADER_LARGE;
-               ust_reg_chan->nr_ctx_fields = nr_fields;
-               ust_reg_chan->ctx_fields = fields;
-               fields = NULL;
-               ust_reg_chan->header_type = type;
-       } else {
-               /* Get current already assigned values. */
-               type = ust_reg_chan->header_type;
+               if (ust_reg_chan.get_context() != *context_fields) {
+                       ERR("Registering application channel due to context field mismatch: pid = %d, sock = %d",
+                               app->pid, app->sock);
+                       ret_code = -EINVAL;
+                       goto reply;
+               }
        }
-       /* Channel id is set during the object creation. */
-       chan_id = ust_reg_chan->chan_id;
 
        /* Append to metadata */
-       if (!ust_reg_chan->metadata_dumped) {
-               ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
+       if (!ust_reg_chan._metadata_dumped) {
+               /*ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);*/
                if (ret_code) {
                        ERR("Error appending channel metadata (errno = %d)", ret_code);
                        goto reply;
@@ -6415,10 +6464,14 @@ static int reply_ust_register_channel(int sock, int cobjd,
 
 reply:
        DBG3("UST app replying to register channel key %" PRIu64
-                       " with id %u, type = %d, ret = %d", chan_reg_key, chan_id, type,
+                       " with id %u, ret = %d", chan_reg_key, chan_id,
                        ret_code);
 
-       ret = lttng_ust_ctl_reply_register_channel(sock, chan_id, type, ret_code);
+       ret = lttng_ust_ctl_reply_register_channel(sock, chan_id,
+                       ust_reg_chan.header_type == lst::stream_class::header_type::COMPACT ?
+                                       LTTNG_UST_CTL_CHANNEL_HEADER_COMPACT :
+                                             LTTNG_UST_CTL_CHANNEL_HEADER_LARGE,
+                       ret_code);
        if (ret < 0) {
                if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
                        DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d",
@@ -6430,17 +6483,13 @@ reply:
                        ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d",
                                        ret, app->pid, app->sock);
                }
-               goto error;
+
+               return ret;
        }
 
-       /* This channel registry registration is completed. */
-       ust_reg_chan->register_done = 1;
+       /* This channel registry's registration is completed. */
+       ust_reg_chan.set_as_registered();
 
-error:
-       pthread_mutex_unlock(&registry->lock);
-error_rcu_unlock:
-       rcu_read_unlock();
-       free(fields);
        return ret;
 }
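
handle_app_register_channel_notification() now hands the malloc()'d field array received from ustctl to lttng::make_unique_wrapper() immediately, so every early return releases it and the trailing free() of the old error labels disappears. A sketch of that idiom with illustrative names:

        #include <cstdlib>
        #include <memory>

        namespace sketch {

        /* Deleter that releases memory obtained from malloc()/calloc(). */
        struct free_deleter {
                void operator()(void *ptr) const noexcept
                {
                        std::free(ptr);
                }
        };

        template <typename PointeeType>
        using malloc_unique_ptr = std::unique_ptr<PointeeType, free_deleter>;

        } /* namespace sketch */

        /*
         * Usage, mirroring the function above:
         *
         *   sketch::malloc_unique_ptr<lttng_ust_ctl_field> fields(raw_context_fields);
         *   ...
         *   return 0;  // the array is freed on every path out of the function
         */
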
 
@@ -6453,9 +6502,9 @@ error_rcu_unlock:
  *
  * On success 0 is returned else a negative value.
  */
-static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
-               char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields,
-               int loglevel_value, char *model_emf_uri)
+static int add_event_ust_registry(int sock, int sobjd, int cobjd, const char *name,
+               char *raw_signature, size_t nr_fields, struct lttng_ust_ctl_field *raw_fields,
+               int loglevel_value, char *raw_model_emf_uri)
 {
        int ret, ret_code;
        uint32_t event_id = 0;
@@ -6463,57 +6512,69 @@ static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
        struct ust_app *app;
        struct ust_app_channel *ua_chan;
        struct ust_app_session *ua_sess;
-       struct ust_registry_session *registry;
-
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard rcu_lock;
+       auto signature = lttng::make_unique_wrapper<char, lttng::free>(raw_signature);
+       auto fields = lttng::make_unique_wrapper<lttng_ust_ctl_field, lttng::free>(raw_fields);
+       auto model_emf_uri = lttng::make_unique_wrapper<char, lttng::free>(raw_model_emf_uri);
 
        /* Lookup application. If not found, there is a code flow error. */
        app = find_app_by_notify_sock(sock);
        if (!app) {
                DBG("Application socket %d is being torn down. Abort event notify",
                                sock);
-               ret = -1;
-               goto error_rcu_unlock;
+               return -1;
        }
 
        /* Lookup channel by UST object descriptor. */
        ua_chan = find_channel_by_objd(app, cobjd);
        if (!ua_chan) {
                DBG("Application channel is being torn down. Abort event notify");
-               ret = 0;
-               goto error_rcu_unlock;
+               return 0;
        }
 
        LTTNG_ASSERT(ua_chan->session);
        ua_sess = ua_chan->session;
 
-       registry = get_session_registry(ua_sess);
-       if (!registry) {
-               DBG("Application session is being torn down. Abort event notify");
-               ret = 0;
-               goto error_rcu_unlock;
-       }
-
        if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
                chan_reg_key = ua_chan->tracing_channel_id;
        } else {
                chan_reg_key = ua_chan->key;
        }
 
-       pthread_mutex_lock(&registry->lock);
-
-       /*
-        * From this point on, this call acquires the ownership of the sig, fields
-        * and model_emf_uri meaning any free are done inside it if needed. These
-        * three variables MUST NOT be read/write after this.
-        */
-       ret_code = ust_registry_create_event(registry, chan_reg_key,
-                       sobjd, cobjd, name, sig, nr_fields, fields,
-                       loglevel_value, model_emf_uri, ua_sess->buffer_type,
-                       &event_id, app);
-       sig = NULL;
-       fields = NULL;
-       model_emf_uri = NULL;
+       {
+               auto locked_registry = get_locked_session_registry(ua_sess);
+               if (locked_registry) {
+                       /*
+                        * From this point on, this call acquires ownership of the signature,
+                        * fields and model_emf_uri, meaning any required frees are done inside it.
+                        * These three variables MUST NOT be read or written after this point.
+                        */
+                       try {
+                               auto& channel = locked_registry->get_channel(chan_reg_key);
+
+                               /* event_id is set on success. */
+                               channel.add_event(sobjd, cobjd, name, signature.get(),
+                                               lsu::create_trace_fields_from_ust_ctl_fields(
+                                                               *locked_registry, fields.get(),
+                                                               nr_fields),
+                                               loglevel_value,
+                                               model_emf_uri.get() ?
+                                                               nonstd::optional<std::string>(
+                                                                               model_emf_uri.get()) :
+                                                                     nonstd::nullopt,
+                                               ua_sess->buffer_type, *app, event_id);
+                               ret_code = 0;
+                       } catch (const std::exception& ex) {
+                               ERR("Failed to add event `%s` to registry session: %s", name,
+                                               ex.what());
+                               /* Inform the application of the error; don't return directly. */
+                               ret_code = -EINVAL;
+                       }
+               } else {
+                       DBG("Application session is being torn down. Abort event notify");
+                       return 0;
+               }
+       }
 
        /*
         * The return value is returned to ustctl so in case of an error, the
@@ -6536,19 +6597,11 @@ static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
                 * No need to wipe the create event since the application socket will
                 * get close on error hence cleaning up everything by itself.
                 */
-               goto error;
+               return ret;
        }
 
        DBG3("UST registry event %s with id %" PRId32 " added successfully",
                        name, event_id);
-
-error:
-       pthread_mutex_unlock(&registry->lock);
-error_rcu_unlock:
-       rcu_read_unlock();
-       free(sig);
-       free(fields);
-       free(model_emf_uri);
        return ret;
 }
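
The notification handlers also swap the manual rcu_read_lock()/rcu_read_unlock() pairs, and their error_rcu_unlock labels, for the scope guard lttng::urcu::read_lock_guard, so the read-side lock is released on every return path. A minimal sketch of such a guard built directly on liburcu:

        #include <urcu.h>

        namespace sketch {

        class rcu_read_lock_guard {
        public:
                rcu_read_lock_guard()
                {
                        rcu_read_lock();
                }

                ~rcu_read_lock_guard()
                {
                        rcu_read_unlock();
                }

                rcu_read_lock_guard(const rcu_read_lock_guard&) = delete;
                rcu_read_lock_guard& operator=(const rcu_read_lock_guard&) = delete;
        };

        } /* namespace sketch */

        /*
         * Usage: instantiate at the top of the handler; early returns no longer
         * leak the RCU read-side critical section.
         *
         *   sketch::rcu_read_lock_guard read_lock;
         */
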
 
@@ -6560,16 +6613,16 @@ error_rcu_unlock:
  *
  * On success 0 is returned else a negative value.
  */
-static int add_enum_ust_registry(int sock, int sobjd, char *name,
-               struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries)
+static int add_enum_ust_registry(int sock, int sobjd, const char *name,
+               struct lttng_ust_ctl_enum_entry *raw_entries, size_t nr_entries)
 {
-       int ret = 0, ret_code;
+       int ret = 0;
        struct ust_app *app;
        struct ust_app_session *ua_sess;
-       struct ust_registry_session *registry;
        uint64_t enum_id = -1ULL;
-
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock_guard;
+       auto entries = lttng::make_unique_wrapper<struct lttng_ust_ctl_enum_entry, lttng::free>(
+                       raw_entries);
 
        /* Lookup application. If not found, there is a code flow error. */
        app = find_app_by_notify_sock(sock);
@@ -6577,9 +6630,7 @@ static int add_enum_ust_registry(int sock, int sobjd, char *name,
                /* Return an error since this is not an error */
                DBG("Application socket %d is being torn down. Aborting enum registration",
                                sock);
-               free(entries);
-               ret = -1;
-               goto error_rcu_unlock;
+               return -1;
        }
 
        /* Lookup session by UST object descriptor. */
@@ -6587,34 +6638,37 @@ static int add_enum_ust_registry(int sock, int sobjd, char *name,
        if (!ua_sess) {
                /* Return an error since this is not an error */
                DBG("Application session is being torn down (session not found). Aborting enum registration.");
-               free(entries);
-               goto error_rcu_unlock;
+               return 0;
        }
 
-       registry = get_session_registry(ua_sess);
-       if (!registry) {
+       auto locked_registry = get_locked_session_registry(ua_sess);
+       if (!locked_registry) {
                DBG("Application session is being torn down (registry not found). Aborting enum registration.");
-               free(entries);
-               goto error_rcu_unlock;
+               return 0;
        }
 
-       pthread_mutex_lock(&registry->lock);
-
        /*
         * From this point on, the callee acquires the ownership of
         * entries. The variable entries MUST NOT be read/written after
         * call.
         */
-       ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
-                       entries, nr_entries, &enum_id);
-       entries = NULL;
+       int application_reply_code;
+       try {
+               locked_registry->create_or_find_enum(
+                               sobjd, name, entries.release(), nr_entries, &enum_id);
+               application_reply_code = 0;
+       } catch (const std::exception& ex) {
+               ERR("%s: %s", fmt::format("Failed to create or find enumeration provided by application: app = {}, enumeration name = {}",
+                               *app, name).c_str(), ex.what());
+               application_reply_code = -1;
+       }
 
        /*
         * The return value is returned to ustctl so in case of an error, the
         * application can be notified. In case of an error, it's important not to
         * return a negative error or else the application will get closed.
         */
-       ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, ret_code);
+       ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, application_reply_code);
        if (ret < 0) {
                if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
                        DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d",
@@ -6630,16 +6684,11 @@ static int add_enum_ust_registry(int sock, int sobjd, char *name,
                 * No need to wipe the create enum since the application socket will
                 * get close on error hence cleaning up everything by itself.
                 */
-               goto error;
+               return ret;
        }
 
        DBG3("UST registry enum %s added successfully or already found", name);
-
-error:
-       pthread_mutex_unlock(&registry->lock);
-error_rcu_unlock:
-       rcu_read_unlock();
-       return ret;
+       return 0;
 }
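
Both add_event_ust_registry() and add_enum_ust_registry() now sit on an exception-to-error-code boundary: registry operations throw on failure, but the value replied to the application must stay a plain code so the application is notified rather than disconnected. A compact sketch of that shape, with placeholder functions standing in for the real registry call and ustctl reply:

        #include <cstdio>
        #include <exception>

        namespace sketch {

        /* Placeholders for the registry operation and the ustctl reply used above. */
        inline void create_or_find_enum() { /* may throw on failure in the real code */ }
        inline int reply_to_application(int reply_code) { (void) reply_code; return 0; }

        inline int handle_register_enum_notification()
        {
                int reply_code;

                try {
                        create_or_find_enum();
                        reply_code = 0;
                } catch (const std::exception& ex) {
                        std::fprintf(stderr, "enum registration failed: %s\n", ex.what());
                        /* Non-fatal: the application is notified through the reply. */
                        reply_code = -1;
                }

                /* Only a failure to send the reply itself aborts the notify socket. */
                return reply_to_application(reply_code);
        }

        } /* namespace sketch */
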
 
 /*
@@ -6679,9 +6728,8 @@ int ust_app_recv_notify(int sock)
 
                DBG2("UST app ustctl register event received");
 
-               ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name,
-                               &loglevel_value, &sig, &nr_fields, &fields,
-                               &model_emf_uri);
+               ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel_value,
+                               &sig, &nr_fields, &fields, &model_emf_uri);
                if (ret < 0) {
                        if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
                                DBG3("UST app recv event failed. Application died: sock = %d",
@@ -6696,6 +6744,24 @@ int ust_app_recv_notify(int sock)
                        goto error;
                }
 
+               {
+                       lttng::urcu::read_lock_guard rcu_lock;
+                       const struct ust_app *app = find_app_by_notify_sock(sock);
+                       if (!app) {
+                               DBG("Application socket %d is being torn down. Abort event notify", sock);
+                               ret = -1;
+                               goto error;
+                       }
+               }
+
+               if ((!fields && nr_fields > 0) || (fields && nr_fields == 0)) {
+                       ERR("Invalid return value from lttng_ust_ctl_recv_register_event: fields = %p, nr_fields = %zu",
+                                       fields, nr_fields);
+                       ret = -1;
+                       free(fields);
+                       goto error;
+               }
+
                /*
                 * Add event to the UST registry coming from the notify socket. This
                 * call will free if needed the sig, fields and model_emf_uri. This
@@ -6713,13 +6779,13 @@ int ust_app_recv_notify(int sock)
        case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL:
        {
                int sobjd, cobjd;
-               size_t nr_fields;
-               struct lttng_ust_ctl_field *fields;
+               size_t field_count;
+               struct lttng_ust_ctl_field *context_fields;
 
                DBG2("UST app ustctl register channel received");
 
-               ret = lttng_ust_ctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
-                               &fields);
+               ret = lttng_ust_ctl_recv_register_channel(
+                               sock, &sobjd, &cobjd, &field_count, &context_fields);
                if (ret < 0) {
                        if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
                                DBG3("UST app recv channel failed. Application died: sock = %d",
@@ -6728,8 +6794,8 @@ int ust_app_recv_notify(int sock)
                                WARN("UST app recv channel failed. Communication time out: sock = %d",
                                                sock);
                        } else {
-                               ERR("UST app recv channel failed with ret %d: sock = %d)",
-                                               ret, sock);
+                               ERR("UST app recv channel failed with ret %d: sock = %d", ret,
+                                               sock);
                        }
                        goto error;
                }
@@ -6737,10 +6803,9 @@ int ust_app_recv_notify(int sock)
                /*
                 * The fields ownership are transfered to this function call meaning
                 * that if needed it will be freed. After this, it's invalid to access
-                * fields or clean it up.
+                * fields or clean them up.
                 */
-               ret = reply_ust_register_channel(sock, cobjd, nr_fields,
-                               fields);
+               ret = handle_app_register_channel_notification(sock, cobjd, context_fields, field_count);
                if (ret < 0) {
                        goto error;
                }
@@ -6772,7 +6837,7 @@ int ust_app_recv_notify(int sock)
                        goto error;
                }
 
-               /* Callee assumes ownership of entries */
+               /* Callee assumes ownership of entries. */
                ret = add_enum_ust_registry(sock, sobjd, name,
                                entries, nr_entries);
                if (ret < 0) {
@@ -6810,7 +6875,7 @@ void ust_app_notify_sock_unregister(int sock)
 
        rcu_read_lock();
 
-       obj = (ust_app_notify_sock_obj *) zmalloc(sizeof(*obj));
+       obj = zmalloc<ust_app_notify_sock_obj>();
        if (!obj) {
                /*
                 * An ENOMEM is kind of uncool. If this strikes we continue the
@@ -6887,7 +6952,7 @@ void ust_app_destroy(struct ust_app *app)
  */
 enum lttng_error_code ust_app_snapshot_record(
                const struct ltt_ust_session *usess,
-               const struct consumer_output *output, int wait,
+               const struct consumer_output *output,
                uint64_t nb_packets_per_stream)
 {
        int ret = 0;
@@ -6912,7 +6977,7 @@ enum lttng_error_code ust_app_snapshot_record(
                        char pathname[PATH_MAX];
                        size_t consumer_path_offset = 0;
 
-                       if (!reg->registry->reg.ust->metadata_key) {
+                       if (!reg->registry->reg.ust->_metadata_key) {
                                /* Skip since no metadata is present */
                                continue;
                        }
@@ -6947,17 +7012,15 @@ enum lttng_error_code ust_app_snapshot_record(
                                        buf_reg_chan, node.node) {
                                status = consumer_snapshot_channel(socket,
                                                buf_reg_chan->consumer_key,
-                                               output, 0, usess->uid,
-                                               usess->gid, &trace_path[consumer_path_offset], wait,
+                                               output, 0, &trace_path[consumer_path_offset],
                                                nb_packets_per_stream);
                                if (status != LTTNG_OK) {
                                        goto error;
                                }
                        }
                        status = consumer_snapshot_channel(socket,
-                                       reg->registry->reg.ust->metadata_key, output, 1,
-                                       usess->uid, usess->gid, &trace_path[consumer_path_offset],
-                                       wait, 0);
+                                       reg->registry->reg.ust->_metadata_key, output, 1,
+                                       &trace_path[consumer_path_offset], 0);
                        if (status != LTTNG_OK) {
                                goto error;
                        }
@@ -6971,7 +7034,7 @@ enum lttng_error_code ust_app_snapshot_record(
                        struct lttng_ht_iter chan_iter;
                        struct ust_app_channel *ua_chan;
                        struct ust_app_session *ua_sess;
-                       struct ust_registry_session *registry;
+                       lsu::registry_session *registry;
                        char pathname[PATH_MAX];
                        size_t consumer_path_offset = 0;
 
@@ -6982,7 +7045,7 @@ enum lttng_error_code ust_app_snapshot_record(
                        }
 
                        /* Get the right consumer socket for the application. */
-                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                       socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
                                        output);
                        if (!socket) {
                                status = LTTNG_ERR_INVALID;
@@ -7010,9 +7073,7 @@ enum lttng_error_code ust_app_snapshot_record(
                                        ua_chan, node.node) {
                                status = consumer_snapshot_channel(socket,
                                                ua_chan->key, output, 0,
-                                               lttng_credentials_get_uid(&ua_sess->effective_credentials),
-                                               lttng_credentials_get_gid(&ua_sess->effective_credentials),
-                                               &trace_path[consumer_path_offset], wait,
+                                               &trace_path[consumer_path_offset],
                                                nb_packets_per_stream);
                                switch (status) {
                                case LTTNG_OK:
@@ -7030,10 +7091,8 @@ enum lttng_error_code ust_app_snapshot_record(
                                continue;
                        }
                        status = consumer_snapshot_channel(socket,
-                                       registry->metadata_key, output, 1,
-                                       lttng_credentials_get_uid(&ua_sess->effective_credentials),
-                                       lttng_credentials_get_gid(&ua_sess->effective_credentials),
-                                       &trace_path[consumer_path_offset], wait, 0);
+                                       registry->_metadata_key, output, 1,
+                                       &trace_path[consumer_path_offset], 0);
                        switch (status) {
                        case LTTNG_OK:
                                break;
@@ -7326,7 +7385,6 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
                                        buf_reg_chan, node.node) {
                                ret = consumer_rotate_channel(socket,
                                                buf_reg_chan->consumer_key,
-                                               usess->uid, usess->gid,
                                                usess->consumer,
                                                /* is_metadata_channel */ false);
                                if (ret < 0) {
@@ -7343,16 +7401,18 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
                         * operations (i.e add context) and lead to data
                         * channels created with no metadata channel.
                         */
-                       if (!reg->registry->reg.ust->metadata_key) {
+                       if (!reg->registry->reg.ust->_metadata_key) {
                                /* Skip since no metadata is present. */
                                continue;
                        }
 
-                       (void) push_metadata(reg->registry->reg.ust, usess->consumer);
+                       {
+                               auto locked_registry = reg->registry->reg.ust->lock();
+                               (void) push_metadata(locked_registry, usess->consumer);
+                       }
 
                        ret = consumer_rotate_channel(socket,
-                                       reg->registry->reg.ust->metadata_key,
-                                       usess->uid, usess->gid,
+                                       reg->registry->reg.ust->_metadata_key,
                                        usess->consumer,
                                        /* is_metadata_channel */ true);
                        if (ret < 0) {
@@ -7369,7 +7429,7 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
                        struct lttng_ht_iter chan_iter;
                        struct ust_app_channel *ua_chan;
                        struct ust_app_session *ua_sess;
-                       struct ust_registry_session *registry;
+                       lsu::registry_session *registry;
 
                        ua_sess = lookup_session_by_app(usess, app);
                        if (!ua_sess) {
@@ -7378,7 +7438,7 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
                        }
 
                        /* Get the right consumer socket for the application. */
-                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                       socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
                                        usess->consumer);
                        if (!socket) {
                                cmd_ret = LTTNG_ERR_INVALID;
@@ -7396,8 +7456,6 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
                                        ua_chan, node.node) {
                                ret = consumer_rotate_channel(socket,
                                                ua_chan->key,
-                                               lttng_credentials_get_uid(&ua_sess->effective_credentials),
-                                               lttng_credentials_get_gid(&ua_sess->effective_credentials),
                                                ua_sess->consumer,
                                                /* is_metadata_channel */ false);
                                if (ret < 0) {
@@ -7410,11 +7468,13 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
                        }
 
                        /* Rotate the metadata channel. */
-                       (void) push_metadata(registry, usess->consumer);
+                       {
+                               auto locked_registry = registry->lock();
+
+                               (void) push_metadata(locked_registry, usess->consumer);
+                       }
                        ret = consumer_rotate_channel(socket,
-                                       registry->metadata_key,
-                                       lttng_credentials_get_uid(&ua_sess->effective_credentials),
-                                       lttng_credentials_get_gid(&ua_sess->effective_credentials),
+                                       registry->_metadata_key,
                                        ua_sess->consumer,
                                        /* is_metadata_channel */ true);
                        if (ret < 0) {
@@ -7499,7 +7559,7 @@ enum lttng_error_code ust_app_create_channel_subdirectories(
                cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
                                pid_n.node) {
                        struct ust_app_session *ua_sess;
-                       struct ust_registry_session *registry;
+                       lsu::registry_session *registry;
 
                        ua_sess = lookup_session_by_app(usess, app);
                        if (!ua_sess) {
@@ -7596,7 +7656,10 @@ enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
                                }
                        }
 
-                       (void) push_metadata(reg->registry->reg.ust, usess->consumer);
+                       {
+                               auto locked_registry = reg->registry->reg.ust->lock();
+                               (void) push_metadata(locked_registry, usess->consumer);
+                       }
 
                        /*
                         * Clear the metadata channel.
@@ -7604,7 +7667,7 @@ enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
                         * perform a rotation operation on it behind the scene.
                         */
                        ret = consumer_clear_channel(socket,
-                                       reg->registry->reg.ust->metadata_key);
+                                       reg->registry->reg.ust->_metadata_key);
                        if (ret < 0) {
                                goto error;
                        }
@@ -7618,7 +7681,7 @@ enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
                        struct lttng_ht_iter chan_iter;
                        struct ust_app_channel *ua_chan;
                        struct ust_app_session *ua_sess;
-                       struct ust_registry_session *registry;
+                       lsu::registry_session *registry;
 
                        ua_sess = lookup_session_by_app(usess, app);
                        if (!ua_sess) {
@@ -7627,7 +7690,7 @@ enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
                        }
 
                        /* Get the right consumer socket for the application. */
-                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                       socket = consumer_find_socket_by_bitness(app->abi.bits_per_long,
                                        usess->consumer);
                        if (!socket) {
                                cmd_ret = LTTNG_ERR_INVALID;
@@ -7653,14 +7716,17 @@ enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
                                }
                        }
 
-                       (void) push_metadata(registry, usess->consumer);
+                       {
+                               auto locked_registry = registry->lock();
+                               (void) push_metadata(locked_registry, usess->consumer);
+                       }
 
                        /*
                         * Clear the metadata channel.
                         * Metadata channel is not cleared per se but we still need to
                         * perform rotation operation on it behind the scene.
                         */
-                       ret = consumer_clear_channel(socket, registry->metadata_key);
+                       ret = consumer_clear_channel(socket, registry->_metadata_key);
                        if (ret < 0) {
                                /* Per-PID buffer and application going away. */
                                if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
@@ -7762,7 +7828,7 @@ enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
                        struct lttng_ht_iter chan_iter;
                        struct ust_app_channel *ua_chan;
                        struct ust_app_session *ua_sess;
-                       struct ust_registry_session *registry;
+                       lsu::registry_session *registry;
 
                        ua_sess = lookup_session_by_app(usess, app);
                        if (!ua_sess) {
@@ -7772,7 +7838,7 @@ enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
 
                        /* Get the right consumer socket for the application. */
                        socket = consumer_find_socket_by_bitness(
-                                       app->bits_per_long, usess->consumer);
+                                       app->abi.bits_per_long, usess->consumer);
                        if (!socket) {
                                ret = LTTNG_ERR_FATAL;
                                goto error;