 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
* address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
* be set to NULL for other backends.
+ *
+ * priv_data (output) is set to a pointer into a "priv_data_size"-sized
+ * memory area for client-specific data. This memory is managed by the
+ * ring buffer library. priv_data_align is the alignment required for
+ * the private data area.
*/
extern
-struct shm_handle *channel_create(const struct lib_ring_buffer_config *config,
- const char *name, void *priv,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
+ const char *name,
+ void **priv_data,
+ size_t priv_data_align,
+ size_t priv_data_size,
+ void *priv_data_init,
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
- unsigned int read_timer_interval);
+ unsigned int read_timer_interval,
+ int *shm_fd, int *wait_fd,
+ uint64_t *memory_map_size);
+
+/* channel_handle_create - for consumer. */
+extern
+struct lttng_ust_shm_handle *channel_handle_create(int shm_fd, int wait_fd,
+ uint64_t memory_map_size);
+
+/* channel_handle_add_stream - for consumer. */
+extern
+int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
+ int shm_fd, int wait_fd, uint64_t memory_map_size);
/*
- * channel_destroy returns the private data pointer. It finalizes all channel's
- * buffers, waits for readers to release all references, and destroys the
- * channel.
+ * channel_destroy finalizes all channel's buffers, waits for readers to
+ * release all references, and destroys the channel.
*/
extern
-void *channel_destroy(struct channel *chan, struct shm_handle *handle);
+void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
+ int shadow);
/* Buffer read operations */
#define for_each_channel_cpu(cpu, chan) \
for_each_possible_cpu(cpu)
-extern struct lib_ring_buffer *channel_get_ring_buffer(
- const struct lib_ring_buffer_config *config,
+extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, int cpu,
- struct shm_handle *handle);
-extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
- struct shm_handle *handle);
-extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
- struct shm_handle *handle);
+ struct lttng_ust_shm_handle *handle,
+ int *shm_fd, int *wait_fd,
+ uint64_t *memory_map_size);
+extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle,
+ int shadow);
+extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle,
+ int shadow);
/*
* Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
*/
-extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
+extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
unsigned long *consumed,
unsigned long *produced,
- struct shm_handle *handle);
-extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle);
+extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
unsigned long consumed_new,
- struct shm_handle *handle);
+ struct lttng_ust_shm_handle *handle);
-extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
+extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
unsigned long consumed,
- struct shm_handle *handle);
-extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf,
- struct shm_handle *handle);
+ struct lttng_ust_shm_handle *handle);
+extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle);
/*
* lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
* to read sub-buffers sequentially.
*/
-static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
int ret;
}
static inline
-void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
lib_ring_buffer_put_subbuf(buf, handle);
lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
}
extern void channel_reset(struct channel *chan);
-extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf,
- struct shm_handle *handle);
+extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle);
static inline
-unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->offset);
}
static inline
-unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return uatomic_read(&buf->consumed);
}
* ordering enforced with respect to trace teardown).
*/
static inline
-int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
int finalized = CMM_ACCESS_ONCE(buf->finalized);
/*
static inline
unsigned long lib_ring_buffer_get_read_data_size(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
return subbuffer_get_read_data_size(config, &buf->backend, handle);
}
static inline
unsigned long lib_ring_buffer_get_records_count(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->records_count);
}
static inline
unsigned long lib_ring_buffer_get_records_overrun(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->records_overrun);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_full(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_full);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_wrap(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_wrap);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_big(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_big);
}
static inline
unsigned long lib_ring_buffer_get_records_read(
- const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
return v_read(config, &buf->backend.records_read);
}