X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=libringbuffer%2Fring_buffer_frontend.c;h=9721df1634585f028316d13f96b184af7e99cc08;hb=ef1a13e0de71e01d19a9d1b2d4abdac5e34eb9bc;hp=396b0064b5b13dd59cc4488008c9df9e2f75c9fb;hpb=553bbf7f38652084ed7966c7817b8ccb372b14e1;p=lttng-ust.git

diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index 396b0064..9721df16
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
@@ -118,7 +118,7 @@ struct switch_offsets {
 		     switch_old_end:1;
 };
 
-DEFINE_URCU_TLS_IE(unsigned int, lib_ring_buffer_nesting);
+DEFINE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
 
 /*
  * wakeup_fd_mutex protects wakeup fd use by timer from concurrent
@@ -1014,7 +1014,7 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
 
 	/* Allocate normal memory for channel (not shared) */
 	shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
-			-1);
+			-1, -1);
 	if (!shmobj)
 		goto error_append;
 	/* struct channel is at object 0, offset 0 (hardcoded) */
@@ -1996,9 +1996,11 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
  * Force a sub-buffer switch. This operation is completely reentrant : can be
  * called while tracing is active with absolutely no lock held.
  *
- * Note, however, that as a v_cmpxchg is used for some atomic
- * operations, this function must be called from the CPU which owns the buffer
- * for a ACTIVE flush.
+ * For RING_BUFFER_SYNC_PER_CPU ring buffers, as a v_cmpxchg is used for
+ * some atomic operations, this function must be called from the CPU
+ * which owns the buffer for a ACTIVE flush. However, for
+ * RING_BUFFER_SYNC_GLOBAL ring buffers, this function can be called
+ * from any CPU.
  */
 void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
 				 struct lttng_ust_shm_handle *handle)
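
The reworded comment in the last hunk documents a calling constraint rather than an API change. The sketch below is an illustration of that constraint only, not part of the patch: the flush_one_buffer() helper, the owner_cpu parameter, and the use of sched_setaffinity() to pin the calling thread are assumptions made for the example; only lib_ring_buffer_switch_slow(), SWITCH_ACTIVE, and the per-CPU/global distinction come from the libringbuffer frontend. It also assumes the internal frontend headers that declare these types are available.

/*
 * Illustrative sketch only (not part of the patch): honor the per-CPU
 * calling constraint documented above. buf, handle and owner_cpu are
 * assumed to be supplied by the caller.
 */
#define _GNU_SOURCE
#include <sched.h>

static void flush_one_buffer(struct lttng_ust_lib_ring_buffer *buf,
			     struct lttng_ust_shm_handle *handle,
			     int owner_cpu)
{
	cpu_set_t cpus;

	/*
	 * RING_BUFFER_SYNC_PER_CPU: an ACTIVE flush must run on the CPU
	 * that owns the buffer, so pin the calling thread to it first.
	 */
	CPU_ZERO(&cpus);
	CPU_SET(owner_cpu, &cpus);
	if (sched_setaffinity(0, sizeof(cpus), &cpus) == 0)
		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);

	/*
	 * RING_BUFFER_SYNC_GLOBAL: no affinity requirement; the same
	 * call may be made from any CPU.
	 */
}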