#include <unistd.h>
#include <urcu/compiler.h>
-#include <ust/ringbuffer-config.h>
+#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"
int channel_backend_init(struct channel_backend *chanb,
const char *name,
const struct lttng_ust_lib_ring_buffer_config *config,
- void *priv, size_t subbuf_size,
+ size_t subbuf_size,
size_t num_subbuf, struct lttng_ust_shm_handle *handle);
void channel_backend_free(struct channel_backend *chanb,
struct lttng_ust_shm_handle *handle);
id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
for (;;) {
/* This check is called on the fast path for each record. */
- if (likely(!subbuffer_id_is_noref(config, id))) {
+ if (caa_likely(!subbuffer_id_is_noref(config, id))) {
/*
* Store after load dependency ordering the writes to
* the subbuffer after load and test of the noref flag
new_id = id;
subbuffer_id_clear_noref(config, &new_id);
new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
- if (likely(new_id == id))
+ if (caa_likely(new_id == id))
break;
id = new_id;
}
* following cmpxchg().
*/
old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
- if (unlikely(!subbuffer_id_is_noref(config, old_id)))
+ if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
return -EAGAIN;
/*
* Make sure the offset count we are expecting matches the one
* indicated by the writer.
*/
- if (unlikely(!subbuffer_id_compare_offset(config, old_id,
+ if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
consumed_count)))
return -EAGAIN;
CHAN_WARN_ON(shmp(handle, bufb->chan),
consumed_count);
new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
bufb->buf_rsb.id);
- if (unlikely(old_id != new_id))
+ if (caa_unlikely(old_id != new_id))
return -EAGAIN;
bufb->buf_rsb.id = new_id;
} else {
return 0;
}
+/*
+ * Fallback: use the libc memcpy when the architecture does not provide
+ * its own inline_memcpy implementation.
+ */
+#ifndef inline_memcpy
+#define inline_memcpy(dest, src, n) memcpy(dest, src, n)
+#endif
+
/*
* Use the architecture-specific memcpy implementation for constant-sized
* inputs, but rely on an inline memcpy for length statically unknown.
inline_memcpy(dest, src, __len); \
} while (0)
+/* arch-agnostic implementation */
+
+/*
+ * fls - find last (most-significant) set bit in a 32-bit word.
+ *
+ * Returns the 1-based position of the highest set bit (1..32), or 0 if
+ * no bit is set. Portable binary-search implementation with no
+ * compiler-builtin or architecture dependency; matches the semantics of
+ * the Linux kernel fls().
+ */
+static inline int fls(unsigned int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ /* Narrow down the bit position by halving the search window each step. */
+ if (!(x & 0xFFFF0000U)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xFF000000U)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xF0000000U)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xC0000000U)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000U)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+/*
+ * get_count_order - order (power-of-two exponent) needed to hold count.
+ *
+ * Returns ceil(log2(count)), e.g. 1 -> 0, 4 -> 2, 5 -> 3.
+ * NOTE(review): returns -1 for count == 0 (fls(0) == 0); presumably
+ * callers always pass a non-zero count — confirm at call sites.
+ */
+static inline int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count) - 1;
+ /* Not an exact power of two: round the order up. */
+ if (count & (count - 1))
+ order++;
+ return order;
+}
+
+/*
+ * hweight32 - population count (number of set bits) in a 32-bit value.
+ *
+ * Branch-free parallel bit-summing (SWAR): sums adjacent 1-, 2-, then
+ * 4-bit fields, then folds byte sums together. Arch-agnostic; matches
+ * the semantics of the Linux kernel hweight32().
+ */
+static inline
+unsigned int hweight32(unsigned int value)
+{
+ unsigned int r;
+
+ r = value;
+ r = r - ((r >> 1) & 0x55555555); /* per-2-bit counts */
+ r = (r & 0x33333333) + ((r >> 2) & 0x33333333); /* per-4-bit counts */
+ r += r >> 4;
+ r &= 0x0F0F0F0F; /* per-byte counts */
+ r += r >> 8;
+ r += r >> 16;
+ r &= 0x000000FF; /* total in low byte */
+ return r;
+}
+
#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */