Do not install libringbuffer into system
[lttng-ust.git] / libringbuffer / ring_buffer_frontend.c
diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index 5d87782ee321e79d51e8164d0f80087369872b06..2cb99a3e608c72a97325ee3c64c7ffe31d78f8e1 100644
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
@@ -249,7 +249,7 @@ static void switch_buffer_timer(unsigned long data)
 	/*
 	 * Only flush buffers periodically if readers are active.
 	 */
-	if (uatomic_read(&buf->active_readers))
+	if (uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
 		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);
 
 	//TODO timers
@@ -266,7 +266,7 @@ static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf,
 			   struct shm_handle *handle)
 {
 	struct channel *chan = shmp(handle, buf->backend.chan);
-	const struct lib_ring_buffer_config *config = &chan->backend.config;
+	//const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (!chan->switch_timer_interval || buf->switch_timer_enabled)
 		return;
@@ -307,7 +307,7 @@ static void read_buffer_timer(unsigned long data)
 
 	CHAN_WARN_ON(chan, !buf->backend.allocated);
 
-	if (uatomic_read(&buf->active_readers)
+	if ((uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
 	    && lib_ring_buffer_poll_deliver(config, buf, chan)) {
 		//TODO
 		//wake_up_interruptible(&buf->read_wait);
@@ -395,11 +395,11 @@ static void channel_unregister_notifiers(struct channel *chan,
 	//channel_backend_unregister_notifiers(&chan->backend);
 }
 
-static void channel_free(struct channel *chan, struct shm_handle *handle)
+static void channel_free(struct channel *chan, struct shm_handle *handle,
+		int shadow)
 {
-	int ret;
-
-	channel_backend_free(&chan->backend, handle);
+	if (!shadow)
+		channel_backend_free(&chan->backend, handle);
 	/* chan is freed by shm teardown */
 	shm_object_table_destroy(handle->table);
 	free(handle);
@@ -550,9 +550,10 @@ int channel_handle_add_stream(struct shm_handle *handle,
 }
 
 static
-void channel_release(struct channel *chan, struct shm_handle *handle)
+void channel_release(struct channel *chan, struct shm_handle *handle,
+		int shadow)
 {
-	channel_free(chan, handle);
+	channel_free(chan, handle, shadow);
 }
 
 /**
@@ -566,12 +567,18 @@ void channel_release(struct channel *chan, struct shm_handle *handle)
  * They should release their handle at that point. Returns the private
  * data pointer.
  */
-void *channel_destroy(struct channel *chan, struct shm_handle *handle)
+void *channel_destroy(struct channel *chan, struct shm_handle *handle,
+		int shadow)
 {
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	void *priv;
 	int cpu;
 
+	if (shadow) {
+		channel_release(chan, handle, shadow);
+		return NULL;
+	}
+
 	channel_unregister_notifiers(chan, handle);
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
@@ -615,7 +622,7 @@ void *channel_destroy(struct channel *chan, struct shm_handle *handle)
 	 * descriptor directly. No need to refcount.
 	 */
 	priv = chan->backend.priv;
-	channel_release(chan, handle);
+	channel_release(chan, handle, shadow);
 	return priv;
 }
@@ -634,6 +641,8 @@ struct lib_ring_buffer *channel_get_ring_buffer(
 					  memory_map_size);
 		return shmp(handle, chan->backend.buf[0].shmp);
 	} else {
+		if (cpu >= num_possible_cpus())
+			return NULL;
 		ref = &chan->backend.buf[cpu].shmp._ref;
 		shm_get_object_data(handle, ref, shm_fd, wait_fd,
 				    memory_map_size);
@@ -642,10 +651,15 @@ struct lib_ring_buffer *channel_get_ring_buffer(
 }
 
 int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
-			      struct shm_handle *handle)
+			      struct shm_handle *handle,
+			      int shadow)
 {
-	struct channel *chan = shmp(handle, buf->backend.chan);
-
+	if (shadow) {
+		if (uatomic_cmpxchg(&buf->active_shadow_readers, 0, 1) != 0)
+			return -EBUSY;
+		cmm_smp_mb();
+		return 0;
+	}
 	if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
 		return -EBUSY;
 	cmm_smp_mb();
@@ -653,10 +667,17 @@ int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
 }
 
 void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
-				  struct shm_handle *handle)
+				  struct shm_handle *handle,
+				  int shadow)
 {
 	struct channel *chan = shmp(handle, buf->backend.chan);
 
+	if (shadow) {
+		CHAN_WARN_ON(chan, uatomic_read(&buf->active_shadow_readers) != 1);
+		cmm_smp_mb();
+		uatomic_dec(&buf->active_shadow_readers);
+		return;
+	}
 	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
 	cmm_smp_mb();
 	uatomic_dec(&buf->active_readers);
@@ -734,7 +755,8 @@ void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
 	struct channel *chan = shmp(handle, bufb->chan);
 	unsigned long consumed;
 
-	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+		     && uatomic_read(&buf->active_shadow_readers) != 1);
 
 	/*
 	 * Only push the consumed value forward.
@@ -857,7 +879,8 @@ void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf,
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long read_sb_bindex, consumed_idx, consumed;
 
-	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+		     && uatomic_read(&buf->active_shadow_readers) != 1);
 
 	if (!buf->get_subbuf) {
 		/*
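The hunks above thread a new int shadow argument through the read-side and teardown entry points. The following is a minimal sketch, not part of this commit, of how a consumer holding only a shadow mapping of the channel structures might call the updated API. The helper names consumer_read_one_subbuffer() and consumer_teardown() are hypothetical; only the declared prototypes and the shadow semantics (active_shadow_readers instead of active_readers, handle-only teardown returning NULL) are taken from the diff.

/* Sketch only: assumes the prototypes introduced by this change. */
#include <stddef.h>

struct lib_ring_buffer;
struct channel;
struct shm_handle;

extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
				     struct shm_handle *handle, int shadow);
extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
					 struct shm_handle *handle, int shadow);
extern void *channel_destroy(struct channel *chan, struct shm_handle *handle,
			     int shadow);

/* Hypothetical consumer helper: read through a shadow mapping. */
static int consumer_read_one_subbuffer(struct lib_ring_buffer *buf,
				       struct shm_handle *handle)
{
	const int shadow = 1;	/* caller only holds a shadow copy */
	int ret;

	/* Grabs active_shadow_readers rather than active_readers. */
	ret = lib_ring_buffer_open_read(buf, handle, shadow);
	if (ret)
		return ret;	/* -EBUSY if another shadow reader is active */

	/* ... get_subbuf / copy data / put_subbuf would go here ... */

	lib_ring_buffer_release_read(buf, handle, shadow);
	return 0;
}

/* Hypothetical teardown of a shadow handle: per this diff, passing
 * shadow = 1 makes channel_destroy() only release the handle (shm
 * object table teardown + free) and return NULL. */
static void consumer_teardown(struct channel *chan, struct shm_handle *handle)
{
	(void) channel_destroy(chan, handle, 1);
}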