Fix teardown deadlock
author  Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Sat, 14 May 2011 14:35:32 +0000 (10:35 -0400)
committer  Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Sat, 14 May 2011 14:35:32 +0000 (10:35 -0400)
Don't wait for the readers in the session teardown path anymore: if the
only thread that can finish reading the buffer is the same one waiting
on this condition, teardown deadlocks.

If the session daemon needs to wait on consumers, it can always do that
in user-space through some other mechanism.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
lib/ringbuffer/frontend_types.h
lib/ringbuffer/ring_buffer_frontend.c
ltt-events.c
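In short, the patch replaces the polled reader count (read_ref plus an
msleep() loop in channel_destroy()) with a struct kref: the channel creator
holds the initial reference, each reader open/release takes and drops one,
and whichever side drops the last reference frees the channel, so teardown
never blocks waiting on readers. Below is a minimal, self-contained sketch
of that kref lifetime pattern; the obj/obj_* names are hypothetical stand-ins
for the ring buffer channel code shown in the diff, not actual LTTng symbols.

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct obj {
		struct kref ref;	/* replaces the polled reader refcount */
		/* ... payload ... */
	};

	static void obj_release(struct kref *kref)
	{
		struct obj *o = container_of(kref, struct obj, ref);

		kfree(o);		/* runs when the last reference is dropped */
	}

	static struct obj *obj_create(void)
	{
		struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (!o)
			return NULL;
		kref_init(&o->ref);	/* creator owns the initial reference */
		return o;
	}

	/*
	 * Readers take a reference on open and drop it on release, so the
	 * object cannot be freed underneath an active reader.
	 */
	static void obj_reader_open(struct obj *o)
	{
		kref_get(&o->ref);
	}

	static void obj_reader_close(struct obj *o)
	{
		kref_put(&o->ref, obj_release);
	}

	/* Teardown just drops the creator's reference; no busy-wait on readers. */
	static void obj_destroy(struct obj *o)
	{
		kref_put(&o->ref, obj_release);
	}

If the last reader closes after obj_destroy() has run, its kref_put() is the
one that triggers obj_release(), which is exactly how channel_destroy() and
lib_ring_buffer_release_read() cooperate in the patch below.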

diff --git a/lib/ringbuffer/frontend_types.h b/lib/ringbuffer/frontend_types.h
index d269c00a685221086448b249724326f6acc5b81f..0fa2dddfce5efb9339fac01fe77a869786d303f9 100644
@@ -16,6 +16,7 @@
  * Dual LGPL v2.1/GPL v2 license.
  */
 
+#include <linux/kref.h>
 #include "../../wrapper/ringbuffer/config.h"
 #include "../../wrapper/ringbuffer/backend_types.h"
 #include "../../wrapper/prio_heap.h"   /* For per-CPU read-side iterator */
@@ -62,7 +63,7 @@ struct channel {
        int hp_iter_enable:1;                   /* Enable hp iter notif. */
        wait_queue_head_t read_wait;            /* reader wait queue */
        struct channel_iter iter;               /* Channel read-side iterator */
-       atomic_long_t read_ref;                 /* Reader reference count */
+       struct kref ref;                        /* Reference count */
 };
 
 /* Per-subbuffer commit counters used on the hot path */
diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c
index 5e633f438f32e57ad2793c4f4e89e56e5a0ce052..7d4dd8380638adf09262fbe30ddd55b34a486c65 100644
@@ -624,6 +624,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
        chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
        chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
        chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
+       kref_init(&chan->ref);
        init_waitqueue_head(&chan->read_wait);
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
@@ -685,6 +686,13 @@ error:
 }
 EXPORT_SYMBOL_GPL(channel_create);
 
+static
+void channel_release(struct kref *kref)
+{
+       struct channel *chan = container_of(kref, struct channel, ref);
+       channel_free(chan);
+}
+
 /**
  * channel_destroy - Finalize, wait for q.s. and destroy channel.
  * @chan: channel to destroy
@@ -741,13 +749,8 @@ void *channel_destroy(struct channel *chan)
                wake_up_interruptible(&buf->read_wait);
        }
        wake_up_interruptible(&chan->read_wait);
-
-       while (atomic_long_read(&chan->read_ref) > 0)
-               msleep(100);
-       /* Finish waiting for refcount before free */
-       smp_mb();
+       kref_put(&chan->ref, channel_release);
        priv = chan->backend.priv;
-       channel_free(chan);
        return priv;
 }
 EXPORT_SYMBOL_GPL(channel_destroy);
@@ -769,7 +772,7 @@ int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
 
        if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
                return -EBUSY;
-       atomic_long_inc(&chan->read_ref);
+       kref_get(&chan->ref);
        smp_mb__after_atomic_inc();
        return 0;
 }
@@ -781,8 +784,8 @@ void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
 
        CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
        smp_mb__before_atomic_dec();
-       atomic_long_dec(&chan->read_ref);
        atomic_long_dec(&buf->active_readers);
+       kref_put(&chan->ref, channel_release);
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_release_read);
 
diff --git a/ltt-events.c b/ltt-events.c
index cc189b31533cbbd79f8621c4b69fb650ba1a4070..b0191596d4519529be5a15fcfd3744aaf16ce7cf 100644
@@ -514,9 +514,10 @@ int _ltt_session_metadata_statedump(struct ltt_session *session)
                return 0;
        if (session->metadata_dumped)
                goto skip_session;
-
-
-
+       if (!session->metadata) {
+               printk(KERN_WARNING "LTTng: tracing is starting, but metadata channel is not found\n");
+               return -EPERM;
+       }
 
 skip_session:
        list_for_each_entry(chan, &session->chan, list) {