*/
#include <stdlib.h>
-#include <ust/kernelcompat.h>
#include <ust/marker.h>
#include "channels.h"
-#include "usterr.h"
+#include "usterr_signal_safe.h"
/*
* ltt_channel_mutex may be nested inside the LTT trace mutex.
* ltt_channel_mutex mutex may be nested inside markers mutex.
*/
static DEFINE_MUTEX(ltt_channel_mutex);
-static LIST_HEAD(ltt_channels);
+static CDS_LIST_HEAD(ltt_channels);
/*
* Index of next channel in array. Makes sure that as long as a trace channel is
* allocated, no array index will be re-used when a channel is freed and then
* another channel is allocated. This index is cleared and the array indexeds
- * get reassigned when the index_kref goes back to 0, which indicates that no
+ * get reassigned when the index_urcu_ref goes back to 0, which indicates that no
* more trace channels are allocated.
*/
static unsigned int free_index;
-static struct kref index_kref; /* Keeps track of allocated trace channels */
+static struct urcu_ref index_urcu_ref; /* Keeps track of allocated trace channels */
int ust_channels_overwrite_by_default = 0;
int ust_channels_request_collection_by_default = 1;
{
struct ltt_channel_setting *iter;
- list_for_each_entry(iter, &ltt_channels, list)
+ cds_list_for_each_entry(iter, &ltt_channels, list)
if (strcmp(name, iter->name) == 0)
return iter;
return NULL;
*
* Called with lock_markers() and channels mutex held.
*/
-static void release_channel_setting(struct kref *kref)
+static void release_channel_setting(struct urcu_ref *urcu_ref)
{
- struct ltt_channel_setting *setting = container_of(kref,
- struct ltt_channel_setting, kref);
+ struct ltt_channel_setting *setting = _ust_container_of(urcu_ref,
+ struct ltt_channel_setting, urcu_ref);
struct ltt_channel_setting *iter;
- if (uatomic_read(&index_kref.refcount) == 0
- && uatomic_read(&setting->kref.refcount) == 0) {
- list_del(&setting->list);
+ if (uatomic_read(&index_urcu_ref.refcount) == 0
+ && uatomic_read(&setting->urcu_ref.refcount) == 0) {
+ cds_list_del(&setting->list);
free(setting);
free_index = 0;
- list_for_each_entry(iter, &ltt_channels, list) {
+ cds_list_for_each_entry(iter, &ltt_channels, list) {
iter->index = free_index++;
iter->free_event_id = 0;
}
*
* Called with lock_markers() and channels mutex held.
*/
-static void release_trace_channel(struct kref *kref)
+static void release_trace_channel(struct urcu_ref *urcu_ref)
{
struct ltt_channel_setting *iter, *n;
- list_for_each_entry_safe(iter, n, &ltt_channels, list)
- release_channel_setting(&iter->kref);
+ cds_list_for_each_entry_safe(iter, n, &ltt_channels, list)
+ release_channel_setting(&iter->urcu_ref);
}
/**
struct ltt_channel_setting *setting;
int ret = 0;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
if (setting) {
- if (uatomic_read(&setting->kref.refcount) == 0)
- goto init_kref;
+ if (uatomic_read(&setting->urcu_ref.refcount) == 0)
+ goto init_urcu_ref;
else {
- kref_get(&setting->kref);
+ urcu_ref_get(&setting->urcu_ref);
goto end;
}
}
ret = -ENOMEM;
goto end;
}
- list_add(&setting->list, &ltt_channels);
+ cds_list_add(&setting->list, &ltt_channels);
strncpy(setting->name, name, PATH_MAX-1);
setting->index = free_index++;
-init_kref:
- kref_init(&setting->kref);
+init_urcu_ref:
+ urcu_ref_init(&setting->urcu_ref);
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_register);
struct ltt_channel_setting *setting;
int ret = 0;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
- if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
+ if (!setting || uatomic_read(&setting->urcu_ref.refcount) == 0) {
ret = -ENOENT;
goto end;
}
- kref_put(&setting->kref, release_channel_setting);
+ urcu_ref_put(&setting->urcu_ref, release_channel_setting);
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);
struct ltt_channel_setting *setting;
int ret = 0;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
- if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
+ if (!setting || uatomic_read(&setting->urcu_ref.refcount) == 0) {
ret = -ENOENT;
goto end;
}
setting->subbuf_size = subbuf_size;
setting->subbuf_cnt = subbuf_cnt;
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);
{
struct ltt_channel_setting *iter;
- list_for_each_entry(iter, &ltt_channels, list)
- if (iter->index == index && uatomic_read(&iter->kref.refcount))
+ cds_list_for_each_entry(iter, &ltt_channels, list)
+ if (iter->index == index && uatomic_read(&iter->urcu_ref.refcount))
return iter->name;
return NULL;
}
{
struct ltt_channel_setting *iter;
- list_for_each_entry(iter, &ltt_channels, list)
+ cds_list_for_each_entry(iter, &ltt_channels, list)
if (!strcmp(iter->name, name)
- && uatomic_read(&iter->kref.refcount))
+ && uatomic_read(&iter->urcu_ref.refcount))
return iter;
return NULL;
}
struct ust_channel *channel = NULL;
struct ltt_channel_setting *iter;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
if (!free_index) {
WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
goto end;
}
- if (!uatomic_read(&index_kref.refcount))
- kref_init(&index_kref);
+ if (!uatomic_read(&index_urcu_ref.refcount))
+ urcu_ref_init(&index_urcu_ref);
else
- kref_get(&index_kref);
+ urcu_ref_get(&index_urcu_ref);
*nr_channels = free_index;
channel = zmalloc(sizeof(struct ust_channel) * free_index);
if (!channel) {
WARN("ltt_channel_struct: channel null after alloc");
goto end;
}
- list_for_each_entry(iter, &ltt_channels, list) {
- if (!uatomic_read(&iter->kref.refcount))
+ cds_list_for_each_entry(iter, &ltt_channels, list) {
+ if (!uatomic_read(&iter->urcu_ref.refcount))
continue;
channel[iter->index].subbuf_size = iter->subbuf_size;
channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
channel[iter->index].channel_name = iter->name;
}
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return channel;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
void ltt_channels_trace_free(struct ust_channel *channels)
{
lock_markers();
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
free(channels);
- kref_put(&index_kref, release_trace_channel);
- mutex_unlock(<t_channel_mutex);
+ urcu_ref_put(&index_urcu_ref, release_trace_channel);
+ pthread_mutex_unlock(<t_channel_mutex);
unlock_markers();
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
{
int ret;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
ret = _ltt_channels_get_event_id(channel, name);
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}