//ust// }
static CDS_LIST_HEAD(ltt_transport_list);
-
+/* transport mutex, nests inside traces mutex (ltt_lock_traces) */
+static DEFINE_MUTEX(ltt_transport_mutex);
/**
* ltt_transport_register - LTT transport registration
* @transport: transport structure
*/
//ust// vmalloc_sync_all();
- ltt_lock_traces();
+ pthread_mutex_lock(&ltt_transport_mutex);
cds_list_add_tail(&transport->node, &ltt_transport_list);
- ltt_unlock_traces();
+ pthread_mutex_unlock(&ltt_transport_mutex);
}
/**
*/
void ltt_transport_unregister(struct ltt_transport *transport)
{
- ltt_lock_traces();
+ pthread_mutex_lock(&ltt_transport_mutex);
cds_list_del(&transport->node);
- ltt_unlock_traces();
+ pthread_mutex_unlock(&ltt_transport_mutex);
}
static inline int is_channel_overwrite(enum ltt_channels chan,
* ltt_release_transport - Release an LTT transport
* @kref : reference count on the transport
*/
-void ltt_release_transport(struct kref *kref)
+void ltt_release_transport(struct urcu_ref *urcu_ref)
{
//ust// struct ust_trace *trace = container_of(kref,
//ust// struct ust_trace, ltt_transport_kref);
* ltt_release_trace - Release a LTT trace
* @kref : reference count on the trace
*/
-void ltt_release_trace(struct kref *kref)
+void ltt_release_trace(struct urcu_ref *urcu_ref)
{
- struct ust_trace *trace = _ust_container_of(kref,
- struct ust_trace, kref);
+ struct ust_trace *trace = _ust_container_of(urcu_ref,
+ struct ust_trace, urcu_ref);
ltt_channels_trace_free(trace->channels);
free(trace);
}
goto traces_error;
}
+ pthread_mutex_lock(&ltt_transport_mutex);
cds_list_for_each_entry(tran_iter, &ltt_transport_list, node) {
if (!strcmp(tran_iter->name, trace_type)) {
transport = tran_iter;
break;
}
}
+ pthread_mutex_unlock(&ltt_transport_mutex);
+
if (!transport) {
ERR("Transport %s is not present", trace_type);
err = -EINVAL;
goto traces_error;
}
- kref_init(&trace->kref);
- kref_init(&trace->ltt_transport_kref);
-//ust// init_waitqueue_head(&trace->kref_wq);
+ urcu_ref_init(&trace->urcu_ref);
+ urcu_ref_init(&trace->ltt_transport_urcu_ref);
+//ust// init_waitqueue_head(&trace->urcu_ref_wq);
trace->active = 0;
//ust// get_trace_clock();
trace->freq_scale = trace_clock_freq_scale();
}
}
- return; /* FIXME: temporary for ust */
-//ust// flush_scheduled_work();
-
/*
* The currently destroyed trace is not in the trace list anymore,
* so it's safe to call the async wakeup ourself. It will deliver
trace->ops->remove_channel(chan);
}
- kref_put(&trace->ltt_transport_kref, ltt_release_transport);
+ urcu_ref_put(&trace->ltt_transport_urcu_ref, ltt_release_transport);
//ust// module_put(trace->transport->owner);
//ust// __wait_event_interruptible(trace->kref_wq,
//ust// (atomic_read(&trace->kref.refcount) == 1), ret);
//ust// }
- kref_put(&trace->kref, ltt_release_trace);
+ urcu_ref_put(&trace->urcu_ref, ltt_release_trace);
}
int ltt_trace_destroy(const char *trace_name, int drop)
ltt_unlock_traces();
/*
- * Call the kernel state dump.
- * Events will be mixed with real kernel events, it's ok.
+ * Call the process-wide state dump.
* Notice that there is no protection on the trace : that's exactly
* why we iterate on the list and check for trace equality instead of
- * directly using this trace handle inside the logging function.
+ * directly using this trace handle inside the logging function: we want
+ * to record events only in a single trace in the trace session list.
*/
ltt_dump_marker_state(trace);