ltt_statedump_default;
struct module *ltt_statedump_owner;
-struct chan_info_struct {
- const char *name;
- unsigned int def_subbufsize;
- unsigned int def_subbufcount;
-} chan_infos[] = {
+struct chan_info_struct chan_infos[] = {
[LTT_CHANNEL_METADATA] = {
LTT_METADATA_CHANNEL,
LTT_DEFAULT_SUBBUF_SIZE_LOW,
//ust//
//ust// }
-static LIST_HEAD(ltt_transport_list);
+static CDS_LIST_HEAD(ltt_transport_list);
/**
* ltt_transport_register - LTT transport registration
//ust// vmalloc_sync_all();
ltt_lock_traces();
- list_add_tail(&transport->node, <t_transport_list);
+ cds_list_add_tail(&transport->node, <t_transport_list);
ltt_unlock_traces();
}
/*
 * ltt_transport_unregister - LTT transport unregistration.
 * @transport: transport entry previously added by ltt_transport_register.
 *
 * Unlinks the transport from the global transport list while holding the
 * traces lock.  Hunk below: kernel list_del -> liburcu cds_list_del.
 */
void ltt_transport_unregister(struct ltt_transport *transport)
{
ltt_lock_traces();
- list_del(&transport->node);
+ cds_list_del(&transport->node);
ltt_unlock_traces();
}
//ust// #else
//ust// ltt_lock_traces();
//ust// #endif
-//ust// list_for_each_entry_rcu(trace, <t_traces.head, list) {
+//ust// cds_list_for_each_entry_rcu(trace, <t_traces.head, list) {
//ust// trace_async_wakeup(trace);
//ust// }
//ust// #ifndef CONFIG_PREEMPT_RT
{
struct ust_trace *trace;
- list_for_each_entry(trace, <t_traces.head, list)
+ cds_list_for_each_entry(trace, <t_traces.head, list)
if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
return trace;
{
struct ust_trace *trace;
- list_for_each_entry(trace, <t_traces.setup_head, list)
+ cds_list_for_each_entry(trace, <t_traces.setup_head, list)
if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
return trace;
* ltt_release_transport - Release an LTT transport
- * @kref : reference count on the transport
+ * @urcu_ref : reference count on the transport
*/
-void ltt_release_transport(struct kref *kref)
+void ltt_release_transport(struct urcu_ref *urcu_ref)
{
-//ust// struct ust_trace *trace = container_of(kref,
-//ust// struct ust_trace, ltt_transport_kref);
+//ust// struct ust_trace *trace = _ust_container_of(urcu_ref,
+//ust// struct ust_trace, ltt_transport_urcu_ref);
* ltt_release_trace - Release a LTT trace
- * @kref : reference count on the trace
+ * @urcu_ref : reference count on the trace (urcu_ref release callback;
+ *             renamed from kref with the move to userspace RCU refcounting)
*/
-void ltt_release_trace(struct kref *kref)
+void ltt_release_trace(struct urcu_ref *urcu_ref)
{
- struct ust_trace *trace = container_of(kref,
- struct ust_trace, kref);
+ struct ust_trace *trace = _ust_container_of(urcu_ref,
+ struct ust_trace, urcu_ref);
/* Free the per-trace channel array before the trace itself. */
ltt_channels_trace_free(trace->channels);
free(trace);
}
chan_infos[chantype].def_subbufcount;
}
- list_add(&new_trace->list, <t_traces.setup_head);
+ cds_list_add(&new_trace->list, <t_traces.setup_head);
return 0;
trace_free:
/*
 * _ltt_trace_free - unlink @trace from its list and free it.
 * Must be called from within a traces lock (caller holds ltt_lock_traces).
 * Hunk below: kernel list_del -> liburcu cds_list_del.
 */
static void _ltt_trace_free(struct ust_trace *trace)
{
- list_del(&trace->list);
+ cds_list_del(&trace->list);
free(trace);
}
goto traces_error;
}
- list_for_each_entry(tran_iter, <t_transport_list, node) {
+ cds_list_for_each_entry(tran_iter, <t_transport_list, node) {
if (!strcmp(tran_iter->name, trace_type)) {
transport = tran_iter;
break;
goto traces_error;
}
- kref_init(&trace->kref);
- kref_init(&trace->ltt_transport_kref);
-//ust// init_waitqueue_head(&trace->kref_wq);
+ urcu_ref_init(&trace->urcu_ref);
+ urcu_ref_init(&trace->ltt_transport_urcu_ref);
+//ust// init_waitqueue_head(&trace->urcu_ref_wq);
trace->active = 0;
//ust// get_trace_clock();
trace->freq_scale = trace_clock_freq_scale();
}
}
- list_del(&trace->list);
-//ust// if (list_empty(<t_traces.head)) {
+ cds_list_del(&trace->list);
+//ust// if (cds_list_empty(<t_traces.head)) {
//ust// mod_timer(<t_async_wakeup_timer,
//ust// jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust// set_kernel_trace_flag_all_tasks();
//ust// }
- list_add_rcu(&trace->list, <t_traces.head);
+ cds_list_add_rcu(&trace->list, <t_traces.head);
//ust// synchronize_sched();
ltt_unlock_traces();
goto active_error;
}
/* Everything went fine */
- list_del_rcu(&trace->list);
+ cds_list_del_rcu(&trace->list);
synchronize_rcu();
- if (list_empty(<t_traces.head)) {
+ if (cds_list_empty(<t_traces.head)) {
//ust// clear_kernel_trace_flag_all_tasks();
/*
* We stop the asynchronous delivery of reader wakeup, but
}
}
- return; /* FIXME: temporary for ust */
-//ust// flush_scheduled_work();
-
/*
* The currently destroyed trace is not in the trace list anymore,
* so it's safe to call the async wakeup ourself. It will deliver
trace->ops->remove_channel(chan);
}
- kref_put(&trace->ltt_transport_kref, ltt_release_transport);
+ urcu_ref_put(&trace->ltt_transport_urcu_ref, ltt_release_transport);
//ust// module_put(trace->transport->owner);
-//ust// __wait_event_interruptible(trace->kref_wq,
-//ust// (atomic_read(&trace->kref.refcount) == 1), ret);
+//ust// __wait_event_interruptible(trace->urcu_ref_wq,
+//ust// (atomic_read(&trace->urcu_ref.refcount) == 1), ret);
//ust// }
- kref_put(&trace->kref, ltt_release_trace);
+ urcu_ref_put(&trace->urcu_ref, ltt_release_trace);
}
int ltt_trace_destroy(const char *trace_name, int drop)
ltt_unlock_traces();
/*
- * Call the kernel state dump.
- * Events will be mixed with real kernel events, it's ok.
+ * Call the process-wide state dump.
* Notice that there is no protection on the trace : that's exactly
* why we iterate on the list and check for trace equality instead of
- * directly using this trace handle inside the logging function.
+ * directly using this trace handle inside the logging function: we want
+ * to record events only in a single trace in the trace session list.
*/
ltt_dump_marker_state(trace);