*
* Dual LGPL v2.1/GPL v2 license.
*/
+
+/* This file contains functions for tracepoint custom probes support. */
+
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
#include <urcu/rculist.h>
-#include <ust/type-serializer.h>
#include <ust/core.h>
#include <ust/clock.h>
+#include <urcu-bp.h>
#include "tracer.h"
+#include "type-serializer.h"
notrace
-void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
+void _ltt_specialized_trace(const struct ust_marker *mdata, void *probe_data,
void *serialize_private, unsigned int data_size,
unsigned int largest_align)
{
cpu = ust_get_cpu();
/* Force volatile access. */
- STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
/*
* asm volatile and "memory" clobber prevent the compiler from moving
* traps, divisions by 0, ...) are triggered within the incremented
* nesting count section.
*/
- barrier();
+ cmm_barrier();
eID = mdata->event_id;
chan_index = mdata->channel_id;
* Iterate on each trace, typically small number of active traces,
* list iteration with prefetch is usually slower.
*/
- list_for_each_entry_rcu(trace, <t_traces.head, list) {
+ cds_list_for_each_entry_rcu(trace, <t_traces.head, list) {
if (unlikely(!trace->active))
continue;
//ust// if (unlikely(!ltt_run_filter(trace, eID)))
if (!chan->active)
continue;
+ /*
+  * If a new CPU was hotplugged after the trace was started, it was
+  * never added to the trace, so fall back to writing the event to
+  * the cpu 0 buffer.
+  */
+ if(cpu >= chan->n_cpus) {
+ cpu = 0;
+ }
+
/* reserve space : header and data */
ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
cpu, &buf, &slot_size, &buf_offset, &tsc,
* traps, divisions by 0, ...) are triggered within the incremented
* nesting count section.
*/
- barrier();
- STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+ cmm_barrier();
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
rcu_read_unlock();
}