Update version to 0.16
[ust.git] / libust / trace_event.c
index af1e3fb1180e55753a8adb63a2a84246f9280509..854a7676b3586d10b84ece785eb97cb65f2aec3d 100644 (file)
@@ -3,8 +3,8 @@
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
  *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  *
  */
 
+#define _LGPL_SOURCE
 #include <errno.h>
 #include <ust/tracepoint.h>
+#include <ust/tracepoint-internal.h>
 #include <ust/core.h>
 #include <ust/kcompat/kcompat.h>
-#include "usterr.h"
-
-#define _LGPL_SOURCE
 #include <urcu-bp.h>
 
-/* libraries that contain trace_events (struct trace_event_lib) */
-static LIST_HEAD(libs);
+#include "usterr_signal_safe.h"
 
+/* libraries that contain trace_events (struct trace_event_lib) */
+static CDS_LIST_HEAD(libs);
+/*
+ * Nested mutex is not required here, but provides the same guarantees
+ * for start/stop iteration vs nested ops as markers and tracepoints.
+ */
+static __thread int nested_mutex;
 static DEFINE_MUTEX(trace_events_mutex);
 
+static
+int trace_event_get_iter_range(struct trace_event * const **trace_event,
+       struct trace_event * const *begin,
+       struct trace_event * const *end);
+
+static
 void lock_trace_events(void)
 {
-       pthread_mutex_lock(&trace_events_mutex);
+       if (!(nested_mutex++))
+               pthread_mutex_lock(&trace_events_mutex);
 }
 
+static
 void unlock_trace_events(void)
 {
-       pthread_mutex_unlock(&trace_events_mutex);
+       if (!(--nested_mutex))
+               pthread_mutex_unlock(&trace_events_mutex);
 }
 
-
+static
 int lib_get_iter_trace_events(struct trace_event_iter *iter)
 {
        struct trace_event_lib *iter_lib;
        int found = 0;
 
-       list_for_each_entry(iter_lib, &libs, list) {
+       cds_list_for_each_entry(iter_lib, &libs, list) {
                if (iter_lib < iter->lib)
                        continue;
                else if (iter_lib > iter->lib)
@@ -71,16 +85,21 @@ int lib_get_iter_trace_events(struct trace_event_iter *iter)
  *
  * Returns whether a next trace_event has been found (1) or not (0).
  * Will return the first trace_event in the range if the input trace_event is NULL.
+ * Called with trace event mutex held.
  */
-int trace_event_get_iter_range(struct trace_event **trace_event, struct trace_event *begin,
-       struct trace_event *end)
+static
+int trace_event_get_iter_range(struct trace_event * const **trace_event,
+       struct trace_event * const *begin,
+       struct trace_event * const *end)
 {
-       if (!*trace_event && begin != end) {
+       if (!*trace_event && begin != end)
                *trace_event = begin;
-               return 1;
+       while (*trace_event >= begin && *trace_event < end) {
+               if (!**trace_event)
+                       (*trace_event)++;       /* skip dummy */
+               else
+                       return 1;
        }
-       if (*trace_event >= begin && *trace_event < end)
-               return 1;
        return 0;
 }
 
@@ -89,16 +108,20 @@ static void trace_event_get_iter(struct trace_event_iter *iter)
        int found = 0;
 
        found = lib_get_iter_trace_events(iter);
-end:
+
        if (!found)
                trace_event_iter_reset(iter);
 }
 
 void trace_event_iter_start(struct trace_event_iter *iter)
 {
+       lock_trace_events();
        trace_event_get_iter(iter);
 }
 
+/*
+ * Called with trace event mutex held.
+ */
 void trace_event_iter_next(struct trace_event_iter *iter)
 {
        iter->trace_event++;
@@ -110,48 +133,64 @@ void trace_event_iter_next(struct trace_event_iter *iter)
        trace_event_get_iter(iter);
 }
 
+void trace_event_iter_stop(struct trace_event_iter *iter)
+{
+       unlock_trace_events();
+}
+
 void trace_event_iter_reset(struct trace_event_iter *iter)
 {
        iter->lib = NULL;
        iter->trace_event = NULL;
 }
 
-int trace_event_register_lib(struct trace_event *trace_events_start,
+int trace_event_register_lib(struct trace_event * const *trace_events_start,
                             int trace_events_count)
 {
-       struct trace_event_lib *pl;
+       struct trace_event_lib *pl, *iter;
 
        pl = (struct trace_event_lib *) malloc(sizeof(struct trace_event_lib));
 
        pl->trace_events_start = trace_events_start;
        pl->trace_events_count = trace_events_count;
 
-       /* FIXME: maybe protect this with its own mutex? */
-       pthread_mutex_lock(&trace_events_mutex);
-       list_add(&pl->list, &libs);
-       pthread_mutex_unlock(&trace_events_mutex);
+       lock_trace_events();
+       /*
+        * We sort the libs by struct lib pointer address.
+        */
+       cds_list_for_each_entry_reverse(iter, &libs, list) {
+               BUG_ON(iter == pl);    /* Should never be in the list twice */
+               if (iter < pl) {
+                       /* We belong to the location right after iter. */
+                       cds_list_add(&pl->list, &iter->list);
+                       goto lib_added;
+               }
+       }
+       /* We should be added at the head of the list */
+       cds_list_add(&pl->list, &libs);
+lib_added:
+       unlock_trace_events();
 
-       DBG("just registered a trace_events section from %p and having %d trace_events", trace_events_start, trace_events_count);
+       /* trace_events_count - 1: skip dummy */
+       DBG("just registered a trace_events section from %p and having %d trace_events (minus dummy trace_event)", trace_events_start, trace_events_count);
 
        return 0;
 }
 
-int trace_event_unregister_lib(struct trace_event *trace_events_start)
+int trace_event_unregister_lib(struct trace_event * const *trace_events_start)
 {
        struct trace_event_lib *lib;
 
-       pthread_mutex_lock(&trace_events_mutex);
-
-       list_for_each_entry(lib, &libs, list) {
+       lock_trace_events();
+       cds_list_for_each_entry(lib, &libs, list) {
                if(lib->trace_events_start == trace_events_start) {
                        struct trace_event_lib *lib2free = lib;
-                       list_del(&lib->list);
+                       cds_list_del(&lib->list);
                        free(lib2free);
                        break;
                }
        }
-
-       pthread_mutex_unlock(&trace_events_mutex);
+       unlock_trace_events();
 
        return 0;
 }
This page took 0.024483 seconds and 4 git commands to generate.