* Ported to userspace by Pierre-Marc Fournier.
*/
-//ust// #include <linux/module.h>
-//ust// #include <linux/mutex.h>
-//ust// #include <linux/types.h>
-//ust// #include <linux/jhash.h>
-//ust// #include <linux/list.h>
-//ust// #include <linux/rcupdate.h>
-//ust// #include <linux/tracepoint.h>
-//ust// #include <linux/err.h>
-//ust// #include <linux/slab.h>
-//ust// #include <linux/immediate.h>
-
#include <errno.h>
-
-#include <ust/kernelcompat.h>
#include <ust/tracepoint.h>
+#include <ust/core.h>
+#include <ust/kcompat/kcompat.h>
#include "usterr.h"
-//#include "list.h"
#define _LGPL_SOURCE
#include <urcu-bp.h>
static inline void *allocate_probes(int count)
{
- struct tp_probes *p = kmalloc(count * sizeof(void *)
- + sizeof(struct tp_probes), GFP_KERNEL);
+ struct tp_probes *p = malloc(count * sizeof(void *)
+ + sizeof(struct tp_probes));
return p == NULL ? NULL : p->probes;
}
struct tp_probes, probes[0]);
//ust// call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
synchronize_rcu();
- kfree(tp_probes);
+ free(tp_probes);
}
}
- * Using kmalloc here to allocate a variable length element. Could
+ * Using malloc here to allocate a variable length element. Could
* cause some memory fragmentation if overused.
*/
- e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
+ e = malloc(sizeof(struct tracepoint_entry) + name_len);
if (!e)
return ERR_PTR(-ENOMEM);
memcpy(&e->name[0], name, name_len);
static inline void remove_tracepoint(struct tracepoint_entry *e)
{
hlist_del(&e->hlist);
- kfree(e);
+ free(e);
}
/*
struct tracepoint *iter;
struct tracepoint_entry *mark_entry;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
for (iter = begin; iter < end; iter++) {
mark_entry = get_tracepoint(iter->name);
if (mark_entry) {
disable_tracepoint(iter);
}
}
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
}
static void lib_update_tracepoints(void)
{
struct tracepoint_lib *lib;
-//ust// mutex_lock(&module_mutex);
+//ust// pthread_mutex_lock(&module_mutex);
list_for_each_entry(lib, &libs, list)
tracepoint_update_probe_range(lib->tracepoints_start,
lib->tracepoints_start + lib->tracepoints_count);
-//ust// mutex_unlock(&module_mutex);
+//ust// pthread_mutex_unlock(&module_mutex);
}
/*
{
void *old;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
old = tracepoint_add_probe(name, probe);
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
if (IS_ERR(old))
return PTR_ERR(old);
{
void *old;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
old = tracepoint_remove_probe(name, probe);
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
if (IS_ERR(old))
return PTR_ERR(old);
{
void *old;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
old = tracepoint_add_probe(name, probe);
if (IS_ERR(old)) {
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
return PTR_ERR(old);
}
tracepoint_add_old_probes(old);
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
{
void *old;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
old = tracepoint_remove_probe(name, probe);
if (IS_ERR(old)) {
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
return PTR_ERR(old);
}
tracepoint_add_old_probes(old);
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
LIST_HEAD(release_probes);
struct tp_probes *pos, *next;
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
if (!need_update) {
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
return;
}
if (!list_empty(&old_probes))
list_replace_init(&old_probes, &release_probes);
need_update = 0;
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
tracepoint_update_probes();
list_for_each_entry_safe(pos, next, &release_probes, u.list) {
list_del(&pos->u.list);
//ust// call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
synchronize_rcu();
- kfree(pos);
+ free(pos);
}
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
struct tracepoint_lib *iter_lib;
int found = 0;
-//ust// mutex_lock(&module_mutex);
+//ust// pthread_mutex_lock(&module_mutex);
list_for_each_entry(iter_lib, &libs, list) {
if (iter_lib < iter->lib)
continue;
break;
}
}
-//ust// mutex_unlock(&module_mutex);
+//ust// pthread_mutex_unlock(&module_mutex);
return found;
}
pl->tracepoints_count = tracepoints_count;
/* FIXME: maybe protect this with its own mutex? */
- mutex_lock(&tracepoints_mutex);
+ pthread_mutex_lock(&tracepoints_mutex);
list_add(&pl->list, &libs);
- mutex_unlock(&tracepoints_mutex);
+ pthread_mutex_unlock(&tracepoints_mutex);
new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);
return 0;
}
-int tracepoint_unregister_lib(struct tracepoint *tracepoints_start, int tracepoints_count)
+int tracepoint_unregister_lib(struct tracepoint *tracepoints_start)
{
- /*FIXME: implement; but before implementing, tracepoint_register_lib must
- have appropriate locking. */
+ struct tracepoint_lib *lib;
+
+ pthread_mutex_lock(&tracepoints_mutex);
+
+ list_for_each_entry(lib, &libs, list) {
+		if (lib->tracepoints_start == tracepoints_start) {
+ struct tracepoint_lib *lib2free = lib;
+ list_del(&lib->list);
+ free(lib2free);
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&tracepoints_mutex);
return 0;
}