Tracepoint and TRACEPOINT_EVENT API cleanup
[ust.git] / libust / tracepoint.c
1/*
2 * Copyright (C) 2008 Mathieu Desnoyers
3 * Copyright (C) 2009 Pierre-Marc Fournier
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation;
8 * version 2.1 of the License.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 *
19 * Ported to userspace by Pierre-Marc Fournier.
20 */
21
22#define _LGPL_SOURCE
23#include <errno.h>
24#include <ust/tracepoint.h>
25#include <ust/tracepoint-internal.h>
26#include <ust/core.h>
27#include <ust/kcompat/kcompat.h>
28#include <urcu-bp.h>
29#include <urcu/hlist.h>
30
31#include "usterr_signal_safe.h"
32
33//extern struct tracepoint __start___tracepoints[] __attribute__((visibility("hidden")));
34//extern struct tracepoint __stop___tracepoints[] __attribute__((visibility("hidden")));
35
36/* Set to 1 to enable tracepoint debug output */
37static const int tracepoint_debug;
38
39/* libraries that contain tracepoints (struct tracepoint_lib) */
40static CDS_LIST_HEAD(libs);
41
42/*
43 * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
44 * builtin and module tracepoints and the hash table.
45 */
46static DEFINE_MUTEX(tracepoints_mutex);
47
48/*
49 * Tracepoint hash table, containing the active tracepoints.
50 * Protected by tracepoints_mutex.
51 */
52#define TRACEPOINT_HASH_BITS 6
53#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
54static struct cds_hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
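
/*
 * Illustrative sketch (not from the original sources; "ust/my_event" is a
 * made-up name): how a tracepoint name is mapped to a bucket of the table
 * above, mirroring what get_tracepoint()/add_tracepoint() do below.
 *
 *	u32 hash = jhash("ust/my_event", strlen("ust/my_event"), 0);
 *	struct cds_hlist_head *head =
 *		&tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
 */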
55
56/*
57 * Note about RCU:
58 * It is used to delay the freeing of old multi-probe arrays until a
59 * quiescent state is reached.
60 * Tracepoint entry modifications are protected by the tracepoints_mutex.
61 */
62struct tracepoint_entry {
63 struct cds_hlist_node hlist;
64 struct tracepoint_probe *probes;
65 int refcount; /* Number of times armed. 0 if disarmed. */
66 char name[0];
67};
68
69struct tp_probes {
70 union {
71//ust// struct rcu_head rcu;
72 struct cds_list_head list;
73 } u;
74 struct tracepoint_probe probes[0];
75};
76
77static inline void *allocate_probes(int count)
78{
79 struct tp_probes *p = zmalloc(count * sizeof(struct tracepoint_probe)
80 + sizeof(struct tp_probes));
81 return p == NULL ? NULL : p->probes;
82}
83
84//ust// static void rcu_free_old_probes(struct rcu_head *head)
85//ust// {
86//ust// kfree(container_of(head, struct tp_probes, u.rcu));
87//ust// }
88
89static inline void release_probes(void *old)
90{
91 if (old) {
92 struct tp_probes *tp_probes = _ust_container_of(old,
93 struct tp_probes, probes[0]);
94//ust// call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
95 synchronize_rcu();
96 free(tp_probes);
97 }
98}
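
/*
 * Illustrative pairing of allocate_probes()/release_probes() (hypothetical
 * call sequence): callers only ever see the probes[] flexible array; the
 * enclosing struct tp_probes is recovered with container_of() before the
 * block is freed, after an RCU grace period.
 *
 *	struct tracepoint_probe *p = allocate_probes(2); // one probe + NULL slot
 *	...
 *	release_probes(p); // synchronize_rcu(), then free the whole block
 */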
99
100static void debug_print_probes(struct tracepoint_entry *entry)
101{
102 int i;
103
104 if (!tracepoint_debug || !entry->probes)
105 return;
106
107 for (i = 0; entry->probes[i].func; i++)
108 DBG("Probe %d : %p", i, entry->probes[i].func);
109}
110
111static void *
112tracepoint_entry_add_probe(struct tracepoint_entry *entry,
113 void *probe, void *data)
114{
115 int nr_probes = 0;
116 struct tracepoint_probe *old, *new;
117
118 WARN_ON(!probe);
119
120 debug_print_probes(entry);
121 old = entry->probes;
122 if (old) {
123 /* (N -> N+1), (N != 0, 1) probes */
124 for (nr_probes = 0; old[nr_probes].func; nr_probes++)
125 if (old[nr_probes].func == probe &&
126 old[nr_probes].data == data)
127 return ERR_PTR(-EEXIST);
128 }
129 /* + 2 : one for new probe, one for NULL func */
130 new = allocate_probes(nr_probes + 2);
131 if (new == NULL)
132 return ERR_PTR(-ENOMEM);
133 if (old)
134 memcpy(new, old, nr_probes * sizeof(struct tracepoint_probe));
135 new[nr_probes].func = probe;
136 new[nr_probes].data = data;
137 new[nr_probes + 1].func = NULL;
138 entry->refcount = nr_probes + 1;
139 entry->probes = new;
140 debug_print_probes(entry);
141 return old;
142}
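
/*
 * Worked example for the add path (hypothetical values): adding probe f2
 * with data d2 to an entry currently holding { {f1,d1}, {NULL} } yields a
 * new array { {f1,d1}, {f2,d2}, {NULL} } and refcount 2.  The old array is
 * returned so the caller can release it after an RCU grace period.
 */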
143
144static void *
145tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe,
146 void *data)
147{
148 int nr_probes = 0, nr_del = 0, i;
149 struct tracepoint_probe *old, *new;
150
151 old = entry->probes;
152
153 if (!old)
154 return ERR_PTR(-ENOENT);
155
156 debug_print_probes(entry);
157 /* (N -> M), (N > 1, M >= 0) probes */
158 for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
159 if (!probe ||
160 (old[nr_probes].func == probe &&
161 old[nr_probes].data == data))
162 nr_del++;
163 }
164
165 if (nr_probes - nr_del == 0) {
166 /* N -> 0, (N > 1) */
167 entry->probes = NULL;
168 entry->refcount = 0;
169 debug_print_probes(entry);
170 return old;
171 } else {
172 int j = 0;
173 /* N -> M, (N > 1, M > 0) */
174 /* + 1 for NULL */
175 new = allocate_probes(nr_probes - nr_del + 1);
176 if (new == NULL)
177 return ERR_PTR(-ENOMEM);
178 for (i = 0; old[i].func; i++)
179 if (probe &&
180 (old[i].func != probe || old[i].data != data))
181 new[j++] = old[i];
182 new[nr_probes - nr_del].func = NULL;
183 entry->refcount = nr_probes - nr_del;
184 entry->probes = new;
185 }
186 debug_print_probes(entry);
187 return old;
188}
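
/*
 * Worked example for the remove path (hypothetical values): removing f1
 * from { {f1,d1}, {f2,d2}, {NULL} } leaves { {f2,d2}, {NULL} } and
 * refcount 1.  Passing a NULL probe removes every callback and leaves the
 * entry disarmed (probes NULL, refcount 0).
 */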
189
190/*
191 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
192 * Must be called with tracepoints_mutex held.
193 * Returns NULL if not present.
194 */
195static struct tracepoint_entry *get_tracepoint(const char *name)
196{
197 struct cds_hlist_head *head;
198 struct cds_hlist_node *node;
199 struct tracepoint_entry *e;
200 u32 hash = jhash(name, strlen(name), 0);
201
202 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
203 cds_hlist_for_each_entry(e, node, head, hlist) {
204 if (!strcmp(name, e->name))
205 return e;
206 }
207 return NULL;
208}
209
210/*
211 * Add the tracepoint to the tracepoint hash table. Must be called with
212 * tracepoints_mutex held.
213 */
214static struct tracepoint_entry *add_tracepoint(const char *name)
215{
216 struct cds_hlist_head *head;
217 struct cds_hlist_node *node;
218 struct tracepoint_entry *e;
219 size_t name_len = strlen(name) + 1;
220 u32 hash = jhash(name, name_len-1, 0);
221
222 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
223 cds_hlist_for_each_entry(e, node, head, hlist) {
224 if (!strcmp(name, e->name)) {
225 DBG("tracepoint %s busy", name);
226 return ERR_PTR(-EEXIST); /* Already there */
227 }
228 }
229 /*
230 * Using zmalloc here to allocate a variable length element. Could
231 * cause some memory fragmentation if overused.
232 */
233 e = zmalloc(sizeof(struct tracepoint_entry) + name_len);
234 if (!e)
235 return ERR_PTR(-ENOMEM);
236 memcpy(&e->name[0], name, name_len);
237 e->probes = NULL;
238 e->refcount = 0;
239 cds_hlist_add_head(&e->hlist, head);
240 return e;
241}
242
243/*
244 * Remove the tracepoint from the tracepoint hash table. Must be called with
245 * tracepoints_mutex held.
246 */
247static inline void remove_tracepoint(struct tracepoint_entry *e)
248{
249 cds_hlist_del(&e->hlist);
250 free(e);
251}
252
253/*
254 * Sets the probe callback corresponding to one tracepoint.
255 */
256static void set_tracepoint(struct tracepoint_entry **entry,
257 struct tracepoint *elem, int active)
258{
259 WARN_ON(strcmp((*entry)->name, elem->name) != 0);
260
261 /*
262 * rcu_assign_pointer has a cmm_smp_wmb() which makes sure that the new
263 * probe callbacks array is consistent before setting a pointer to it.
264 * This array is referenced by __DO_TRACE from
265 * ust/tracepoint.h. A matching cmm_smp_read_barrier_depends() is used on
266 * the read side.
267 */
268 rcu_assign_pointer(elem->probes, (*entry)->probes);
269 elem->state = active;
270}
271
272/*
273 * Disable a tracepoint and its probe callback.
274 * Note: only waiting for an RCU grace period after setting elem->probes to
275 * NULL ensures that the original callback is not used anymore. This is
276 * guaranteed by the RCU read-side critical section around the call site.
277 */
278static void disable_tracepoint(struct tracepoint *elem)
279{
280 elem->state = 0;
281 rcu_assign_pointer(elem->probes, NULL);
282}
283
284/**
285 * tracepoint_update_probe_range - Update a probe range
286 * @begin: beginning of the range
287 * @end: end of the range
288 *
289 * Updates the probe callback corresponding to a range of tracepoints.
290 */
291void tracepoint_update_probe_range(struct tracepoint * const *begin,
292 struct tracepoint * const *end)
293{
294 struct tracepoint * const *iter;
295 struct tracepoint_entry *mark_entry;
296
297 pthread_mutex_lock(&tracepoints_mutex);
298 for (iter = begin; iter < end; iter++) {
299 if (!*iter)
300 continue; /* skip dummy */
301 if (!(*iter)->name) {
302 disable_tracepoint(*iter);
303 continue;
304 }
305 mark_entry = get_tracepoint((*iter)->name);
306 if (mark_entry) {
307 set_tracepoint(&mark_entry, *iter,
308 !!mark_entry->refcount);
309 } else {
310 disable_tracepoint(*iter);
311 }
312 }
313 pthread_mutex_unlock(&tracepoints_mutex);
314}
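
/*
 * Worked example (hypothetical name): once a probe has been registered on
 * "ust/my_event", the next update pass finds the "ust/my_event" entry in the
 * hash table and sets the matching struct tracepoint's probes pointer and
 * state to 1; tracepoints with no hash table entry are left (or put back)
 * in the disabled state.
 */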
315
316static void lib_update_tracepoints(void)
317{
318 struct tracepoint_lib *lib;
319
320//ust// pthread_mutex_lock(&module_mutex);
321 cds_list_for_each_entry(lib, &libs, list)
322 tracepoint_update_probe_range(lib->tracepoints_start,
323 lib->tracepoints_start + lib->tracepoints_count);
324//ust// pthread_mutex_unlock(&module_mutex);
325}
326
327/*
328 * Update probes, removing the faulty probes.
329 */
330static void tracepoint_update_probes(void)
331{
332 /* Core kernel tracepoints */
333//ust// tracepoint_update_probe_range(__start___tracepoints,
334//ust// __stop___tracepoints);
335 /* tracepoints in modules. */
336 lib_update_tracepoints();
337}
338
339static struct tracepoint_probe *
340tracepoint_add_probe(const char *name, void *probe, void *data)
341{
342 struct tracepoint_entry *entry;
343 struct tracepoint_probe *old;
344
345 entry = get_tracepoint(name);
346 if (!entry) {
347 entry = add_tracepoint(name);
348 if (IS_ERR(entry))
349 return (struct tracepoint_probe *)entry;
350 }
351 old = tracepoint_entry_add_probe(entry, probe, data);
352 if (IS_ERR(old) && !entry->refcount)
353 remove_tracepoint(entry);
354 return old;
355}
356
357/**
358 * __tracepoint_probe_register - Connect a probe to a tracepoint
359 * @name: tracepoint name
360 * @probe: probe handler
 * @data: probe private data passed to the handler
361 *
362 * Returns 0 if ok, error value on error.
363 * The probe address must at least be aligned on the architecture pointer size.
364 */
365int __tracepoint_probe_register(const char *name, void *probe, void *data)
366{
367 void *old;
368
369 pthread_mutex_lock(&tracepoints_mutex);
370 old = tracepoint_add_probe(name, probe, data);
371 pthread_mutex_unlock(&tracepoints_mutex);
372 if (IS_ERR(old))
373 return PTR_ERR(old);
374
375 tracepoint_update_probes(); /* may update entry */
376 release_probes(old);
377 return 0;
378}
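
/*
 * Usage sketch (illustrative; "ust/my_event" and my_probe() are made up, and
 * the exact probe signature is dictated by the tracepoint declaration; it is
 * assumed here to receive the private data followed by the event arguments):
 *
 *	static void my_probe(void *data, int value)
 *	{
 *		DBG("my_event hit: %d", value);
 *	}
 *
 *	int ret = __tracepoint_probe_register("ust/my_event", my_probe, NULL);
 *	if (ret)
 *		DBG("probe registration failed: %d", ret);
 *	...
 *	__tracepoint_probe_unregister("ust/my_event", my_probe, NULL);
 */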
379
380static void *tracepoint_remove_probe(const char *name, void *probe, void *data)
381{
382 struct tracepoint_entry *entry;
383 void *old;
384
385 entry = get_tracepoint(name);
386 if (!entry)
387 return ERR_PTR(-ENOENT);
388 old = tracepoint_entry_remove_probe(entry, probe, data);
389 if (IS_ERR(old))
390 return old;
391 if (!entry->refcount)
392 remove_tracepoint(entry);
393 return old;
394}
395
396/**
397 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
398 * @name: tracepoint name
399 * @probe: probe function pointer
400 * @data: probe data pointer
401 *
402 * We do not need to call a synchronize_sched to make sure the probes have
403 * finished running before doing a module unload, because the module unload
404 * itself uses stop_machine(), which ensures that every preempt-disabled
405 * section has finished.
406 */
407int __tracepoint_probe_unregister(const char *name, void *probe, void *data)
408{
409 void *old;
410
411 pthread_mutex_lock(&tracepoints_mutex);
412 old = tracepoint_remove_probe(name, probe, data);
413 pthread_mutex_unlock(&tracepoints_mutex);
414 if (IS_ERR(old))
415 return PTR_ERR(old);
416
417 tracepoint_update_probes(); /* may update entry */
418 release_probes(old);
419 return 0;
420}
421
422static CDS_LIST_HEAD(old_probes);
423static int need_update;
424
425static void tracepoint_add_old_probes(void *old)
426{
427 need_update = 1;
428 if (old) {
429 struct tp_probes *tp_probes = _ust_container_of(old,
430 struct tp_probes, probes[0]);
431 cds_list_add(&tp_probes->u.list, &old_probes);
432 }
433}
434
435/**
436 * tracepoint_probe_register_noupdate - register a probe without connecting it
437 * @name: tracepoint name
438 * @probe: probe handler
439 *
440 * The caller must then call tracepoint_probe_update_all().
441 */
442int tracepoint_probe_register_noupdate(const char *name, void *probe,
443 void *data)
444{
445 void *old;
446
447 pthread_mutex_lock(&tracepoints_mutex);
448 old = tracepoint_add_probe(name, probe, data);
449 if (IS_ERR(old)) {
450 pthread_mutex_unlock(&tracepoints_mutex);
451 return PTR_ERR(old);
452 }
453 tracepoint_add_old_probes(old);
454 pthread_mutex_unlock(&tracepoints_mutex);
455 return 0;
456}
457//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
458
459/**
460 * tracepoint_probe_unregister_noupdate - remove a probe without disconnecting it
461 * @name: tracepoint name
462 * @probe: probe function pointer
463 *
464 * The caller must then call tracepoint_probe_update_all().
465 */
466int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
467 void *data)
468{
469 void *old;
470
471 pthread_mutex_lock(&tracepoints_mutex);
472 old = tracepoint_remove_probe(name, probe, data);
473 if (IS_ERR(old)) {
474 pthread_mutex_unlock(&tracepoints_mutex);
475 return PTR_ERR(old);
476 }
477 tracepoint_add_old_probes(old);
478 pthread_mutex_unlock(&tracepoints_mutex);
479 return 0;
480}
481//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
482
483/**
484 * tracepoint_probe_update_all - update tracepoints
485 */
486void tracepoint_probe_update_all(void)
487{
488 CDS_LIST_HEAD(release_probes);
489 struct tp_probes *pos, *next;
490
491 pthread_mutex_lock(&tracepoints_mutex);
492 if (!need_update) {
493 pthread_mutex_unlock(&tracepoints_mutex);
494 return;
495 }
496 if (!cds_list_empty(&old_probes))
497 cds_list_replace_init(&old_probes, &release_probes);
498 need_update = 0;
499 pthread_mutex_unlock(&tracepoints_mutex);
500
501 tracepoint_update_probes();
502 cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
503 cds_list_del(&pos->u.list);
504//ust// call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
505 synchronize_rcu();
506 free(pos);
507 }
508}
509//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
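
/*
 * Batched usage sketch (illustrative, hypothetical names): the _noupdate
 * variants defer the tracepoint state update, so several probes can be
 * connected or disconnected with a single update pass and one batch of
 * deferred frees.
 *
 *	tracepoint_probe_register_noupdate("ust/event_a", probe_a, NULL);
 *	tracepoint_probe_register_noupdate("ust/event_b", probe_b, NULL);
 *	tracepoint_probe_update_all();
 */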
510
511/*
512 * Returns 0 if current not found.
513 * Returns 1 if current found.
514 */
515int lib_get_iter_tracepoints(struct tracepoint_iter *iter)
516{
517 struct tracepoint_lib *iter_lib;
518 int found = 0;
519
520//ust// pthread_mutex_lock(&module_mutex);
521 cds_list_for_each_entry(iter_lib, &libs, list) {
522 if (iter_lib < iter->lib)
523 continue;
524 else if (iter_lib > iter->lib)
525 iter->tracepoint = NULL;
526 found = tracepoint_get_iter_range(&iter->tracepoint,
527 iter_lib->tracepoints_start,
528 iter_lib->tracepoints_start + iter_lib->tracepoints_count);
529 if (found) {
530 iter->lib = iter_lib;
531 break;
532 }
533 }
534//ust// pthread_mutex_unlock(&module_mutex);
535 return found;
536}
537
538/**
539 * tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
540 * @tracepoint: current tracepoint (in), next tracepoint (out)
541 * @begin: beginning of the range
542 * @end: end of the range
543 *
544 * Returns whether a next tracepoint has been found (1) or not (0).
545 * Will return the first tracepoint in the range if the input tracepoint is
546 * NULL.
547 */
548int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
549 struct tracepoint * const *begin, struct tracepoint * const *end)
550{
551 if (!*tracepoint && begin != end)
552 *tracepoint = begin;
553 while (*tracepoint >= begin && *tracepoint < end) {
554 if (!**tracepoint)
555 (*tracepoint)++; /* skip dummy */
556 else
557 return 1;
558 }
559 return 0;
560}
561//ust// EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);
562
563static void tracepoint_get_iter(struct tracepoint_iter *iter)
564{
565 int found = 0;
566
567//ust// /* Core kernel tracepoints */
568//ust// if (!iter->module) {
569//ust// found = tracepoint_get_iter_range(&iter->tracepoint,
570//ust// __start___tracepoints, __stop___tracepoints);
571//ust// if (found)
572//ust// goto end;
573//ust// }
574 /* tracepoints in libs. */
575 found = lib_get_iter_tracepoints(iter);
576//ust// end:
577 if (!found)
578 tracepoint_iter_reset(iter);
579}
580
581void tracepoint_iter_start(struct tracepoint_iter *iter)
582{
583 tracepoint_get_iter(iter);
584}
585//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_start);
586
587void tracepoint_iter_next(struct tracepoint_iter *iter)
588{
589 iter->tracepoint++;
590 /*
591 * iter->tracepoint may be invalid because we blindly incremented it.
592 * Make sure it is valid by checking it against the tracepoint arrays,
593 * getting the tracepoints from the following libraries if necessary.
594 */
595 tracepoint_get_iter(iter);
596}
597//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_next);
598
599void tracepoint_iter_stop(struct tracepoint_iter *iter)
600{
601}
602//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
603
604void tracepoint_iter_reset(struct tracepoint_iter *iter)
605{
606//ust// iter->module = NULL;
607 iter->tracepoint = NULL;
608}
609//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
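
/*
 * Iterator usage sketch (illustrative; zero-initializing the iterator
 * before the first use is an assumption of this example):
 *
 *	struct tracepoint_iter iter;
 *
 *	memset(&iter, 0, sizeof(iter));
 *	for (tracepoint_iter_start(&iter); iter.tracepoint != NULL;
 *	     tracepoint_iter_next(&iter))
 *		DBG("tracepoint: %s", (*iter.tracepoint)->name);
 *	tracepoint_iter_stop(&iter);
 */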
610
611//ust// #ifdef CONFIG_MODULES
612
613//ust// int tracepoint_module_notify(struct notifier_block *self,
614//ust// unsigned long val, void *data)
615//ust// {
616//ust// struct module *mod = data;
617//ust//
618//ust// switch (val) {
619//ust// case MODULE_STATE_COMING:
620//ust// tracepoint_update_probe_range(mod->tracepoints,
621//ust// mod->tracepoints + mod->num_tracepoints);
622//ust// break;
623//ust// case MODULE_STATE_GOING:
624//ust// tracepoint_update_probe_range(mod->tracepoints,
625//ust// mod->tracepoints + mod->num_tracepoints);
626//ust// break;
627//ust// }
628//ust// return 0;
629//ust// }
630
631//ust// struct notifier_block tracepoint_module_nb = {
632//ust// .notifier_call = tracepoint_module_notify,
633//ust// .priority = 0,
634//ust// };
635
636//ust// static int init_tracepoints(void)
637//ust// {
638//ust// return register_module_notifier(&tracepoint_module_nb);
639//ust// }
640//ust// __initcall(init_tracepoints);
641
642//ust// #endif /* CONFIG_MODULES */
643
644static void (*new_tracepoint_cb)(struct tracepoint *) = NULL;
645
646void tracepoint_set_new_tracepoint_cb(void (*cb)(struct tracepoint *))
647{
648 new_tracepoint_cb = cb;
649}
650
651static void new_tracepoints(struct tracepoint * const *start, struct tracepoint * const *end)
652{
653 if (new_tracepoint_cb) {
654 struct tracepoint * const *t;
655
656 for(t = start; t < end; t++) {
657 if (*t)
658 new_tracepoint_cb(*t);
659 }
660 }
661}
662
663int tracepoint_register_lib(struct tracepoint * const *tracepoints_start, int tracepoints_count)
664{
665 struct tracepoint_lib *pl, *iter;
666
667 pl = (struct tracepoint_lib *) zmalloc(sizeof(struct tracepoint_lib));
 if (!pl)
 	return -ENOMEM;
668
669 pl->tracepoints_start = tracepoints_start;
670 pl->tracepoints_count = tracepoints_count;
671
672 /* FIXME: maybe protect this with its own mutex? */
673 pthread_mutex_lock(&tracepoints_mutex);
674 /*
675 * We sort the libs by struct lib pointer address.
676 */
677 cds_list_for_each_entry_reverse(iter, &libs, list) {
678 BUG_ON(iter == pl); /* Should never be in the list twice */
679 if (iter < pl) {
680 /* We belong to the location right after iter. */
681 cds_list_add(&pl->list, &iter->list);
682 goto lib_added;
683 }
684 }
685 /* We should be added at the head of the list */
686 cds_list_add(&pl->list, &libs);
687lib_added:
688 pthread_mutex_unlock(&tracepoints_mutex);
689
690 new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);
691
692 /* FIXME: update just the loaded lib */
693 lib_update_tracepoints();
694
695 /* tracepoints_count - 1: skip dummy */
696 DBG("just registered a tracepoints section from %p and having %d tracepoints (minus dummy tracepoints)", tracepoints_start, tracepoints_count);
697
698 return 0;
699}
700
701int tracepoint_unregister_lib(struct tracepoint * const *tracepoints_start)
702{
703 struct tracepoint_lib *lib;
704
705 pthread_mutex_lock(&tracepoints_mutex);
706
707 cds_list_for_each_entry(lib, &libs, list) {
708 if (lib->tracepoints_start == tracepoints_start) {
709 struct tracepoint_lib *lib2free = lib;
710 cds_list_del(&lib->list);
711 free(lib2free);
712 break;
713 }
714 }
715
716 pthread_mutex_unlock(&tracepoints_mutex);
717
718 return 0;
719}
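
/*
 * Library registration sketch (illustrative; the section symbol names and
 * the constructor/destructor glue are assumptions, the real code is emitted
 * by the tracepoint instrumentation headers): an instrumented library hands
 * its tracepoint pointer array to libust when it is loaded and takes it back
 * when it is unloaded.
 *
 *	extern struct tracepoint * const __start___tracepoints_ptrs[];
 *	extern struct tracepoint * const __stop___tracepoints_ptrs[];
 *
 *	static void __attribute__((constructor)) tp_lib_init(void)
 *	{
 *		tracepoint_register_lib(__start___tracepoints_ptrs,
 *			__stop___tracepoints_ptrs - __start___tracepoints_ptrs);
 *	}
 *
 *	static void __attribute__((destructor)) tp_lib_exit(void)
 *	{
 *		tracepoint_unregister_lib(__start___tracepoints_ptrs);
 *	}
 */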