#include <linux/slab.h>
#include "../ltt-events.h"
#include "../wrapper/ringbuffer/frontend_types.h"
+#include "../wrapper/vmalloc.h"
#include "../ltt-tracer.h"
static
return 0;
lib_ring_buffer_ctx_init(&ctx, chan->chan, NULL, sizeof(data),
ltt_alignof(data), -1);
- ret = chan->ops->event_reserve(&ctx);
+ ret = chan->ops->event_reserve(&ctx, event->id);
if (ret < 0)
return 0;
lib_ring_buffer_align_ctx(&ctx, ltt_alignof(data));
int lttng_create_kprobe_event(const char *name, struct ltt_event *event)
{
struct lttng_event_field *field;
+ struct lttng_event_desc *desc;
int ret;
- event->desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
- if (!event->desc)
+ desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
+ if (!desc)
return -ENOMEM;
- event->desc->name = kstrdup(name, GFP_KERNEL);
- if (!event->desc->name) {
+ desc->name = kstrdup(name, GFP_KERNEL);
+ if (!desc->name) {
ret = -ENOMEM;
goto error_str;
}
- event->desc->nr_fields = 1;
- event->desc->fields = field =
+ desc->nr_fields = 1;
+ desc->fields = field =
kzalloc(1 * sizeof(struct lttng_event_field), GFP_KERNEL);
+ if (!field) {
+ ret = -ENOMEM;
+ goto error_field;
+ }
field->name = "ip";
field->type.atype = atype_integer;
- field->type.u.basic.integer.size = sizeof(unsigned long);
- field->type.u.basic.integer.alignment = ltt_alignof(unsigned long);
+ field->type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
+ field->type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
field->type.u.basic.integer.signedness = 0;
field->type.u.basic.integer.reverse_byte_order = 0;
+ field->type.u.basic.integer.base = 16;
+ field->type.u.basic.integer.encoding = lttng_encode_none;
+ event->desc = desc;
return 0;
+error_field:
+ kfree(desc->name);
error_str:
- kfree(event->desc);
+ kfree(desc);
return ret;
}
memset(&event->u.kprobe.kp, 0, sizeof(event->u.kprobe.kp));
event->u.kprobe.kp.pre_handler = lttng_kprobes_handler_pre;
event->u.kprobe.symbol_name =
- kzalloc(LTTNG_KPROBE_SYM_NAME_LEN * sizeof(char),
+ kzalloc(LTTNG_SYM_NAME_LEN * sizeof(char),
GFP_KERNEL);
if (!event->u.kprobe.symbol_name) {
ret = -ENOMEM;
goto name_error;
}
memcpy(event->u.kprobe.symbol_name, symbol_name,
- LTTNG_KPROBE_SYM_NAME_LEN * sizeof(char));
+ LTTNG_SYM_NAME_LEN * sizeof(char));
event->u.kprobe.kp.symbol_name =
event->u.kprobe.symbol_name;
event->u.kprobe.kp.offset = offset;
event->u.kprobe.kp.addr = (void *) addr;
+
+ /*
+ * Ensure the memory we just allocated doesn't trigger page faults.
+ * Well, kprobes itself puts the page fault handler on the blacklist,
+ * but we can never be too careful.
+ */
+ wrapper_vmalloc_sync_all();
+
ret = register_kprobe(&event->u.kprobe.kp);
if (ret)
goto register_error;
register_error:
kfree(event->u.kprobe.symbol_name);
name_error:
+ kfree(event->desc->fields);
kfree(event->desc->name);
kfree(event->desc);
error:
{
unregister_kprobe(&event->u.kprobe.kp);
kfree(event->u.kprobe.symbol_name);
+ kfree(event->desc->fields);
kfree(event->desc->name);
kfree(event->desc);
}