X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lttng-context-perf-counters.c;h=3ae2266f948bd8a747064b6f29615d325343df5a;hb=d0255731fe93baede90f26d19aa4388403973554;hp=005c651039e045fb6cb34142b5f8eb355497b515;hpb=2001023e1ab9ae4bf85cad1dfb4c9aefb2e40dcf;p=lttng-modules.git

diff --git a/lttng-context-perf-counters.c b/lttng-context-perf-counters.c
index 005c6510..3ae2266f 100644
--- a/lttng-context-perf-counters.c
+++ b/lttng-context-perf-counters.c
@@ -15,6 +15,7 @@
 #include "ltt-events.h"
 #include "wrapper/ringbuffer/frontend_types.h"
 #include "wrapper/vmalloc.h"
+#include "wrapper/perf.h"
 #include "ltt-tracer.h"
 
 static
@@ -37,8 +38,12 @@ void perf_counter_record(struct lttng_ctx_field *field,
 
 	event = field->u.perf_counter->e[ctx->cpu];
 	if (likely(event)) {
-		event->pmu->read(event);
-		value = local64_read(&event->count);
+		if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
+			value = 0;
+		} else {
+			event->pmu->read(event);
+			value = local64_read(&event->count);
+		}
 	} else {
 		/*
 		 * Perf chooses not to be clever and not to support enabling a
@@ -53,12 +58,21 @@ void perf_counter_record(struct lttng_ctx_field *field,
 	chan->ops->event_write(ctx, &value, sizeof(value));
 }
 
+#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
+static
+void overflow_callback(struct perf_event *event,
+		       struct perf_sample_data *data,
+		       struct pt_regs *regs)
+{
+}
+#else
 static
 void overflow_callback(struct perf_event *event, int nmi,
 		       struct perf_sample_data *data,
 		       struct pt_regs *regs)
 {
 }
+#endif
 
 static
 void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
@@ -110,10 +124,14 @@ int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		pevent = perf_event_create_kernel_counter(attr,
+		pevent = wrapper_perf_event_create_kernel_counter(attr,
 				cpu, NULL, overflow_callback);
 		if (!pevent || IS_ERR(pevent))
 			return NOTIFY_BAD;
+		if (pevent->state == PERF_EVENT_STATE_ERROR) {
+			perf_event_release_kernel(pevent);
+			return NOTIFY_BAD;
+		}
 		barrier();	/* Create perf counter before setting event */
 		events[cpu] = pevent;
 		break;
@@ -194,12 +212,16 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
-		events[cpu] = perf_event_create_kernel_counter(attr,
+		events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
 					cpu, NULL, overflow_callback);
 		if (!events[cpu] || IS_ERR(events[cpu])) {
 			ret = -EINVAL;
 			goto counter_error;
 		}
+		if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
+			ret = -EBUSY;
+			goto counter_busy;
+		}
 	}
 	put_online_cpus();
 
@@ -207,9 +229,9 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 
 	field->event_field.name = name_alloc;
 	field->event_field.type.atype = atype_integer;
-	field->event_field.type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
-	field->event_field.type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
-	field->event_field.type.u.basic.integer.signedness = is_signed_type(unsigned long);
+	field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
+	field->event_field.type.u.basic.integer.alignment = ltt_alignof(uint64_t) * CHAR_BIT;
+	field->event_field.type.u.basic.integer.signedness = is_signed_type(uint64_t);
 	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
 	field->event_field.type.u.basic.integer.base = 10;
 	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
@@ -221,6 +243,7 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 	wrapper_vmalloc_sync_all();
 	return 0;
 
+counter_busy:
 counter_error:
 	for_each_online_cpu(cpu) {
 		if (events[cpu] && !IS_ERR(events[cpu]))
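
Note: the hunks above route counter creation through wrapper_perf_event_create_kernel_counter() from the newly included "wrapper/perf.h", a header that is not part of this diff. As a rough, hypothetical sketch (not the actual lttng-modules header), such a wrapper could select the right perf_event_create_kernel_counter() prototype using the same kernel-version test applied to overflow_callback() above, since around Linux 3.1 the nmi parameter was dropped from overflow handlers and the creation call gained a trailing context argument:

/*
 * Hypothetical sketch of wrapper/perf.h, assuming its only job is to hide
 * the perf_event_create_kernel_counter() prototype change around Linux 3.1.
 */
#include <linux/perf_event.h>
#include <linux/version.h>

#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
static inline struct perf_event *
wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback)
{
	/* 3.1+ creation API takes an extra context pointer; pass NULL. */
	return perf_event_create_kernel_counter(attr, cpu, task, callback, NULL);
}
#else
static inline struct perf_event *
wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback)
{
	/* Pre-3.1 creation API has no context argument. */
	return perf_event_create_kernel_counter(attr, cpu, task, callback);
}
#endif

With a wrapper along these lines, the callers in lttng_perf_counter_cpu_hp_callback() and lttng_add_perf_counter_to_ctx() can keep passing (attr, cpu, NULL, overflow_callback) unchanged regardless of kernel version, as the diff shows.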