lttng-modules.git / lttng-context-perf-counters.c
Perf counter support: check if busy
/*
 * (C) Copyright 2009-2011 -
 *              Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/cpu.h>          /* get_online_cpus(), CPU hotplug notifiers */
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"

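/*
 * Size of the context field in the event payload: alignment padding up to
 * the natural alignment of uint64_t, plus the 8-byte counter value itself.
 */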
static
size_t perf_counter_get_size(size_t offset)
{
        size_t size = 0;

        size += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
        size += sizeof(uint64_t);
        return size;
}

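/*
 * Record the current value of the local CPU's perf counter into the event
 * payload as an aligned uint64_t. A counter in error state (e.g. a pinned
 * counter that lost the PMU), or one not yet created for this cpu, records 0.
 */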
static
void perf_counter_record(struct lttng_ctx_field *field,
                         struct lib_ring_buffer_ctx *ctx,
                         struct ltt_channel *chan)
{
        struct perf_event *event;
        uint64_t value;

        event = field->u.perf_counter->e[ctx->cpu];
        if (likely(event)) {
                if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
                        value = 0;
                } else {
                        event->pmu->read(event);
                        value = local64_read(&event->count);
                }
        } else {
                /*
                 * Perf chooses not to be clever and not to support enabling a
                 * perf counter before the cpu is brought up. Therefore, we
                 * need to support having events coming (e.g. scheduler events)
                 * before the counter is set up. Write an arbitrary 0 in this
                 * case.
                 */
                value = 0;
        }
        lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
        chan->ops->event_write(ctx, &value, sizeof(value));
}

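/*
 * Dummy overflow handler, required by perf_event_create_kernel_counter().
 * The counter value is read synchronously from perf_counter_record(), so
 * overflow notifications are unused.
 */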
static
void overflow_callback(struct perf_event *event, int nmi,
                       struct perf_sample_data *data,
                       struct pt_regs *regs)
{
}

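/*
 * Tear down a perf counter context field: release the per-cpu counters,
 * unregister the hotplug notifier, and free all associated memory.
 */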
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
        struct perf_event **events = field->u.perf_counter->e;
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                perf_event_release_kernel(events[cpu]);
        put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
        kfree(field->event_field.name);
        kfree(field->u.perf_counter->attr);
        kfree(events);
        kfree(field->u.perf_counter);
}

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can set up perf counters only once the cpu is online (up prepare seems
 * to be too soon).
 */
static
int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
                                                 unsigned long action,
                                                 void *hcpu)
{
        unsigned int cpu = (unsigned long) hcpu;
        struct lttng_perf_counter_field *perf_field =
                container_of(nb, struct lttng_perf_counter_field, nb);
        struct perf_event **events = perf_field->e;
        struct perf_event_attr *attr = perf_field->attr;
        struct perf_event *pevent;

        if (!perf_field->hp_enable)
                return NOTIFY_OK;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                /* The cpu is up: create its counter now. */
                pevent = perf_event_create_kernel_counter(attr,
                                cpu, NULL, overflow_callback);
                if (!pevent || IS_ERR(pevent))
                        return NOTIFY_BAD;
                if (pevent->state == PERF_EVENT_STATE_ERROR) {
                        perf_event_release_kernel(pevent);
                        return NOTIFY_BAD;
                }
                barrier();      /* Create perf counter before setting event */
                events[cpu] = pevent;
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                /* The cpu is going away: release its counter. */
                pevent = events[cpu];
                events[cpu] = NULL;
                barrier();      /* NULLify event before perf counter teardown */
                perf_event_release_kernel(pevent);
                break;
        }
        return NOTIFY_OK;
}

#endif

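/*
 * Add a perf counter as a context field to all events of @ctx: allocate the
 * per-cpu counter array, create one pinned kernel counter per online cpu,
 * and register a hotplug notifier so cpus coming online later get a counter
 * too. Returns 0 on success, -EEXIST if a context field with the same name
 * already exists, -EBUSY if the PMU cannot schedule the counter, or another
 * negative errno on failure.
 *
 * Usage sketch (hypothetical call site, not part of this file), counting
 * CPU cycles with the standard perf ABI enums:
 *
 *      ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *                      PERF_COUNT_HW_CPU_CYCLES,
 *                      "perf_cpu_cycles", &chan->ctx);
 */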
int lttng_add_perf_counter_to_ctx(uint32_t type,
                                  uint64_t config,
                                  const char *name,
                                  struct lttng_ctx **ctx)
{
        struct lttng_ctx_field *field;
        struct lttng_perf_counter_field *perf_field;
        struct perf_event **events;
        struct perf_event_attr *attr;
        int ret;
        int cpu;
        char *name_alloc;

        /*
         * The array is indexed by cpu id, and cpu ids may be sparse, so size
         * it by nr_cpu_ids rather than num_possible_cpus().
         */
        events = kzalloc(nr_cpu_ids * sizeof(*events), GFP_KERNEL);
        if (!events)
                return -ENOMEM;

        attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
        if (!attr) {
                ret = -ENOMEM;
                goto error_attr;
        }

        attr->type = type;
        attr->config = config;
        attr->size = sizeof(struct perf_event_attr);
        /*
         * Pinned: if the counter cannot be scheduled on the PMU (e.g. the
         * PMU is busy), it goes to PERF_EVENT_STATE_ERROR instead of being
         * multiplexed, which we detect below and report as -EBUSY.
         */
        attr->pinned = 1;
        attr->disabled = 0;

        perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
        if (!perf_field) {
                ret = -ENOMEM;
                goto error_alloc_perf_field;
        }
        perf_field->e = events;
        perf_field->attr = attr;

        name_alloc = kstrdup(name, GFP_KERNEL);
        if (!name_alloc) {
                ret = -ENOMEM;
                goto name_alloc_error;
        }

        field = lttng_append_context(ctx);
        if (!field) {
                ret = -ENOMEM;
                goto append_context_error;
        }
        if (lttng_find_context(*ctx, name_alloc)) {
                ret = -EEXIST;
                goto find_error;
        }

#ifdef CONFIG_HOTPLUG_CPU
        perf_field->nb.notifier_call =
                lttng_perf_counter_cpu_hp_callback;
        perf_field->nb.priority = 0;
        register_cpu_notifier(&perf_field->nb);
#endif

        get_online_cpus();
        for_each_online_cpu(cpu) {
                events[cpu] = perf_event_create_kernel_counter(attr,
                                cpu, NULL, overflow_callback);
                if (!events[cpu] || IS_ERR(events[cpu])) {
                        ret = -EINVAL;
                        goto counter_error;
                }
                if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
                        ret = -EBUSY;
                        goto counter_busy;
                }
        }
        put_online_cpus();

        field->destroy = lttng_destroy_perf_counter_field;

        field->event_field.name = name_alloc;
        field->event_field.type.atype = atype_integer;
        /* The recorded value is a uint64_t (see perf_counter_record()). */
        field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
        field->event_field.type.u.basic.integer.alignment = ltt_alignof(uint64_t) * CHAR_BIT;
        field->event_field.type.u.basic.integer.signedness = is_signed_type(uint64_t);
        field->event_field.type.u.basic.integer.reverse_byte_order = 0;
        field->event_field.type.u.basic.integer.base = 10;
        field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
        field->get_size = perf_counter_get_size;
        field->record = perf_counter_record;
        field->u.perf_counter = perf_field;
        perf_field->hp_enable = 1;

        /* Sync module/vmalloc mappings so tracing probes do not fault. */
        wrapper_vmalloc_sync_all();
        return 0;

counter_busy:
counter_error:
        /* Release the counters created so far, skipping NULL/error slots. */
        for_each_online_cpu(cpu) {
                if (events[cpu] && !IS_ERR(events[cpu]))
                        perf_event_release_kernel(events[cpu]);
        }
        put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&perf_field->nb);
#endif
find_error:
        lttng_remove_context_field(ctx, field);
append_context_error:
        kfree(name_alloc);
name_alloc_error:
        kfree(perf_field);
error_alloc_perf_field:
        kfree(attr);
error_attr:
        kfree(events);
        return ret;
}

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");