/*
 * (C) Copyright 2009-2011 -
 *      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"

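/*
 * Reserve space in the ring buffer for one counter sample: alignment
 * padding (if any) up to the natural alignment of uint64_t, plus the
 * 64-bit counter value itself.
 */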
static
size_t perf_counter_get_size(size_t offset)
{
        size_t size = 0;

        size += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
        size += sizeof(uint64_t);
        return size;
}

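/*
 * Record callback: read the current value of this cpu's counter and
 * write it into the event's context fields in the ring buffer.
 */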
static
void perf_counter_record(struct lttng_ctx_field *field,
                         struct lib_ring_buffer_ctx *ctx,
                         struct ltt_channel *chan)
{
        struct perf_event *event;
        uint64_t value;

        event = field->u.perf_counter->e[ctx->cpu];
        if (likely(event)) {
                event->pmu->read(event);
                value = local64_read(&event->count);
        } else {
                /*
                 * Perf chooses not to be clever and not to support enabling a
                 * perf counter before the cpu is brought up. Therefore, we
                 * need to support having events coming (e.g. scheduler events)
                 * before the counter is set up. Write an arbitrary 0 in this
                 * case.
                 */
                value = 0;
        }
        lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
        chan->ops->event_write(ctx, &value, sizeof(value));
}

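/*
 * Dummy overflow handler passed to perf_event_create_kernel_counter():
 * we read counter values directly and never use overflow sampling, so
 * there is nothing to do here.
 */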
static
void overflow_callback(struct perf_event *event, int nmi,
                       struct perf_sample_data *data,
                       struct pt_regs *regs)
{
}

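/*
 * Tear down the context field: release each online cpu's counter,
 * unregister the hotplug notifier, then free all allocations.
 */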
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
        struct perf_event **events = field->u.perf_counter->e;
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                perf_event_release_kernel(events[cpu]);
        put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
        kfree(field->event_field.name);
        kfree(field->u.perf_counter->attr);
        kfree(events);
        kfree(field->u.perf_counter);
}

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can set up perf counters when the cpu is online (CPU_UP_PREPARE seems
 * to be too soon).
 */
static
int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
                                                 unsigned long action,
                                                 void *hcpu)
{
        unsigned int cpu = (unsigned long) hcpu;
        struct lttng_perf_counter_field *perf_field =
                container_of(nb, struct lttng_perf_counter_field, nb);
        struct perf_event **events = perf_field->e;
        struct perf_event_attr *attr = perf_field->attr;
        struct perf_event *pevent;

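        /* Ignore hotplug notifications until the field setup is complete. */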
        if (!perf_field->hp_enable)
                return NOTIFY_OK;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                pevent = perf_event_create_kernel_counter(attr,
                                cpu, NULL, overflow_callback);
                if (!pevent || IS_ERR(pevent))
                        return NOTIFY_BAD;
                barrier();      /* Create perf counter before setting event */
                events[cpu] = pevent;
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                pevent = events[cpu];
                events[cpu] = NULL;
                barrier();      /* NULLify event before perf counter teardown */
                perf_event_release_kernel(pevent);
                break;
        }
        return NOTIFY_OK;
}

#endif

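/**
 * lttng_add_perf_counter_to_ctx - append a perf counter field to a context
 * @type: perf event type (e.g. PERF_TYPE_HARDWARE)
 * @config: perf event configuration (e.g. PERF_COUNT_HW_CPU_CYCLES)
 * @name: name under which the field appears in the trace
 * @ctx: context the field is appended to
 *
 * Creates one kernel perf counter per online cpu, registers a cpu hotplug
 * notifier to follow cpus coming online or going offline, and appends a
 * 64-bit integer field to @ctx, recorded by perf_counter_record().
 *
 * Hypothetical usage sketch (caller-side names are illustrative only):
 *
 *      ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *                      PERF_COUNT_HW_CPU_CYCLES, "cpu_cycles", &ctx);
 *
 * Returns 0 on success, a negative error value on error.
 */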
int lttng_add_perf_counter_to_ctx(uint32_t type,
                                  uint64_t config,
                                  const char *name,
                                  struct lttng_ctx **ctx)
{
        struct lttng_ctx_field *field;
        struct lttng_perf_counter_field *perf_field;
        struct perf_event **events;
        struct perf_event_attr *attr;
        int ret;
        int cpu;
        char *name_alloc;

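        /*
         * One event pointer per possible cpu; a slot stays NULL until a
         * counter is created for that cpu.
         */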
        events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
        if (!events)
                return -ENOMEM;

        attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
        if (!attr) {
                ret = -ENOMEM;
                goto error_attr;
        }

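        /*
         * Pinned counter, enabled from creation: it stays scheduled on the
         * PMU so pmu->read() in the record callback always sees it.
         */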
        attr->type = type;
        attr->config = config;
        attr->size = sizeof(struct perf_event_attr);
        attr->pinned = 1;
        attr->disabled = 0;

        perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
        if (!perf_field) {
                ret = -ENOMEM;
                goto error_alloc_perf_field;
        }
        perf_field->e = events;
        perf_field->attr = attr;

        name_alloc = kstrdup(name, GFP_KERNEL);
        if (!name_alloc) {
                ret = -ENOMEM;
                goto name_alloc_error;
        }

        field = lttng_append_context(ctx);
        if (!field) {
                ret = -ENOMEM;
                goto append_context_error;
        }
        if (lttng_find_context(*ctx, name_alloc)) {
                ret = -EEXIST;
                goto find_error;
        }

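        /*
         * Register the notifier before creating the counters so that no cpu
         * transition is missed; it stays inert until hp_enable is set below.
         */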
#ifdef CONFIG_HOTPLUG_CPU
        perf_field->nb.notifier_call =
                lttng_perf_counter_cpu_hp_callback;
        perf_field->nb.priority = 0;
        register_cpu_notifier(&perf_field->nb);
#endif

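        /*
         * Create a counter on each cpu currently online. get_online_cpus()
         * holds off hotplug while we iterate.
         */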
        get_online_cpus();
        for_each_online_cpu(cpu) {
                events[cpu] = perf_event_create_kernel_counter(attr,
                                cpu, NULL, overflow_callback);
                if (!events[cpu] || IS_ERR(events[cpu])) {
                        ret = -EINVAL;
                        goto counter_error;
                }
        }
        put_online_cpus();

        field->destroy = lttng_destroy_perf_counter_field;

        field->event_field.name = name_alloc;
        field->event_field.type.atype = atype_integer;
        /* The recorded value is always a uint64_t (see perf_counter_record()). */
        field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
        field->event_field.type.u.basic.integer.alignment = ltt_alignof(uint64_t) * CHAR_BIT;
        field->event_field.type.u.basic.integer.signedness = is_signed_type(uint64_t);
        field->event_field.type.u.basic.integer.reverse_byte_order = 0;
        field->event_field.type.u.basic.integer.base = 10;
        field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
        field->get_size = perf_counter_get_size;
        field->record = perf_counter_record;
        field->u.perf_counter = perf_field;
        perf_field->hp_enable = 1;

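        /*
         * The perf counter context info needs to be at fixed addresses:
         * sync the module's vmalloc mappings into all page tables so
         * tracing probes never fault on this memory.
         */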
        wrapper_vmalloc_sync_all();
        return 0;

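/* Error paths: unwind the setup steps in reverse order. */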
counter_error:
        for_each_online_cpu(cpu) {
                if (events[cpu] && !IS_ERR(events[cpu]))
                        perf_event_release_kernel(events[cpu]);
        }
        put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&perf_field->nb);
#endif
find_error:
        lttng_remove_context_field(ctx, field);
append_context_error:
        kfree(name_alloc);
name_alloc_error:
        kfree(perf_field);
error_alloc_perf_field:
        kfree(attr);
error_attr:
        kfree(events);
        return ret;
}

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");