Add system calls definitions for powerpc-32
[lttng-modules.git] / lttng-context-perf-counters.c
CommitLineData
833ad6a0 1/*
886d51a3 2 * lttng-context-perf-counters.c
833ad6a0
MD
3 *
4 * LTTng performance monitoring counters (perf-counters) integration module.
5 *
886d51a3
MD
6 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
833ad6a0
MD
21 */
22
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/perf_event.h>
26#include <linux/list.h>
c24a0d71 27#include <linux/string.h>
a90917c3 28#include "lttng-events.h"
c24a0d71
MD
29#include "wrapper/ringbuffer/frontend_types.h"
30#include "wrapper/vmalloc.h"
90f5546c 31#include "wrapper/perf.h"
a90917c3 32#include "lttng-tracer.h"
833ad6a0 33
f1676205
MD
34static
35size_t perf_counter_get_size(size_t offset)
36{
37 size_t size = 0;
38
a90917c3 39 size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
f1676205
MD
40 size += sizeof(uint64_t);
41 return size;
42}
43
833ad6a0
MD
44static
45void perf_counter_record(struct lttng_ctx_field *field,
46 struct lib_ring_buffer_ctx *ctx,
a90917c3 47 struct lttng_channel *chan)
833ad6a0
MD
48{
49 struct perf_event *event;
50 uint64_t value;
51
2001023e 52 event = field->u.perf_counter->e[ctx->cpu];
0478c519 53 if (likely(event)) {
7b745a96
MD
54 if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
55 value = 0;
56 } else {
57 event->pmu->read(event);
58 value = local64_read(&event->count);
59 }
f91fd73b
MD
60 } else {
61 /*
62 * Perf chooses not to be clever and not to support enabling a
63 * perf counter before the cpu is brought up. Therefore, we need
64 * to support having events coming (e.g. scheduler events)
65 * before the counter is setup. Write an arbitrary 0 in this
66 * case.
67 */
68 value = 0;
69 }
a90917c3 70 lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
833ad6a0
MD
71 chan->ops->event_write(ctx, &value, sizeof(value));
72}
73
90f5546c
MD
/*
 * Dummy overflow handler: creating a kernel counter requires a callback,
 * but LTTng reads the counter value synchronously in perf_counter_record()
 * and ignores overflow notifications, so the body is intentionally empty.
 *
 * The callback signature lost its "nmi" argument around kernel 3.1 (hence
 * the 3.0.99 version gate below); both prototypes are provided.
 */
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
static
void overflow_callback(struct perf_event *event,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#endif
833ad6a0 89
2dccf128
MD
/*
 * Tear down a perf-counter context field: release every per-cpu perf
 * event, unregister the hotplug notifier, then free all memory allocated
 * by lttng_add_perf_counter_to_ctx().
 */
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct perf_event **events = field->u.perf_counter->e;
	int cpu;

	/* Hold the hotplug lock so the online cpu set is stable while
	 * each per-cpu counter is released. */
	get_online_cpus();
	for_each_online_cpu(cpu)
		perf_event_release_kernel(events[cpu]);
	put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * NOTE(review): the notifier is unregistered only after the hotplug
	 * lock is dropped, and hp_enable is still set at that point — verify
	 * that a CPU coming online in this window cannot repopulate events[]
	 * right before it is freed below.
	 */
	unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
	kfree(field->event_field.name);		/* kstrdup()'d at setup */
	kfree(field->u.perf_counter->attr);
	kfree(events);
	kfree(field->u.perf_counter);
}
108
8289661d
MD
#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can setup perf counters when the cpu is online (up prepare seems to be too
 * soon).
 */
static
int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	/*
	 * hp_enable is only set at the very end of
	 * lttng_add_perf_counter_to_ctx(); ignore hotplug events until the
	 * field is fully initialized (and after teardown starts).
	 */
	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		/* Mirror the per-cpu counter creation done at setup time. */
		pevent = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		/*
		 * Compiler barrier: make the counter fully created before
		 * publishing it, since perf_counter_record() reads
		 * events[cpu] without locking.
		 */
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif
165
833ad6a0
MD
/**
 * lttng_add_perf_counter_to_ctx - append a perf counter field to a context
 * @type: perf event attribute type (e.g. hardware/software event class)
 * @config: perf event attribute config selecting the specific counter
 * @name: field name, duplicated; must be unique within @ctx
 * @ctx: context to append the field to
 *
 * Allocates one pinned, enabled perf event per possible cpu, registers a
 * CPU hotplug notifier so counters follow cpus coming and going, and wires
 * the field's get_size/record/destroy callbacks. The recorded field is an
 * unsigned 64-bit integer (base 10, native byte order).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EEXIST if a field
 * named @name already exists in @ctx, -EINVAL if a counter cannot be
 * created, -EBUSY if a created counter is in error state. On error, all
 * partially-acquired resources are released via the goto chain below.
 */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	int cpu;
	char *name_alloc;

	/*
	 * NOTE(review): the array is sized by num_possible_cpus() but
	 * indexed by cpu id — confirm cpu ids are dense here, otherwise
	 * nr_cpu_ids would be the safe size on sparse possible masks.
	 */
	events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events)
		return -ENOMEM;

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

	/* Pinned and enabled: the counter always counts once created. */
	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;

	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}

	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	/* Reject duplicate field names within this context. */
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Register the notifier before creating the counters; it stays
	 * inert until hp_enable is set at the end of this function.
	 */
	perf_field->nb.notifier_call =
		lttng_perf_counter_cpu_hp_callback;
	perf_field->nb.priority = 0;
	register_cpu_notifier(&perf_field->nb);
#endif

	/* Create one counter per currently-online cpu under the hotplug lock. */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!events[cpu] || IS_ERR(events[cpu])) {
			ret = -EINVAL;
			goto counter_error;
		}
		if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
			ret = -EBUSY;
			goto counter_busy;
		}
	}
	put_online_cpus();

	field->destroy = lttng_destroy_perf_counter_field;

	/* Describe the field to tracers: unsigned 64-bit integer, base 10. */
	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->u.perf_counter = perf_field;
	/* Arm the hotplug callback only once everything is in place. */
	perf_field->hp_enable = 1;

	wrapper_vmalloc_sync_all();
	return 0;

	/* Unwind in reverse acquisition order; labels fall through downward. */
counter_busy:
counter_error:
	/* Still holding the hotplug lock from the creation loop above. */
	for_each_online_cpu(cpu) {
		if (events[cpu] && !IS_ERR(events[cpu]))
			perf_event_release_kernel(events[cpu]);
	}
	put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&perf_field->nb);
#endif
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	kfree(name_alloc);
name_alloc_error:
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	kfree(events);
	return ret;
}
281
833ad6a0
MD
282MODULE_LICENSE("GPL and additional rights");
283MODULE_AUTHOR("Mathieu Desnoyers");
284MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");
This page took 0.040874 seconds and 4 git commands to generate.