Merge branch 'master' of ssh://git.lttng.org/home/git/lttng-modules
[lttng-modules.git] / lttng-context-perf-counters.c
1 /*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng performance monitoring counters (perf-counters) integration module.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/err.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"
19
20 static
21 size_t perf_counter_get_size(size_t offset)
22 {
23 size_t size = 0;
24
25 size += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
26 size += sizeof(uint64_t);
27 return size;
28 }
29
30 static
31 void perf_counter_record(struct lttng_ctx_field *field,
32 struct lib_ring_buffer_ctx *ctx,
33 struct ltt_channel *chan)
34 {
35 struct perf_event *event;
36 uint64_t value;
37
38 event = field->u.perf_counter.e[ctx->cpu];
39 if (likely(event)) {
40 event->pmu->read(event);
41 value = local64_read(&event->count);
42 } else {
43 /*
44 * Perf chooses not to be clever and not to support enabling a
45 * perf counter before the cpu is brought up. Therefore, we need
46 * to support having events coming (e.g. scheduler events)
47 * before the counter is setup. Write an arbitrary 0 in this
48 * case.
49 */
50 value = 0;
51 }
52 lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
53 chan->ops->event_write(ctx, &value, sizeof(value));
54 }
55
/*
 * Intentionally empty overflow handler: counters are read on demand by
 * perf_counter_record(), so sample/overflow notifications are ignored.
 * A handler is still required by perf_event_create_kernel_counter().
 */
static
void overflow_callback(struct perf_event *event, int nmi,
		       struct perf_sample_data *data,
		       struct pt_regs *regs)
{
}
62
63 static
64 void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
65 {
66 struct perf_event **events = field->u.perf_counter.e;
67 int cpu;
68
69 get_online_cpus();
70 for_each_online_cpu(cpu)
71 perf_event_release_kernel(events[cpu]);
72 put_online_cpus();
73 #ifdef CONFIG_HOTPLUG_CPU
74 unregister_cpu_notifier(&field->u.perf_counter.nb);
75 #endif
76 kfree(field->event_field.name);
77 kfree(field->u.perf_counter.attr);
78 kfree(events);
79 }
80
#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can setup perf counters when the cpu is online (up prepare seems to be too
 * soon).
 */
static
int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_ctx_field *field =
		container_of(nb, struct lttng_ctx_field, u.perf_counter.nb);
	struct perf_event **events = field->u.perf_counter.e;
	struct perf_event_attr *attr = field->u.perf_counter.attr;
	struct perf_event *pevent;

	/* Ignore notifications until the per-cpu events are fully set up. */
	if (!field->u.perf_counter.hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		/*
		 * perf_event_create_kernel_counter() reports failure
		 * with an ERR_PTR, never NULL: checking only !pevent
		 * would store the ERR_PTR and crash on first use.
		 */
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif
133
134 int lttng_add_perf_counter_to_ctx(uint32_t type,
135 uint64_t config,
136 const char *name,
137 struct lttng_ctx **ctx)
138 {
139 struct lttng_ctx_field *field;
140 struct perf_event **events;
141 struct perf_event_attr *attr;
142 int ret;
143 int cpu;
144 char *name_alloc;
145
146 events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
147 if (!events)
148 return -ENOMEM;
149
150 attr = kzalloc(sizeof(*field->u.perf_counter.attr), GFP_KERNEL);
151 if (!attr) {
152 ret = -ENOMEM;
153 goto error_attr;
154 }
155
156 attr->type = type;
157 attr->config = config;
158 attr->size = sizeof(struct perf_event_attr);
159 attr->pinned = 1;
160 attr->disabled = 0;
161
162 name_alloc = kstrdup(name, GFP_KERNEL);
163 if (!name_alloc) {
164 ret = -ENOMEM;
165 goto name_alloc_error;
166 }
167
168 field = lttng_append_context(ctx);
169 if (!field) {
170 ret = -ENOMEM;
171 goto append_context_error;
172 }
173
174 #ifdef CONFIG_HOTPLUG_CPU
175 field->u.perf_counter.nb.notifier_call =
176 lttng_perf_counter_cpu_hp_callback;
177 field->u.perf_counter.nb.priority = 0;
178 register_cpu_notifier(&field->u.perf_counter.nb);
179 #endif
180
181 get_online_cpus();
182 for_each_online_cpu(cpu) {
183 events[cpu] = perf_event_create_kernel_counter(attr,
184 cpu, NULL, overflow_callback);
185 if (!events[cpu]) {
186 ret = -EINVAL;
187 goto counter_error;
188 }
189 }
190 put_online_cpus();
191
192 field->destroy = lttng_destroy_perf_counter_field;
193
194 field->event_field.name = name_alloc;
195 field->event_field.type.atype = atype_integer;
196 field->event_field.type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
197 field->event_field.type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
198 field->event_field.type.u.basic.integer.signedness = is_signed_type(unsigned long);
199 field->event_field.type.u.basic.integer.reverse_byte_order = 0;
200 field->event_field.type.u.basic.integer.base = 10;
201 field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
202 field->get_size = perf_counter_get_size;
203 field->record = perf_counter_record;
204 field->u.perf_counter.e = events;
205 field->u.perf_counter.attr = attr;
206 field->u.perf_counter.hp_enable = 1;
207
208 wrapper_vmalloc_sync_all();
209 return 0;
210
211 counter_error:
212 for_each_online_cpu(cpu) {
213 if (events[cpu])
214 perf_event_release_kernel(events[cpu]);
215 }
216 put_online_cpus();
217 #ifdef CONFIG_HOTPLUG_CPU
218 unregister_cpu_notifier(&field->u.perf_counter.nb);
219 #endif
220 lttng_remove_context_field(ctx, field);
221 append_context_error:
222 kfree(name_alloc);
223 name_alloc_error:
224 kfree(attr);
225 error_attr:
226 kfree(events);
227 return ret;
228 }
229
/* Module metadata. */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");
This page took 0.034634 seconds and 5 git commands to generate.