/*
 * (C) Copyright 2009-2011 -
 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/perf_event.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include "ltt-events.h"
16 #include "wrapper/ringbuffer/frontend_types.h"
17 #include "wrapper/vmalloc.h"
18 #include "ltt-tracer.h"
21 size_t perf_counter_get_size(size_t offset
)
25 size
+= lib_ring_buffer_align(offset
, ltt_alignof(uint64_t));
26 size
+= sizeof(uint64_t);
31 void perf_counter_record(struct lttng_ctx_field
*field
,
32 struct lib_ring_buffer_ctx
*ctx
,
33 struct ltt_channel
*chan
)
35 struct perf_event
*event
;
38 event
= field
->u
.perf_counter
.e
[ctx
->cpu
];
40 event
->pmu
->read(event
);
41 value
= local64_read(&event
->count
);
44 * Perf chooses not to be clever and not to support enabling a
45 * perf counter before the cpu is brought up. Therefore, we need
46 * to support having events coming (e.g. scheduler events)
47 * before the counter is setup. Write an arbitrary 0 in this
52 lib_ring_buffer_align_ctx(ctx
, ltt_alignof(value
));
53 chan
->ops
->event_write(ctx
, &value
, sizeof(value
));
/*
 * Perf counter overflow handler. Intentionally empty: counters are read
 * synchronously in perf_counter_record(), so overflow notifications are
 * ignored. A non-NULL callback is still required by
 * perf_event_create_kernel_counter().
 */
static
void overflow_callback(struct perf_event *event, int nmi,
		       struct perf_sample_data *data,
		       struct pt_regs *regs)
{
}
64 void lttng_destroy_perf_counter_field(struct lttng_ctx_field
*field
)
66 struct perf_event
**events
= field
->u
.perf_counter
.e
;
70 for_each_online_cpu(cpu
)
71 perf_event_release_kernel(events
[cpu
]);
73 #ifdef CONFIG_HOTPLUG_CPU
74 unregister_cpu_notifier(&field
->u
.perf_counter
.nb
);
76 kfree(field
->event_field
.name
);
77 kfree(field
->u
.perf_counter
.attr
);
#ifdef CONFIG_HOTPLUG_CPU

/**
 *	lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *	@hcpu: argument (info about the CPU)
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can setup perf counters when the cpu is online (up prepare seems to be too
 * soon).
 */
static
int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_ctx_field *field =
		container_of(nb, struct lttng_ctx_field, u.perf_counter.nb);
	struct perf_event **events = field->u.perf_counter.e;
	struct perf_event_attr *attr = field->u.perf_counter.attr;
	struct perf_event *pevent;

	/* Ignore notifications until initial per-CPU setup is complete. */
	if (!field->u.perf_counter.hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif
134 int lttng_add_perf_counter_to_ctx(uint32_t type
,
137 struct lttng_ctx
**ctx
)
139 struct lttng_ctx_field
*field
;
140 struct perf_event
**events
;
141 struct perf_event_attr
*attr
;
146 events
= kzalloc(num_possible_cpus() * sizeof(*events
), GFP_KERNEL
);
150 attr
= kzalloc(sizeof(*field
->u
.perf_counter
.attr
), GFP_KERNEL
);
157 attr
->config
= config
;
158 attr
->size
= sizeof(struct perf_event_attr
);
162 name_alloc
= kstrdup(name
, GFP_KERNEL
);
165 goto name_alloc_error
;
168 field
= lttng_append_context(ctx
);
171 goto append_context_error
;
173 if (lttng_find_context(*ctx
, name_alloc
)) {
178 #ifdef CONFIG_HOTPLUG_CPU
179 field
->u
.perf_counter
.nb
.notifier_call
=
180 lttng_perf_counter_cpu_hp_callback
;
181 field
->u
.perf_counter
.nb
.priority
= 0;
182 register_cpu_notifier(&field
->u
.perf_counter
.nb
);
186 for_each_online_cpu(cpu
) {
187 events
[cpu
] = perf_event_create_kernel_counter(attr
,
188 cpu
, NULL
, overflow_callback
);
189 if (!events
[cpu
] || IS_ERR(events
[cpu
])) {
196 field
->destroy
= lttng_destroy_perf_counter_field
;
198 field
->event_field
.name
= name_alloc
;
199 field
->event_field
.type
.atype
= atype_integer
;
200 field
->event_field
.type
.u
.basic
.integer
.size
= sizeof(unsigned long) * CHAR_BIT
;
201 field
->event_field
.type
.u
.basic
.integer
.alignment
= ltt_alignof(unsigned long) * CHAR_BIT
;
202 field
->event_field
.type
.u
.basic
.integer
.signedness
= is_signed_type(unsigned long);
203 field
->event_field
.type
.u
.basic
.integer
.reverse_byte_order
= 0;
204 field
->event_field
.type
.u
.basic
.integer
.base
= 10;
205 field
->event_field
.type
.u
.basic
.integer
.encoding
= lttng_encode_none
;
206 field
->get_size
= perf_counter_get_size
;
207 field
->record
= perf_counter_record
;
208 field
->u
.perf_counter
.e
= events
;
209 field
->u
.perf_counter
.attr
= attr
;
210 field
->u
.perf_counter
.hp_enable
= 1;
212 wrapper_vmalloc_sync_all();
216 for_each_online_cpu(cpu
) {
217 if (events
[cpu
] && !IS_ERR(events
[cpu
]))
218 perf_event_release_kernel(events
[cpu
]);
221 #ifdef CONFIG_HOTPLUG_CPU
222 unregister_cpu_notifier(&field
->u
.perf_counter
.nb
);
225 lttng_remove_context_field(ctx
, field
);
226 append_context_error
:
235 MODULE_LICENSE("GPL and additional rights");
236 MODULE_AUTHOR("Mathieu Desnoyers");
237 MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");