Fix: pass private data to context callbacks
lttng-ust.git: libcounter/counter.c
/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <errno.h>
#include "counter.h"
#include "counter-internal.h"
#include <urcu/system.h>
#include <urcu/compiler.h>
#include <stdbool.h>
#include <ust-helper.h>
#include "smp.h"
#include "shm.h"
#include "ust-compat.h"

#include "ust-bitmap.h"

static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}

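/*
 * Compute the stride of each dimension so that a multi-dimensional
 * index can be flattened into a single offset into the counters array
 * (row-major layout: the last dimension has stride 1). Fails with
 * -EINVAL on an empty dimension or on multiplication overflow.
 */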
static int lttng_counter_init_stride(
		const struct lib_counter_config *config __attribute__((unused)),
		struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* nr_elem should be minimum 1 for each dimension. */
		if (!nr_elem)
			return -EINVAL;
		/* Check for overflow before multiplying, not after. */
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
		stride *= nr_elem;
	}
	return 0;
}

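/*
 * Map (or, for the daemon, allocate and clear) the shared memory
 * backing one counter layout: the flat counters array, followed by
 * the overflow and underflow bitmaps (one bit per element, rounded
 * up to a byte boundary). cpu == -1 selects the global layout,
 * cpu >= 0 a per-cpu layout. A negative shm_fd defers the mapping.
 */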
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;
	size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
	struct lttng_counter_shm_object *shm_object;

	if (shm_fd < 0)
		return 0;	/* Skip, will be populated later. */

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = &counter->percpu_counters[cpu];
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		/* The counter_size enum values are expressed in bytes. */
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->shm_fd = shm_fd;
	counters_offset = shm_length;
	shm_length += counter_size * nr_elem;
	overflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	underflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	layout->shm_len = shm_length;
	if (counter->is_daemon) {
		/* Allocate and clear shared memory. */
		shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
			shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
		if (!shm_object)
			return -ENOMEM;
	} else {
		/* Map pre-existing shared memory. */
		shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
			shm_fd, shm_length);
		if (!shm_object)
			return -ENOMEM;
	}
	layout->counters = shm_object->memory_map + counters_offset;
	layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
	layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
	return 0;
}

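/*
 * Late attachment of a shared memory fd to a layout whose creation
 * deferred the mapping (fd passed as -1). Fails with -EBUSY if the
 * layout is already backed by shared memory.
 */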
int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
		return -EINVAL;
	layout = &counter->global_counters;
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, -1, fd);
}

int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
		return -EINVAL;

	if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
		return -EINVAL;
	layout = &counter->percpu_counters[cpu];
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, cpu, fd);
}

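/*
 * The global sum step is the per-cpu accumulation threshold at which
 * counts are flushed into the global counter; it must fit within the
 * configured counter size.
 */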
static
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
		int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > INT8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > INT16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > INT32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

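/*
 * Sanity-check the lttng_counter_create() arguments: 64-bit counters
 * require a 64-bit architecture, a global sum step only makes sense
 * when both global and per-cpu counters are allocated, and file
 * descriptors may only be passed for layouts that are allocated.
 */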
static
int validate_args(const struct lib_counter_config *config,
	size_t nr_dimensions __attribute__((unused)),
	const size_t *max_nr_elem,
	int64_t global_sum_step,
	int global_counter_fd,
	int nr_counter_cpu_fds,
	const int *counter_cpu_fds)
{
	int nr_cpus = lttng_counter_num_possible_cpus();

	if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		WARN_ON_ONCE(1);
		return -1;
	}
	if (!max_nr_elem)
		return -1;
	/*
	 * global sum step is only useful with allocating both per-cpu
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
		return -1;
	if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
		return -1;
	return 0;
}

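/*
 * Create a counter. max_nr_elem holds the element count of each of
 * the nr_dimensions dimensions. Fds may be passed as -1 (and
 * counter_cpu_fds as NULL) to defer shared memory mapping to the
 * lttng_counter_set_*_shm() helpers. Returns NULL on error.
 */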
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step,
		int global_counter_fd,
		int nr_counter_cpu_fds,
		const int *counter_cpu_fds,
		bool is_daemon)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;
	int nr_handles = 0;
	int nr_cpus = lttng_counter_num_possible_cpus();

	if (validate_args(config, nr_dimensions, max_nr_elem,
			global_sum_step, global_counter_fd, nr_counter_cpu_fds,
			counter_cpu_fds))
		return NULL;
	counter = zmalloc(sizeof(struct lib_counter));
	if (!counter)
		return NULL;
	counter->global_counters.shm_fd = -1;
	counter->config = *config;
	counter->is_daemon = is_daemon;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
		lttng_counter_for_each_possible_cpu(cpu)
			counter->percpu_counters[cpu].shm_fd = -1;
	}

	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	/* TODO: saturation values. */
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;

	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		nr_handles++;
	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		nr_handles += nr_cpus;
	/* Allocate table for global and per-cpu counters. */
	counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
	if (!counter->object_table)
		goto error_alloc_object_table;

	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1, global_counter_fd);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
		lttng_counter_for_each_possible_cpu(cpu) {
			ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
error_alloc_object_table:
error_init_stride:
	free(counter->percpu_counters);
error_alloc_percpu:
	free(counter->dimensions);
error_dimensions:
error_sum_step:
	free(counter);
	return NULL;
}

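/*
 * Example (illustrative sketch, not part of the original file):
 * create a one-dimensional, 32-bit, per-cpu counter with 8 slots,
 * deferring the shared memory mapping (fds passed as -1/NULL):
 *
 *	size_t max_nr_elem[] = { 8 };
 *	struct lib_counter_config config = {
 *		.alloc = COUNTER_ALLOC_PER_CPU,
 *		.counter_size = COUNTER_SIZE_32_BIT,
 *	};
 *	struct lib_counter *c;
 *
 *	c = lttng_counter_create(&config, 1, max_nr_elem, 0, -1, -1,
 *			NULL, false);
 *
 * The exact lib_counter_config field set shown here is an assumption;
 * see the libcounter headers for the authoritative definition.
 */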
void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;

	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		free(counter->percpu_counters);
	lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
	free(counter->dimensions);
	free(counter);
}

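/*
 * Retrieve the fd and length of the shared memory backing the global
 * or a per-cpu layout, e.g. to hand them to another process. Returns
 * -1 when no shared memory is mapped for that layout.
 */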
int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
{
	int shm_fd;

	shm_fd = counter->global_counters.shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = counter->global_counters.shm_len;
	return 0;
}

int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
{
	struct lib_counter_layout *layout;
	int shm_fd;

	if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
		return -1;
	layout = &counter->percpu_counters[cpu];
	shm_fd = layout->shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = layout->shm_len;
	return 0;
}

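/*
 * Read one counter slot along with its sticky overflow/underflow
 * flags. cpu == -1 reads the global counter; cpu >= 0 reads a
 * per-cpu counter (valid only for per-cpu allocations).
 */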
int lttng_counter_read(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu, int64_t *value, bool *overflow,
		bool *underflow)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= lttng_counter_num_possible_cpus())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = CMM_LOAD_SHARED(*int_p);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	*overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
	*underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
	return 0;
}

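/*
 * Sum one counter slot across the global layout (when present) and
 * all possible CPUs. Wrap-around of the running 64-bit sum is
 * detected and reported through *overflow / *underflow; the
 * arithmetic is done on unsigned types because signed overflow is
 * undefined behaviour in C.
 */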
int lttng_counter_aggregate(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int64_t *value, bool *overflow,
		bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
				-1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		lttng_counter_for_each_possible_cpu(cpu) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
					cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}

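/*
 * Reset one counter slot and its overflow/underflow bits for the
 * given cpu, or for the global layout when cpu == -1.
 */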
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= lttng_counter_num_possible_cpus())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
	lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
	return 0;
}

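/*
 * Reset one counter slot across all allocated layouts: the global
 * layout first (when present), then every possible CPU.
 */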
int lttng_counter_clear(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		break;
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		lttng_counter_for_each_possible_cpu(cpu) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
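
/*
 * Example (illustrative sketch, not part of the original file):
 * aggregate, then reset, slot 3 of a one-dimensional counter:
 *
 *	size_t idx[] = { 3 };
 *	int64_t value;
 *	bool overflow, underflow;
 *
 *	if (!lttng_counter_aggregate(&c->config, c, idx, &value,
 *			&overflow, &underflow))
 *		printf("count: %" PRId64 "\n", value);
 *	(void) lttng_counter_clear(&c->config, c, idx);
 *
 * Assumes `c` is a struct lib_counter * obtained from
 * lttng_counter_create() with shared memory already mapped.
 */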