/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <errno.h>
#include <stdint.h>
#include "counter.h"
#include "counter-internal.h"
#include <urcu/system.h>
#include <urcu/compiler.h>
#include <stdbool.h>
#include <ust-helper.h>
#include "smp.h"
#include "shm.h"
#include "ust-compat.h"

#include "ust-bitmap.h"
static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}
static int lttng_counter_init_stride(
		const struct lib_counter_config *config __attribute__((unused)),
		struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* nr_elem should be minimum 1 for each dimension. */
		if (!nr_elem)
			return -EINVAL;
		stride *= nr_elem;
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
	}
	return 0;
}
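
/*
 * Illustration (hypothetical dimensions, not from the original source): for
 * max_nr_elem = { 4, 8 }, the loop above walks dimensions from last to
 * first, assigning dimensions[1].stride = 1 and dimensions[0].stride = 8.
 * A flat index is then indexes[0] * 8 + indexes[1], covering 4 * 8 = 32
 * slots in total.
 */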
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;
	size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
	struct lttng_counter_shm_object *shm_object;

	if (shm_fd < 0)
		return 0;	/* Skip, will be populated later. */

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = &counter->percpu_counters[cpu];
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->shm_fd = shm_fd;
	counters_offset = shm_length;
	shm_length += counter_size * nr_elem;
	overflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	underflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	layout->shm_len = shm_length;
	if (counter->is_daemon) {
		/* Allocate and clear shared memory. */
		shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
			shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
		if (!shm_object)
			return -ENOMEM;
	} else {
		/* Map pre-existing shared memory. */
		shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
			shm_fd, shm_length);
		if (!shm_object)
			return -ENOMEM;
	}
	layout->counters = shm_object->memory_map + counters_offset;
	layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
	layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
	return 0;
}
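
/*
 * Shared memory layout produced above, in offset order:
 *
 *   counters_offset:   counter_size * nr_elem bytes          (counter array)
 *   overflow_offset:   LTTNG_UST_ALIGN(nr_elem, 8) / 8 bytes (overflow bitmap)
 *   underflow_offset:  LTTNG_UST_ALIGN(nr_elem, 8) / 8 bytes (underflow bitmap)
 */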
int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
		return -EINVAL;
	layout = &counter->global_counters;
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, -1, fd);
}
int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
		return -EINVAL;

	if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
		return -EINVAL;
	layout = &counter->percpu_counters[cpu];
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, cpu, fd);
}
static
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
		int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > INT8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > INT16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > INT32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
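
/*
 * The step value is only range-checked against the counter width here; the
 * add fast path that consumes global_sum_step lives outside this file. As
 * the comment in validate_args() below notes, the step is only meaningful
 * when both per-cpu and global counters are allocated.
 */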
static
int validate_args(const struct lib_counter_config *config,
	size_t nr_dimensions __attribute__((unused)),
	const size_t *max_nr_elem,
	int64_t global_sum_step,
	int global_counter_fd,
	int nr_counter_cpu_fds,
	const int *counter_cpu_fds)
{
	int nr_cpus = lttng_counter_num_possible_cpus();

	if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		WARN_ON_ONCE(1);
		return -1;
	}
	if (!max_nr_elem)
		return -1;
	/*
	 * global sum step is only useful with allocating both per-cpu
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
		return -1;
	if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
		return -1;
	return 0;
}
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step,
		int global_counter_fd,
		int nr_counter_cpu_fds,
		const int *counter_cpu_fds,
		bool is_daemon)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	size_t nr_handles = 0;
	int cpu, ret;
	int nr_cpus = lttng_counter_num_possible_cpus();

	if (validate_args(config, nr_dimensions, max_nr_elem,
			global_sum_step, global_counter_fd, nr_counter_cpu_fds,
			counter_cpu_fds))
		return NULL;
	counter = zmalloc(sizeof(struct lib_counter));
	if (!counter)
		return NULL;
	counter->global_counters.shm_fd = -1;
	counter->config = *config;
	counter->is_daemon = is_daemon;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
		lttng_counter_for_each_possible_cpu(cpu)
			counter->percpu_counters[cpu].shm_fd = -1;
	}

	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	/* TODO: saturation values. */
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;

	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		nr_handles++;
	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		nr_handles += nr_cpus;
	/* Allocate table for global and per-cpu counters. */
	counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
	if (!counter->object_table)
		goto error_alloc_object_table;

	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1, global_counter_fd);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
		lttng_counter_for_each_possible_cpu(cpu) {
			ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
error_alloc_object_table:
error_init_stride:
	free(counter->percpu_counters);
error_alloc_percpu:
	free(counter->dimensions);
error_dimensions:
error_sum_step:
	free(counter);
	return NULL;
}
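
/*
 * Usage sketch (illustrative; the config values and single dimension are
 * assumptions, not taken from this file):
 *
 *	struct lib_counter_config config = {
 *		.alloc = COUNTER_ALLOC_GLOBAL | COUNTER_ALLOC_PER_CPU,
 *		.counter_size = COUNTER_SIZE_32_BIT,
 *	};
 *	size_t max_nr_elem[] = { 128 };
 *	struct lib_counter *c = lttng_counter_create(&config, 1, max_nr_elem,
 *			0, -1, -1, NULL, true);
 *
 * With is_daemon = true and all fds passed as -1, the layouts are skipped
 * at creation time and populated later through lttng_counter_set_global_shm()
 * and lttng_counter_set_cpu_shm().
 */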
void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;

	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		free(counter->percpu_counters);
	lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
	free(counter->dimensions);
	free(counter);
}
int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
{
	int shm_fd;

	shm_fd = counter->global_counters.shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = counter->global_counters.shm_len;
	return 0;
}
int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
{
	struct lib_counter_layout *layout;
	int shm_fd;

	if (cpu >= lttng_counter_num_possible_cpus())
		return -1;
	layout = &counter->percpu_counters[cpu];
	shm_fd = layout->shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = layout->shm_len;
	return 0;
}
int lttng_counter_read(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu, int64_t *value, bool *overflow,
		bool *underflow)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= lttng_counter_num_possible_cpus())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = CMM_LOAD_SHARED(*int_p);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	*overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
	*underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
	return 0;
}
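
/*
 * CMM_LOAD_SHARED() (liburcu) performs a volatile, non-tearing load of the
 * naturally aligned counter slot, so the reader observes a consistent value
 * even while the fast path updates it concurrently; it is a plain load, not
 * an atomic read-modify-write.
 */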
int lttng_counter_aggregate(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int64_t *value, bool *overflow,
		bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
				-1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		lttng_counter_for_each_possible_cpu(cpu) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
					cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}
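
/*
 * The wraparound test above relies on unsigned arithmetic because signed
 * overflow is undefined behaviour in C: the sum is computed modulo 2^64 on
 * uint64_t and cast back. Adding a positive v can only wrap the result
 * below old, and adding a negative v can only wrap it above old, which the
 * two comparisons detect.
 */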
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= lttng_counter_num_possible_cpus())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
	lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
	return 0;
}
int lttng_counter_clear(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		break;
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		lttng_counter_for_each_possible_cpu(cpu) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}