/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include "counter.h"
#include "counter-internal.h"
#include <urcu/system.h>
#include <urcu/compiler.h>
#include "common/macros.h"
#include "common/align.h"
#include "common/bitmap.h"
#include "common/smp.h"
#include "common/populate.h"
#include "shm.h"

static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}
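
/*
 * Compute the stride of each dimension, from the last (innermost)
 * dimension to the first, so that a multi-dimensional index can be
 * flattened into a single array offset. Fail if a dimension is empty
 * or if the total number of elements would overflow size_t.
 */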

static int lttng_counter_init_stride(
		const struct lib_counter_config *config __attribute__((unused)),
		struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* nr_elem should be minimum 1 for each dimension. */
		if (!nr_elem)
			return -EINVAL;
		stride *= nr_elem;
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
	}
	return 0;
}
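
/*
 * A counter layout maps a shared memory area containing the counter
 * array, immediately followed by an overflow bit map and an underflow
 * bit map (one bit per element, each bit map rounded up to whole
 * bytes).
 */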

static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;
	size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
	struct lttng_counter_shm_object *shm_object;

	if (shm_fd < 0)
		return 0;	/* Skip, will be populated later. */

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = &counter->percpu_counters[cpu];
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->shm_fd = shm_fd;
	counters_offset = shm_length;
	shm_length += counter_size * nr_elem;
	overflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	underflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	layout->shm_len = shm_length;
	if (counter->is_daemon) {
		/* Allocate and clear shared memory. */
		shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
			shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu,
			lttng_ust_map_populate_cpu_is_enabled(cpu));
		if (!shm_object)
			return -ENOMEM;
	} else {
		/* Map pre-existing shared memory. */
		shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
			shm_fd, shm_length, lttng_ust_map_populate_cpu_is_enabled(cpu));
		if (!shm_object)
			return -ENOMEM;
	}
	layout->counters = shm_object->memory_map + counters_offset;
	layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
	layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
	return 0;
}
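
/*
 * Hand a shared memory file descriptor to the global layout or to a
 * per-cpu layout. Each layout accepts a file descriptor only once.
 */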

int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
		return -EINVAL;
	layout = &counter->global_counters;
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, -1, fd);
}

int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (cpu < 0 || cpu >= get_possible_cpus_array_len())
		return -EINVAL;

	if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
		return -EINVAL;
	layout = &counter->percpu_counters[cpu];
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, cpu, fd);
}

int lttng_counter_set_global_sum_step(struct lib_counter *counter,
		int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > INT8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > INT16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > INT32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int validate_args(const struct lib_counter_config *config,
		size_t nr_dimensions __attribute__((unused)),
		const size_t *max_nr_elem,
		int64_t global_sum_step,
		int global_counter_fd,
		int nr_counter_cpu_fds,
		const int *counter_cpu_fds)
{
	int nr_cpus = get_possible_cpus_array_len();

	if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		/* 64-bit counters are not supported on 32-bit architectures. */
		return -1;
	}
	if (!max_nr_elem)
		return -1;
	/*
	 * global sum step is only useful with allocating both per-cpu
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
		return -1;
	if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
		return -1;

	return 0;
}
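
/*
 * Create a counter: allocate the dimension descriptors, the per-cpu
 * layout array when per-cpu allocation is requested, and the shared
 * memory object table, then initialize the layouts from the provided
 * file descriptors. Returns NULL on error.
 */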

struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step,
		int global_counter_fd,
		int nr_counter_cpu_fds,
		const int *counter_cpu_fds,
		bool is_daemon)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;
	int nr_handles = 0;
	int nr_cpus = get_possible_cpus_array_len();
	bool populate = lttng_ust_map_populate_is_enabled();

	if (validate_args(config, nr_dimensions, max_nr_elem,
			global_sum_step, global_counter_fd, nr_counter_cpu_fds,
			counter_cpu_fds))
		return NULL;

	counter = zmalloc_populate(sizeof(struct lib_counter), populate);
	if (!counter)
		return NULL;
	counter->global_counters.shm_fd = -1;
	counter->config = *config;
	counter->is_daemon = is_daemon;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = zmalloc_populate(nr_dimensions * sizeof(*counter->dimensions), populate);
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = zmalloc_populate(sizeof(struct lib_counter_layout) * nr_cpus, populate);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
		for_each_possible_cpu(cpu)
			counter->percpu_counters[cpu].shm_fd = -1;
	}

	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	//TODO saturation values.
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;

	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		nr_handles++;
	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		nr_handles += nr_cpus;
	/* Allocate table for global and per-cpu counters. */
	counter->object_table = lttng_counter_shm_object_table_create(nr_handles, populate);
	if (!counter->object_table)
		goto error_alloc_object_table;

	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1, global_counter_fd);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
		for_each_possible_cpu(cpu) {
			ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
error_alloc_object_table:
error_init_stride:
	free(counter->percpu_counters);
error_alloc_percpu:
	free(counter->dimensions);
error_dimensions:
error_sum_step:
	free(counter);
	return NULL;
}

void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;

	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		free(counter->percpu_counters);
	lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
	free(counter->dimensions);
	free(counter);
}

int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
{
	int shm_fd;

	shm_fd = counter->global_counters.shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = counter->global_counters.shm_len;
	return 0;
}

int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
{
	struct lib_counter_layout *layout;
	int shm_fd;

	if (cpu >= get_possible_cpus_array_len())
		return -1;
	layout = &counter->percpu_counters[cpu];
	shm_fd = layout->shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = layout->shm_len;
	return 0;
}

int lttng_counter_read(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu, int64_t *value, bool *overflow,
		bool *underflow)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= get_possible_cpus_array_len())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= get_possible_cpus_array_len())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = CMM_LOAD_SHARED(*int_p);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	*overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
	*underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
	return 0;
}
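
/*
 * Aggregate a counter element over the global layout (when present)
 * and every possible CPU. The sum is computed on unsigned 64-bit
 * values so that wrap-around is well-defined, and overflow/underflow
 * of the aggregated value is reported through the flags.
 */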

int lttng_counter_aggregate(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int64_t *value, bool *overflow,
		bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
				-1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		for_each_possible_cpu(cpu) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
					cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}

static int lttng_counter_clear_cpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= get_possible_cpus_array_len())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= get_possible_cpus_array_len())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
	lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
	return 0;
}
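
/*
 * Clear a counter element: reset the global layout first (when
 * allocated), then each per-cpu layout, including the associated
 * overflow and underflow bits.
 */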

int lttng_counter_clear(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		break;
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		for_each_possible_cpu(cpu) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}