X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=src%2Fcommon%2Fcounter%2Fcounter.c;h=99a46af6d12549416cba4fb4575db9cf97791fe6;hb=HEAD;hp=0393bed30ab2fe81f9a44c41b695a08bd6e82c34;hpb=cdff92e0e0e875348b6ec549ef9d2282161faf80;p=lttng-ust.git

diff --git a/src/common/counter/counter.c b/src/common/counter/counter.c
index 0393bed3..99a46af6 100644
--- a/src/common/counter/counter.c
+++ b/src/common/counter/counter.c
@@ -16,7 +16,8 @@
 #include "common/align.h"
 #include "common/bitmap.h"
 
-#include "smp.h"
+#include "common/smp.h"
+#include "common/populate.h"
 #include "shm.h"
 
 static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
@@ -84,13 +85,14 @@ static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int s
 	if (counter->is_daemon) {
 		/* Allocate and clear shared memory. */
 		shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
-			shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
+			shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu,
+			lttng_ust_map_populate_cpu_is_enabled(cpu));
 		if (!shm_object)
 			return -ENOMEM;
 	} else {
 		/* Map pre-existing shared memory. */
 		shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
-			shm_fd, shm_length);
+			shm_fd, shm_length, lttng_ust_map_populate_cpu_is_enabled(cpu));
 		if (!shm_object)
 			return -ENOMEM;
 	}
@@ -118,7 +120,7 @@ int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
 	struct lib_counter_config *config = &counter->config;
 	struct lib_counter_layout *layout;
 
-	if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+	if (cpu < 0 || cpu >= get_possible_cpus_array_len())
 		return -EINVAL;
 
 	if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
@@ -171,7 +173,7 @@ int validate_args(const struct lib_counter_config *config,
 	int nr_counter_cpu_fds,
 	const int *counter_cpu_fds)
 {
-	int nr_cpus = lttng_counter_num_possible_cpus();
+	int nr_cpus = get_possible_cpus_array_len();
 
 	if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
 		WARN_ON_ONCE(1);
@@ -210,13 +212,14 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config
 	size_t dimension, nr_elem = 1;
 	int cpu, ret;
 	int nr_handles = 0;
-	int nr_cpus = lttng_counter_num_possible_cpus();
+	int nr_cpus = get_possible_cpus_array_len();
+	bool populate = lttng_ust_map_populate_is_enabled();
 
 	if (validate_args(config, nr_dimensions, max_nr_elem,
 			global_sum_step, global_counter_fd, nr_counter_cpu_fds,
 			counter_cpu_fds))
 		return NULL;
-	counter = zmalloc(sizeof(struct lib_counter));
+	counter = zmalloc_populate(sizeof(struct lib_counter), populate);
 	if (!counter)
 		return NULL;
 	counter->global_counters.shm_fd = -1;
@@ -225,16 +228,16 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config
 	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
 		goto error_sum_step;
 	counter->nr_dimensions = nr_dimensions;
-	counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
+	counter->dimensions = zmalloc_populate(nr_dimensions * sizeof(*counter->dimensions), populate);
 	if (!counter->dimensions)
 		goto error_dimensions;
 	for (dimension = 0; dimension < nr_dimensions; dimension++)
 		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
 	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
-		counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
+		counter->percpu_counters = zmalloc_populate(sizeof(struct lib_counter_layout) * nr_cpus, populate);
 		if (!counter->percpu_counters)
 			goto error_alloc_percpu;
-		lttng_counter_for_each_possible_cpu(cpu)
+		for_each_possible_cpu(cpu)
 			counter->percpu_counters[cpu].shm_fd = -1;
 	}
 
@@ -250,7 +253,7 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config
 	if (config->alloc & COUNTER_ALLOC_PER_CPU)
 		nr_handles += nr_cpus;
 	/* Allocate table for global and per-cpu counters. */
-	counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
+	counter->object_table = lttng_counter_shm_object_table_create(nr_handles, populate);
 	if (!counter->object_table)
 		goto error_alloc_object_table;
 
@@ -260,7 +263,7 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config
 			goto layout_init_error;
 	}
 	if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
-		lttng_counter_for_each_possible_cpu(cpu) {
+		for_each_possible_cpu(cpu) {
 			ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
 			if (ret)
 				goto layout_init_error;
@@ -309,7 +312,7 @@ int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, siz
 	struct lib_counter_layout *layout;
 	int shm_fd;
 
-	if (cpu >= lttng_counter_num_possible_cpus())
+	if (cpu >= get_possible_cpus_array_len())
 		return -1;
 	layout = &counter->percpu_counters[cpu];
 	shm_fd = layout->shm_fd;
@@ -335,13 +338,13 @@ int lttng_counter_read(const struct lib_counter_config *config,
 
 	switch (config->alloc) {
 	case COUNTER_ALLOC_PER_CPU:
-		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+		if (cpu < 0 || cpu >= get_possible_cpus_array_len())
 			return -EINVAL;
 		layout = &counter->percpu_counters[cpu];
 		break;
 	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
 		if (cpu >= 0) {
-			if (cpu >= lttng_counter_num_possible_cpus())
+			if (cpu >= get_possible_cpus_array_len())
 				return -EINVAL;
 			layout = &counter->percpu_counters[cpu];
 		} else {
@@ -430,7 +433,7 @@ int lttng_counter_aggregate(const struct lib_counter_config *config,
 		break;
 	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
 	case COUNTER_ALLOC_PER_CPU:
-		lttng_counter_for_each_possible_cpu(cpu) {
+		for_each_possible_cpu(cpu) {
 			int64_t old = sum;
 
 			ret = lttng_counter_read(config, counter, dimension_indexes,
@@ -469,13 +472,13 @@ int lttng_counter_clear_cpu(const struct lib_counter_config *config,
 
 	switch (config->alloc) {
 	case COUNTER_ALLOC_PER_CPU:
-		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+		if (cpu < 0 || cpu >= get_possible_cpus_array_len())
 			return -EINVAL;
 		layout = &counter->percpu_counters[cpu];
 		break;
 	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
 		if (cpu >= 0) {
-			if (cpu >= lttng_counter_num_possible_cpus())
+			if (cpu >= get_possible_cpus_array_len())
 				return -EINVAL;
 			layout = &counter->percpu_counters[cpu];
 		} else {
@@ -551,7 +554,7 @@ int lttng_counter_clear(const struct lib_counter_config *config,
 	switch (config->alloc) {
 	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
 	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
-		lttng_counter_for_each_possible_cpu(cpu) {
+		for_each_possible_cpu(cpu) {
 			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
 			if (ret < 0)
 				return ret;
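
Note on the populate flag threaded through the allocations above: lttng_ust_map_populate_is_enabled() and lttng_ust_map_populate_cpu_is_enabled() (from common/populate.h) decide whether the counter memory should have its backing pages faulted in up front. Purely as an illustration of the idea, and not the actual lttng-ust implementation of zmalloc_populate(), a populate-aware zero-initialized allocator on Linux could look like the sketch below; the helper name and the use of MAP_POPULATE are assumptions made for this example.

/*
 * Illustrative sketch only -- not the real lttng-ust zmalloc_populate().
 * When "populate" is requested, back the zeroed allocation with pre-faulted
 * pages (MAP_POPULATE) so the first write to a counter bucket does not take
 * a minor fault; otherwise fall back to a plain calloc(). The caller must
 * remember which path was used in order to release the memory correctly
 * (munmap() vs. free()).
 */
#include <stdbool.h>
#include <stdlib.h>
#include <sys/mman.h>

static void *sketch_zmalloc_populate(size_t len, bool populate)
{
	if (populate) {
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);

		/* Anonymous mappings are zero-filled by the kernel. */
		return p == MAP_FAILED ? NULL : p;
	}
	return calloc(1, len);
}

Pre-faulting trades higher up-front memory commitment for avoiding page faults the first time a counter is written, which is presumably why the behaviour is selected at runtime rather than enabled unconditionally.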
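
The other recurring change in this diff replaces the counter-local CPU helpers (lttng_counter_num_possible_cpus(), lttng_counter_for_each_possible_cpu()) with the shared ones declared in common/smp.h. Judging only from how they are used here, for_each_possible_cpu(cpu) walks cpu from 0 up to get_possible_cpus_array_len() - 1; a minimal equivalent is sketched below with a hypothetical macro name, and the real definition in common/smp.h may differ.

#include <stdio.h>

/* Assumed to be provided by common/smp.h; declared here only for illustration. */
extern int get_possible_cpus_array_len(void);

/*
 * Hypothetical stand-in for the iteration pattern used in the diff; the
 * actual for_each_possible_cpu() macro in lttng-ust may be defined
 * differently.
 */
#define sketch_for_each_possible_cpu(cpu) \
	for ((cpu) = 0; (cpu) < get_possible_cpus_array_len(); (cpu)++)

static void print_possible_cpus(void)
{
	int cpu;

	sketch_for_each_possible_cpu(cpu)
		printf("possible cpu: %d\n", cpu);
}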