From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Date: Thu, 9 May 2024 19:09:17 +0000 (-0400)
Subject: ust-fd: Add close_range declaration
X-Git-Url: http://git.liburcu.org/?a=commitdiff_plain;h=refs%2Fheads%2Fmaster;hp=4b01076fea0f635af6af6762a8edce1be03e5d39;p=lttng-ust.git

ust-fd: Add close_range declaration

Old libc headers do not contain a declaration of close_range(). Emit our
own declaration to prevent compiler warnings.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: If6ca8193895efbb6ce1ba46e092939b8099bcff6
---

diff --git a/configure.ac b/configure.ac
index a1c12412..05c343b7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -609,6 +609,9 @@ AC_SUBST(AM_CPPFLAGS)
 
 AC_SUBST(JNI_CPPFLAGS)
 
+# Used in man pages
+AC_SUBST([LTTNG_UST_MAJOR_VERSION], ust_version_major)
+AC_SUBST([LTTNG_UST_MINOR_VERSION], ust_version_minor)
 
 ##
 ## ## Output files generated by configure ##
diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index 29c06739..94510d4e 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -76,13 +76,14 @@ xmlto_verbose_out_ = $(xmlto_verbose_out_@AM_DEFAULT_V@)
 xmlto_verbose_out_0 = 2>/dev/null
 
 # Tools to execute:
-ADOC = $(asciidoc_verbose)$(ASCIIDOC) -f $(ASCIIDOC_CONF) -d manpage \
+ADOC = $(asciidoc_verbose)$(ASCIIDOC) -v -f $(ASCIIDOC_CONF) -d manpage \
	-a mansource="LTTng" \
	-a manmanual="LTTng Manual" \
-	-a manversion="$(PACKAGE_VERSION)"
+	-a manversion="$(PACKAGE_VERSION)" \
+	-a lttng_version="$(LTTNG_UST_MAJOR_VERSION).$(LTTNG_UST_MINOR_VERSION)"
 
 ADOC_DOCBOOK = $(ADOC) -b docbook
-XTO = $(xmlto_verbose)$(XMLTO) -m $(XSL_FILE) man
+XTO = $(xmlto_verbose)$(XMLTO) -v -m $(XSL_FILE) man
 
 # Recipes:
 %.1.xml: $(srcdir)/%.1.txt $(COMMON_DEPS)
diff --git a/doc/man/lttng-ust.3.txt b/doc/man/lttng-ust.3.txt
index 12f2d65d..601ebec8 100644
--- a/doc/man/lttng-ust.3.txt
+++ b/doc/man/lttng-ust.3.txt
@@ -1230,8 +1230,9 @@ if (lttng_ust_loaded) {
 [[example]]
 EXAMPLE
 -------
+
 NOTE: A few examples are available in the
-https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples[`doc/examples`]
+https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples[`doc/examples`]
 directory of LTTng-UST's source tree.
 
 This example shows all the features documented in the previous
@@ -1545,7 +1546,7 @@ affect application timings.
     Path to the shared object which acts as the clock override plugin.
     An example of such a plugin can be found in the LTTng-UST
     documentation under
-    https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples/clock-override[`examples/clock-override`].
+    https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples/clock-override[`examples/clock-override`].
 
 `LTTNG_UST_DEBUG`::
     If set, enable `liblttng-ust`'s debug and error output.
@@ -1554,7 +1555,24 @@ affect application timings.
     Path to the shared object which acts as the `getcpu()` override
     plugin. An example of such a plugin can be found in the LTTng-UST
     documentation under
-    https://github.com/lttng/lttng-ust/tree/v{lttng_version}/doc/examples/getcpu-override[`examples/getcpu-override`].
+    https://github.com/lttng/lttng-ust/tree/stable-{lttng_version}/doc/examples/getcpu-override[`examples/getcpu-override`].
+
+`LTTNG_UST_MAP_POPULATE_POLICY`::
++
+--
+If set, override the policy used to populate shared memory pages
+within the application. The expected values are:
+
+`none`:::
+    Do not pre-populate any pages; minor faults are taken on first
+    access while tracing.
+ +`cpu_possible`::: + Pre-populate pages for all possible CPUs in the system, as + listed by `/sys/devices/system/cpu/possible`. +-- ++ +Default: `none`. If the policy is unknown, use the default. `LTTNG_UST_REGISTER_TIMEOUT`:: Waiting time for the _registration done_ session daemon command diff --git a/src/common/Makefile.am b/src/common/Makefile.am index 05d08ade..ad889d16 100644 --- a/src/common/Makefile.am +++ b/src/common/Makefile.am @@ -171,6 +171,8 @@ libcommon_la_SOURCES = \ logging.h \ smp.c \ smp.h \ + populate.c \ + populate.h \ strutils.c \ strutils.h \ utils.c \ diff --git a/src/common/counter/counter.c b/src/common/counter/counter.c index 60edad0c..99a46af6 100644 --- a/src/common/counter/counter.c +++ b/src/common/counter/counter.c @@ -17,6 +17,7 @@ #include "common/bitmap.h" #include "common/smp.h" +#include "common/populate.h" #include "shm.h" static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension) @@ -84,13 +85,14 @@ static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int s if (counter->is_daemon) { /* Allocate and clear shared memory. */ shm_object = lttng_counter_shm_object_table_alloc(counter->object_table, - shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu); + shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu, + lttng_ust_map_populate_cpu_is_enabled(cpu)); if (!shm_object) return -ENOMEM; } else { /* Map pre-existing shared memory. */ shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table, - shm_fd, shm_length); + shm_fd, shm_length, lttng_ust_map_populate_cpu_is_enabled(cpu)); if (!shm_object) return -ENOMEM; } @@ -211,12 +213,13 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config int cpu, ret; int nr_handles = 0; int nr_cpus = get_possible_cpus_array_len(); + bool populate = lttng_ust_map_populate_is_enabled(); if (validate_args(config, nr_dimensions, max_nr_elem, global_sum_step, global_counter_fd, nr_counter_cpu_fds, counter_cpu_fds)) return NULL; - counter = zmalloc(sizeof(struct lib_counter)); + counter = zmalloc_populate(sizeof(struct lib_counter), populate); if (!counter) return NULL; counter->global_counters.shm_fd = -1; @@ -225,13 +228,13 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config if (lttng_counter_set_global_sum_step(counter, global_sum_step)) goto error_sum_step; counter->nr_dimensions = nr_dimensions; - counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions)); + counter->dimensions = zmalloc_populate(nr_dimensions * sizeof(*counter->dimensions), populate); if (!counter->dimensions) goto error_dimensions; for (dimension = 0; dimension < nr_dimensions; dimension++) counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension]; if (config->alloc & COUNTER_ALLOC_PER_CPU) { - counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus); + counter->percpu_counters = zmalloc_populate(sizeof(struct lib_counter_layout) * nr_cpus, populate); if (!counter->percpu_counters) goto error_alloc_percpu; for_each_possible_cpu(cpu) @@ -250,7 +253,7 @@ struct lib_counter *lttng_counter_create(const struct lib_counter_config *config if (config->alloc & COUNTER_ALLOC_PER_CPU) nr_handles += nr_cpus; /* Allocate table for global and per-cpu counters. 
*/ - counter->object_table = lttng_counter_shm_object_table_create(nr_handles); + counter->object_table = lttng_counter_shm_object_table_create(nr_handles, populate); if (!counter->object_table) goto error_alloc_object_table; diff --git a/src/common/counter/shm.c b/src/common/counter/shm.c index 8b65d1fc..6f7ae37a 100644 --- a/src/common/counter/shm.c +++ b/src/common/counter/shm.c @@ -69,12 +69,12 @@ error: return ret; } -struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj) +struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj, bool populate) { struct lttng_counter_shm_object_table *table; - table = zmalloc(sizeof(struct lttng_counter_shm_object_table) + - max_nb_obj * sizeof(table->objects[0])); + table = zmalloc_populate(sizeof(struct lttng_counter_shm_object_table) + + max_nb_obj * sizeof(table->objects[0]), populate); if (!table) return NULL; table->size = max_nb_obj; @@ -84,10 +84,11 @@ struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(siz static struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table, size_t memory_map_size, - int cpu_fd) + int cpu_fd, bool populate) { - int shmfd, ret; struct lttng_counter_shm_object *obj; + int flags = MAP_SHARED; + int shmfd, ret; char *memory_map; if (cpu_fd < 0) @@ -121,9 +122,11 @@ struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struc obj->shm_fd_ownership = 0; obj->shm_fd = shmfd; + if (populate) + flags |= LTTNG_MAP_POPULATE; /* memory_map: mmap */ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE, - MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0); + flags, shmfd, 0); if (memory_map == MAP_FAILED) { PERROR("mmap"); goto error_mmap; @@ -145,7 +148,7 @@ error_zero_file: static struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table, - size_t memory_map_size) + size_t memory_map_size, bool populate) { struct lttng_counter_shm_object *obj; void *memory_map; @@ -154,7 +157,7 @@ struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struc return NULL; obj = &table->objects[table->allocated_len]; - memory_map = zmalloc(memory_map_size); + memory_map = zmalloc_populate(memory_map_size, populate); if (!memory_map) goto alloc_error; @@ -197,13 +200,15 @@ struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct ltt size_t memory_map_size, enum lttng_counter_shm_object_type type, int cpu_fd, - int cpu) + int cpu, + bool populate) #else struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table, size_t memory_map_size, enum lttng_counter_shm_object_type type, int cpu_fd, - int cpu __attribute__((unused))) + int cpu __attribute__((unused)), + bool populate) #endif { struct lttng_counter_shm_object *shm_object; @@ -226,10 +231,11 @@ struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct ltt switch (type) { case LTTNG_COUNTER_SHM_OBJECT_SHM: shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size, - cpu_fd); + cpu_fd, populate); break; case LTTNG_COUNTER_SHM_OBJECT_MEM: - shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size); + shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size, + populate); break; default: assert(0); @@ -242,10 +248,10 @@ struct lttng_counter_shm_object 
*lttng_counter_shm_object_table_alloc(struct ltt
 }
 
 struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
-		int shm_fd,
-		size_t memory_map_size)
+		int shm_fd, size_t memory_map_size, bool populate)
 {
 	struct lttng_counter_shm_object *obj;
+	int flags = MAP_SHARED;
 	char *memory_map;
 
 	if (table->allocated_len >= table->size)
@@ -256,9 +262,11 @@ struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struc
 	obj->shm_fd = shm_fd;
 	obj->shm_fd_ownership = 1;
 
+	if (populate)
+		flags |= LTTNG_MAP_POPULATE;
 	/* memory_map: mmap */
 	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
-			MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+			flags, shm_fd, 0);
 	if (memory_map == MAP_FAILED) {
 		PERROR("mmap");
 		goto error_mmap;
diff --git a/src/common/counter/shm.h b/src/common/counter/shm.h
index 689edb0a..1293a7b0 100644
--- a/src/common/counter/shm.h
+++ b/src/common/counter/shm.h
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <stdbool.h>
 #include "common/logging.h"
 #include
 #include "shm_types.h"
@@ -73,18 +74,18 @@ void _lttng_counter_set_shmp(struct lttng_counter_shm_ref *ref, struct lttng_cou
 
 #define lttng_counter_set_shmp(ref, src)	_lttng_counter_set_shmp(&(ref)._ref, src)
 
-struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj, bool populate)
	__attribute__((visibility("hidden")));
 
 struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
	size_t memory_map_size,
	enum lttng_counter_shm_object_type type,
	const int cpu_fd,
-	int cpu)
+	int cpu, bool populate)
	__attribute__((visibility("hidden")));
 
 struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
-		int shm_fd, size_t memory_map_size)
+		int shm_fd, size_t memory_map_size, bool populate)
	__attribute__((visibility("hidden")));
 
 /* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */
diff --git a/src/common/getenv.c b/src/common/getenv.c
index 7f7b8534..120225e6 100644
--- a/src/common/getenv.c
+++ b/src/common/getenv.c
@@ -42,6 +42,7 @@ static struct lttng_env lttng_env[] = {
	/* Env. var. which can be used in setuid/setgid executables. */
	{ "LTTNG_UST_WITHOUT_BADDR_STATEDUMP", LTTNG_ENV_NOT_SECURE, NULL, },
	{ "LTTNG_UST_REGISTER_TIMEOUT", LTTNG_ENV_NOT_SECURE, NULL, },
+	{ "LTTNG_UST_MAP_POPULATE_POLICY", LTTNG_ENV_NOT_SECURE, NULL, },
 
	/* Env. var. which are not fetched in setuid/setgid executables. */
	{ "LTTNG_UST_CLOCK_PLUGIN", LTTNG_ENV_SECURE, NULL, },
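The getenv.c hunk above registers LTTNG_UST_MAP_POPULATE_POLICY as LTTNG_ENV_NOT_SECURE, meaning it is honored even in setuid/setgid programs, which is why populate.c below treats its value as untrusted and never echoes it. The variable is read lazily through lttng_ust_getenv() the first time liblttng-ust maps a buffer, so it must already be present in the traced process's environment. A minimal launcher sketch (not part of the patch; the application path is hypothetical, and a real launcher would extend environ rather than replace it):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical instrumented application to launch. */
	char *const argv[] = { "./traced-app", NULL };
	/* Pre-fault shared memory pages for all possible CPUs. */
	char *const envp[] = { "LTTNG_UST_MAP_POPULATE_POLICY=cpu_possible", NULL };

	execve("./traced-app", argv, envp);
	perror("execve");	/* only reached on failure */
	return 1;
}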
+ */ +static inline +void *zmalloc_populate(size_t len, bool populate) + __attribute__((always_inline)); +static inline +void *zmalloc_populate(size_t len, bool populate) +{ + if (populate) { + void *ret = malloc(len); + if (ret == NULL) + return ret; + bzero(ret, len); + return ret; + } else { + return calloc(len, 1); + } +} + /* * Memory allocation zeroed */ @@ -20,7 +43,7 @@ void *zmalloc(size_t len) static inline void *zmalloc(size_t len) { - return calloc(len, 1); + return zmalloc_populate(len, false); } #define max_t(type, x, y) \ diff --git a/src/common/populate.c b/src/common/populate.c new file mode 100644 index 00000000..b7f6bcce --- /dev/null +++ b/src/common/populate.c @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright (C) 2024-2012 Mathieu Desnoyers + */ + +#define _LGPL_SOURCE +#include "common/getenv.h" +#include "common/logging.h" +#include "common/populate.h" + +enum populate_policy { + POPULATE_UNSET, + + POPULATE_NONE, + POPULATE_CPU_POSSIBLE, + + POPULATE_UNKNOWN, +}; + +static enum populate_policy map_populate_policy = POPULATE_UNSET; + +static void init_map_populate_policy(void) +{ + const char *populate_env_str; + + if (map_populate_policy != POPULATE_UNSET) + return; + + populate_env_str = lttng_ust_getenv("LTTNG_UST_MAP_POPULATE_POLICY"); + if (!populate_env_str) { + map_populate_policy = POPULATE_NONE; + return; + } + if (!strcmp(populate_env_str, "none")) { + map_populate_policy = POPULATE_NONE; + } else if (!strcmp(populate_env_str, "cpu_possible")) { + map_populate_policy = POPULATE_CPU_POSSIBLE; + } else { + /* + * populate_env_str is an untrusted environment variable + * input (can be provided to setuid/setgid binaries), so + * don't even try to print it. + */ + WARN("Unknown policy for LTTNG_UST_MAP_POPULATE_POLICY environment variable."); + map_populate_policy = POPULATE_UNKNOWN; + } +} + +/* + * Return the shared page populate policy for global pages. Returns true + * if shared memory pages should be pre-populated, false otherwise. + */ +bool lttng_ust_map_populate_is_enabled(void) +{ + init_map_populate_policy(); + + switch (map_populate_policy) { + case POPULATE_UNKNOWN: /* Fall-through */ + case POPULATE_NONE: + return false; + case POPULATE_CPU_POSSIBLE: + return true; + default: + abort(); + } + return false; +} + +/* + * Return the shared page populate policy based on the @cpu number + * provided as input. Returns true if shared memory pages should be + * pre-populated, false otherwise. + * + * The @cpu argument is currently unused except for negative value + * validation. It is present to eventually match cpu affinity or cpu + * online masks if those features are added in the future. + */ +bool lttng_ust_map_populate_cpu_is_enabled(int cpu) +{ + /* Reject invalid cpu number. 
diff --git a/src/common/populate.h b/src/common/populate.h
new file mode 100644
index 00000000..f65c4851
--- /dev/null
+++ b/src/common/populate.h
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _UST_COMMON_POPULATE_H
+#define _UST_COMMON_POPULATE_H
+
+#include <stdbool.h>
+
+bool lttng_ust_map_populate_cpu_is_enabled(int cpu)
+	__attribute__((visibility("hidden")));
+
+bool lttng_ust_map_populate_is_enabled(void)
+	__attribute__((visibility("hidden")));
+
+#endif /* _UST_COMMON_POPULATE_H */
diff --git a/src/common/ringbuffer-clients/metadata-template.h b/src/common/ringbuffer-clients/metadata-template.h
index 56d95516..080288d5 100644
--- a/src/common/ringbuffer-clients/metadata-template.h
+++ b/src/common/ringbuffer-clients/metadata-template.h
@@ -93,7 +93,7 @@ static size_t client_packet_header_size(void)
 }
 
 static void client_buffer_begin(struct lttng_ust_ring_buffer *buf,
-		uint64_t tsc __attribute__((unused)),
+		uint64_t timestamp __attribute__((unused)),
		unsigned int subbuf_idx,
		struct lttng_ust_shm_handle *handle)
 {
@@ -125,7 +125,7 @@ static void client_buffer_begin(struct lttng_ust_ring_buffer *buf,
  * subbuffer. data_size is between 1 and subbuf_size.
  */
 static void client_buffer_end(struct lttng_ust_ring_buffer *buf,
-		uint64_t tsc __attribute__((unused)),
+		uint64_t timestamp __attribute__((unused)),
		unsigned int subbuf_idx, unsigned long data_size,
		struct lttng_ust_shm_handle *handle,
		const struct lttng_ust_ring_buffer_ctx *ctx)
@@ -193,7 +193,7 @@ static const struct lttng_ust_ring_buffer_config client_config = {
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,
 
-	.tsc_bits = 0,
+	.timestamp_bits = 0,
	.alloc = RING_BUFFER_ALLOC_GLOBAL,
	.sync = RING_BUFFER_SYNC_GLOBAL,
	.mode = RING_BUFFER_MODE_TEMPLATE,
diff --git a/src/common/ringbuffer-clients/template.h b/src/common/ringbuffer-clients/template.h
index fe8f8e02..58a8400d 100644
--- a/src/common/ringbuffer-clients/template.h
+++ b/src/common/ringbuffer-clients/template.h
@@ -19,8 +19,8 @@
 #include "common/clock.h"
 #include "common/ringbuffer/frontend_types.h"
 
-#define LTTNG_COMPACT_EVENT_BITS	5
-#define LTTNG_COMPACT_TSC_BITS		27
+#define LTTNG_COMPACT_EVENT_BITS	5
+#define LTTNG_COMPACT_TIMESTAMP_BITS	27
 
 /*
  * Keep the natural field alignment for _each field_ within this structure if
@@ -156,7 +156,7 @@ size_t record_header_size(
	case 1:	/* compact */
		padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
		offset += padding;
-		if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+		if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
@@ -172,7 +172,7 @@ size_t record_header_size(
		padding = lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
-		if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+		if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) {
			offset += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
@@ -235,14 +235,14 @@ void lttng_write_event_header(const struct lttng_ust_ring_buffer_config *config,
				event_id);
		bt_bitfield_write(&id_time, uint32_t,
LTTNG_COMPACT_EVENT_BITS, - LTTNG_COMPACT_TSC_BITS, - ctx->priv->tsc); + LTTNG_COMPACT_TIMESTAMP_BITS, + ctx->priv->timestamp); lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time)); break; } case 2: /* large */ { - uint32_t timestamp = (uint32_t) ctx->priv->tsc; + uint32_t timestamp = (uint32_t) ctx->priv->timestamp; uint16_t id = event_id; lib_ring_buffer_write(config, ctx, &id, sizeof(id)); @@ -275,7 +275,7 @@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co switch (lttng_chan->priv->header_type) { case 1: /* compact */ - if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) { + if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) { uint32_t id_time = 0; bt_bitfield_write(&id_time, uint32_t, @@ -284,12 +284,12 @@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co event_id); bt_bitfield_write(&id_time, uint32_t, LTTNG_COMPACT_EVENT_BITS, - LTTNG_COMPACT_TSC_BITS, - ctx_private->tsc); + LTTNG_COMPACT_TIMESTAMP_BITS, + ctx_private->timestamp); lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time)); } else { uint8_t id = 0; - uint64_t timestamp = ctx_private->tsc; + uint64_t timestamp = ctx_private->timestamp; bt_bitfield_write(&id, uint8_t, 0, @@ -305,8 +305,8 @@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co break; case 2: /* large */ { - if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) { - uint32_t timestamp = (uint32_t) ctx_private->tsc; + if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TIMESTAMP | LTTNG_RFLAG_EXTENDED))) { + uint32_t timestamp = (uint32_t) ctx_private->timestamp; uint16_t id = event_id; lib_ring_buffer_write(config, ctx, &id, sizeof(id)); @@ -314,7 +314,7 @@ void lttng_write_event_header_slow(const struct lttng_ust_ring_buffer_config *co lib_ring_buffer_write(config, ctx, ×tamp, sizeof(timestamp)); } else { uint16_t id = 65535; - uint64_t timestamp = ctx_private->tsc; + uint64_t timestamp = ctx_private->timestamp; lib_ring_buffer_write(config, ctx, &id, sizeof(id)); /* Align extended struct on largest member */ @@ -364,7 +364,7 @@ static size_t client_packet_header_size(void) return offsetof(struct packet_header, ctx.header_end); } -static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc, +static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t timestamp, unsigned int subbuf_idx, struct lttng_ust_shm_handle *handle) { @@ -384,7 +384,7 @@ static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc, memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid)); header->stream_id = lttng_chan->priv->id; header->stream_instance_id = buf->backend.cpu; - header->ctx.timestamp_begin = tsc; + header->ctx.timestamp_begin = timestamp; header->ctx.timestamp_end = 0; header->ctx.content_size = ~0ULL; /* for debugging */ header->ctx.packet_size = ~0ULL; @@ -397,7 +397,7 @@ static void client_buffer_begin(struct lttng_ust_ring_buffer *buf, uint64_t tsc, * offset is assumed to never be 0 here : never deliver a completely empty * subbuffer. data_size is between 1 and subbuf_size. 
*/ -static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t tsc, +static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t timestamp, unsigned int subbuf_idx, unsigned long data_size, struct lttng_ust_shm_handle *handle, const struct lttng_ust_ring_buffer_ctx *ctx) @@ -413,7 +413,7 @@ static void client_buffer_end(struct lttng_ust_ring_buffer *buf, uint64_t tsc, assert(header); if (!header) return; - header->ctx.timestamp_end = tsc; + header->ctx.timestamp_end = timestamp; header->ctx.content_size = (uint64_t) data_size * CHAR_BIT; /* in bits */ header->ctx.packet_size = @@ -614,7 +614,7 @@ static const struct lttng_ust_ring_buffer_config client_config = { .cb.content_size_field = client_content_size_field, .cb.packet_size_field = client_packet_size_field, - .tsc_bits = LTTNG_COMPACT_TSC_BITS, + .timestamp_bits = LTTNG_COMPACT_TIMESTAMP_BITS, .alloc = RING_BUFFER_ALLOC_PER_CPU, .sync = RING_BUFFER_SYNC_GLOBAL, .mode = RING_BUFFER_MODE_TEMPLATE, diff --git a/src/common/ringbuffer/backend_types.h b/src/common/ringbuffer/backend_types.h index a4e207f4..c9cc4025 100644 --- a/src/common/ringbuffer/backend_types.h +++ b/src/common/ringbuffer/backend_types.h @@ -87,7 +87,7 @@ struct channel_backend { unsigned int buf_size_order; /* Order of buffer size */ unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? */ unsigned long num_subbuf; /* Number of sub-buffers for writer */ - uint64_t start_tsc; /* Channel creation TSC value */ + uint64_t start_timestamp; /* Channel creation timestamp value */ DECLARE_SHMP(void *, priv_data);/* Client-specific information */ struct lttng_ust_ring_buffer_config config; /* Ring buffer configuration */ char name[NAME_MAX]; /* Channel name */ diff --git a/src/common/ringbuffer/frontend_api.h b/src/common/ringbuffer/frontend_api.h index e3507073..030169ff 100644 --- a/src/common/ringbuffer/frontend_api.h +++ b/src/common/ringbuffer/frontend_api.h @@ -82,8 +82,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_ring_buffer_config *confi *o_begin = v_read(config, &buf->offset); *o_old = *o_begin; - ctx_private->tsc = lib_ring_buffer_clock_read(chan); - if ((int64_t) ctx_private->tsc == -EIO) + ctx_private->timestamp = lib_ring_buffer_clock_read(chan); + if ((int64_t) ctx_private->timestamp == -EIO) return 1; /* @@ -93,8 +93,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_ring_buffer_config *confi */ //prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]); - if (last_tsc_overflow(config, buf, ctx_private->tsc)) - ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC; + if (last_timestamp_overflow(config, buf, ctx_private->timestamp)) + ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP; if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0)) return 1; @@ -130,7 +130,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_ring_buffer_config *confi * @ctx: ring buffer context. (input and output) Must be already initialized. * * Atomic wait-free slot reservation. The reserved space starts at the context - * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc". + * "pre_offset". Its length is "slot_size". The associated time-stamp is + * "timestamp". * * Return : * 0 on success. @@ -179,12 +180,12 @@ int lib_ring_buffer_reserve(const struct lttng_ust_ring_buffer_config *config, goto slow_path; /* - * Atomically update last_tsc. 
This update races against concurrent - * atomic updates, but the race will always cause supplementary full TSC - * record headers, never the opposite (missing a full TSC record header - * when it would be needed). + * Atomically update last_timestamp. This update races against concurrent + * atomic updates, but the race will always cause supplementary full + * timestamp record headers, never the opposite (missing a full + * timestamp record header when it would be needed). */ - save_last_tsc(config, buf, ctx_private->tsc); + save_last_timestamp(config, buf, ctx_private->timestamp); /* * Push the reader if necessary @@ -317,17 +318,17 @@ int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_ring_buffer_confi /* * We need to ensure that if the cmpxchg succeeds and discards the - * record, the next record will record a full TSC, because it cannot - * rely on the last_tsc associated with the discarded record to detect - * overflows. The only way to ensure this is to set the last_tsc to 0 - * (assuming no 64-bit TSC overflow), which forces to write a 64-bit + * record, the next record will record a full timestamp, because it cannot + * rely on the last_timestamp associated with the discarded record to detect + * overflows. The only way to ensure this is to set the last_timestamp to 0 + * (assuming no 64-bit timestamp overflow), which forces to write a 64-bit * timestamp in the next record. * - * Note: if discard fails, we must leave the TSC in the record header. - * It is needed to keep track of TSC overflows for the following + * Note: if discard fails, we must leave the timestamp in the record header. + * It is needed to keep track of timestamp overflows for the following * records. */ - save_last_tsc(config, buf, 0ULL); + save_last_timestamp(config, buf, 0ULL); if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset) != end_offset)) diff --git a/src/common/ringbuffer/frontend_internal.h b/src/common/ringbuffer/frontend_internal.h index 1dc816a3..d9f16a51 100644 --- a/src/common/ringbuffer/frontend_internal.h +++ b/src/common/ringbuffer/frontend_internal.h @@ -85,62 +85,62 @@ unsigned long subbuf_index(unsigned long offset, } /* - * Last TSC comparison functions. Check if the current TSC overflows tsc_bits - * bits from the last TSC read. When overflows are detected, the full 64-bit - * timestamp counter should be written in the record header. Reads and writes - * last_tsc atomically. + * Last timestamp comparison functions. Check if the current timestamp overflows + * timestamp_bits bits from the last timestamp read. When overflows are + * detected, the full 64-bit timestamp counter should be written in the record + * header. Reads and writes last_timestamp atomically. */ #if (CAA_BITS_PER_LONG == 32) static inline -void save_last_tsc(const struct lttng_ust_ring_buffer_config *config, - struct lttng_ust_ring_buffer *buf, uint64_t tsc) +void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config, + struct lttng_ust_ring_buffer *buf, uint64_t timestamp) { - if (config->tsc_bits == 0 || config->tsc_bits == 64) + if (config->timestamp_bits == 0 || config->timestamp_bits == 64) return; /* * Ensure the compiler performs this update in a single instruction. 
*/ - v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits)); + v_set(config, &buf->last_timestamp, (unsigned long)(timestamp >> config->timestamp_bits)); } static inline -int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config, - struct lttng_ust_ring_buffer *buf, uint64_t tsc) +int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config, + struct lttng_ust_ring_buffer *buf, uint64_t timestamp) { - unsigned long tsc_shifted; + unsigned long timestamp_shifted; - if (config->tsc_bits == 0 || config->tsc_bits == 64) + if (config->timestamp_bits == 0 || config->timestamp_bits == 64) return 0; - tsc_shifted = (unsigned long)(tsc >> config->tsc_bits); - if (caa_unlikely(tsc_shifted - - (unsigned long)v_read(config, &buf->last_tsc))) + timestamp_shifted = (unsigned long)(timestamp >> config->timestamp_bits); + if (caa_unlikely(timestamp_shifted + - (unsigned long)v_read(config, &buf->last_timestamp))) return 1; else return 0; } #else static inline -void save_last_tsc(const struct lttng_ust_ring_buffer_config *config, - struct lttng_ust_ring_buffer *buf, uint64_t tsc) +void save_last_timestamp(const struct lttng_ust_ring_buffer_config *config, + struct lttng_ust_ring_buffer *buf, uint64_t timestamp) { - if (config->tsc_bits == 0 || config->tsc_bits == 64) + if (config->timestamp_bits == 0 || config->timestamp_bits == 64) return; - v_set(config, &buf->last_tsc, (unsigned long)tsc); + v_set(config, &buf->last_timestamp, (unsigned long)timestamp); } static inline -int last_tsc_overflow(const struct lttng_ust_ring_buffer_config *config, - struct lttng_ust_ring_buffer *buf, uint64_t tsc) +int last_timestamp_overflow(const struct lttng_ust_ring_buffer_config *config, + struct lttng_ust_ring_buffer *buf, uint64_t timestamp) { - if (config->tsc_bits == 0 || config->tsc_bits == 64) + if (config->timestamp_bits == 0 || config->timestamp_bits == 64) return 0; - if (caa_unlikely((tsc - v_read(config, &buf->last_tsc)) - >> config->tsc_bits)) + if (caa_unlikely((timestamp - v_read(config, &buf->last_timestamp)) + >> config->timestamp_bits)) return 1; else return 0; @@ -287,7 +287,7 @@ int lib_ring_buffer_reserve_committed(const struct lttng_ust_ring_buffer_config } /* - * Receive end of subbuffer TSC as parameter. It has been read in the + * Receive end of subbuffer timestamp as parameter. It has been read in the * space reservation loop of either reserve or switch, which ensures it * progresses monotonically with event records in the buffer. Therefore, * it ensures that the end timestamp of a subbuffer is <= begin diff --git a/src/common/ringbuffer/frontend_types.h b/src/common/ringbuffer/frontend_types.h index 1b0e1a08..3be7ec1b 100644 --- a/src/common/ringbuffer/frontend_types.h +++ b/src/common/ringbuffer/frontend_types.h @@ -181,7 +181,7 @@ struct lttng_ust_ring_buffer { int record_disabled; /* End of cache-hot 32 bytes cacheline */ - union v_atomic last_tsc; /* + union v_atomic last_timestamp; /* * Last timestamp written in the buffer. */ @@ -251,7 +251,7 @@ struct lttng_ust_ring_buffer_ctx_private { * prior to record header alignment * padding. 
*/ - uint64_t tsc; /* time-stamp counter value */ + uint64_t timestamp; /* time-stamp counter value */ unsigned int rflags; /* reservation flags */ struct lttng_ust_ring_buffer *buf; /* * buffer corresponding to processor id diff --git a/src/common/ringbuffer/ring_buffer_backend.c b/src/common/ringbuffer/ring_buffer_backend.c index 27c335d5..d2fadb77 100644 --- a/src/common/ringbuffer/ring_buffer_backend.c +++ b/src/common/ringbuffer/ring_buffer_backend.c @@ -21,6 +21,7 @@ #include "common/smp.h" #include "shm.h" #include "common/align.h" +#include "common/populate.h" /** * lib_ring_buffer_backend_allocate - allocate a channel buffer @@ -234,7 +235,7 @@ void channel_backend_reset(struct channel_backend *chanb) * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf, * priv, notifiers, config, cpumask and name. */ - chanb->start_tsc = config->cb.ring_buffer_clock_read(chan); + chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan); } /** @@ -346,7 +347,8 @@ int channel_backend_init(struct channel_backend *chanb, struct shm_object *shmobj; shmobj = shm_object_table_alloc(handle->table, shmsize, - SHM_OBJECT_SHM, stream_fds[i], i); + SHM_OBJECT_SHM, stream_fds[i], i, + lttng_ust_map_populate_cpu_is_enabled(i)); if (!shmobj) goto end; align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer)); @@ -365,7 +367,8 @@ int channel_backend_init(struct channel_backend *chanb, struct lttng_ust_ring_buffer *buf; shmobj = shm_object_table_alloc(handle->table, shmsize, - SHM_OBJECT_SHM, stream_fds[0], -1); + SHM_OBJECT_SHM, stream_fds[0], -1, + lttng_ust_map_populate_is_enabled()); if (!shmobj) goto end; align_shm(shmobj, __alignof__(struct lttng_ust_ring_buffer)); @@ -379,7 +382,7 @@ int channel_backend_init(struct channel_backend *chanb, if (ret) goto free_bufs; } - chanb->start_tsc = config->cb.ring_buffer_clock_read(chan); + chanb->start_timestamp = config->cb.ring_buffer_clock_read(chan); return 0; diff --git a/src/common/ringbuffer/ring_buffer_frontend.c b/src/common/ringbuffer/ring_buffer_frontend.c index 5dcc0be7..ab1fc0ff 100644 --- a/src/common/ringbuffer/ring_buffer_frontend.c +++ b/src/common/ringbuffer/ring_buffer_frontend.c @@ -63,6 +63,7 @@ #include "shm.h" #include "rb-init.h" #include "common/compat/errno.h" /* For ENODATA */ +#include "common/populate.h" /* Print DBG() messages about events lost only every 1048576 hits */ #define DBG_PRINT_NR_LOST (1UL << 20) @@ -202,7 +203,7 @@ void lib_ring_buffer_reset(struct lttng_ust_ring_buffer *buf, } uatomic_set(&buf->consumed, 0); uatomic_set(&buf->record_disabled, 0); - v_set(config, &buf->last_tsc, 0); + v_set(config, &buf->last_timestamp, 0); lib_ring_buffer_backend_reset(&buf->backend, handle); /* Don't reset number of active readers */ v_set(config, &buf->records_lost_full, 0); @@ -340,7 +341,7 @@ int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf, struct commit_counters_hot *cc_hot; void *priv = channel_get_private_config(chan); size_t subbuf_header_size; - uint64_t tsc; + uint64_t timestamp; int ret; /* Test for cpu hotplug */ @@ -397,8 +398,8 @@ int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf, ret = -EPERM; goto free_chanbuf; } - tsc = config->cb.ring_buffer_clock_read(shmp_chan); - config->cb.buffer_begin(buf, tsc, 0, handle); + timestamp = config->cb.ring_buffer_clock_read(shmp_chan); + config->cb.buffer_begin(buf, timestamp, 0, handle); cc_hot = shmp_index(handle, buf->commit_hot, 0); if (!cc_hot) { ret = -EPERM; @@ -980,6 +981,7 @@ struct lttng_ust_shm_handle *channel_create(const struct 
lttng_ust_ring_buffer_c struct shm_object *shmobj; unsigned int nr_streams; int64_t blocking_timeout_ms; + bool populate = lttng_ust_map_populate_is_enabled(); if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) nr_streams = get_possible_cpus_array_len(); @@ -1006,12 +1008,12 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_c read_timer_interval)) return NULL; - handle = zmalloc(sizeof(struct lttng_ust_shm_handle)); + handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate); if (!handle) return NULL; /* Allocate table for channel + per-cpu buffers */ - handle->table = shm_object_table_create(1 + get_possible_cpus_array_len()); + handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate); if (!handle->table) goto error_table_alloc; @@ -1026,7 +1028,7 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_c /* Allocate normal memory for channel (not shared) */ shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM, - -1, -1); + -1, -1, populate); if (!shmobj) goto error_append; /* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */ @@ -1089,13 +1091,14 @@ struct lttng_ust_shm_handle *channel_handle_create(void *data, { struct lttng_ust_shm_handle *handle; struct shm_object *object; + bool populate = lttng_ust_map_populate_is_enabled(); - handle = zmalloc(sizeof(struct lttng_ust_shm_handle)); + handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate); if (!handle) return NULL; /* Allocate table for channel + per-cpu buffers */ - handle->table = shm_object_table_create(1 + get_possible_cpus_array_len()); + handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate); if (!handle->table) goto error_table_alloc; /* Add channel object */ @@ -1124,7 +1127,7 @@ int channel_handle_add_stream(struct lttng_ust_shm_handle *handle, /* Add stream object */ object = shm_object_table_append_shm(handle->table, shm_fd, wakeup_fd, stream_nr, - memory_map_size); + memory_map_size, lttng_ust_map_populate_cpu_is_enabled(stream_nr)); if (!object) return -EINVAL; return 0; @@ -1771,7 +1774,7 @@ void lib_ring_buffer_switch_old_start(struct lttng_ust_ring_buffer *buf, unsigned long commit_count; struct commit_counters_hot *cc_hot; - config->cb.buffer_begin(buf, ctx->priv->tsc, oldidx, handle); + config->cb.buffer_begin(buf, ctx->priv->timestamp, oldidx, handle); /* * Order all writes to buffer before the commit count update that will @@ -1829,7 +1832,7 @@ void lib_ring_buffer_switch_old_end(struct lttng_ust_ring_buffer *buf, * postponed until the commit counter is incremented for the * current space reservation. */ - *ts_end = ctx->priv->tsc; + *ts_end = ctx->priv->timestamp; /* * Order all writes to buffer and store to ts_end before the commit @@ -1867,7 +1870,7 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_ring_buffer *buf, unsigned long commit_count; struct commit_counters_hot *cc_hot; - config->cb.buffer_begin(buf, ctx->priv->tsc, beginidx, handle); + config->cb.buffer_begin(buf, ctx->priv->timestamp, beginidx, handle); /* * Order all writes to buffer before the commit count update that will @@ -1921,7 +1924,7 @@ void lib_ring_buffer_switch_new_end(struct lttng_ust_ring_buffer *buf, * postponed until the commit counter is incremented for the * current space reservation. 
*/ - *ts_end = ctx->priv->tsc; + *ts_end = ctx->priv->timestamp; } /* @@ -1945,7 +1948,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode, offsets->switch_old_start = 0; off = subbuf_offset(offsets->begin, chan); - ctx->priv->tsc = config->cb.ring_buffer_clock_read(chan); + ctx->priv->timestamp = config->cb.ring_buffer_clock_read(chan); /* * Ensure we flush the header of an empty subbuffer when doing the @@ -2081,12 +2084,12 @@ void lib_ring_buffer_switch_slow(struct lttng_ust_ring_buffer *buf, enum switch_ != offsets.old); /* - * Atomically update last_tsc. This update races against concurrent - * atomic updates, but the race will always cause supplementary full TSC - * records, never the opposite (missing a full TSC record when it would - * be needed). + * Atomically update last_timestamp. This update races against concurrent + * atomic updates, but the race will always cause supplementary full + * timestamp records, never the opposite (missing a full timestamp + * record when it would be needed). */ - save_last_tsc(config, buf, ctx.priv->tsc); + save_last_timestamp(config, buf, ctx.priv->timestamp); /* * Push the reader if necessary @@ -2155,12 +2158,12 @@ retry: offsets->switch_old_end = 0; offsets->pre_header_padding = 0; - ctx_private->tsc = config->cb.ring_buffer_clock_read(chan); - if ((int64_t) ctx_private->tsc == -EIO) + ctx_private->timestamp = config->cb.ring_buffer_clock_read(chan); + if ((int64_t) ctx_private->timestamp == -EIO) return -EIO; - if (last_tsc_overflow(config, buf, ctx_private->tsc)) - ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC; + if (last_timestamp_overflow(config, buf, ctx_private->timestamp)) + ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP; if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) { offsets->switch_new_start = 1; /* For offsets->begin */ @@ -2368,12 +2371,12 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_ring_buffer_ctx *ctx, != offsets.old)); /* - * Atomically update last_tsc. This update races against concurrent - * atomic updates, but the race will always cause supplementary full TSC - * records, never the opposite (missing a full TSC record when it would - * be needed). + * Atomically update last_timestamp. This update races against concurrent + * atomic updates, but the race will always cause supplementary full + * timestamp records, never the opposite (missing a full timestamp + * record when it would be needed). 
*/ - save_last_tsc(config, buf, ctx_private->tsc); + save_last_timestamp(config, buf, ctx_private->timestamp); /* * Push the reader if necessary diff --git a/src/common/ringbuffer/ringbuffer-config.h b/src/common/ringbuffer/ringbuffer-config.h index 61386174..83efea9e 100644 --- a/src/common/ringbuffer/ringbuffer-config.h +++ b/src/common/ringbuffer/ringbuffer-config.h @@ -46,10 +46,10 @@ struct lttng_ust_ring_buffer_client_cb { /* Slow path only, at subbuffer switch */ size_t (*subbuffer_header_size) (void); - void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t tsc, + void (*buffer_begin) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp, unsigned int subbuf_idx, struct lttng_ust_shm_handle *handle); - void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t tsc, + void (*buffer_end) (struct lttng_ust_ring_buffer *buf, uint64_t timestamp, unsigned int subbuf_idx, unsigned long data_size, struct lttng_ust_shm_handle *handle, const struct lttng_ust_ring_buffer_ctx *ctx); @@ -185,10 +185,10 @@ struct lttng_ust_ring_buffer_config { enum lttng_ust_ring_buffer_ipi_types ipi; enum lttng_ust_ring_buffer_wakeup_types wakeup; /* - * tsc_bits: timestamp bits saved at each record. + * timestamp_bits: timestamp bits saved at each record. * 0 and 64 disable the timestamp compression scheme. */ - unsigned int tsc_bits; + unsigned int timestamp_bits; struct lttng_ust_ring_buffer_client_cb cb; /* * client_type is used by the consumer process (which is in a @@ -204,18 +204,18 @@ struct lttng_ust_ring_buffer_config { /* * Reservation flags. * - * RING_BUFFER_RFLAG_FULL_TSC + * RING_BUFFER_RFLAG_FULL_TIMESTAMP * * This flag is passed to record_header_size() and to the primitive used to * write the record header. It indicates that the full 64-bit time value is * needed in the record header. If this flag is not set, the record header needs - * only to contain "tsc_bits" bit of time value. + * only to contain "timestamp_bits" bit of time value. * * Reservation flags can be added by the client, starting from * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from * record_header_size() to lib_ring_buffer_write_record_header(). 
*/ -#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0) +#define RING_BUFFER_RFLAG_FULL_TIMESTAMP (1U << 0) #define RING_BUFFER_RFLAG_END (1U << 1) /* diff --git a/src/common/ringbuffer/shm.c b/src/common/ringbuffer/shm.c index a1ef3d69..347f9af0 100644 --- a/src/common/ringbuffer/shm.c +++ b/src/common/ringbuffer/shm.c @@ -69,12 +69,12 @@ error: return ret; } -struct shm_object_table *shm_object_table_create(size_t max_nb_obj) +struct shm_object_table *shm_object_table_create(size_t max_nb_obj, bool populate) { struct shm_object_table *table; - table = zmalloc(sizeof(struct shm_object_table) + - max_nb_obj * sizeof(table->objects[0])); + table = zmalloc_populate(sizeof(struct shm_object_table) + + max_nb_obj * sizeof(table->objects[0]), populate); if (!table) return NULL; table->size = max_nb_obj; @@ -84,9 +84,11 @@ struct shm_object_table *shm_object_table_create(size_t max_nb_obj) static struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table, size_t memory_map_size, - int stream_fd) + int stream_fd, + bool populate) { int shmfd, waitfd[2], ret, i; + int flags = MAP_SHARED; struct shm_object *obj; char *memory_map; @@ -145,9 +147,11 @@ struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table, obj->shm_fd_ownership = 0; obj->shm_fd = shmfd; + if (populate) + flags |= LTTNG_MAP_POPULATE; /* memory_map: mmap */ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE, - MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0); + flags, shmfd, 0); if (memory_map == MAP_FAILED) { PERROR("mmap"); goto error_mmap; @@ -178,7 +182,7 @@ error_pipe: static struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table, - size_t memory_map_size) + size_t memory_map_size, bool populate) { struct shm_object *obj; void *memory_map; @@ -188,7 +192,7 @@ struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table, return NULL; obj = &table->objects[table->allocated_len]; - memory_map = zmalloc(memory_map_size); + memory_map = zmalloc_populate(memory_map_size, populate); if (!memory_map) goto alloc_error; @@ -255,13 +259,15 @@ struct shm_object *shm_object_table_alloc(struct shm_object_table *table, size_t memory_map_size, enum shm_object_type type, int stream_fd, - int cpu) + int cpu, + bool populate) #else struct shm_object *shm_object_table_alloc(struct shm_object_table *table, size_t memory_map_size, enum shm_object_type type, int stream_fd, - int cpu __attribute__((unused))) + int cpu __attribute__((unused)), + bool populate) #endif { struct shm_object *shm_object; @@ -284,10 +290,11 @@ struct shm_object *shm_object_table_alloc(struct shm_object_table *table, switch (type) { case SHM_OBJECT_SHM: shm_object = _shm_object_table_alloc_shm(table, memory_map_size, - stream_fd); + stream_fd, populate); break; case SHM_OBJECT_MEM: - shm_object = _shm_object_table_alloc_mem(table, memory_map_size); + shm_object = _shm_object_table_alloc_mem(table, memory_map_size, + populate); break; default: assert(0); @@ -301,8 +308,9 @@ struct shm_object *shm_object_table_alloc(struct shm_object_table *table, struct shm_object *shm_object_table_append_shm(struct shm_object_table *table, int shm_fd, int wakeup_fd, uint32_t stream_nr, - size_t memory_map_size) + size_t memory_map_size, bool populate) { + int flags = MAP_SHARED; struct shm_object *obj; char *memory_map; int ret; @@ -328,9 +336,11 @@ struct shm_object *shm_object_table_append_shm(struct shm_object_table *table, goto error_fcntl; } + if (populate) + flags |= LTTNG_MAP_POPULATE; /* memory_map: mmap 
*/ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE, - MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0); + flags, shm_fd, 0); if (memory_map == MAP_FAILED) { PERROR("mmap"); goto error_mmap; diff --git a/src/common/ringbuffer/shm.h b/src/common/ringbuffer/shm.h index 6e4f7f7b..944410d8 100644 --- a/src/common/ringbuffer/shm.h +++ b/src/common/ringbuffer/shm.h @@ -71,19 +71,19 @@ void _set_shmp(struct shm_ref *ref, struct shm_ref src) #define set_shmp(ref, src) _set_shmp(&(ref)._ref, src) -struct shm_object_table *shm_object_table_create(size_t max_nb_obj) +struct shm_object_table *shm_object_table_create(size_t max_nb_obj, bool populate) __attribute__((visibility("hidden"))); struct shm_object *shm_object_table_alloc(struct shm_object_table *table, size_t memory_map_size, enum shm_object_type type, const int stream_fd, - int cpu) + int cpu, bool populate) __attribute__((visibility("hidden"))); struct shm_object *shm_object_table_append_shm(struct shm_object_table *table, int shm_fd, int wakeup_fd, uint32_t stream_nr, - size_t memory_map_size) + size_t memory_map_size, bool populate) __attribute__((visibility("hidden"))); /* mem ownership is passed to shm_object_table_append_mem(). */ diff --git a/src/common/smp.c b/src/common/smp.c index 36967ccc..10b9954a 100644 --- a/src/common/smp.c +++ b/src/common/smp.c @@ -167,7 +167,7 @@ int get_cpu_mask_from_sysfs(char *buf, size_t max_bytes, const char *path) total_bytes_read += bytes_read; assert(total_bytes_read <= max_bytes); - } while (max_bytes > total_bytes_read && bytes_read > 0); + } while (max_bytes > total_bytes_read && bytes_read != 0); /* * Make sure the mask read is a null terminated string. diff --git a/src/lib/lttng-ust-fd/lttng-ust-fd.c b/src/lib/lttng-ust-fd/lttng-ust-fd.c index 0360b6f2..01decd1e 100644 --- a/src/lib/lttng-ust-fd/lttng-ust-fd.c +++ b/src/lib/lttng-ust-fd/lttng-ust-fd.c @@ -148,6 +148,9 @@ int fclose(FILE *stream) __lttng_ust_fd_plibc_fclose); } +/* Old libc headers don't contain a close_range() declaration. */ +int close_range(unsigned int first, unsigned int last, int flags); + /* * Override the libc close_range() symbol with our own, allowing * applications to close arbitrary file descriptors. If the fd is owned diff --git a/tests/unit/libringbuffer/shm.c b/tests/unit/libringbuffer/shm.c index 13c74c3d..8b78da25 100644 --- a/tests/unit/libringbuffer/shm.c +++ b/tests/unit/libringbuffer/shm.c @@ -34,12 +34,12 @@ int main(void) ok(shmfd > 0, "Open a POSIX shm fd"); /* Create a dummy shm object table to test the allocation function */ - table = shm_object_table_create(1); + table = shm_object_table_create(1, false); ok(table, "Create a shm object table"); assert(table); /* This function sets the initial size of the shm with ftruncate and zeros it */ - shmobj = shm_object_table_alloc(table, shmsize, SHM_OBJECT_SHM, shmfd, -1); + shmobj = shm_object_table_alloc(table, shmsize, SHM_OBJECT_SHM, shmfd, -1, false); ok(shmobj, "Allocate the shm object table"); assert(shmobj);
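A note on the tsc-to-timestamp rename that dominates this series: the compact ring-buffer client stores only LTTNG_COMPACT_TIMESTAMP_BITS (27) low-order bits of the clock in each record header, and last_timestamp_overflow() checks whether the discarded high-order bits changed since the previous record; when they did, the writer sets RING_BUFFER_RFLAG_FULL_TIMESTAMP and emits the full 64-bit value. A self-contained sketch of that check, mirroring the 64-bit code path in frontend_internal.h with a hypothetical stream of clock values:

#include <stdint.h>
#include <stdio.h>

#define TIMESTAMP_BITS	27	/* LTTNG_COMPACT_TIMESTAMP_BITS */

/* Mirrors last_timestamp_overflow() (64-bit path): overflow whenever
 * the bits above TIMESTAMP_BITS changed since the last record. */
static int needs_full_timestamp(uint64_t last, uint64_t now)
{
	return ((now - last) >> TIMESTAMP_BITS) != 0;
}

int main(void)
{
	/* Hypothetical clock reads: the third one crosses a 2^27 boundary. */
	uint64_t samples[] = { 100, 200, (1ULL << 27) + 500 };
	uint64_t last = 0;
	int i;

	for (i = 0; i < 3; i++) {
		printf("record %d: %s header\n", i,
			needs_full_timestamp(last, samples[i]) ?
			"full 64-bit" : "compact 27-bit");
		last = samples[i];
	}
	return 0;
}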
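Finally, on the close_range() declaration that gives this commit its subject: glibc only declares the wrapper since 2.34 (in unistd.h, under _GNU_SOURCE), so lttng-ust-fd.c supplies its own prototype to silence implicit-declaration warnings on older toolchains. A standalone sketch of the same pattern (not from the patch; it assumes a libc that exports the symbol, and on pre-5.9 kernels the call itself still fails with ENOSYS):

#include <errno.h>
#include <stdio.h>

/* Same prototype as the one added in lttng-ust-fd.c: harmless when the
 * libc headers already declare close_range(), required when they do not. */
int close_range(unsigned int first, unsigned int last, int flags);

int main(void)
{
	/* Close every descriptor above stderr. */
	if (close_range(3, ~0U, 0) < 0) {
		if (errno == ENOSYS)
			fprintf(stderr, "close_range(): kernel too old\n");
		return 1;
	}
	return 0;
}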