X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=src%2Fcommon%2Fringbuffer%2Fring_buffer_frontend.c;h=f3f82e828d4ee598775e1b36ae7d4676224978bb;hb=refs%2Fheads%2Fmaster;hp=5dcc0be7263940a8e54973474b89d59f648a7a0d;hpb=4b01076fea0f635af6af6762a8edce1be03e5d39;p=lttng-ust.git

diff --git a/src/common/ringbuffer/ring_buffer_frontend.c b/src/common/ringbuffer/ring_buffer_frontend.c
index 5dcc0be7..ab1fc0ff 100644
--- a/src/common/ringbuffer/ring_buffer_frontend.c
+++ b/src/common/ringbuffer/ring_buffer_frontend.c
@@ -63,6 +63,7 @@
 #include "shm.h"
 #include "rb-init.h"
 #include "common/compat/errno.h"	/* For ENODATA */
+#include "common/populate.h"
 
 /* Print DBG() messages about events lost only every 1048576 hits */
 #define DBG_PRINT_NR_LOST	(1UL << 20)
@@ -202,7 +203,7 @@ void lib_ring_buffer_reset(struct lttng_ust_ring_buffer *buf,
 	}
 	uatomic_set(&buf->consumed, 0);
 	uatomic_set(&buf->record_disabled, 0);
-	v_set(config, &buf->last_tsc, 0);
+	v_set(config, &buf->last_timestamp, 0);
 	lib_ring_buffer_backend_reset(&buf->backend, handle);
 	/* Don't reset number of active readers */
 	v_set(config, &buf->records_lost_full, 0);
@@ -340,7 +341,7 @@ int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf,
 	struct commit_counters_hot *cc_hot;
 	void *priv = channel_get_private_config(chan);
 	size_t subbuf_header_size;
-	uint64_t tsc;
+	uint64_t timestamp;
 	int ret;
 
 	/* Test for cpu hotplug */
@@ -397,8 +398,8 @@ int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf,
 		ret = -EPERM;
 		goto free_chanbuf;
 	}
-	tsc = config->cb.ring_buffer_clock_read(shmp_chan);
-	config->cb.buffer_begin(buf, tsc, 0, handle);
+	timestamp = config->cb.ring_buffer_clock_read(shmp_chan);
+	config->cb.buffer_begin(buf, timestamp, 0, handle);
 	cc_hot = shmp_index(handle, buf->commit_hot, 0);
 	if (!cc_hot) {
 		ret = -EPERM;
@@ -980,6 +981,7 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_c
 	struct shm_object *shmobj;
 	unsigned int nr_streams;
 	int64_t blocking_timeout_ms;
+	bool populate = lttng_ust_map_populate_is_enabled();
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
 		nr_streams = get_possible_cpus_array_len();
@@ -1006,12 +1008,12 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_c
 			read_timer_interval))
 		return NULL;
 
-	handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+	handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
 	if (!handle)
 		return NULL;
 
 	/* Allocate table for channel + per-cpu buffers */
-	handle->table = shm_object_table_create(1 + get_possible_cpus_array_len());
+	handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
 	if (!handle->table)
 		goto error_table_alloc;
 
@@ -1026,7 +1028,7 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_c
 
 	/* Allocate normal memory for channel (not shared) */
 	shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
-			-1, -1);
+			-1, -1, populate);
 	if (!shmobj)
 		goto error_append;
 	/* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
@@ -1089,13 +1091,14 @@ struct lttng_ust_shm_handle *channel_handle_create(void *data,
 {
 	struct lttng_ust_shm_handle *handle;
 	struct shm_object *object;
+	bool populate = lttng_ust_map_populate_is_enabled();
 
-	handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+	handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
 	if (!handle)
 		return NULL;
 
 	/* Allocate table for channel + per-cpu buffers */
-	handle->table = shm_object_table_create(1 + get_possible_cpus_array_len());
+	handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
 	if (!handle->table)
 		goto error_table_alloc;
 	/* Add channel object */
@@ -1124,7 +1127,7 @@ int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
 	/* Add stream object */
 	object = shm_object_table_append_shm(handle->table,
 			shm_fd, wakeup_fd, stream_nr,
-			memory_map_size);
+			memory_map_size, lttng_ust_map_populate_cpu_is_enabled(stream_nr));
 	if (!object)
 		return -EINVAL;
 	return 0;
@@ -1771,7 +1774,7 @@ void lib_ring_buffer_switch_old_start(struct lttng_ust_ring_buffer *buf,
 	unsigned long commit_count;
 	struct commit_counters_hot *cc_hot;
 
-	config->cb.buffer_begin(buf, ctx->priv->tsc, oldidx, handle);
+	config->cb.buffer_begin(buf, ctx->priv->timestamp, oldidx, handle);
 
 	/*
 	 * Order all writes to buffer before the commit count update that will
@@ -1829,7 +1832,7 @@ void lib_ring_buffer_switch_old_end(struct lttng_ust_ring_buffer *buf,
 	 * postponed until the commit counter is incremented for the
 	 * current space reservation.
 	 */
-	*ts_end = ctx->priv->tsc;
+	*ts_end = ctx->priv->timestamp;
 
 	/*
 	 * Order all writes to buffer and store to ts_end before the commit
@@ -1867,7 +1870,7 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_ring_buffer *buf,
 	unsigned long commit_count;
 	struct commit_counters_hot *cc_hot;
 
-	config->cb.buffer_begin(buf, ctx->priv->tsc, beginidx, handle);
+	config->cb.buffer_begin(buf, ctx->priv->timestamp, beginidx, handle);
 
 	/*
 	 * Order all writes to buffer before the commit count update that will
@@ -1921,7 +1924,7 @@ void lib_ring_buffer_switch_new_end(struct lttng_ust_ring_buffer *buf,
 	 * postponed until the commit counter is incremented for the
 	 * current space reservation.
 	 */
-	*ts_end = ctx->priv->tsc;
+	*ts_end = ctx->priv->timestamp;
 }
 
 /*
@@ -1945,7 +1948,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
 	offsets->switch_old_start = 0;
 	off = subbuf_offset(offsets->begin, chan);
 
-	ctx->priv->tsc = config->cb.ring_buffer_clock_read(chan);
+	ctx->priv->timestamp = config->cb.ring_buffer_clock_read(chan);
 
 	/*
 	 * Ensure we flush the header of an empty subbuffer when doing the
@@ -2081,12 +2084,12 @@ void lib_ring_buffer_switch_slow(struct lttng_ust_ring_buffer *buf, enum switch_
 		 != offsets.old);
 
 	/*
-	 * Atomically update last_tsc. This update races against concurrent
-	 * atomic updates, but the race will always cause supplementary full TSC
-	 * records, never the opposite (missing a full TSC record when it would
-	 * be needed).
+	 * Atomically update last_timestamp. This update races against concurrent
+	 * atomic updates, but the race will always cause supplementary full
+	 * timestamp records, never the opposite (missing a full timestamp
+	 * record when it would be needed).
 	 */
-	save_last_tsc(config, buf, ctx.priv->tsc);
+	save_last_timestamp(config, buf, ctx.priv->timestamp);
 
 	/*
 	 * Push the reader if necessary
@@ -2155,12 +2158,12 @@ retry:
 	offsets->switch_old_end = 0;
 	offsets->pre_header_padding = 0;
 
-	ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
-	if ((int64_t) ctx_private->tsc == -EIO)
+	ctx_private->timestamp = config->cb.ring_buffer_clock_read(chan);
+	if ((int64_t) ctx_private->timestamp == -EIO)
 		return -EIO;
-	if (last_tsc_overflow(config, buf, ctx_private->tsc))
-		ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+	if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+		ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
 
 	if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
 		offsets->switch_new_start = 1;	/* For offsets->begin */
@@ -2368,12 +2371,12 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_ring_buffer_ctx *ctx,
 		  != offsets.old));
 
 	/*
-	 * Atomically update last_tsc. This update races against concurrent
-	 * atomic updates, but the race will always cause supplementary full TSC
-	 * records, never the opposite (missing a full TSC record when it would
-	 * be needed).
+	 * Atomically update last_timestamp. This update races against concurrent
+	 * atomic updates, but the race will always cause supplementary full
+	 * timestamp records, never the opposite (missing a full timestamp
+	 * record when it would be needed).
 	 */
-	save_last_tsc(config, buf, ctx_private->tsc);
+	save_last_timestamp(config, buf, ctx_private->timestamp);
 
 	/*
 	 * Push the reader if necessary
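
Note on last_timestamp and RING_BUFFER_RFLAG_FULL_TIMESTAMP (the fields this patch renames): the ring buffer records a truncated, compact clock value with each event and only emits a full 64-bit timestamp when the bits above the compact range have changed since the previously saved value. A minimal sketch of that overflow test, assuming an illustrative 27-bit compact range (the constant and the function name below are hypothetical, not the library's actual identifiers):

	#include <stdbool.h>
	#include <stdint.h>

	#define COMPACT_TS_BITS	27	/* illustrative width of the per-event timestamp field */

	/*
	 * Hypothetical sketch: a full timestamp record is needed whenever
	 * any bit above the compact range differs from the last saved
	 * timestamp.
	 */
	static bool needs_full_timestamp(uint64_t last_ts, uint64_t ts)
	{
		return ((ts ^ last_ts) >> COMPACT_TS_BITS) != 0;
	}

This is why the race described in the comment is benign: writers racing on save_last_timestamp() can at worst both see a stale value and both emit full timestamps, wasting a few bytes, but the check can never miss a high-bit change, which would make the trace undecodable.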
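Note on the populate flag threaded through the allocation paths above: lttng_ust_map_populate_is_enabled() lets the tracer prefault the shared-memory buffer pages when they are mapped, so the first event written to each page does not take a minor page fault on the tracing fast path. A minimal sketch of the idea, assuming a plain POSIX mmap() of a shared-memory file descriptor (map_shm and its parameters are illustrative helpers, not the library's API):

	#include <stdbool.h>
	#include <stddef.h>
	#include <sys/mman.h>

	/*
	 * Hypothetical helper: map a shared-memory object, optionally
	 * asking the kernel to prefault every page up front (Linux
	 * MAP_POPULATE) instead of faulting lazily on first write.
	 */
	static void *map_shm(int fd, size_t len, bool populate)
	{
		int flags = MAP_SHARED;

		if (populate)
			flags |= MAP_POPULATE;	/* prefault at mmap() time */
		return mmap(NULL, len, PROT_READ | PROT_WRITE, flags, fd, 0);
	}

The per-stream variant used in channel_handle_add_stream(), lttng_ust_map_populate_cpu_is_enabled(stream_nr), suggests the same decision can be made per-CPU buffer rather than globally.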