/* Allocate backend pages array elements */
for (i = 0; i < num_subbuf_alloc; i++) {
	align_shm(shmobj, __alignof__(struct lib_ring_buffer_backend_pages));
	/*
	 * shmp_index() offsets within the shared-memory object before
	 * translating the pointer; the old shmp(...)[i] pattern indexed
	 * after translation. (Diff residue resolved to the new form.)
	 */
	set_shmp(shmp_index(handle, bufb->array, i)->shmp,
		 zalloc_shm(shmobj,
			    sizeof(struct lib_ring_buffer_backend_pages)));
	if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
		goto free_array;
}
/*
 * NOTE(review): this bare goto appears to be the tail of a truncated
 * "if (!shmp(...))" allocation check — confirm against the full file.
 */
goto free_array;
/* Initialize write-side subbuffer id table. */
for (i = 0; i < num_subbuf; i++)
	shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
/* Assign read-side subbuffer table */
if (extra_reader_sb)
ref.offset = bufb->memory_map._ref.offset;
ref.offset += i * subbuf_size;
- set_shmp(shmp(handle, shmp(handle, bufb->array)[i].shmp)->p,
+ set_shmp(shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->p,
ref);
if (config->output == RING_BUFFER_MMAP) {
- shmp(handle, shmp(handle, bufb->array)[i].shmp)->mmap_offset = mmap_offset;
+ shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->mmap_offset = mmap_offset;
mmap_offset += subbuf_size;
}
}
/* Account for the extra reader subbuffer — presumably guarded by a
 * truncated "if (chanb->extra_reader_sb)" above; confirm. */
num_subbuf_alloc++;
/* Reset write-side subbuffer ids to their initial mapping. */
for (i = 0; i < chanb->num_subbuf; i++)
	shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
if (chanb->extra_reader_sb)
	/* Reader owns the extra (last allocated) subbuffer. */
	bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
					num_subbuf_alloc - 1);
for (i = 0; i < num_subbuf_alloc; i++) {
	/* Hoist the repeated handle translation out of the three stores. */
	struct lib_ring_buffer_backend_pages *pages =
		shmp(handle, shmp_index(handle, bufb->array, i)->shmp);

	/* Don't reset mmap_offset */
	v_set(config, &pages->records_commit, 0);
	v_set(config, &pages->records_unread, 0);
	pages->data_size = 0;
	/* Don't reset backend page and virt addresses */
}
/* Don't reset num_pages_per_subbuf, cpu, allocated */
struct channel *chan = caa_container_of(chanb, struct channel, backend);
unsigned int i;
int ret;
- size_t shmsize = 0, bufshmsize = 0, num_subbuf_alloc;
+ size_t shmsize = 0, num_subbuf_alloc;
if (!name)
return -EPERM;
num_subbuf_alloc = num_subbuf + 1;
shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_pages_shmp));
shmsize += sizeof(struct lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
- shmsize += offset_align(bufshmsize, PAGE_SIZE);
+ shmsize += offset_align(shmsize, PAGE_SIZE);
shmsize += subbuf_size * num_subbuf_alloc;
- shmsize += offset_align(bufshmsize, __alignof__(struct lib_ring_buffer_backend_pages));
+ shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_pages));
shmsize += sizeof(struct lib_ring_buffer_backend_pages) * num_subbuf_alloc;
- shmsize += offset_align(bufshmsize, __alignof__(struct lib_ring_buffer_backend_subbuffer));
+ shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_subbuffer));
shmsize += sizeof(struct lib_ring_buffer_backend_subbuffer) * num_subbuf;
/* Per-cpu buffer size: control (after backend) */
shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
struct shm_object *shmobj;
shmobj = shm_object_table_append(handle->table, shmsize);
+ if (!shmobj)
+ goto end;
align_shm(shmobj, __alignof__(struct lib_ring_buffer));
set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lib_ring_buffer)));
buf = shmp(handle, chanb->buf[i].shmp);
struct lib_ring_buffer *buf;
shmobj = shm_object_table_append(handle->table, shmsize);
+ if (!shmobj)
+ goto end;
align_shm(shmobj, __alignof__(struct lib_ring_buffer));
set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lib_ring_buffer)));
buf = shmp(handle, chanb->buf[0].shmp);
return 0;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = &shmp(handle, bufb->array)[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
/*
* Underlying layer should never ask for reads across
* subbuffers.
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- memcpy(dest, shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1)), len);
+ memcpy(dest, shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1)), len);
return orig_len;
}
orig_offset = offset;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = &shmp(handle, bufb->array)[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
/*
* Underlying layer should never ask for reads across
* subbuffers.
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- str = (char *)shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+ str = shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
string_len = strnlen(str, len);
if (dest && len) {
memcpy(dest, str, string_len);
offset &= chanb->buf_size - 1;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = &shmp(handle, bufb->array)[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- return shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+ return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
}
/**
struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
const struct lib_ring_buffer_config *config = chanb->config;
unsigned long sb_bindex, id;
+ void *ret;
offset &= chanb->buf_size - 1;
sbidx = offset >> chanb->subbuf_size_order;
- id = shmp(handle, bufb->buf_wsb)[sbidx].id;
+ id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
sb_bindex = subbuffer_id_get_index(config, id);
- rpages = &shmp(handle, bufb->array)[sb_bindex];
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- return shmp(handle, shmp(handle, rpages->shmp)->p) + (offset & ~(chanb->subbuf_size - 1));
+ return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
}