/*
 * Allocate backend pages array elements.
 * Use shmp_index() to dereference array elements within the shm
 * object rather than indexing the raw shmp() pointer, so the
 * per-element offset is computed (and validated) by the shm layer.
 */
for (i = 0; i < num_subbuf_alloc; i++) {
	align_shm(shmobj, __alignof__(struct lib_ring_buffer_backend_pages));
	set_shmp(shmp_index(handle, bufb->array, i)->shmp,
		 zalloc_shm(shmobj,
			    sizeof(struct lib_ring_buffer_backend_pages)));
	if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
struct channel *chan = caa_container_of(chanb, struct channel, backend);
unsigned int i;
int ret;
- size_t shmsize = 0, bufshmsize = 0, num_subbuf_alloc;
+ size_t shmsize = 0, num_subbuf_alloc;
if (!name)
return -EPERM;
num_subbuf_alloc = num_subbuf + 1;
shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_pages_shmp));
shmsize += sizeof(struct lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
- shmsize += offset_align(bufshmsize, PAGE_SIZE);
+ shmsize += offset_align(shmsize, PAGE_SIZE);
shmsize += subbuf_size * num_subbuf_alloc;
- shmsize += offset_align(bufshmsize, __alignof__(struct lib_ring_buffer_backend_pages));
+ shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_pages));
shmsize += sizeof(struct lib_ring_buffer_backend_pages) * num_subbuf_alloc;
- shmsize += offset_align(bufshmsize, __alignof__(struct lib_ring_buffer_backend_subbuffer));
+ shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer_backend_subbuffer));
shmsize += sizeof(struct lib_ring_buffer_backend_subbuffer) * num_subbuf;
/* Per-cpu buffer size: control (after backend) */
shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
struct shm_object *shmobj;
shmobj = shm_object_table_append(handle->table, shmsize);
+ if (!shmobj)
+ goto end;
align_shm(shmobj, __alignof__(struct lib_ring_buffer));
set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lib_ring_buffer)));
buf = shmp(handle, chanb->buf[i].shmp);
struct lib_ring_buffer *buf;
shmobj = shm_object_table_append(handle->table, shmsize);
+ if (!shmobj)
+ goto end;
align_shm(shmobj, __alignof__(struct lib_ring_buffer));
set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lib_ring_buffer)));
buf = shmp(handle, chanb->buf[0].shmp);
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- memcpy(dest, shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1)), len);
+ memcpy(dest, shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1)), len);
return orig_len;
}
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- str = shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1));
+ str = shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
string_len = strnlen(str, len);
if (dest && len) {
memcpy(dest, str, string_len);
rpages = shmp_index(handle, bufb->array, sb_bindex);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1));
+ return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
}
/**
struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
const struct lib_ring_buffer_config *config = chanb->config;
unsigned long sb_bindex, id;
+ void *ret;
offset &= chanb->buf_size - 1;
sbidx = offset >> chanb->subbuf_size_order;
rpages = shmp_index(handle, bufb->array, sb_bindex);
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & ~(chanb->subbuf_size - 1));
+ return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
}