#include "config.h"
#include "backend_types.h"
#include "frontend_types.h"
+#include "shm.h"
/* Buffer offset macros */
unsigned long idx)
{
if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
- v_set(config, &buf->commit_hot[idx].seq, commit_count);
+ v_set(config, &shmp(buf->commit_hot)[idx].seq, commit_count);
}
static inline
consumed_old = uatomic_read(&buf->consumed);
consumed_idx = subbuf_index(consumed_old, chan);
- commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
+ commit_count = v_read(config, &shmp(buf->commit_cold)[consumed_idx].cc_sb);
/*
* No memory barrier here, since we are only interested
* in a statistically correct polling result. The next poll will
do {
offset = v_read(config, &buf->offset);
idx = subbuf_index(offset, chan);
- commit_count = v_read(config, &buf->commit_hot[idx].cc);
+ commit_count = v_read(config, &shmp(buf->commit_hot)[idx].cc);
} while (offset != v_read(config, &buf->offset));
return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
* This guarantees that old_commit_count + 1 != commit_count.
*/
- if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
+ if (likely(v_cmpxchg(config, &shmp(buf->commit_cold)[idx].cc_sb,
old_commit_count, old_commit_count + 1)
== old_commit_count)) {
/*
*/
cmm_smp_mb();
/* End of exclusive subbuffer access */
- v_set(config, &buf->commit_cold[idx].cc_sb,
+ v_set(config, &shmp(buf->commit_cold)[idx].cc_sb,
commit_count);
lib_ring_buffer_vmcore_check_deliver(config, buf,
commit_count, idx);
if (unlikely(subbuf_offset(offset - commit_count, chan)))
return;
- commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
+ commit_seq_old = v_read(config, &shmp(buf->commit_hot)[idx].seq);
while ((long) (commit_seq_old - commit_count) < 0)
- commit_seq_old = v_cmpxchg(config, &buf->commit_hot[idx].seq,
+ commit_seq_old = v_cmpxchg(config, &shmp(buf->commit_hot)[idx].seq,
commit_seq_old, commit_count);
}
extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
- struct channel_backend *chanb, int cpu);
+ struct channel_backend *chanb, int cpu,
+ struct shm_header *shm_header);
extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
/* Keep track of trap nesting inside ring buffer code */