Import lib ring buffer into LTTng modules
[lttng-modules.git] / lib / ringbuffer / frontend_types.h
1 #ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
2 #define _LINUX_RING_BUFFER_FRONTEND_TYPES_H
3
4 /*
5 * linux/ringbuffer/frontend_types.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (types).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
19 #include "../../wrapper/ringbuffer/config.h"
20 #include "../../wrapper/ringbuffer/backend_types.h"
21 #include "../../wrapper/prio_heap.h" /* For per-CPU read-side iterator */
22
/*
 * Sub-buffer switch modes. A switch happens either while tracing is
 * still active (SWITCH_ACTIVE) or as the final flush when tracing ends
 * (SWITCH_FLUSH, which must not write into the new sub-buffer).
 */
enum switch_mode {
	SWITCH_ACTIVE,	/* Switch performed during tracing */
	SWITCH_FLUSH	/* Final flush: do not touch the next sub-buffer */
};
28
/*
 * channel-level read-side iterator: merges the per-CPU buffers of a
 * channel into a single timestamp-ordered stream for the read() path.
 */
struct channel_iter {
	/* Prio heap of buffers. Lowest timestamps at the top. */
	struct ptr_heap heap;		/* Heap of struct lib_ring_buffer ptrs */
	struct list_head empty_head;	/* Empty buffers linked-list head */
	int read_open;			/* Opened for reading ? */
	u64 last_qs;			/* Last quiescent state timestamp */
	u64 last_timestamp;		/* Last timestamp (for WARN_ON) */
	int last_cpu;			/* Last timestamp cpu */
	/*
	 * read() file operation state.
	 * len_left: bytes of the current record not yet copied to
	 * user-space by a previous (short) read() call.
	 */
	unsigned long len_left;
};
43
44 /* channel: collection of per-cpu ring buffers. */
45 struct channel {
46 atomic_t record_disabled;
47 unsigned long commit_count_mask; /*
48 * Commit count mask, removing
49 * the MSBs corresponding to
50 * bits used to represent the
51 * subbuffer index.
52 */
53
54 struct channel_backend backend; /* Associated backend */
55
56 unsigned long switch_timer_interval; /* Buffer flush (jiffies) */
57 unsigned long read_timer_interval; /* Reader wakeup (jiffies) */
58 struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
59 struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
60 struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */
61 int cpu_hp_enable:1; /* Enable CPU hotplug notif. */
62 int hp_iter_enable:1; /* Enable hp iter notif. */
63 wait_queue_head_t read_wait; /* reader wait queue */
64 struct channel_iter iter; /* Channel read-side iterator */
65 atomic_long_t read_ref; /* Reader reference count */
66 };
67
/*
 * Per-subbuffer commit counters used on the hot path.
 * Kept separate from the cold counters so the write fast path only
 * touches this cacheline-sized structure.
 */
struct commit_counters_hot {
	union v_atomic cc;	/* Commit counter */
	union v_atomic seq;	/* Consecutive commits */
};
73
/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;	/* Incremented _once_ at sb switch */
};
78
79 /* Per-buffer read iterator */
80 struct lib_ring_buffer_iter {
81 u64 timestamp; /* Current record timestamp */
82 size_t header_len; /* Current record header length */
83 size_t payload_len; /* Current record payload length */
84
85 struct list_head empty_node; /* Linked list of empty buffers */
86 unsigned long consumed, read_offset, data_size;
87 enum {
88 ITER_GET_SUBBUF = 0,
89 ITER_TEST_RECORD,
90 ITER_NEXT_RECORD,
91 ITER_PUT_SUBBUF,
92 } state;
93 int allocated:1;
94 int read_open:1; /* Opened for reading ? */
95 };
96
97 /* ring buffer state */
98 struct lib_ring_buffer {
99 /* First 32 bytes cache-hot cacheline */
100 union v_atomic offset; /* Current offset in the buffer */
101 struct commit_counters_hot *commit_hot;
102 /* Commit count per sub-buffer */
103 atomic_long_t consumed; /*
104 * Current offset in the buffer
105 * standard atomic access (shared)
106 */
107 atomic_t record_disabled;
108 /* End of first 32 bytes cacheline */
109 union v_atomic last_tsc; /*
110 * Last timestamp written in the buffer.
111 */
112
113 struct lib_ring_buffer_backend backend; /* Associated backend */
114
115 struct commit_counters_cold *commit_cold;
116 /* Commit count per sub-buffer */
117 atomic_long_t active_readers; /*
118 * Active readers count
119 * standard atomic access (shared)
120 */
121 /* Dropped records */
122 union v_atomic records_lost_full; /* Buffer full */
123 union v_atomic records_lost_wrap; /* Nested wrap-around */
124 union v_atomic records_lost_big; /* Events too big */
125 union v_atomic records_count; /* Number of records written */
126 union v_atomic records_overrun; /* Number of overwritten records */
127 wait_queue_head_t read_wait; /* reader buffer-level wait queue */
128 int finalized; /* buffer has been finalized */
129 struct timer_list switch_timer; /* timer for periodical switch */
130 struct timer_list read_timer; /* timer for read poll */
131 raw_spinlock_t raw_tick_nohz_spinlock; /* nohz entry lock/trylock */
132 struct lib_ring_buffer_iter iter; /* read-side iterator */
133 unsigned long get_subbuf_consumed; /* Read-side consumed */
134 unsigned long prod_snapshot; /* Producer count snapshot */
135 unsigned long cons_snapshot; /* Consumer count snapshot */
136 int get_subbuf:1; /* Sub-buffer being held by reader */
137 int switch_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
138 int read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
139 };
140
/*
 * Issue warnings and disable channels upon internal error.
 * Can receive a pointer to struct channel or struct channel_backend as
 * parameter (the code below dispatches on those two types, not on the
 * per-buffer structures).
 *
 * Evaluates to the (boolean) value of "cond". On error it increments
 * record_disabled on the owning channel, stopping further tracing on
 * it, and emits a kernel warning backtrace via WARN_ON(1).
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct channel *__chan;					\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(c), struct channel_backend))	\
				__chan = container_of((void *) (c),	\
						      struct channel,	\
						      backend);		\
			else if (__same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			atomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
164
165 #endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */
This page took 0.03334 seconds and 5 git commands to generate.