#ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
#define _LINUX_RING_BUFFER_FRONTEND_TYPES_H

/*
 * linux/ringbuffer/frontend_types.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (types).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <linux/kref.h>
#include "../../wrapper/ringbuffer/config.h"
#include "../../wrapper/ringbuffer/backend_types.h"
#include "../../lib/prio_heap/lttng_prio_heap.h"	/* For per-CPU read-side iterator */

/*
 * Kind of sub-buffer switch being requested.
 *
 * SWITCH_ACTIVE: switch performed while tracing is running; subsequent
 * events are written into the new sub-buffer.
 * SWITCH_FLUSH: final flush performed after tracing stops, so nothing
 * will be written into the new sub-buffer.
 */
enum switch_mode {
	SWITCH_ACTIVE = 0,	/* switch during tracing */
	SWITCH_FLUSH = 1,	/* final flush after tracing */
};
29 | ||
/*
 * Channel-level read-side iterator: merges the channel's per-CPU buffers
 * into a single record stream ordered by timestamp (via the prio heap).
 */
struct channel_iter {
	/* Prio heap of buffers. Lowest timestamps at the top. */
	struct lttng_ptr_heap heap;	/* Heap of struct lib_ring_buffer ptrs */
	struct list_head empty_head;	/* Empty buffers linked-list head */
	int read_open;			/* Opened for reading ? */
	u64 last_qs;			/* Last quiescent state timestamp */
	u64 last_timestamp;		/* Last timestamp (for WARN_ON) */
	int last_cpu;			/* Last timestamp cpu */
	/*
	 * read() file operation state.
	 */
	unsigned long len_left;		/*
					 * NOTE(review): presumably the number
					 * of record bytes left to deliver from
					 * a partially-consumed read() — confirm
					 * against the iterator implementation.
					 */
};
44 | ||
45 | /* channel: collection of per-cpu ring buffers. */ | |
46 | struct channel { | |
47 | atomic_t record_disabled; | |
48 | unsigned long commit_count_mask; /* | |
49 | * Commit count mask, removing | |
50 | * the MSBs corresponding to | |
51 | * bits used to represent the | |
52 | * subbuffer index. | |
53 | */ | |
54 | ||
55 | struct channel_backend backend; /* Associated backend */ | |
56 | ||
57 | unsigned long switch_timer_interval; /* Buffer flush (jiffies) */ | |
58 | unsigned long read_timer_interval; /* Reader wakeup (jiffies) */ | |
59 | struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */ | |
60 | struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */ | |
61 | struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */ | |
62 | int cpu_hp_enable:1; /* Enable CPU hotplug notif. */ | |
63 | int hp_iter_enable:1; /* Enable hp iter notif. */ | |
64 | wait_queue_head_t read_wait; /* reader wait queue */ | |
24cedcfe MD |
65 | wait_queue_head_t hp_wait; /* CPU hotplug wait queue */ |
66 | int finalized; /* Has channel been finalized */ | |
f3bc08c5 | 67 | struct channel_iter iter; /* Channel read-side iterator */ |
f40270ad | 68 | struct kref ref; /* Reference count */ |
f3bc08c5 MD |
69 | }; |
70 | ||
/*
 * Per-subbuffer commit counters used on the hot path.
 * Kept in a separate structure from the cold-path counters so the
 * write-hot fields can be laid out independently of rarely-touched state.
 */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
};
76 | ||
/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
};
81 | ||
82 | /* Per-buffer read iterator */ | |
83 | struct lib_ring_buffer_iter { | |
84 | u64 timestamp; /* Current record timestamp */ | |
85 | size_t header_len; /* Current record header length */ | |
86 | size_t payload_len; /* Current record payload length */ | |
87 | ||
88 | struct list_head empty_node; /* Linked list of empty buffers */ | |
89 | unsigned long consumed, read_offset, data_size; | |
90 | enum { | |
91 | ITER_GET_SUBBUF = 0, | |
92 | ITER_TEST_RECORD, | |
93 | ITER_NEXT_RECORD, | |
94 | ITER_PUT_SUBBUF, | |
95 | } state; | |
96 | int allocated:1; | |
97 | int read_open:1; /* Opened for reading ? */ | |
98 | }; | |
99 | ||
100 | /* ring buffer state */ | |
101 | struct lib_ring_buffer { | |
102 | /* First 32 bytes cache-hot cacheline */ | |
103 | union v_atomic offset; /* Current offset in the buffer */ | |
104 | struct commit_counters_hot *commit_hot; | |
105 | /* Commit count per sub-buffer */ | |
106 | atomic_long_t consumed; /* | |
107 | * Current offset in the buffer | |
108 | * standard atomic access (shared) | |
109 | */ | |
110 | atomic_t record_disabled; | |
111 | /* End of first 32 bytes cacheline */ | |
112 | union v_atomic last_tsc; /* | |
113 | * Last timestamp written in the buffer. | |
114 | */ | |
115 | ||
116 | struct lib_ring_buffer_backend backend; /* Associated backend */ | |
117 | ||
118 | struct commit_counters_cold *commit_cold; | |
119 | /* Commit count per sub-buffer */ | |
120 | atomic_long_t active_readers; /* | |
121 | * Active readers count | |
122 | * standard atomic access (shared) | |
123 | */ | |
124 | /* Dropped records */ | |
125 | union v_atomic records_lost_full; /* Buffer full */ | |
126 | union v_atomic records_lost_wrap; /* Nested wrap-around */ | |
127 | union v_atomic records_lost_big; /* Events too big */ | |
128 | union v_atomic records_count; /* Number of records written */ | |
129 | union v_atomic records_overrun; /* Number of overwritten records */ | |
130 | wait_queue_head_t read_wait; /* reader buffer-level wait queue */ | |
131 | int finalized; /* buffer has been finalized */ | |
132 | struct timer_list switch_timer; /* timer for periodical switch */ | |
133 | struct timer_list read_timer; /* timer for read poll */ | |
134 | raw_spinlock_t raw_tick_nohz_spinlock; /* nohz entry lock/trylock */ | |
135 | struct lib_ring_buffer_iter iter; /* read-side iterator */ | |
136 | unsigned long get_subbuf_consumed; /* Read-side consumed */ | |
137 | unsigned long prod_snapshot; /* Producer count snapshot */ | |
138 | unsigned long cons_snapshot; /* Consumer count snapshot */ | |
139 | int get_subbuf:1; /* Sub-buffer being held by reader */ | |
140 | int switch_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */ | |
141 | int read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */ | |
142 | }; | |
143 | ||
9115fbdc MD |
144 | static inline |
145 | void *channel_get_private(struct channel *chan) | |
146 | { | |
147 | return chan->backend.priv; | |
148 | } | |
149 | ||
/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct lib_ring_buffer or struct lib_ring_buffer_backend
 * parameters.
 *
 * Evaluates to the boolean result of "cond" (like WARN_ON()), so callers
 * can write: if (CHAN_WARN_ON(chan, bad)) return;
 *
 * When "cond" is true:
 *  - "c" is resolved to its owning struct channel: either it already is a
 *    struct channel *, or it is the embedded struct channel_backend and is
 *    mapped back with container_of(). Any other pointer type is a caller
 *    bug and triggers BUG_ON(1). The type dispatch uses __same_type(), so
 *    it is decided at compile time.
 *  - record_disabled is incremented, which stops event recording on every
 *    buffer of that channel (checked on the write path).
 *  - WARN_ON(1) emits a backtrace.
 *
 * Implemented as a GNU C statement expression so it can perform the
 * disable side effect and still yield a value.
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct channel *__chan;					\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(c), struct channel_backend))	\
				__chan = container_of((void *) (c),	\
						      struct channel,	\
						      backend);		\
			else if (__same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			atomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

#endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */