Fix teardown deadlock
[lttng-modules.git] / lib / ringbuffer / frontend_types.h
1 #ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
2 #define _LINUX_RING_BUFFER_FRONTEND_TYPES_H
3
4 /*
5 * linux/ringbuffer/frontend_types.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (types).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
19 #include <linux/kref.h>
20 #include "../../wrapper/ringbuffer/config.h"
21 #include "../../wrapper/ringbuffer/backend_types.h"
22 #include "../../wrapper/prio_heap.h" /* For per-CPU read-side iterator */
23
/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 *
 * SWITCH_ACTIVE: sub-buffer switch performed while tracing is still
 * active; subsequent records go to the new sub-buffer.
 * SWITCH_FLUSH:  final switch (flush) after tracing; nothing will be
 * written in the new sub-buffer afterwards.
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
29
/*
 * channel-level read-side iterator.
 *
 * Merges the per-CPU buffers of a channel into a single, timestamp-ordered
 * record stream for the channel-level read()/iterator interface.
 */
struct channel_iter {
	/* Prio heap of buffers. Lowest timestamps at the top. */
	struct ptr_heap heap;		/* Heap of struct lib_ring_buffer ptrs */
	struct list_head empty_head;	/* Empty buffers linked-list head */
	int read_open;			/* Opened for reading ? */
	u64 last_qs;			/* Last quiescent state timestamp */
	u64 last_timestamp;		/* Last timestamp (for WARN_ON) */
	int last_cpu;			/* Last timestamp cpu */
	/*
	 * read() file operation state.
	 * NOTE(review): presumably the number of record bytes still to be
	 * copied out by the read() in progress — confirm against the
	 * iterator implementation in ring_buffer_iterator.c.
	 */
	unsigned long len_left;
};
44
45 /* channel: collection of per-cpu ring buffers. */
46 struct channel {
47 atomic_t record_disabled;
48 unsigned long commit_count_mask; /*
49 * Commit count mask, removing
50 * the MSBs corresponding to
51 * bits used to represent the
52 * subbuffer index.
53 */
54
55 struct channel_backend backend; /* Associated backend */
56
57 unsigned long switch_timer_interval; /* Buffer flush (jiffies) */
58 unsigned long read_timer_interval; /* Reader wakeup (jiffies) */
59 struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
60 struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
61 struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */
62 int cpu_hp_enable:1; /* Enable CPU hotplug notif. */
63 int hp_iter_enable:1; /* Enable hp iter notif. */
64 wait_queue_head_t read_wait; /* reader wait queue */
65 struct channel_iter iter; /* Channel read-side iterator */
66 struct kref ref; /* Reference count */
67 };
68
/*
 * Per-subbuffer commit counters used on the hot path.
 * Kept separate from the cold counters so the write fast path only
 * touches this structure.
 */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
};
74
/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
};
79
80 /* Per-buffer read iterator */
81 struct lib_ring_buffer_iter {
82 u64 timestamp; /* Current record timestamp */
83 size_t header_len; /* Current record header length */
84 size_t payload_len; /* Current record payload length */
85
86 struct list_head empty_node; /* Linked list of empty buffers */
87 unsigned long consumed, read_offset, data_size;
88 enum {
89 ITER_GET_SUBBUF = 0,
90 ITER_TEST_RECORD,
91 ITER_NEXT_RECORD,
92 ITER_PUT_SUBBUF,
93 } state;
94 int allocated:1;
95 int read_open:1; /* Opened for reading ? */
96 };
97
98 /* ring buffer state */
99 struct lib_ring_buffer {
100 /* First 32 bytes cache-hot cacheline */
101 union v_atomic offset; /* Current offset in the buffer */
102 struct commit_counters_hot *commit_hot;
103 /* Commit count per sub-buffer */
104 atomic_long_t consumed; /*
105 * Current offset in the buffer
106 * standard atomic access (shared)
107 */
108 atomic_t record_disabled;
109 /* End of first 32 bytes cacheline */
110 union v_atomic last_tsc; /*
111 * Last timestamp written in the buffer.
112 */
113
114 struct lib_ring_buffer_backend backend; /* Associated backend */
115
116 struct commit_counters_cold *commit_cold;
117 /* Commit count per sub-buffer */
118 atomic_long_t active_readers; /*
119 * Active readers count
120 * standard atomic access (shared)
121 */
122 /* Dropped records */
123 union v_atomic records_lost_full; /* Buffer full */
124 union v_atomic records_lost_wrap; /* Nested wrap-around */
125 union v_atomic records_lost_big; /* Events too big */
126 union v_atomic records_count; /* Number of records written */
127 union v_atomic records_overrun; /* Number of overwritten records */
128 wait_queue_head_t read_wait; /* reader buffer-level wait queue */
129 int finalized; /* buffer has been finalized */
130 struct timer_list switch_timer; /* timer for periodical switch */
131 struct timer_list read_timer; /* timer for read poll */
132 raw_spinlock_t raw_tick_nohz_spinlock; /* nohz entry lock/trylock */
133 struct lib_ring_buffer_iter iter; /* read-side iterator */
134 unsigned long get_subbuf_consumed; /* Read-side consumed */
135 unsigned long prod_snapshot; /* Producer count snapshot */
136 unsigned long cons_snapshot; /* Consumer count snapshot */
137 int get_subbuf:1; /* Sub-buffer being held by reader */
138 int switch_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
139 int read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
140 };
141
/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct lib_ring_buffer or struct lib_ring_buffer_backend
 * parameters.
 *
 * "c" must point to either a struct channel or a struct channel_backend
 * embedded in one; __same_type()/container_of() recover the enclosing
 * channel at compile time, and any other pointer type hits BUG_ON(1).
 *
 * On a failed check, the channel's record_disabled counter is
 * incremented — stopping further tracing into that channel — and a
 * WARN_ON(1) backtrace is emitted.  Like WARN_ON(), the statement
 * expression evaluates to the truth value of "cond" so it can be used
 * directly inside an if () condition.  "cond" is evaluated exactly once.
 *
 * NOTE(review): __chan and _____ret start with a double underscore,
 * which C reserves for the implementation; kept as-is to match kernel
 * macro conventions and to avoid shadowing caller locals.
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct channel *__chan;					\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(c), struct channel_backend))	\
				__chan = container_of((void *) (c),	\
						      struct channel,	\
						      backend);		\
			else if (__same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			atomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
165
166 #endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */
This page took 0.033943 seconds and 5 git commands to generate.