/*
 * urcu-defer.c
 *
 * Userspace RCU library - batch memory reclamation
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <syscall.h>
#include <unistd.h>

#include "urcu/urcu-futex.h"
#include "urcu-defer-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-defer.h"

void __attribute__((destructor)) urcu_defer_exit(void);

extern void synchronize_rcu(void);

/*
 * urcu_defer_mutex nests inside defer_thread_mutex.
 */
static pthread_mutex_t urcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;

static int defer_thread_futex;

/*
 * Written to only by each individual deferer. Read by both the deferer and
 * the reclamation thread.
 */
static struct defer_queue __thread defer_queue;
static LIST_HEAD(registry);
static pthread_t tid_defer;

static void internal_urcu_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		pthread_testcancel();
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void internal_urcu_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Wake-up any waiting defer thread. Called from many concurrent threads.
 */
static void wake_up_defer(void)
{
	if (unlikely(uatomic_read(&defer_thread_futex) == -1)) {
		uatomic_set(&defer_thread_futex, 0);
		futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
			      NULL, NULL, 0);
	}
}

static unsigned long rcu_defer_num_callbacks(void)
{
	unsigned long num_items = 0, head;
	struct defer_queue *index;

	internal_urcu_lock(&urcu_defer_mutex);
	list_for_each_entry(index, &registry, list) {
		head = LOAD_SHARED(index->head);
		num_items += head - index->tail;
	}
	internal_urcu_unlock(&urcu_defer_mutex);
	return num_items;
}

/*
 * Defer thread waiting. Single thread.
 */
static void wait_defer(void)
{
	uatomic_dec(&defer_thread_futex);
	smp_mb();	/* Write futex before read queue */
	if (rcu_defer_num_callbacks()) {
		smp_mb();	/* Read queue before write futex */
		/* Callbacks are queued, don't wait. */
		uatomic_set(&defer_thread_futex, 0);
	} else {
		smp_rmb();	/* Read queue before read futex */
		if (uatomic_read(&defer_thread_futex) == -1)
			futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1,
				      NULL, NULL, 0);
	}
}
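
#if 0	/* Illustrative sketch only; never compiled into the library. */
/*
 * Minimal standalone rendering of the futex handshake implemented by
 * wait_defer()/wake_up_defer() above.  This sketch assumes the raw
 * Linux futex(2) syscall rather than the futex_noasync() wrapper, and
 * omits some of the finer memory barriers; every "demo_*" name is
 * hypothetical.
 */
#include <linux/futex.h>

static int demo_futex;

static long demo_futex_syscall(int *uaddr, int op, int val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Waiter: advertise intent to sleep, then re-check for pending work. */
static void demo_wait(int (*work_pending)(void))
{
	uatomic_dec(&demo_futex);		/* 0 -> -1: may go to sleep */
	smp_mb();		/* write futex before reading the queues */
	if (work_pending()) {
		uatomic_set(&demo_futex, 0);	/* work arrived: stay awake */
	} else if (uatomic_read(&demo_futex) == -1) {
		/* Sleeps only if no waker has reset the futex to 0. */
		demo_futex_syscall(&demo_futex, FUTEX_WAIT, -1);
	}
}

/* Waker: pay the syscall cost only when a waiter advertised itself. */
static void demo_wake(void)
{
	if (uatomic_read(&demo_futex) == -1) {
		uatomic_set(&demo_futex, 0);
		demo_futex_syscall(&demo_futex, FUTEX_WAKE, 1);
	}
}
#endif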

/*
 * Must be called after Q.S. is reached.
 */
static void rcu_defer_barrier_queue(struct defer_queue *queue,
				    unsigned long head)
{
	unsigned long i;
	void (*fct)(void *p);
	void *p;

	/*
	 * Tail is only modified when lock is held.
	 * Head is only modified by owner thread.
	 */

	for (i = queue->tail; i != head;) {
		smp_rmb();	/* read head before q[]. */
		p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		if (unlikely(DQ_IS_FCT_BIT(p))) {
			DQ_CLEAR_FCT_BIT(p);
			queue->last_fct_out = p;
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		} else if (unlikely(p == DQ_FCT_MARK)) {
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
			queue->last_fct_out = p;
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		}
		fct = queue->last_fct_out;
		fct(p);
	}
	smp_mb();	/* push tail after having used q[] */
	STORE_SHARED(queue->tail, i);
}

static void _rcu_defer_barrier_thread(void)
{
	unsigned long head, num_items;

	head = defer_queue.head;
	num_items = head - defer_queue.tail;
	if (unlikely(!num_items))
		return;
	synchronize_rcu();
	rcu_defer_barrier_queue(&defer_queue, head);
}

void rcu_defer_barrier_thread(void)
{
	internal_urcu_lock(&urcu_defer_mutex);
	_rcu_defer_barrier_thread();
	internal_urcu_unlock(&urcu_defer_mutex);
}

/*
 * rcu_defer_barrier - Execute all queued rcu callbacks.
 *
 * Execute all RCU callbacks queued before rcu_defer_barrier() execution.
 * All callbacks queued on the local thread prior to an rcu_defer_barrier()
 * call are guaranteed to be executed.
 * Callbacks queued by other threads concurrently with rcu_defer_barrier()
 * execution are not guaranteed to be executed in the current batch (they
 * could be left for the next batch). Such callbacks are only guaranteed
 * to be executed if there is explicit synchronization between the thread
 * adding to the queue and the thread issuing the defer_barrier call.
 */

void rcu_defer_barrier(void)
{
	struct defer_queue *index;
	unsigned long num_items = 0;

	if (list_empty(&registry))
		return;

	internal_urcu_lock(&urcu_defer_mutex);
	list_for_each_entry(index, &registry, list) {
		index->last_head = LOAD_SHARED(index->head);
		num_items += index->last_head - index->tail;
	}
	if (likely(!num_items)) {
		/*
		 * We skip the grace period because there are no queued
		 * callbacks to execute.
		 */
		goto end;
	}
	synchronize_rcu();
	list_for_each_entry(index, &registry, list)
		rcu_defer_barrier_queue(index, index->last_head);
end:
	internal_urcu_unlock(&urcu_defer_mutex);
}
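
#if 0	/* Illustrative sketch only; never compiled into the library. */
/*
 * The comment above notes that another thread's callbacks are only
 * covered by rcu_defer_barrier() given explicit synchronization.  One
 * hypothetical way to provide it is a mutex-protected flag; the names
 * here (handoff_lock, handoff_ready, producer, reclaimer) are invented
 * for illustration, and both threads are assumed to have called
 * rcu_defer_register_thread().
 */
static pthread_mutex_t handoff_lock = PTHREAD_MUTEX_INITIALIZER;
static int handoff_ready;

static void producer(void *p)
{
	rcu_defer_queue(free, p);	/* enqueue first... */
	pthread_mutex_lock(&handoff_lock);
	handoff_ready = 1;		/* ...then publish the fact */
	pthread_mutex_unlock(&handoff_lock);
}

static void reclaimer(void)
{
	int ready;

	pthread_mutex_lock(&handoff_lock);
	ready = handoff_ready;
	pthread_mutex_unlock(&handoff_lock);
	if (ready)
		rcu_defer_barrier();	/* now covers producer's callback */
}
#endif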

/*
 * _rcu_defer_queue - Queue a RCU callback.
 */
void _rcu_defer_queue(void (*fct)(void *p), void *p)
{
	unsigned long head, tail;

	/*
	 * Head is only modified by ourself. Tail can be modified by
	 * reclamation thread.
	 */
	head = defer_queue.head;
	tail = LOAD_SHARED(defer_queue.tail);

	/*
	 * If queue is full, empty it ourselves.
	 * Worst case: must allow 2 supplementary entries for fct pointer.
	 */
	if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
		assert(head - tail <= DEFER_QUEUE_SIZE);
		rcu_defer_barrier_thread();
		assert(head - LOAD_SHARED(defer_queue.tail) == 0);
	}

	if (unlikely(defer_queue.last_fct_in != fct)) {
		defer_queue.last_fct_in = fct;
		if (unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
			/*
			 * If the function to encode is not aligned or the
			 * marker, write DQ_FCT_MARK followed by the function
			 * pointer.
			 */
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      DQ_FCT_MARK);
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      fct);
		} else {
			DQ_SET_FCT_BIT(fct);
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      fct);
		}
	} else {
		if (unlikely(DQ_IS_FCT_BIT(p) || p == DQ_FCT_MARK)) {
			/*
			 * If the data to encode is not aligned or the marker,
			 * write DQ_FCT_MARK followed by the function pointer.
			 */
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      DQ_FCT_MARK);
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      fct);
		}
	}
	_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
	smp_wmb();	/* Publish new pointer before head */
			/* Write q[] before head. */
	STORE_SHARED(defer_queue.head, head);
	smp_mb();	/* Write queue head before read futex */
	/*
	 * Wake-up any waiting defer thread.
	 */
	wake_up_defer();
}
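
/*
 * Illustrative walk-through (not part of the original source): each slot
 * of q[] holds a void *.  Data pointers are stored verbatim; a change of
 * callback is encoded in-band, either as the function pointer with its
 * low bit set (DQ_FCT_BIT) or, when the pointer itself collides with the
 * encoding, as the DQ_FCT_MARK sentinel followed by the raw pointer.
 * Queueing free(p1), free(p2), then other_fct(p3) would lay out as:
 *
 *	[ free|DQ_FCT_BIT ][ p1 ][ p2 ][ other_fct|DQ_FCT_BIT ][ p3 ]
 *
 * Consecutive entries sharing the same callback store only the data
 * pointer, which is what makes batches of identical callbacks cheap.
 */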

void *thr_defer(void *args)
{
	for (;;) {
		pthread_testcancel();
		/*
		 * "Be green". Don't wake up the CPU if there is no RCU work
		 * to perform whatsoever. Aims at saving laptop battery life
		 * by leaving the processor in sleep state when idle.
		 */
		wait_defer();
		/* Sleeping after wait_defer to let many callbacks enqueue */
		poll(NULL, 0, 100);	/* wait for 100ms */
		rcu_defer_barrier();
	}

	return NULL;
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_defer_queue(void (*fct)(void *p), void *p)
{
	_rcu_defer_queue(fct, p);
}

static void start_defer_thread(void)
{
	int ret;

	ret = pthread_create(&tid_defer, NULL, thr_defer, NULL);
	assert(!ret);
}

static void stop_defer_thread(void)
{
	int ret;
	void *tret;

	pthread_cancel(tid_defer);
	wake_up_defer();
	ret = pthread_join(tid_defer, &tret);
	assert(!ret);
}

void rcu_defer_register_thread(void)
{
	int was_empty;

	assert(defer_queue.last_head == 0);
	assert(defer_queue.q == NULL);
	defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);

	internal_urcu_lock(&defer_thread_mutex);
	internal_urcu_lock(&urcu_defer_mutex);
	was_empty = list_empty(&registry);
	list_add(&defer_queue.list, &registry);
	internal_urcu_unlock(&urcu_defer_mutex);

	if (was_empty)
		start_defer_thread();
	internal_urcu_unlock(&defer_thread_mutex);
}

void rcu_defer_unregister_thread(void)
{
	int is_empty;

	internal_urcu_lock(&defer_thread_mutex);
	internal_urcu_lock(&urcu_defer_mutex);
	list_del(&defer_queue.list);
	_rcu_defer_barrier_thread();
	free(defer_queue.q);
	defer_queue.q = NULL;
	is_empty = list_empty(&registry);
	internal_urcu_unlock(&urcu_defer_mutex);

	if (is_empty)
		stop_defer_thread();
	internal_urcu_unlock(&defer_thread_mutex);
}
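
#if 0	/* Hedged usage sketch; not part of the library, never compiled. */
/*
 * Typical caller-side lifecycle, assuming an application thread that
 * replaces a shared node and defers freeing the old copy.  The names
 * (struct demo_node, demo_worker) are hypothetical; arg is assumed to
 * point to a heap-allocated node no longer reachable by new readers.
 */
struct demo_node {
	int value;
};

static void *demo_worker(void *arg)
{
	struct demo_node *old = arg;

	rcu_defer_register_thread();	/* allocates this thread's queue */

	/* Instead of calling free(old) immediately: */
	rcu_defer_queue(free, old);	/* reclaimed after a grace period */

	/* Optionally force all queued callbacks to run right now. */
	rcu_defer_barrier();

	rcu_defer_unregister_thread();	/* flushes and frees the queue */
	return NULL;
}
#endif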

void urcu_defer_exit(void)
{
	assert(list_empty(&registry));
}