/*
 * urcu-defer.c
 *
 * Userspace RCU library - batch memory reclamation
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <syscall.h>
#include <unistd.h>

#include "urcu/urcu-futex.h"
#include "urcu-defer-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-defer.h"

void __attribute__((destructor)) rcu_defer_exit(void);

extern void synchronize_rcu(void);

/*
 * rcu_defer_mutex nests inside defer_thread_mutex.
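 * That is, defer_thread_mutex is always acquired first, and rcu_defer_mutex
 * is only taken while it is held; see rcu_defer_register_thread() and
 * rcu_defer_unregister_thread() below.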
 */
static pthread_mutex_t rcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;

static int defer_thread_futex;

/*
 * Written to only by each individual deferer. Read by both the deferer and
 * the reclamation thread.
 */
static struct defer_queue __thread defer_queue;
static CDS_LIST_HEAD(registry);
static pthread_t tid_defer;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

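	/*
	 * With DISTRUST_SIGNALS_EXTREME defined, never block inside
	 * pthread_mutex_lock(): spin on trylock instead, tolerating EBUSY
	 * and EINTR, honoring pending cancellation, and sleeping 10ms
	 * between attempts.
	 */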
#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		pthread_testcancel();
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Wake-up any waiting defer thread. Called from many concurrent threads.
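 * A defer_thread_futex value of -1 means the defer thread is sleeping (or
 * committing to sleep) on the futex; resetting it to 0 before FUTEX_WAKE
 * makes the defer thread re-check its queues rather than go back to sleep.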
 */
static void wake_up_defer(void)
{
	if (unlikely(uatomic_read(&defer_thread_futex) == -1)) {
		uatomic_set(&defer_thread_futex, 0);
		futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
				NULL, NULL, 0);
	}
}

static unsigned long rcu_defer_num_callbacks(void)
{
	unsigned long num_items = 0, head;
	struct defer_queue *index;

	mutex_lock(&rcu_defer_mutex);
	cds_list_for_each_entry(index, &registry, list) {
		head = CMM_LOAD_SHARED(index->head);
		num_items += head - index->tail;
	}
	mutex_unlock(&rcu_defer_mutex);
	return num_items;
}

/*
 * Defer thread waiting. Single thread.
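 *
 * Decrement defer_thread_futex to -1 to announce the intent to sleep, then
 * re-check the queues: if callbacks were enqueued in the meantime, restore
 * the futex to 0 and return; otherwise sleep until wake_up_defer() wakes us.
 * The "write futex before read queue" barrier below pairs with the "write
 * queue head before read futex" barrier in _defer_rcu().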
 */
static void wait_defer(void)
{
	uatomic_dec(&defer_thread_futex);
	cmm_smp_mb();	/* Write futex before read queue */
	if (rcu_defer_num_callbacks()) {
		cmm_smp_mb();	/* Read queue before write futex */
		/* Callbacks are queued, don't wait. */
		uatomic_set(&defer_thread_futex, 0);
	} else {
		cmm_smp_rmb();	/* Read queue before read futex */
		if (uatomic_read(&defer_thread_futex) == -1)
			futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1,
					NULL, NULL, 0);
	}
}

/*
 * Must be called after a quiescent state (Q.S.) is reached.
 */
static void rcu_defer_barrier_queue(struct defer_queue *queue,
				    unsigned long head)
{
	unsigned long i;
	void (*fct)(void *p);
	void *p;

	/*
	 * Tail is only modified when lock is held.
	 * Head is only modified by owner thread.
	 */

	for (i = queue->tail; i != head;) {
		cmm_smp_rmb();	/* read head before q[]. */
		p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		if (unlikely(DQ_IS_FCT_BIT(p))) {
			DQ_CLEAR_FCT_BIT(p);
			queue->last_fct_out = p;
			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		} else if (unlikely(p == DQ_FCT_MARK)) {
			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
			queue->last_fct_out = p;
			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		}
		fct = queue->last_fct_out;
		fct(p);
	}
	cmm_smp_mb();	/* push tail after having used q[] */
	CMM_STORE_SHARED(queue->tail, i);
}

static void _rcu_defer_barrier_thread(void)
{
	unsigned long head, num_items;

	head = defer_queue.head;
	num_items = head - defer_queue.tail;
	if (unlikely(!num_items))
		return;
	synchronize_rcu();
	rcu_defer_barrier_queue(&defer_queue, head);
}

void rcu_defer_barrier_thread(void)
{
	mutex_lock(&rcu_defer_mutex);
	_rcu_defer_barrier_thread();
	mutex_unlock(&rcu_defer_mutex);
}

/*
 * rcu_defer_barrier - Execute all queued rcu callbacks.
 *
 * Execute all RCU callbacks queued before rcu_defer_barrier() execution.
 * All callbacks queued on the local thread prior to an rcu_defer_barrier()
 * call are guaranteed to be executed.
 * Callbacks queued by other threads concurrently with rcu_defer_barrier()
 * execution are not guaranteed to be executed in the current batch (they
 * could be left for the next batch). Those callbacks are only guaranteed
 * to be executed if there is explicit synchronization between the thread
 * adding to the queue and the thread issuing the defer_barrier call.
 */

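/*
 * Usage sketch (illustrative only, not part of the library): a thread that
 * has called rcu_defer_register_thread() queues reclamation with defer_rcu()
 * and may force the backlog to run after a grace period. Here "node" is a
 * hypothetical pointer removed from an RCU data structure:
 *
 *	defer_rcu(free, node);	queue free(node) for after a grace period
 *	...
 *	rcu_defer_barrier();	execute all queued callbacks
 */
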
void rcu_defer_barrier(void)
{
	struct defer_queue *index;
	unsigned long num_items = 0;

	if (cds_list_empty(&registry))
		return;

	mutex_lock(&rcu_defer_mutex);
	cds_list_for_each_entry(index, &registry, list) {
		index->last_head = CMM_LOAD_SHARED(index->head);
		num_items += index->last_head - index->tail;
	}
	if (likely(!num_items)) {
		/*
		 * We skip the grace period because there are no queued
		 * callbacks to execute.
		 */
		goto end;
	}
	synchronize_rcu();
	cds_list_for_each_entry(index, &registry, list)
		rcu_defer_barrier_queue(index, index->last_head);
end:
	mutex_unlock(&rcu_defer_mutex);
}

/*
 * _defer_rcu - Queue an RCU callback.
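 *
 * Queue layout: q[] is a ring buffer of data pointers. When the callback
 * function changes from the previously enqueued one, the new function
 * pointer is encoded in-band: with its low-order bit set (DQ_FCT_BIT) when
 * the pointer is suitably aligned, or as a DQ_FCT_MARK word followed by the
 * raw function pointer otherwise. rcu_defer_barrier_queue() decodes this
 * format when it runs the callbacks.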
 */
void _defer_rcu(void (*fct)(void *p), void *p)
{
	unsigned long head, tail;

	/*
	 * Head is only modified by ourselves. Tail can be modified by the
	 * reclamation thread.
	 */
	head = defer_queue.head;
	tail = CMM_LOAD_SHARED(defer_queue.tail);

	/*
	 * If the queue is full or has reached the threshold, empty it
	 * ourselves. Worst-case: we must allow 2 supplementary entries
	 * for the fct pointer.
	 */
	if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
		assert(head - tail <= DEFER_QUEUE_SIZE);
		rcu_defer_barrier_thread();
		assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
	}

	if (unlikely(defer_queue.last_fct_in != fct)) {
		defer_queue.last_fct_in = fct;
		if (unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
			/*
			 * If the function to encode is not aligned or the
			 * marker, write DQ_FCT_MARK followed by the function
			 * pointer.
			 */
			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
					  DQ_FCT_MARK);
			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
					  fct);
		} else {
			DQ_SET_FCT_BIT(fct);
			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
					  fct);
		}
	} else {
		if (unlikely(DQ_IS_FCT_BIT(p) || p == DQ_FCT_MARK)) {
			/*
			 * If the data to encode is not aligned or the marker,
			 * write DQ_FCT_MARK followed by the function pointer.
			 */
			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
					  DQ_FCT_MARK);
			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
					  fct);
		}
	}
	_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
	cmm_smp_wmb();	/* Publish new pointer before head */
	/* Write q[] before head. */
	CMM_STORE_SHARED(defer_queue.head, head);
	cmm_smp_mb();	/* Write queue head before read futex */
	/*
	 * Wake-up any waiting defer thread.
	 */
	wake_up_defer();
}

void *thr_defer(void *args)
{
	for (;;) {
		pthread_testcancel();
		/*
		 * "Be green". Don't wake up the CPU if there is no RCU work
		 * to perform whatsoever. Aims at saving laptop battery life
		 * by leaving the processor in sleep state when idle.
		 */
		wait_defer();
		/* Sleeping after wait_defer to let many callbacks enqueue */
		poll(NULL, 0, 100);	/* wait for 100ms */
		rcu_defer_barrier();
	}

	return NULL;
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void defer_rcu(void (*fct)(void *p), void *p)
{
	_defer_rcu(fct, p);
}

static void start_defer_thread(void)
{
	int ret;

	ret = pthread_create(&tid_defer, NULL, thr_defer, NULL);
	assert(!ret);
}

static void stop_defer_thread(void)
{
	int ret;
	void *tret;

	pthread_cancel(tid_defer);
	wake_up_defer();
	ret = pthread_join(tid_defer, &tret);
	assert(!ret);
}

void rcu_defer_register_thread(void)
{
	int was_empty;

	assert(defer_queue.last_head == 0);
	assert(defer_queue.q == NULL);
	defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
	assert(defer_queue.q);	/* abort on allocation failure */

	mutex_lock(&defer_thread_mutex);
	mutex_lock(&rcu_defer_mutex);
	was_empty = cds_list_empty(&registry);
	cds_list_add(&defer_queue.list, &registry);
	mutex_unlock(&rcu_defer_mutex);

	if (was_empty)
		start_defer_thread();
	mutex_unlock(&defer_thread_mutex);
}

void rcu_defer_unregister_thread(void)
{
	int is_empty;

	mutex_lock(&defer_thread_mutex);
	mutex_lock(&rcu_defer_mutex);
	cds_list_del(&defer_queue.list);
	_rcu_defer_barrier_thread();
	free(defer_queue.q);
	defer_queue.q = NULL;
	is_empty = cds_list_empty(&registry);
	mutex_unlock(&rcu_defer_mutex);

	if (is_empty)
		stop_defer_thread();
	mutex_unlock(&defer_thread_mutex);
}

void rcu_defer_exit(void)
{
	assert(cds_list_empty(&registry));
}
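
/*
 * Lifecycle sketch (hypothetical caller code, for illustration only): each
 * thread using defer_rcu() registers first; the first registration spawns
 * the defer thread, and the last unregistration, which also flushes the
 * departing thread's queue, stops it. Here "old_node" is a hypothetical
 * pointer already removed from an RCU data structure:
 *
 *	rcu_defer_register_thread();
 *	...
 *	defer_rcu(free, old_node);
 *	...
 *	rcu_defer_unregister_thread();
 */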