/*
 * Userspace RCU library - batch memory reclamation
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdint.h>

#include "urcu/urcu-futex.h"
#include "urcu-defer-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-defer.h"
void __attribute__((destructor)) rcu_defer_exit(void);

extern void synchronize_rcu(void);
/*
 * rcu_defer_mutex nests inside defer_thread_mutex.
 */
static pthread_mutex_t rcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;

static int defer_thread_futex;
/*
 * Written to only by each individual deferer. Read by both the deferer and
 * the reclamation thread.
 */
static struct defer_queue __thread defer_queue;
static CDS_LIST_HEAD(registry);
static pthread_t tid_defer;
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		pthread_testcancel();
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
/*
 * Wake-up any waiting defer thread. Called from many concurrent threads.
 */
static void wake_up_defer(void)
{
	if (unlikely(uatomic_read(&defer_thread_futex) == -1)) {
		uatomic_set(&defer_thread_futex, 0);
		futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
			      NULL, NULL, 0);
	}
}
static unsigned long rcu_defer_num_callbacks(void)
{
	unsigned long num_items = 0, head;
	struct defer_queue *index;

	mutex_lock(&rcu_defer_mutex);
	cds_list_for_each_entry(index, &registry, list) {
		head = CMM_LOAD_SHARED(index->head);
		num_items += head - index->tail;
	}
	mutex_unlock(&rcu_defer_mutex);
	return num_items;
}
/*
 * Defer thread waiting. Single thread.
 */
static void wait_defer(void)
{
	uatomic_dec(&defer_thread_futex);
	cmm_smp_mb();	/* Write futex before read queue */
	if (rcu_defer_num_callbacks()) {
		cmm_smp_mb();	/* Read queue before write futex */
		/* Callbacks are queued, don't wait. */
		uatomic_set(&defer_thread_futex, 0);
	} else {
		cmm_smp_rmb();	/* Read queue before read futex */
		if (uatomic_read(&defer_thread_futex) == -1)
			futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1,
				      NULL, NULL, 0);
	}
}
140 * Must be called after Q.S. is reached.
142 static void rcu_defer_barrier_queue(struct defer_queue
*queue
,
146 void (*fct
)(void *p
);
150 * Tail is only modified when lock is held.
151 * Head is only modified by owner thread.
154 for (i
= queue
->tail
; i
!= head
;) {
155 cmm_smp_rmb(); /* read head before q[]. */
156 p
= CMM_LOAD_SHARED(queue
->q
[i
++ & DEFER_QUEUE_MASK
]);
157 if (unlikely(DQ_IS_FCT_BIT(p
))) {
159 queue
->last_fct_out
= p
;
160 p
= CMM_LOAD_SHARED(queue
->q
[i
++ & DEFER_QUEUE_MASK
]);
161 } else if (unlikely(p
== DQ_FCT_MARK
)) {
162 p
= CMM_LOAD_SHARED(queue
->q
[i
++ & DEFER_QUEUE_MASK
]);
163 queue
->last_fct_out
= p
;
164 p
= CMM_LOAD_SHARED(queue
->q
[i
++ & DEFER_QUEUE_MASK
]);
166 fct
= queue
->last_fct_out
;
169 cmm_smp_mb(); /* push tail after having used q[] */
170 CMM_STORE_SHARED(queue
->tail
, i
);
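/*
 * Illustrative layout of the queue entries decoded above (a sketch only;
 * the exact DQ_* encoding lives in urcu-defer-static.h):
 *
 *	[ fct1 | DQ_FCT_BIT ] [ p1 ] [ p2 ] [ p3 ]	-> fct1(p1), fct1(p2), fct1(p3)
 *	[ DQ_FCT_MARK ] [ fct2 ] [ p4 ]			-> fct2(p4)  (fct2 could not
 *							    carry the bit, so it is
 *							    spelled out after the mark)
 *	[ p5 ]						-> fct2(p5), reusing last_fct_out
 *
 * Any slot carrying DQ_FCT_BIT, or preceded by DQ_FCT_MARK, switches the
 * current callback; every other slot is a data pointer passed to it.
 */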
static void _rcu_defer_barrier_thread(void)
{
	unsigned long head, num_items;

	head = defer_queue.head;
	num_items = head - defer_queue.tail;
	if (unlikely(!num_items))
		return;
	synchronize_rcu();
	rcu_defer_barrier_queue(&defer_queue, head);
}
void rcu_defer_barrier_thread(void)
{
	mutex_lock(&rcu_defer_mutex);
	_rcu_defer_barrier_thread();
	mutex_unlock(&rcu_defer_mutex);
}
/*
 * rcu_defer_barrier - Execute all queued rcu callbacks.
 *
 * Execute all RCU callbacks queued before rcu_defer_barrier() execution.
 * All callbacks queued on the local thread prior to a rcu_defer_barrier() call
 * are guaranteed to be executed.
 * Callbacks queued by other threads concurrently with rcu_defer_barrier()
 * execution are not guaranteed to be executed in the current batch (they could
 * be left for the next batch). Such callbacks are only guaranteed to be
 * executed if there is explicit synchronization between the thread adding to
 * the queue and the thread issuing the defer_barrier call.
 */
void rcu_defer_barrier(void)
{
	struct defer_queue *index;
	unsigned long num_items = 0;

	if (cds_list_empty(&registry))
		return;

	mutex_lock(&rcu_defer_mutex);
	cds_list_for_each_entry(index, &registry, list) {
		index->last_head = CMM_LOAD_SHARED(index->head);
		num_items += index->last_head - index->tail;
	}
	if (likely(!num_items)) {
		/*
		 * We skip the grace period because there are no queued
		 * callbacks to execute.
		 */
		goto end;
	}
	synchronize_rcu();
	cds_list_for_each_entry(index, &registry, list)
		rcu_defer_barrier_queue(index, index->last_head);
end:
	mutex_unlock(&rcu_defer_mutex);
}
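/*
 * Illustrative call site (hypothetical caller code, not part of this
 * library): a thread that must be sure all previously queued callbacks
 * have run, for instance before tearing down the data they reference,
 * can force a flush:
 *
 *	rcu_defer_barrier();
 *	teardown_my_subsystem();	<- hypothetical caller function
 *
 * rcu_defer_barrier() waits for a grace period when callbacks are pending,
 * then runs every callback queued, by any registered thread, before the call.
 */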
/*
 * _defer_rcu - Queue an RCU callback.
 */
void _defer_rcu(void (*fct)(void *p), void *p)
{
	unsigned long head, tail;

	/*
	 * Head is only modified by ourself. Tail can be modified by the
	 * reclamation thread.
	 */
	head = defer_queue.head;
	tail = CMM_LOAD_SHARED(defer_queue.tail);

	/*
	 * If the queue is full or has reached the threshold, empty it ourself.
	 * Worst case: must allow 2 supplementary entries for the fct pointer.
	 */
	if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
		assert(head - tail <= DEFER_QUEUE_SIZE);
		rcu_defer_barrier_thread();
		assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
	}

	if (unlikely(defer_queue.last_fct_in != fct)) {
		defer_queue.last_fct_in = fct;
		if (unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
			/*
			 * If the function to encode is not aligned or is the
			 * marker, write DQ_FCT_MARK followed by the function
			 * pointer.
			 */
			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
					  DQ_FCT_MARK);
			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
					  fct);
		} else {
			DQ_SET_FCT_BIT(fct);
			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
					  fct);
		}
	} else {
		if (unlikely(DQ_IS_FCT_BIT(p) || p == DQ_FCT_MARK)) {
			/*
			 * If the data to encode is not aligned or is the marker,
			 * write DQ_FCT_MARK followed by the function pointer.
			 */
			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
					  DQ_FCT_MARK);
			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
					  fct);
		}
	}
	_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
	cmm_smp_wmb();	/* Publish new pointer before head */
			/* Write q[] before head. */
	CMM_STORE_SHARED(defer_queue.head, head);
	cmm_smp_mb();	/* Write queue head before read futex */
	/*
	 * Wake-up any waiting defer thread.
	 */
	wake_up_defer();
}
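/*
 * Sketch of what the enqueue path above stores for a run of calls with the
 * same callback (hypothetical pointers a, b, c; assumes "free" is aligned
 * so its low bit can carry DQ_FCT_BIT):
 *
 *	defer_rcu(free, a);	-> q: [ free | DQ_FCT_BIT ] [ a ]
 *	defer_rcu(free, b);	-> q: [ b ]	(last_fct_in is already free)
 *	defer_rcu(free, c);	-> q: [ c ]
 *
 * Only the first call of a run with a new callback pays the extra
 * function-pointer slot; later calls with the same callback cost one word.
 */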
void *thr_defer(void *args)
{
	for (;;) {
		pthread_testcancel();
		/*
		 * "Be green". Don't wake up the CPU if there is no RCU work
		 * to perform whatsoever. Aims at saving laptop battery life by
		 * leaving the processor in sleep state when idle.
		 */
		wait_defer();
		/* Sleeping after wait_defer to let many callbacks enqueue */
		poll(NULL, 0, 100);	/* wait for 100ms */
		rcu_defer_barrier();
	}

	return NULL;
}
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void defer_rcu(void (*fct)(void *p), void *p)
{
	_defer_rcu(fct, p);
}
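/*
 * Minimal usage sketch for callers of this wrapper (hypothetical
 * application code; "node" stands in for the caller's own data):
 *
 *	rcu_defer_register_thread();	once per thread using defer_rcu()
 *	...
 *	remove node from the RCU-protected structure
 *	defer_rcu(free, node);		freed after a later grace period
 *	...
 *	rcu_defer_unregister_thread();	flushes this thread's queue
 *
 * Threads must also be registered with the chosen urcu flavour for
 * reading/updating; defer_rcu() only covers deferred reclamation.
 */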
static void start_defer_thread(void)
{
	int ret;

	ret = pthread_create(&tid_defer, NULL, thr_defer, NULL);
	assert(!ret);
}
static void stop_defer_thread(void)
{
	int ret;
	void *tret;

	pthread_cancel(tid_defer);
	wake_up_defer();
	ret = pthread_join(tid_defer, &tret);
	assert(!ret);
}
void rcu_defer_register_thread(void)
{
	int was_empty;

	assert(defer_queue.last_head == 0);
	assert(defer_queue.q == NULL);
	defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);

	mutex_lock(&defer_thread_mutex);
	mutex_lock(&rcu_defer_mutex);
	was_empty = cds_list_empty(&registry);
	cds_list_add(&defer_queue.list, &registry);
	mutex_unlock(&rcu_defer_mutex);

	if (was_empty)
		start_defer_thread();
	mutex_unlock(&defer_thread_mutex);
}
void rcu_defer_unregister_thread(void)
{
	int is_empty;

	mutex_lock(&defer_thread_mutex);
	mutex_lock(&rcu_defer_mutex);
	cds_list_del(&defer_queue.list);
	_rcu_defer_barrier_thread();
	free(defer_queue.q);
	defer_queue.q = NULL;
	is_empty = cds_list_empty(&registry);
	mutex_unlock(&rcu_defer_mutex);

	if (is_empty)
		stop_defer_thread();
	mutex_unlock(&defer_thread_mutex);
}
void rcu_defer_exit(void)
{
	assert(cds_list_empty(&registry));
}