/*
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <poll.h>
#include <unistd.h>

#include "config.h"		/* Supplies the HAVE_* feature-test macros. */
#include "urcu/wfqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"

/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
        struct cds_wfq_queue cbs;	/* Queue of pending callbacks. */
        unsigned long flags;
        pthread_mutex_t mtx;
        pthread_cond_t cond;
        unsigned long qlen;		/* Number of pending callbacks. */
        pthread_t tid;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/* Link a thread using call_rcu() to its call_rcu thread. */

static __thread struct call_rcu_data *thread_call_rcu_data;

/* Guard call_rcu thread creation. */

static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;

extern void synchronize_rcu(void);

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures,
 * and the number of CPUs.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
        struct call_rcu_data **p;
        static int warned = 0;

        if (maxcpus != 0)
                return;		/* Already allocated. */
        maxcpus = sysconf(_SC_NPROCESSORS_CONF);
        if (maxcpus <= 0)
                return;
        p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
        if (p != NULL) {
                memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
                per_cpu_call_rcu_data = p;
        } else {
                if (!warned)
                        fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
                warned = 1;
        }
}

#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

static const struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void alloc_cpu_call_rcu_data(void)
{
}

static int sched_getcpu(void)
{
        return -1;	/* No per-CPU information available. */
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
        if (pthread_mutex_lock(pmp) != 0) {
                perror("pthread_mutex_lock");
                exit(-1);
        }
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
        if (pthread_mutex_unlock(pmp) != 0) {
                perror("pthread_mutex_unlock");
                exit(-1);
        }
}

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
        unsigned long cbcount;
        struct cds_wfq_node *cbs;
        struct cds_wfq_node **cbs_tail;
        struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
        struct rcu_head *rhp;

        thread_call_rcu_data = crdp;
        for (;;) {
                if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
                        /* Wait for a concurrent enqueue to complete. */
                        while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
                                poll(NULL, 0, 1);
                        /* Splice the pending callbacks onto a local list. */
                        _CMM_STORE_SHARED(crdp->cbs.head, NULL);
                        cbs_tail = (struct cds_wfq_node **)
                                uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
                        /* Wait for a grace period, then invoke the callbacks. */
                        synchronize_rcu();
                        cbcount = 0;
                        do {
                                while (cbs->next == NULL &&
                                       &cbs->next != cbs_tail)
                                        poll(NULL, 0, 1);
                                if (cbs == &crdp->cbs.dummy) {
                                        cbs = cbs->next;
                                        continue;
                                }
                                rhp = (struct rcu_head *)cbs;
                                cbs = cbs->next;
                                rhp->func(rhp);
                                cbcount++;
                        } while (cbs != NULL);
                        uatomic_sub(&crdp->qlen, cbcount);
                }
                if (crdp->flags & URCU_CALL_RCU_RT)
                        poll(NULL, 0, 10);	/* RT threads just sleep briefly. */
                else {
                        /* Otherwise block until more callbacks arrive. */
                        call_rcu_lock(&crdp->mtx);
                        _CMM_STORE_SHARED(crdp->flags,
                                          crdp->flags & ~URCU_CALL_RCU_RUNNING);
                        if (&crdp->cbs.head ==
                            _CMM_LOAD_SHARED(crdp->cbs.tail) &&
                            pthread_cond_wait(&crdp->cond, &crdp->mtx) != 0) {
                                perror("pthread_cond_wait");
                                exit(-1);
                        }
                        _CMM_STORE_SHARED(crdp->flags,
                                          crdp->flags | URCU_CALL_RCU_RUNNING);
                        call_rcu_unlock(&crdp->mtx);
                }
        }
        return NULL;	/* NOTREACHED */
}

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
                               unsigned long flags)
{
        struct call_rcu_data *crdp;

        crdp = malloc(sizeof(*crdp));
        if (crdp == NULL) {
                fprintf(stderr, "Out of memory.\n");
                exit(-1);
        }
        memset(crdp, '\0', sizeof(*crdp));
        cds_wfq_init(&crdp->cbs);
        if (pthread_mutex_init(&crdp->mtx, NULL) != 0) {
                perror("pthread_mutex_init");
                exit(-1);
        }
        if (pthread_cond_init(&crdp->cond, NULL) != 0) {
                perror("pthread_cond_init");
                exit(-1);
        }
        crdp->flags = flags | URCU_CALL_RCU_RUNNING;
        cmm_smp_mb();	/* Structure initialized before pointer is planted. */
        *crdpp = crdp;
        if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
                perror("pthread_create");
                exit(-1);
        }
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none.  We cannot automatically
 * create it because the platform we are running on might not define
 * sched_getcpu().
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
        static int warned = 0;

        if (per_cpu_call_rcu_data == NULL)
                return NULL;
        if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
                fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
                warned = 1;
        }
        if (cpu < 0 || maxcpus <= cpu)
                return NULL;
        return per_cpu_call_rcu_data[cpu];
}

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
        return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

struct call_rcu_data *create_call_rcu_data(unsigned long flags)
{
        struct call_rcu_data *crdp;

        call_rcu_data_init(&crdp, flags);
        return crdp;
}

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
        call_rcu_lock(&call_rcu_mutex);
        if (cpu < 0 || maxcpus <= cpu) {
                fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
                call_rcu_unlock(&call_rcu_mutex);
                errno = EINVAL;
                return -EINVAL;
        }
        alloc_cpu_call_rcu_data();
        call_rcu_unlock(&call_rcu_mutex);
        if (per_cpu_call_rcu_data == NULL) {
                errno = ENOMEM;
                return -ENOMEM;
        }
        per_cpu_call_rcu_data[cpu] = crdp;
        return 0;
}
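
/*
 * Illustrative sketch, not part of the library: one way a caller might
 * dedicate a real-time call_rcu thread to CPU 0.  This assumes
 * URCU_CALL_RCU_RT is the flag consumed by call_rcu_thread() above;
 * error handling is minimal.
 */
#if 0
        struct call_rcu_data *my_crdp = create_call_rcu_data(URCU_CALL_RCU_RT);

        if (set_cpu_call_rcu_data(0, my_crdp) != 0)
                perror("set_cpu_call_rcu_data");	/* errno is EINVAL or ENOMEM. */
#endif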

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be.  Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
        if (default_call_rcu_data != NULL)
                return rcu_dereference(default_call_rcu_data);
        call_rcu_lock(&call_rcu_mutex);
        if (default_call_rcu_data != NULL) {
                call_rcu_unlock(&call_rcu_mutex);
                return default_call_rcu_data;
        }
        call_rcu_data_init(&default_call_rcu_data, 0);
        call_rcu_unlock(&call_rcu_mutex);
        return default_call_rcu_data;
}
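
/*
 * Illustrative sketch, not part of the library: invoking this function
 * once during single-threaded startup pre-creates the default call_rcu
 * thread, so the first real call_rcu() need not pay thread-creation
 * latency.
 */
#if 0
        (void) get_default_call_rcu_data();
#endif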

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread.  Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure.  If there is not
 * yet a default call_rcu_data structure, one will be created.
 */

struct call_rcu_data *get_call_rcu_data(void)
{
        int curcpu;
        static int warned = 0;

        if (thread_call_rcu_data != NULL)
                return thread_call_rcu_data;
        if (maxcpus <= 0)
                return get_default_call_rcu_data();
        curcpu = sched_getcpu();
        if (!warned && (curcpu < 0 || maxcpus <= curcpu)) {
                fprintf(stderr, "[error] liburcu: gcrd CPU # out of range\n");
                warned = 1;
        }
        if (curcpu >= 0 && maxcpus > curcpu &&
            per_cpu_call_rcu_data != NULL &&
            per_cpu_call_rcu_data[curcpu] != NULL)
                return per_cpu_call_rcu_data[curcpu];
        return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
        return thread_call_rcu_data;
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one.  (This allows switching
 * to and from real-time call_rcu threads, for example.)
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
        thread_call_rcu_data = crdp;
}
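
/*
 * Illustrative sketch, not part of the library: temporarily switching
 * the current thread to a dedicated real-time call_rcu thread, then
 * reverting to per-CPU/default handling by passing NULL.
 */
#if 0
        struct call_rcu_data *rt_crdp = create_call_rcu_data(URCU_CALL_RCU_RT);

        set_thread_call_rcu_data(rt_crdp);	/* RT callback handling from here. */
        /* ... latency-sensitive processing ... */
        set_thread_call_rcu_data(NULL);		/* Back to CPU/default lookup. */
#endif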

/*
 * Create a separate call_rcu thread for each CPU.  This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
        int i;
        struct call_rcu_data *crdp;
        int ret;

        call_rcu_lock(&call_rcu_mutex);
        alloc_cpu_call_rcu_data();
        call_rcu_unlock(&call_rcu_mutex);
        if (maxcpus <= 0) {
                errno = EINVAL;
                return -EINVAL;
        }
        if (per_cpu_call_rcu_data == NULL) {
                errno = ENOMEM;
                return -ENOMEM;
        }
        for (i = 0; i < maxcpus; i++) {
                call_rcu_lock(&call_rcu_mutex);
                if (get_cpu_call_rcu_data(i)) {
                        call_rcu_unlock(&call_rcu_mutex);
                        continue;
                }
                crdp = create_call_rcu_data(flags);
                if (crdp == NULL) {
                        call_rcu_unlock(&call_rcu_mutex);
                        errno = ENOMEM;
                        return -ENOMEM;
                }
                call_rcu_unlock(&call_rcu_mutex);
                if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
                        /* FIXME: Leaks crdp for now. */
                        return ret;	/* Can happen on race. */
                }
        }
        return 0;
}
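
/*
 * Illustrative sketch, not part of the library: an application wanting
 * per-CPU callback handling from the outset can do this during
 * initialization, before its first call_rcu().
 */
#if 0
        if (create_all_cpu_call_rcu_data(0) != 0)
                perror("create_all_cpu_call_rcu_data");
#endif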

/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one.  So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first.  One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 */

void call_rcu(struct rcu_head *head,
              void (*func)(struct rcu_head *head))
{
        struct call_rcu_data *crdp;

        cds_wfq_node_init(&head->next);
        head->func = func;	/* Invoked by the call_rcu thread after a grace period. */
        crdp = get_call_rcu_data();
        cds_wfq_enqueue(&crdp->cbs, &head->next);
        uatomic_inc(&crdp->qlen);
        if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT)) {
                call_rcu_lock(&crdp->mtx);
                if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RUNNING)) {
                        /* Wake the call_rcu thread if it is blocked. */
                        if (pthread_cond_signal(&crdp->cond) != 0) {
                                perror("pthread_cond_signal");
                                exit(-1);
                        }
                }
                call_rcu_unlock(&crdp->mtx);
        }
}
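
/*
 * Illustrative usage sketch, not part of the library: a caller embeds
 * struct rcu_head in its own structure and passes call_rcu() a callback
 * that frees the enclosing structure after a grace period.  The struct
 * foo type and the free_foo()/foo_release() helpers are hypothetical;
 * caa_container_of() is provided by urcu/compiler.h.
 */
#if 0
struct foo {
        int a;
        struct rcu_head rcu;
};

static void free_foo(struct rcu_head *head)
{
        struct foo *fp = caa_container_of(head, struct foo, rcu);

        free(fp);
}

static void foo_release(struct foo *fp)
{
        /* fp must already be unreachable by new RCU readers. */
        call_rcu(&fp->rcu, free_foo);
}
#endif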