/*
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>

#include "urcu/wfqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu-die.h"
#define SET_AFFINITY_CHECK_PERIOD		(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK		(SET_AFFINITY_CHECK_PERIOD - 1)
/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	struct cds_wfq_queue cbs;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen;		/* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long gp_count;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

CDS_LIST_HEAD(call_rcu_data_list);
/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/* If a given thread does not have its own call_rcu thread, this is default. */

static struct call_rcu_data *default_call_rcu_data;
/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#ifdef HAVE_SCHED_GETCPU

static int urcu_sched_getcpu(void)
{
	return sched_getcpu();
}

#else /* #ifdef HAVE_SCHED_GETCPU */

static int urcu_sched_getcpu(void)
{
	return -1;
}

#endif /* #else #ifdef HAVE_SCHED_GETCPU */
#if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU)

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is a RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as a
 * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
 * without mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;

static void maxcpus_reset(void)
{
	maxcpus = 0;
}

/* Allocate the array if it has not already been allocated. */
static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0)
		return;
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}
#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below, used both
 * for cases where cpu number is available and not available, assume it is not
 * NULL.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void maxcpus_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}
/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}
/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;
	int ret;

	if (crdp->cpu_affinity < 0)
		return 0;
	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == crdp->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	ret = sched_setaffinity(0, &mask);
#else
	ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif
static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) != -1)
		return;
	while (futex_async(&crdp->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
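
/*
 * Illustrative sketch (not part of the library): the futex field above
 * implements a simple park/unpark handshake.  A minimal model of the
 * protocol, assuming a hypothetical int32_t f initialized to 0:
 *
 *	waiter:				waker:
 *	uatomic_dec(&f);   (f == -1)	enqueue callback;
 *	cmm_smp_mb();			cmm_smp_mb();
 *	recheck work queue;		if (uatomic_read(&f) == -1) {
 *	if (uatomic_read(&f) == -1)		uatomic_set(&f, 0);
 *		futex(&f, FUTEX_WAIT, -1);	futex(&f, FUTEX_WAKE, 1);
 *					}
 *
 * The memory barriers pair so that either the waiter sees the newly
 * enqueued callback, or the waker sees f == -1 and issues the wakeup.
 */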
/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
	struct rcu_head *rhp;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp))
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		if (set_thread_cpu_affinity(crdp))
			urcu_die(errno);

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourself from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				poll(NULL, 0, 1);
			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			rcu_register_thread();
		}

		if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
			while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
				poll(NULL, 0, 1);
			_CMM_STORE_SHARED(crdp->cbs.head, NULL);
			cbs_tail = (struct cds_wfq_node **)
				uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
			synchronize_rcu();
			cbcount = 0;
			do {
				while (cbs->next == NULL &&
				       &cbs->next != cbs_tail)
					poll(NULL, 0, 1);
				if (cbs == &crdp->cbs.dummy) {
					cbs = cbs->next;
					continue;
				}
				rhp = (struct rcu_head *)cbs;
				cbs = cbs->next;
				rhp->func(rhp);
				cbcount++;
			} while (cbs != NULL);
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (&crdp->cbs.head
			    == _CMM_LOAD_SHARED(crdp->cbs.tail)) {
				call_rcu_wait(crdp);
				poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				poll(NULL, 0, 10);
			}
		} else {
			poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}
/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified.  Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfq_init(&crdp->cbs);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	crdp->gp_count = 0;
	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}
/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none.  We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}
/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}
/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}
struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}
/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
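
/*
 * Illustrative example (not part of the library): safely replacing a
 * CPU's call_rcu_data structure while honoring the grace-period
 * requirement documented above.  Error handling elided for brevity.
 *
 *	struct call_rcu_data *old, *new;
 *
 *	new = create_call_rcu_data(0, cpu);
 *	rcu_read_lock();
 *	old = get_cpu_call_rcu_data(cpu);
 *	rcu_read_unlock();
 *	(void) set_cpu_call_rcu_data(cpu, NULL);
 *	(void) set_cpu_call_rcu_data(cpu, new);
 *	synchronize_rcu();	(wait for RCU readers of "old")
 *	call_rcu_data_free(old);
 */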
/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be.  Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}
/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread.  Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure.  If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (maxcpus > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}
/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}
/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one.  (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
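
/*
 * Illustrative example (not part of the library): giving the current
 * thread its own real-time call_rcu helper thread, then tearing it
 * down.  Error handling elided for brevity.
 *
 *	struct call_rcu_data *crdp;
 *
 *	crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	set_thread_call_rcu_data(crdp);
 *	...
 *	set_thread_call_rcu_data(NULL);
 *	synchronize_rcu();	(wait for RCU readers of crdp)
 *	call_rcu_data_free(crdp);
 */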
/*
 * Create a separate call_rcu thread for each CPU.  This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to teardown these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by other thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}
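
/*
 * Illustrative example (not part of the library): an application that
 * wants per-CPU callback handling from the start, paired with the
 * matching teardown at exit.
 *
 *	if (create_all_cpu_call_rcu_data(0))
 *		perror("create_all_cpu_call_rcu_data");
 *	...
 *	free_all_cpu_call_rcu_data();
 */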
/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}
/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one.  So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first.  One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */

void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	cds_wfq_node_init(&head->next);
	head->func = func;
	/* Holding rcu read-side lock across use of per-cpu crdp */
	rcu_read_lock();
	crdp = get_call_rcu_data();
	cds_wfq_enqueue(&crdp->cbs, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
	rcu_read_unlock();
}
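
/*
 * Illustrative example (not part of the library): deferred reclamation
 * of a hypothetical structure embedding a struct rcu_head.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu_head;
 *	};
 *
 *	static void free_foo(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu_head);
 *
 *		free(p);
 *	}
 *
 *	(from a registered RCU read-side thread, after unpublishing p:)
 *	call_rcu(&p->rcu_head, free_foo);
 */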
/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread.  The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage.  For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks.  Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else.  The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers.  This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

void call_rcu_data_free(struct call_rcu_data *crdp)
{
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct cds_wfq_node **cbs_endprev;

	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			poll(NULL, 0, 1);
	}
	if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
		/* Migrate any leftover callbacks to the default queue. */
		while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
			poll(NULL, 0, 1);
		_CMM_STORE_SHARED(crdp->cbs.head, NULL);
		cbs_tail = (struct cds_wfq_node **)
			uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
		/* Create default call rcu data if need be */
		(void) get_default_call_rcu_data();
		cbs_endprev = (struct cds_wfq_node **)
			uatomic_xchg(&default_call_rcu_data->cbs.tail,
					cbs_tail);
		_CMM_STORE_SHARED(*cbs_endprev, cbs);
		uatomic_add(&default_call_rcu_data->qlen,
			    uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	call_rcu_lock(&call_rcu_mutex);
	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	free(crdp);
}
/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (maxcpus <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * maxcpus);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < maxcpus; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}
/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			poll(NULL, 0, 1);
	}
}
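
/*
 * Illustrative example (not part of the library): registering the three
 * fork handlers so that fork() interacts safely with call_rcu threads.
 *
 *	(void) pthread_atfork(call_rcu_before_fork,
 *			call_rcu_after_fork_parent,
 *			call_rcu_after_fork_child);
 */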
/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child.  Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
			poll(NULL, 0, 1);
	}
	call_rcu_unlock(&call_rcu_mutex);
}
/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec().  Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	maxcpus_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}