/*
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Expected to provide RCU_GP_COUNT, RCU_GP_CTR_BIT, SIGURCU, mb(),
 * barrier(), atomic_inc() and rcu_old_gp_ongoing(). */
#include "urcu.h"
pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT (the grace period parity).
 * Also carries a low-order count of RCU_GP_COUNT, to accelerate the
 * reader fast path.
 */
long urcu_gp_ctr = RCU_GP_COUNT;
/* Per-thread read-side nesting count and grace period parity snapshot. */
long __thread urcu_active_readers;
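/*
 * The matching read side lives in urcu.h. A minimal sketch of the reader
 * fast path, under the assumption (not guaranteed by this file) that
 * urcu.h exposes rcu_read_lock()/rcu_read_unlock() built on these two
 * variables:
 *
 *	rcu_read_lock:
 *		if (outermost nesting level)
 *			urcu_active_readers = urcu_gp_ctr;	(snapshot parity)
 *		else
 *			urcu_active_readers += RCU_GP_COUNT;	(bump nest count)
 *		barrier();	(compiler barrier only; see force_mb_all_threads())
 *
 * Only this thread-local word is written, which keeps readers cheap.
 */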
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_data {
	pthread_t tid;
	long *urcu_active_readers;
};
unsigned int yield_active;
unsigned int __thread rand_yield;
static struct reader_data *reader_data;
static int num_readers, alloc_readers;
static int sig_done;
void internal_urcu_lock(void)
{
	int ret;

	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
}
void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	urcu_gp_ctr ^= RCU_GP_CTR_BIT;
}
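/*
 * Illustration, assuming the urcu.h layout where RCU_GP_CTR_BIT is a
 * single bit above the nesting-count bits:
 *
 *	urcu_gp_ctr == RCU_GP_COUNT			-> parity 0
 *	urcu_gp_ctr == RCU_GP_COUNT | RCU_GP_CTR_BIT	-> parity 1
 *
 * Readers snapshot urcu_gp_ctr into urcu_active_readers, so the writer
 * can tell which parity a given read-side critical section started in.
 */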
#ifdef DEBUG_FULL_MB
static void force_mb_all_threads(void)
{
	mb();
}
#else
static void force_mb_all_threads(void)
{
	struct reader_data *index;

	/*
	 * Ask each thread to execute a mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (!reader_data)
		return;
	sig_done = 0;
	mb();	/* write sig_done before sending the signals */
	for (index = reader_data; index < reader_data + num_readers; index++)
		pthread_kill(index->tid, SIGURCU);
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (sig_done < num_readers)
		barrier();
	mb();	/* read sig_done before ending the barrier */
}
#endif
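/*
 * Design note: signalling readers with SIGURCU instead of making them
 * issue mb() themselves shifts the full memory-barrier cost onto the
 * (presumably rare) writer. Readers pay only a compiler barrier; the
 * busy-wait above is tolerable because grace periods are infrequent.
 */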
void wait_for_quiescent_state(void)
{
	struct reader_data *index;

	if (!reader_data)
		return;
	/*
	 * Wait for each thread's urcu_active_readers count to become 0.
	 */
	for (index = reader_data; index < reader_data + num_readers; index++) {
		/* BUSY-LOOP until the reader leaves its pre-existing
		 * read-side critical section. */
		while (rcu_old_gp_ongoing(index->urcu_active_readers))
			barrier();
	}
}
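/*
 * rcu_old_gp_ongoing() comes from urcu.h. Assumed semantics: it returns
 * nonzero iff the pointed-to urcu_active_readers word shows a nonzero
 * read-side nesting count whose snapshotted parity bit differs from the
 * current urcu_gp_ctr, i.e. the reader entered its critical section
 * before the latest parity flip.
 */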
void synchronize_rcu(void)
{
	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	internal_urcu_lock();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 */
	mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are
	 * always accessing data (no progress).
	 */
	mb();

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 */
	mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	internal_urcu_unlock();

	/* All threads should finish using the data referred to by old ptr
	 * before decrementing their urcu_active_readers count */
	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. */
	force_mb_all_threads();
}
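/*
 * Typical writer-side usage: a sketch only. `shared_ptr', `new_version'
 * and `old' are hypothetical names, and a real caller would normally
 * publish through an rcu_assign_pointer-style helper rather than a plain
 * store:
 *
 *	old = shared_ptr;
 *	shared_ptr = new_version;	(publish the new data)
 *	synchronize_rcu();		(wait out all pre-existing readers)
 *	free(old);			(safe: no reader still holds old)
 */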
void urcu_add_reader(pthread_t id)
{
	struct reader_data *oldarray;

	if (!reader_data) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		reader_data =
			malloc(sizeof(struct reader_data) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = reader_data;
		reader_data = malloc(sizeof(struct reader_data)
				* (alloc_readers << 1));
		memcpy(reader_data, oldarray,
			sizeof(struct reader_data) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	reader_data[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
	num_readers++;
}
/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
	struct reader_data *index;

	assert(reader_data != NULL);
	for (index = reader_data; index < reader_data + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &reader_data[num_readers - 1],
				sizeof(struct reader_data));
			reader_data[num_readers - 1].tid = 0;
			reader_data[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Not found: the thread was never registered. */
	assert(0);
}
void urcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_add_reader(pthread_self());
	internal_urcu_unlock();
}
void urcu_unregister_thread(void)
{
	internal_urcu_lock();
	urcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}
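/*
 * Reader-thread lifetime, sketched; rcu_read_lock()/rcu_read_unlock()
 * are assumed to come from urcu.h, and `shared_ptr' is a hypothetical
 * RCU-protected pointer:
 *
 *	urcu_register_thread();		(once per reader thread)
 *	...
 *	rcu_read_lock();
 *	p = shared_ptr;
 *	use(p);
 *	rcu_read_unlock();
 *	...
 *	urcu_unregister_thread();	(before thread exit)
 */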
#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	mb();	/* the whole point of the signal: a real memory barrier */
	atomic_inc(&sig_done);
}
void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO;	/* required to use sa_sigaction */
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}
void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	/* Check that our handler is still installed before tearing down. */
	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	free(reader_data);
}
#endif /* !DEBUG_FULL_MB */