src/urcu-defer-impl.h
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_DEFER_IMPL_H
#define _URCU_DEFER_IMPL_H

/*
 * Userspace RCU header - memory reclamation.
 *
 * TO BE INCLUDED ONLY FROM URCU LIBRARY CODE. See urcu-defer.h for linking
 * dynamically with the userspace rcu reclamation library.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <stdio.h>
#include <signal.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdint.h>

#include "urcu/futex.h"

#include <urcu/assert.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/system.h>
#include <urcu/tls-compat.h>
#include "urcu-die.h"
#include "urcu-utils.h"

/*
 * Number of entries in the per-thread defer queue. Must be power of 2.
 */
#define DEFER_QUEUE_SIZE	(1 << 12)
#define DEFER_QUEUE_MASK	(DEFER_QUEUE_SIZE - 1)
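
A quick standalone sketch (illustration only, not liburcu code) of why the queue size must be a power of two: head and tail are free-running unsigned counters, so head - tail gives the occupancy even across numeric wraparound, and counter & MASK maps a counter onto an array slot.

#include <assert.h>

#define QSIZE	(1UL << 12)	/* mirrors DEFER_QUEUE_SIZE */
#define QMASK	(QSIZE - 1)	/* mirrors DEFER_QUEUE_MASK */

int main(void)
{
	unsigned long tail = (unsigned long)-2;	/* counter about to wrap */
	unsigned long head = tail + 4;		/* 4 entries enqueued across the wrap */

	assert(head - tail == 4);	/* occupancy survives the wraparound */
	assert((head & QMASK) == 2);	/* slots used: 4094, 4095, 0, 1 */
	return 0;
}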

/*
 * Typically, data is aligned at least on the architecture size.
 * Use the lowest bit to indicate that the current callback is changing.
 * Assumes that (void *)-2L is not used often. Used to encode non-aligned
 * functions and non-aligned data using extra space.
 * We encode the (void *)-2L fct as: -2L, fct, data.
 * We encode the (void *)-2L data as either:
 *   fct | DQ_FCT_BIT, data (if fct is aligned), or
 *   -2L, fct, data (if fct is not aligned).
 * Here, DQ_FCT_MARK == ~DQ_FCT_BIT. Required for the test order.
 */
#define DQ_FCT_BIT		(1 << 0)
#define DQ_IS_FCT_BIT(x)	((unsigned long)(x) & DQ_FCT_BIT)
#define DQ_SET_FCT_BIT(x)	\
	(x = (void *)((unsigned long)(x) | DQ_FCT_BIT))
#define DQ_CLEAR_FCT_BIT(x)	\
	(x = (void *)((unsigned long)(x) & ~DQ_FCT_BIT))
#define DQ_FCT_MARK		((void *)(~DQ_FCT_BIT))
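
A minimal sketch (assuming function pointers have bit 0 clear on this platform, which is typical but not guaranteed by C) showing how the macros above tag, test, and restore a pointer:

#include <stdio.h>

#define DQ_FCT_BIT	(1 << 0)

static void cb(void *p) { (void) p; }

int main(void)
{
	void *x = (void *)cb;

	printf("aligned:  %d\n", ((unsigned long)x & DQ_FCT_BIT) == 0);
	x = (void *)((unsigned long)x | DQ_FCT_BIT);	/* DQ_SET_FCT_BIT */
	printf("tagged:   %d\n", ((unsigned long)x & DQ_FCT_BIT) != 0);	/* DQ_IS_FCT_BIT */
	x = (void *)((unsigned long)x & ~DQ_FCT_BIT);	/* DQ_CLEAR_FCT_BIT */
	printf("restored: %d\n", x == (void *)cb);
	return 0;
}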

/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * defer queue.
 * Contains pointers. Encoded to save space when same callback is often used.
 * When looking up the next item:
 * - if DQ_FCT_BIT is set, set the current callback to DQ_CLEAR_FCT_BIT(ptr)
 *   - next element contains pointer to data.
 * - else if item == DQ_FCT_MARK
 *   - set the current callback to next element ptr
 *   - following next element contains pointer to data.
 * - else current element contains data
 */
struct defer_queue {
	unsigned long head;	/* add element at head */
	void *last_fct_in;	/* last fct pointer encoded */
	unsigned long tail;	/* next element to remove at tail */
	void *last_fct_out;	/* last fct pointer encoded */
	void **q;
	/* registry information */
	unsigned long last_head;
	struct cds_list_head list;	/* list of thread queues */
};

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include <urcu/defer.h>

void __attribute__((destructor)) rcu_defer_exit(void);

extern void synchronize_rcu(void);

/*
 * rcu_defer_mutex nests inside defer_thread_mutex.
 */
static pthread_mutex_t rcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;

static int32_t defer_thread_futex;
static int32_t defer_thread_stop;

/*
 * Written to only by each individual deferer. Read by both the deferer and
 * the reclamation thread.
 */
static DEFINE_URCU_TLS(struct defer_queue, defer_queue);
static CDS_LIST_HEAD(registry_defer);
static pthread_t tid_defer;

static void mutex_lock_defer(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

/*
 * Wake up any waiting defer thread. Called from many concurrent threads.
 */
static void wake_up_defer(void)
{
	if (caa_unlikely(uatomic_read(&defer_thread_futex) == -1)) {
		uatomic_set(&defer_thread_futex, 0);
		if (futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

static unsigned long rcu_defer_num_callbacks(void)
{
	unsigned long num_items = 0, head;
	struct defer_queue *index;

	mutex_lock_defer(&rcu_defer_mutex);
	cds_list_for_each_entry(index, &registry_defer, list) {
		head = CMM_LOAD_SHARED(index->head);
		num_items += head - index->tail;
	}
	mutex_unlock(&rcu_defer_mutex);
	return num_items;
}

/*
 * Defer thread waiting. Single thread.
 */
static void wait_defer(void)
{
	uatomic_dec(&defer_thread_futex);
	/* Write futex before read queue */
	/* Write futex before read defer_thread_stop */
	cmm_smp_mb();
	if (_CMM_LOAD_SHARED(defer_thread_stop)) {
		uatomic_set(&defer_thread_futex, 0);
		pthread_exit(0);
	}
	if (rcu_defer_num_callbacks()) {
		cmm_smp_mb();	/* Read queue before write futex */
		/* Callbacks are queued, don't wait. */
		uatomic_set(&defer_thread_futex, 0);
	} else {
		cmm_smp_rmb();	/* Read queue before read futex */
		while (uatomic_read(&defer_thread_futex) == -1) {
			if (!futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
				/*
				 * Wakeups queued earlier by unrelated code
				 * using the same address can cause futex wait
				 * to return 0 even though the futex value is
				 * still -1 (spurious wakeups). Check the value
				 * again in user-space to validate whether it
				 * really differs from -1.
				 */
				continue;
			}
			switch (errno) {
			case EAGAIN:
				/* Value already changed. */
				return;
			case EINTR:
				/* Retry if interrupted by signal. */
				break;	/* Get out of switch. Check again. */
			default:
				/* Unexpected error. */
				urcu_die(errno);
			}
		}
	}
}
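
The futex convention above (-1 = a waiter may be sleeping, 0 = awake) can be shown in isolation. Below is a minimal standalone sketch of the same wait/wake pairing, using the raw Linux futex syscall and GCC atomic builtins instead of liburcu's futex_noasync() and uatomic wrappers; the names and the placement of the work re-check are illustrative assumptions, not liburcu code.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>
#include <stdint.h>

static int32_t fword;	/* 0: awake, -1: waiter may be sleeping */

long sys_futex(int32_t *uaddr, int op, int32_t val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

void waiter(void)	/* single waiter thread */
{
	__atomic_store_n(&fword, -1, __ATOMIC_SEQ_CST);
	/* Re-check for pending work here before sleeping (elided). */
	while (__atomic_load_n(&fword, __ATOMIC_SEQ_CST) == -1) {
		if (!sys_futex(&fword, FUTEX_WAIT, -1))
			continue;	/* Possibly spurious wakeup: re-check the word. */
		if (errno == EAGAIN)
			return;		/* Word no longer -1: already woken. */
		if (errno != EINTR)
			break;		/* Unexpected error; a real impl would die here. */
	}
}

void waker(void)	/* any number of concurrent wakers */
{
	if (__atomic_load_n(&fword, __ATOMIC_SEQ_CST) == -1) {
		__atomic_store_n(&fword, 0, __ATOMIC_SEQ_CST);
		(void) sys_futex(&fword, FUTEX_WAKE, 1);
	}
}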

/*
 * Must be called after a quiescent state (Q.S.) is reached.
 */
static void rcu_defer_barrier_queue(struct defer_queue *queue,
		unsigned long head)
{
	unsigned long i;
	void (*fct)(void *p);
	void *p;

	/*
	 * Tail is only modified when lock is held.
	 * Head is only modified by owner thread.
	 */

	for (i = queue->tail; i != head;) {
		cmm_smp_rmb();	/* read head before q[]. */
		p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		if (caa_unlikely(DQ_IS_FCT_BIT(p))) {
			DQ_CLEAR_FCT_BIT(p);
			queue->last_fct_out = p;
			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		} else if (caa_unlikely(p == DQ_FCT_MARK)) {
			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
			queue->last_fct_out = p;
			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		}
		fct = queue->last_fct_out;
		fct(p);
	}
	cmm_smp_mb();	/* push tail after having used q[] */
	CMM_STORE_SHARED(queue->tail, i);
}
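
The barrier discipline here pairs with _defer_rcu() below: the producer writes a slot, issues a write barrier, then publishes head; the consumer reads head, issues a read barrier, then reads the slot. A minimal single-producer/single-consumer sketch of that pairing using GCC atomic fences (hypothetical names, not liburcu code):

#include <stddef.h>

#define QSIZE	16
#define QMASK	(QSIZE - 1)

static void *slot[QSIZE];
static unsigned long qhead;	/* written by producer only */
static unsigned long qtail;	/* private to the consumer here */

void produce(void *p)	/* caller must ensure the queue is not full */
{
	unsigned long h = qhead;

	slot[h & QMASK] = p;
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* write slot before head (wmb) */
	__atomic_store_n(&qhead, h + 1, __ATOMIC_RELAXED);
}

void *consume(void)	/* returns NULL when empty */
{
	if (__atomic_load_n(&qhead, __ATOMIC_RELAXED) == qtail)
		return NULL;
	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* read head before slot (rmb) */
	return slot[qtail++ & QMASK];
}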

static void _rcu_defer_barrier_thread(void)
{
	unsigned long head, num_items;

	head = URCU_TLS(defer_queue).head;
	num_items = head - URCU_TLS(defer_queue).tail;
	if (caa_unlikely(!num_items))
		return;
	synchronize_rcu();
	rcu_defer_barrier_queue(&URCU_TLS(defer_queue), head);
}

void rcu_defer_barrier_thread(void)
{
	mutex_lock_defer(&rcu_defer_mutex);
	_rcu_defer_barrier_thread();
	mutex_unlock(&rcu_defer_mutex);
}

/*
 * rcu_defer_barrier - Execute all queued rcu callbacks.
 *
 * Execute all RCU callbacks queued before rcu_defer_barrier() execution.
 * All callbacks queued on the local thread prior to an rcu_defer_barrier()
 * call are guaranteed to be executed.
 * Callbacks queued by other threads concurrently with rcu_defer_barrier()
 * execution are not guaranteed to be executed in the current batch (they
 * could be left for the next batch). These callbacks are only guaranteed
 * to be executed if there is explicit synchronization between the thread
 * adding to the queue and the thread issuing the defer_barrier call.
 */

void rcu_defer_barrier(void)
{
	struct defer_queue *index;
	unsigned long num_items = 0;

	if (cds_list_empty(&registry_defer))
		return;

	mutex_lock_defer(&rcu_defer_mutex);
	cds_list_for_each_entry(index, &registry_defer, list) {
		index->last_head = CMM_LOAD_SHARED(index->head);
		num_items += index->last_head - index->tail;
	}
	if (caa_likely(!num_items)) {
		/*
		 * We skip the grace period because there are no queued
		 * callbacks to execute.
		 */
		goto end;
	}
	synchronize_rcu();
	cds_list_for_each_entry(index, &registry_defer, list)
		rcu_defer_barrier_queue(index, index->last_head);
end:
	mutex_unlock(&rcu_defer_mutex);
}

/*
 * _defer_rcu - Queue an RCU callback.
 */
static void _defer_rcu(void (*fct)(void *p), void *p)
{
	unsigned long head, tail;

	/*
	 * Head is only modified by the current thread. Tail can be modified
	 * by the reclamation thread.
	 */
	head = URCU_TLS(defer_queue).head;
	tail = CMM_LOAD_SHARED(URCU_TLS(defer_queue).tail);

	/*
	 * If the queue is full or has reached the threshold, empty it
	 * ourselves. Worst case: we must allow 2 supplementary entries for
	 * the fct pointer.
	 */
	if (caa_unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
		urcu_posix_assert(head - tail <= DEFER_QUEUE_SIZE);
		rcu_defer_barrier_thread();
		urcu_posix_assert(head - CMM_LOAD_SHARED(URCU_TLS(defer_queue).tail) == 0);
	}

	/*
	 * Encode:
	 * if the function is not changed and the data is aligned and it is
	 * not the marker:
	 *	store the data
	 * otherwise if the function is aligned and it is not the marker:
	 *	store the function with DQ_FCT_BIT
	 *	store the data
	 * otherwise:
	 *	store the marker (DQ_FCT_MARK)
	 *	store the function
	 *	store the data
	 *
	 * Decode: see the comments before 'struct defer_queue'
	 * or the code in rcu_defer_barrier_queue().
	 */
	if (caa_unlikely(URCU_TLS(defer_queue).last_fct_in != fct
			|| DQ_IS_FCT_BIT(p)
			|| p == DQ_FCT_MARK)) {
		URCU_TLS(defer_queue).last_fct_in = fct;
		if (caa_unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
			_CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
				DQ_FCT_MARK);
			_CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
				fct);
		} else {
			DQ_SET_FCT_BIT(fct);
			_CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
				fct);
		}
	}
	_CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK], p);
	cmm_smp_wmb();	/* Publish new pointer before head */
	/* Write q[] before head. */
	CMM_STORE_SHARED(URCU_TLS(defer_queue).head, head);
	cmm_smp_mb();	/* Write queue head before read futex */
	/*
	 * Wake up any waiting defer thread.
	 */
	wake_up_defer();
}
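
To make the encoding concrete, here is a standalone simulation (hypothetical helper names; it assumes aligned function pointers) that applies the same encode rules as _defer_rcu() and the same decode rules as rcu_defer_barrier_queue(): three items sharing one callback occupy only four slots.

#include <stdio.h>
#include <stdint.h>

#define FCT_BIT		1UL
#define FCT_MARK	((void *)~FCT_BIT)
#define MASK		15UL

static void *q[16];
static unsigned long head, tail;
static void *last_in, *last_out;

static void enqueue(void (*fct)(void *), void *p)
{
	if (last_in != (void *)fct || ((uintptr_t)p & FCT_BIT) || p == FCT_MARK) {
		last_in = (void *)fct;
		if (((uintptr_t)fct & FCT_BIT) || (void *)fct == FCT_MARK) {
			q[head++ & MASK] = FCT_MARK;	/* non-aligned fct: 3 slots */
			q[head++ & MASK] = (void *)fct;
		} else {
			q[head++ & MASK] = (void *)((uintptr_t)fct | FCT_BIT);
		}
	}
	q[head++ & MASK] = p;	/* common case: data only */
}

static void drain(void)
{
	while (tail != head) {
		void *p = q[tail++ & MASK];

		if ((uintptr_t)p & FCT_BIT) {		/* tagged callback change */
			last_out = (void *)((uintptr_t)p & ~FCT_BIT);
			p = q[tail++ & MASK];
		} else if (p == FCT_MARK) {		/* escaped callback change */
			last_out = q[tail++ & MASK];
			p = q[tail++ & MASK];
		}
		((void (*)(void *)) last_out)(p);
	}
}

static void show(void *p) { printf("cb(%p)\n", p); }

int main(void)
{
	enqueue(show, (void *) 0x10);	/* new callback: fct|BIT, data */
	enqueue(show, (void *) 0x20);	/* same callback: data only */
	enqueue(show, (void *) 0x30);	/* same callback: data only */
	printf("%lu slots for 3 items\n", head);	/* prints 4 */
	drain();
	return 0;
}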

static void *thr_defer(void *args __attribute__((unused)))
{
	for (;;) {
		/*
		 * "Be green". Don't wake up the CPU if there is no RCU work
		 * to perform whatsoever. Aims at saving laptop battery life by
		 * leaving the processor in sleep state when idle.
		 */
		wait_defer();
		/* Sleeping after wait_defer to let many callbacks enqueue */
		(void) poll(NULL, 0, 100);	/* wait for 100ms */
		rcu_defer_barrier();
	}

	return NULL;
}


/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void defer_rcu(void (*fct)(void *p), void *p)
{
	_defer_rcu(fct, p);
}
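
For reference, a hedged usage sketch of this public wrapper (header names and link flags are assumptions based on typical liburcu installs, e.g. the default memb flavor): an updater swaps out an RCU-published pointer and defers free() of the old value until after a grace period.

#include <urcu.h>		/* default liburcu flavor */
#include <urcu/defer.h>		/* defer_rcu() and friends */
#include <stdlib.h>

static void *global_ptr;

static void publish_new(void *newp)
{
	void *old = rcu_xchg_pointer(&global_ptr, newp);

	if (old)
		defer_rcu(free, old);	/* freed after a future grace period */
}

int main(void)
{
	rcu_register_thread();			/* RCU reader registration */
	if (rcu_defer_register_thread())	/* allocates this thread's defer queue */
		abort();

	publish_new(malloc(16));
	publish_new(malloc(16));

	rcu_defer_barrier();		/* optionally force queued callbacks now */

	rcu_defer_unregister_thread();	/* flushes this thread's remaining callbacks */
	rcu_unregister_thread();
	return 0;
}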

static void start_defer_thread(void)
{
	int ret;
	sigset_t newmask, oldmask;

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);

	ret = pthread_create(&tid_defer, NULL, thr_defer, NULL);
	if (ret)
		urcu_die(ret);

	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}

static void stop_defer_thread(void)
{
	int ret;
	void *tret;

	_CMM_STORE_SHARED(defer_thread_stop, 1);
	/* Store defer_thread_stop before testing futex */
	cmm_smp_mb();
	wake_up_defer();

	ret = pthread_join(tid_defer, &tret);
	urcu_posix_assert(!ret);

	CMM_STORE_SHARED(defer_thread_stop, 0);
	/* defer thread should always exit when futex value is 0 */
	urcu_posix_assert(uatomic_read(&defer_thread_futex) == 0);
}

int rcu_defer_register_thread(void)
{
	int was_empty;

	urcu_posix_assert(URCU_TLS(defer_queue).last_head == 0);
	urcu_posix_assert(URCU_TLS(defer_queue).q == NULL);
	URCU_TLS(defer_queue).q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
	if (!URCU_TLS(defer_queue).q)
		return -ENOMEM;

	mutex_lock_defer(&defer_thread_mutex);
	mutex_lock_defer(&rcu_defer_mutex);
	was_empty = cds_list_empty(&registry_defer);
	cds_list_add(&URCU_TLS(defer_queue).list, &registry_defer);
	mutex_unlock(&rcu_defer_mutex);

	if (was_empty)
		start_defer_thread();
	mutex_unlock(&defer_thread_mutex);
	return 0;
}

void rcu_defer_unregister_thread(void)
{
	int is_empty;

	mutex_lock_defer(&defer_thread_mutex);
	mutex_lock_defer(&rcu_defer_mutex);
	cds_list_del(&URCU_TLS(defer_queue).list);
	_rcu_defer_barrier_thread();
	free(URCU_TLS(defer_queue).q);
	URCU_TLS(defer_queue).q = NULL;
	is_empty = cds_list_empty(&registry_defer);
	mutex_unlock(&rcu_defer_mutex);

	if (is_empty)
		stop_defer_thread();
	mutex_unlock(&defer_thread_mutex);
}

void rcu_defer_exit(void)
{
	urcu_posix_assert(cds_list_empty(&registry_defer));
}

#endif /* _URCU_DEFER_IMPL_H */