qsbr urcu: make it safe to call synchronize_rcu from a registered thread.

/*
 * urcu-qsbr.c
 *
 * Userspace RCU QSBR library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu-qsbr-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-qsbr.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Global grace period counter.
 */
long urcu_gp_ctr = 0;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
long __thread rcu_reader_qs_gp;
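/*
 * Layout note (see urcu-qsbr-static.h): urcu_gp_ctr is always even, since
 * it advances by 2 per grace period. An online reader snapshots
 * urcu_gp_ctr + 1 (odd) into rcu_reader_qs_gp, while an offline reader
 * leaves an even value, so the low-order bit tells writers whether a
 * thread may still be inside a read-side critical section.
 */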

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_registry {
	pthread_t tid;
	long *rcu_reader_qs_gp;
};

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static struct reader_registry *registry;
static int num_readers, alloc_readers;

static void internal_urcu_lock(void)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

#ifdef HAS_INCOHERENT_CACHES
static void force_mb_single_thread(struct reader_registry *index)
{
	smp_mb();
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */

static void force_mb_all_threads(void)
{
	smp_mb();
}

static void wait_for_quiescent_state(void)
{
	struct reader_registry *index;

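	/*
	 * If the calling thread is itself a registered reader currently
	 * online, mark it as having reached a quiescent state for the new
	 * grace period, so the wait loop below cannot deadlock waiting on
	 * the caller. This is what makes it safe to call synchronize_rcu()
	 * from a registered thread.
	 */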
	if (rcu_reader_qs_gp & 1)
		rcu_reader_qs_gp = urcu_gp_ctr + 1;

	if (!registry)
		return;
	/*
	 * Wait for each thread's rcu_reader_qs_gp to show quiescence:
	 * either offline (even value) or already caught up with urcu_gp_ctr.
	 */
	for (index = registry; index < registry + num_readers; index++) {
#ifndef HAS_INCOHERENT_CACHES
		while (rcu_gp_ongoing(index->rcu_reader_qs_gp) &&
		       (*index->rcu_reader_qs_gp - urcu_gp_ctr < 0))
			cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
		int wait_loops = 0;
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * rcu_reader_qs_gp update to memory if we wait for too long.
		 */
		while (rcu_gp_ongoing(index->rcu_reader_qs_gp) &&
		       (*index->rcu_reader_qs_gp - urcu_gp_ctr < 0)) {
			if (wait_loops++ == KICK_READER_LOOPS) {
				force_mb_single_thread(index);
				wait_loops = 0;
			} else {
				cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}

void synchronize_rcu(void)
{
	internal_urcu_lock();
	force_mb_all_threads();
	urcu_gp_ctr += 2;
	wait_for_quiescent_state();
	force_mb_all_threads();
	internal_urcu_unlock();
}
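
/*
 * Example (illustration only, not part of the library): the usual
 * writer-side pattern this function enables. "struct mydata",
 * "shared_ptr" and "writer_update" are hypothetical names; the calling
 * thread is assumed to be registered, which is safe here given the
 * self-quiescence check in wait_for_quiescent_state() above.
 */
#if 0
struct mydata *shared_ptr;

static void writer_update(struct mydata *newp)
{
	struct mydata *old;

	old = rcu_xchg_pointer(&shared_ptr, newp);	/* publish new version */
	synchronize_rcu();	/* wait until no reader can still hold "old" */
	free(old);		/* now safe to reclaim */
}
#endif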

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

void *rcu_dereference(void *p)
{
	return _rcu_dereference(p);
}

void *rcu_assign_pointer_sym(void **p, void *v)
{
	wmb();
	return STORE_SHARED(p, v);
}

void *rcu_xchg_pointer_sym(void **p, void *v)
{
	wmb();
	return xchg(p, v);
}

void *rcu_publish_content_sym(void **p, void *v)
{
	void *oldptr;

	oldptr = _rcu_xchg_pointer(p, v);
	synchronize_rcu();
	return oldptr;
}

void rcu_quiescent_state(void)
{
	_rcu_quiescent_state();
}

void rcu_thread_offline(void)
{
	_rcu_thread_offline();
}

void rcu_thread_online(void)
{
	_rcu_thread_online();
}
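
/*
 * Example (illustration only, not part of the library): the reader-side
 * contract in QSBR. A registered thread must either pass through
 * rcu_quiescent_state() regularly or mark itself offline while blocked,
 * or writers in synchronize_rcu() will wait forever. "shutting_down",
 * "do_work" and "wait_for_event" are hypothetical.
 */
#if 0
static volatile int shutting_down;	/* hypothetical exit flag */

static void reader_loop(void)
{
	while (!shutting_down) {
		rcu_read_lock();
		do_work();		/* may use rcu_dereference() */
		rcu_read_unlock();
		rcu_quiescent_state();	/* announce a quiescent state */

		rcu_thread_offline();	/* no RCU reads while blocked */
		wait_for_event();
		rcu_thread_online();
	}
}
#endif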

static void rcu_add_reader(pthread_t id)
{
	struct reader_registry *oldarray;

	if (!registry) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		registry =
			malloc(sizeof(struct reader_registry) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = registry;
		registry = malloc(sizeof(struct reader_registry)
				* (alloc_readers << 1));
		memcpy(registry, oldarray,
			sizeof(struct reader_registry) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	registry[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	registry[num_readers].rcu_reader_qs_gp = &rcu_reader_qs_gp;
	num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
static void rcu_remove_reader(pthread_t id)
{
	struct reader_registry *index;

	assert(registry != NULL);
	for (index = registry; index < registry + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &registry[num_readers - 1],
				sizeof(struct reader_registry));
			registry[num_readers - 1].tid = 0;
			registry[num_readers - 1].rcu_reader_qs_gp = NULL;
			num_readers--;
			return;
		}
	}
	/* Not found: did the caller forget to register? */
	assert(0);
}

void rcu_register_thread(void)
{
	internal_urcu_lock();
	rcu_add_reader(pthread_self());
	internal_urcu_unlock();
	_rcu_thread_online();
}

void rcu_unregister_thread(void)
{
	/*
	 * We have to make the thread offline, otherwise we end up
	 * deadlocking with a waiting writer.
	 */
	_rcu_thread_offline();
	internal_urcu_lock();
	rcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}
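
/*
 * Example (illustration only, not part of the library): the expected
 * thread lifecycle. rcu_register_thread() leaves the thread online, so
 * it may read immediately, and every registered thread must unregister
 * before exiting. "reader_loop" refers to the hypothetical sketch above.
 */
#if 0
static void *thread_main(void *arg)
{
	rcu_register_thread();
	reader_loop();
	rcu_unregister_thread();
	return NULL;
}
#endif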