urcu-bp: don't copy old region upon mremap
[urcu.git] / urcu-bp.c
/*
 * urcu-bp.c
 *
 * Userspace RCU library, "bulletproof" version.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <sys/mman.h>

#include "urcu/map/urcu-bp.h"

#include "urcu/static/urcu-bp.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-bp.h"

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

/* Sleep delay in us */
#define RCU_SLEEP_DELAY 1000
#define ARENA_INIT_ALLOC 16

void __attribute__((destructor)) rcu_bp_exit(void);

static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_PHASE.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
long rcu_gp_ctr = RCU_GP_COUNT;

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
struct rcu_reader __thread *rcu_reader;

static CDS_LIST_HEAD(registry);
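/*
 * Registered readers live in a single mmap()'d arena of struct rcu_reader
 * slots: "p" is the base of the mapping, "len" its mapped size in bytes,
 * and "used" the number of bytes currently handed out to threads.
 */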
struct registry_arena {
	void *p;
	size_t len;
	size_t used;
};

static struct registry_arena registry_arena;

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void rcu_gc_registry(void);
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
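/*
 * Flip the grace-period parity bit and wait until no registered reader
 * still holds a reference to the previous parity. Called twice per
 * grace period, with rcu_gp_lock held.
 */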
void update_counter_and_wait(void)
{
	CDS_LIST_HEAD(qsreaders);
	int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for each thread's rcu_reader.ctr count to become 0.
	 */
	for (;;) {
		wait_loops++;
		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
			if (!rcu_old_gp_ongoing(&index->ctr))
				cds_list_move(&index->node, &qsreaders);
		}

		if (cds_list_empty(&registry)) {
			break;
		} else {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
				usleep(RCU_SLEEP_DELAY);
			else
				caa_cpu_relax();
		}
	}
	/* Put back the reader list in the registry. */
	cds_list_splice(&qsreaders, &registry);
}
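/*
 * Wait for a grace period to elapse: all read-side critical sections that
 * were ongoing when this function is called are guaranteed to have
 * completed before it returns, so the caller can then safely reclaim
 * memory those readers may have been accessing.
 */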
void synchronize_rcu(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigemptyset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data structure
	 * pointed to by the new pointer. Write the new pointer before changing
	 * the qparity.
	 */
	cmm_smp_mb();

	/* Remove old registry elements */
	rcu_gc_registry();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	cmm_smp_mb();
out:
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

/*
 * Only grow for now.
 */
static void resize_arena(struct registry_arena *arena, size_t len)
{
	void *new_arena;
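	/*
	 * Growing an existing arena uses mremap() with MREMAP_MAYMOVE: the
	 * kernel extends the mapping in place when it can, and otherwise
	 * moves it while preserving its contents, so the old region never
	 * has to be copied in userspace.
	 */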
	if (!arena->p)
		new_arena = mmap(arena->p, len,
				 PROT_READ | PROT_WRITE,
				 MAP_ANONYMOUS | MAP_PRIVATE,
				 -1, 0);
	else
		new_arena = mremap(arena->p, arena->len,
				   len, MREMAP_MAYMOVE);
	assert(new_arena != MAP_FAILED);

	/*
	 * Re-used the same region?
	 */
	if (new_arena == arena->p) {
		arena->len = len;
		return;
	}

	bzero(new_arena + arena->len, len - arena->len);
	arena->p = new_arena;
	arena->len = len;
}
/* Called with signals off and mutex locked */
static void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;

	if (registry_arena.len
	    < registry_arena.used + sizeof(struct rcu_reader))
		resize_arena(&registry_arena,
			     max(registry_arena.len << 1, ARENA_INIT_ALLOC));
	/*
	 * Find a free spot.
	 */
	for (rcu_reader_reg = registry_arena.p;
	     (void *)rcu_reader_reg < registry_arena.p + registry_arena.len;
	     rcu_reader_reg++) {
		if (!rcu_reader_reg->alloc)
			break;
	}
	rcu_reader_reg->alloc = 1;
	registry_arena.used += sizeof(struct rcu_reader);

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	rcu_reader = rcu_reader_reg;
}

/* Called with signals off and mutex locked */
static void rcu_gc_registry(void)
{
	struct rcu_reader *rcu_reader_reg;
	pthread_t tid;
	int ret;

	for (rcu_reader_reg = registry_arena.p;
	     (void *)rcu_reader_reg < registry_arena.p + registry_arena.len;
	     rcu_reader_reg++) {
		if (!rcu_reader_reg->alloc)
			continue;
		tid = rcu_reader_reg->tid;
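		/*
		 * pthread_kill() with signal 0 performs error checking
		 * only: ESRCH means the target thread has exited, so its
		 * registry slot can be reclaimed.
		 */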
		ret = pthread_kill(tid, 0);
		assert(ret != EINVAL);
		if (ret == ESRCH) {
			cds_list_del(&rcu_reader_reg->node);
			rcu_reader_reg->ctr = 0;
			rcu_reader_reg->alloc = 0;
			registry_arena.used -= sizeof(struct rcu_reader);
		}
	}
}

/* Disable signals, take mutex, add to registry */
void rcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigemptyset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
	assert(!ret);

	/*
	 * Check if a signal handler concurrently registered our thread
	 * since the check in rcu_read_lock().
	 */
	if (rcu_reader)
		goto end;

	mutex_lock(&rcu_gp_lock);
	add_thread();
	mutex_unlock(&rcu_gp_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
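/* Library destructor: tear down the registry arena mapping. */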
void rcu_bp_exit(void)
{
	munmap(registry_arena.p, registry_arena.len);
}
/*
 * Holding the rcu_gp_lock across fork makes sure the fork() does not race
 * with a concurrent thread holding this same lock. This ensures that the
 * registry is in a coherent state in the child.
 */
void rcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigemptyset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	saved_fork_signal_mask = oldmask;
}

void rcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

void rcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;
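	/*
	 * Only the thread that called fork() exists in the child; prune
	 * registry entries left over from the parent's other threads.
	 */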
	rcu_gc_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"