Add rcu-reclaim.so library
[urcu.git] / urcu-reclaim.c
/*
 * urcu-reclaim.c
 *
 * Userspace RCU library - batch memory reclamation
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu-reclaim-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-reclaim.h"

void __attribute__((constructor)) urcu_reclaim_init(void);
void __attribute__((destructor)) urcu_reclaim_exit(void);

extern void synchronize_rcu(void);

static int init_done;

/*
 * urcu_reclaim_mutex nests inside reclaim_thread_mutex.
 */
static pthread_mutex_t urcu_reclaim_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t reclaim_thread_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
struct reclaim_queue __thread reclaim_queue;
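
/*
 * Note: struct reclaim_queue itself is defined in urcu-reclaim-static.h.
 * From its use below it is assumed to hold a free-running head counter
 * (bumped only by the queueing thread), a free-running tail counter
 * (bumped under urcu_reclaim_mutex when entries are freed), and a q[]
 * array of RECLAIM_QUEUE_SIZE pointers indexed through RECLAIM_QUEUE_MASK.
 */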

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_registry {
	pthread_t tid;
	struct reclaim_queue *reclaim_queue;
	unsigned long last_head;
};

static struct reader_registry *registry;
static int num_readers, alloc_readers;

static pthread_t tid_reclaim;
static int exit_reclaim;

static void internal_urcu_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void internal_urcu_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Must be called after a quiescent state (Q.S.) has been reached, i.e. after
 * a grace period, so readers can no longer hold references to the queued
 * elements being freed.
 */
static void rcu_reclaim_barrier_queue(struct reclaim_queue *queue,
				      unsigned long head)
{
	unsigned long i;

	/*
	 * Tail is only modified when lock is held.
	 * Head is only modified by owner thread.
	 */

	for (i = queue->tail; i != head; i++) {
		smp_rmb();	/* read head before q[]. */
		free(LOAD_SHARED(queue->q[i & RECLAIM_QUEUE_MASK]));
	}
	smp_mb();	/* push tail after having used q[] */
	STORE_SHARED(queue->tail, i);
}
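
/*
 * Illustration only: a minimal, self-contained sketch of the free-running
 * index scheme the loop above relies on.  Everything below (the
 * URCU_RECLAIM_EXAMPLE guard, DEMO_QUEUE_* and demo_*) is hypothetical and
 * not part of the urcu API; the real queue size and mask come from
 * urcu-reclaim-static.h.  Head and tail only ever increase, and the
 * power-of-two mask maps them onto array slots, so "tail != head" remains a
 * correct emptiness test even after the counters wrap around.
 */
#ifdef URCU_RECLAIM_EXAMPLE	/* hypothetical guard: not built by default */
#define DEMO_QUEUE_SIZE	8	/* must be a power of two */
#define DEMO_QUEUE_MASK	(DEMO_QUEUE_SIZE - 1)

static void demo_ring_indexing(void)
{
	void *slots[DEMO_QUEUE_SIZE];
	unsigned long head = 0, tail = 0;
	int i;

	/* Producer side: fill a slot, then advance the head counter. */
	for (i = 0; i < 3; i++) {
		slots[head & DEMO_QUEUE_MASK] = NULL;
		head++;
	}

	/* Consumer side: drain every slot published so far. */
	for (; tail != head; tail++)
		(void) slots[tail & DEMO_QUEUE_MASK];
}
#endif	/* URCU_RECLAIM_EXAMPLE */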

static void _rcu_reclaim_barrier_thread(void)
{
	unsigned long head;

	head = reclaim_queue.head;
	synchronize_rcu();
	rcu_reclaim_barrier_queue(&reclaim_queue, head);
}


void rcu_reclaim_barrier_thread(void)
{
	internal_urcu_lock(&urcu_reclaim_mutex);
	_rcu_reclaim_barrier_thread();
	internal_urcu_unlock(&urcu_reclaim_mutex);
}

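/*
 * Flush the queues of every registered reader: snapshot each queue head,
 * wait for one grace period so readers cannot still be using the snapshotted
 * elements, then free everything up to the snapshot.
 */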
void rcu_reclaim_barrier(void)
{
	struct reader_registry *index;

	if (!registry)
		return;

	internal_urcu_lock(&urcu_reclaim_mutex);
	for (index = registry; index < registry + num_readers; index++)
		index->last_head = LOAD_SHARED(index->reclaim_queue->head);
	synchronize_rcu();
	for (index = registry; index < registry + num_readers; index++)
		rcu_reclaim_barrier_queue(index->reclaim_queue,
					  index->last_head);
	internal_urcu_unlock(&urcu_reclaim_mutex);
}

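/*
 * Background reclamation thread: periodically flushes every registered
 * queue until the library asks it to exit.
 */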
void *thr_reclaim(void *args)
{
	for (;;) {
		if (LOAD_SHARED(exit_reclaim))
			break;
		poll(NULL, 0, 100);	/* wait for 100ms */
		rcu_reclaim_barrier();
	}

	return NULL;
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_reclaim_queue(void *p)
{
	_rcu_reclaim_queue(p);
}
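
/*
 * Illustration only: a sketch of how an application thread might use the
 * wrappers above, assuming the usual liburcu reader API from urcu.h
 * (rcu_register_thread(), rcu_assign_pointer(), ...).  The
 * URCU_RECLAIM_EXAMPLE guard, struct demo_node, demo_head and the demo_*
 * functions are hypothetical names, not part of this library.
 */
#ifdef URCU_RECLAIM_EXAMPLE	/* hypothetical guard: not built by default */
#include <urcu.h>

struct demo_node {
	int value;
};

static struct demo_node *demo_head;

static void demo_update(int value)
{
	struct demo_node *new_node, *old_node;

	new_node = malloc(sizeof(*new_node));
	if (!new_node)
		return;
	new_node->value = value;

	old_node = demo_head;
	rcu_assign_pointer(demo_head, new_node);
	/*
	 * Defer the free: the old node is only reclaimed by the reclamation
	 * thread after a grace period, so concurrent readers stay safe.
	 */
	if (old_node)
		rcu_reclaim_queue(old_node);
}

static void *demo_thread(void *arg)
{
	rcu_register_thread();		/* register as an RCU reader */
	rcu_reclaim_register_thread();	/* create this thread's reclaim queue */

	demo_update(42);

	rcu_reclaim_unregister_thread();
	rcu_unregister_thread();
	return NULL;
}
#endif	/* URCU_RECLAIM_EXAMPLE */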

static void rcu_add_reader(pthread_t id)
{
	struct reader_registry *oldarray;

	if (!registry) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		registry =
			malloc(sizeof(struct reader_registry) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = registry;
		registry = malloc(sizeof(struct reader_registry)
				* (alloc_readers << 1));
		memcpy(registry, oldarray,
			sizeof(struct reader_registry) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	registry[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	registry[num_readers].reclaim_queue = &reclaim_queue;
	num_readers++;
}

/*
 * The registry never shrinks (implementation limitation).
 * Removal is O(number of threads). Eventually use a hash table.
 */
static void rcu_remove_reader(pthread_t id)
{
	struct reader_registry *index;

	assert(registry != NULL);
	for (index = registry; index < registry + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &registry[num_readers - 1],
				sizeof(struct reader_registry));
			registry[num_readers - 1].tid = 0;
			registry[num_readers - 1].reclaim_queue = NULL;
			num_readers--;
			return;
		}
	}
	/* Hrm, not found. Did the caller forget to register? */
	assert(0);
}

static void start_reclaim_thread(void)
{
	int ret;

	ret = pthread_create(&tid_reclaim, NULL, thr_reclaim, NULL);
	assert(!ret);
}

static void stop_reclaim_thread(void)
{
	int ret;
	void *tret;

	STORE_SHARED(exit_reclaim, 1);
	ret = pthread_join(tid_reclaim, &tret);
	assert(!ret);
}

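/*
 * Register the calling thread: allocate its reclaim queue and add it to the
 * registry.  The first thread to register also starts the background
 * reclamation thread; the last one to unregister (below) stops it.
 */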
void rcu_reclaim_register_thread(void)
{
	int readers;

	internal_urcu_lock(&reclaim_thread_mutex);
	internal_urcu_lock(&urcu_reclaim_mutex);
	/* In case gcc does not support constructor attribute */
	urcu_reclaim_init();
	reclaim_queue.q = malloc(sizeof(void *) * RECLAIM_QUEUE_SIZE);
	rcu_add_reader(pthread_self());
	readers = num_readers;
	internal_urcu_unlock(&urcu_reclaim_mutex);

	if (readers == 1)
		start_reclaim_thread();
	internal_urcu_unlock(&reclaim_thread_mutex);
}

void rcu_reclaim_unregister_thread(void)
{
	int readers;

	internal_urcu_lock(&reclaim_thread_mutex);
	internal_urcu_lock(&urcu_reclaim_mutex);
	rcu_remove_reader(pthread_self());
	_rcu_reclaim_barrier_thread();
	free(reclaim_queue.q);
	reclaim_queue.q = NULL;
	readers = num_readers;
	internal_urcu_unlock(&urcu_reclaim_mutex);

	if (readers == 0)
		stop_reclaim_thread();
	internal_urcu_unlock(&reclaim_thread_mutex);
}

/*
 * urcu_reclaim_init constructor. Called when the library is loaded, but also
 * when reader threads call rcu_reclaim_register_thread(). Must only be called
 * by a single thread at a given time. This is ensured by holding
 * internal_urcu_lock(&urcu_reclaim_mutex) from rcu_reclaim_register_thread(),
 * or by running at library load time, which cannot be executed by multiple
 * threads nor concurrently with rcu_reclaim_register_thread() anyway.
 */
void urcu_reclaim_init(void)
{
	if (init_done)
		return;
	init_done = 1;
}

void urcu_reclaim_exit(void)
{
	free(registry);
}