urcu-reclaim cleanup
[urcu.git] / urcu-reclaim.c
CommitLineData
90075a50
MD
1/*
2 * urcu-reclaim.c
3 *
4 * Userspace RCU library - batch memory reclamation
5 *
6 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23#include <stdio.h>
24#include <pthread.h>
25#include <signal.h>
26#include <assert.h>
27#include <stdlib.h>
28#include <string.h>
29#include <errno.h>
30#include <poll.h>
31
32#include "urcu-reclaim-static.h"
33/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
34#include "urcu-reclaim.h"
35
90075a50
MD
/* Destructor: releases the reclaimer registry at process exit. */
void __attribute__((destructor)) urcu_reclaim_exit(void);

/* Provided by the urcu flavor this file is linked against. */
extern void synchronize_rcu(void);

/*
 * urcu_reclaim_mutex nests inside reclaim_thread_mutex.
 */
static pthread_mutex_t urcu_reclaim_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t reclaim_thread_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Written to only by each individual reclaimer. Read by both the reclaimer and
 * the reclamation thread.
 */
struct reclaim_queue __thread reclaim_queue;

/* Thread IDs of registered reclaimers */
#define INIT_NUM_THREADS 4

/* One registry entry per registered reclaimer thread. */
struct reclaimer_registry {
	pthread_t tid;
	/* Points at the owning thread's TLS reclaim_queue. */
	struct reclaim_queue *reclaim_queue;
	/* Queue head snapshot taken before the last grace period. */
	unsigned long last_head;
};

/* Dynamically grown array of registered reclaimers; protected by
 * urcu_reclaim_mutex. */
static struct reclaimer_registry *registry;
static int num_reclaimers, alloc_reclaimers;

/* Background reclamation thread and its shutdown flag. */
static pthread_t tid_reclaim;
static int exit_reclaim;
66
/*
 * Acquire @mutex, aborting the process on failure.
 *
 * pthread_mutex_lock()/pthread_mutex_trylock() return the error code
 * directly and do not set errno, so the failure is reported with
 * strerror(ret) rather than perror(), which would print an unrelated
 * errno value.
 */
static void internal_urcu_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		fprintf(stderr, "Error in pthread mutex lock: %s\n",
			strerror(ret));
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	/* Spin with trylock so a signal handler interrupting the owner
	 * cannot deadlock us; retry every 10ms. */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			fprintf(stderr, "Error in pthread mutex lock: %s\n",
				strerror(ret));
			exit(-1);
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
88
/*
 * Release @mutex, aborting the process on failure.
 *
 * pthread_mutex_unlock() returns the error code directly and does not
 * set errno, so report the failure with strerror(ret) instead of
 * perror().
 */
static void internal_urcu_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		fprintf(stderr, "Error in pthread mutex unlock: %s\n",
			strerror(ret));
		exit(-1);
	}
}
99
/*
 * Must be called after Q.S. is reached.
 *
 * Frees every pointer enqueued in @queue between the current tail and
 * @head (a snapshot of the queue head taken before the grace period),
 * then publishes the new tail so the owner thread sees the space as
 * available again.
 */
static void rcu_reclaim_barrier_queue(struct reclaim_queue *queue,
				      unsigned long head)
{
	unsigned long i;

	/*
	 * Tail is only modified when lock is held.
	 * Head is only modified by owner thread.
	 */

	for (i = queue->tail; i != head; i++) {
		smp_rmb();	/* read head before q[]. */
		free(LOAD_SHARED(queue->q[i & RECLAIM_QUEUE_MASK]));
	}
	smp_mb();	/* push tail after having used q[] */
	STORE_SHARED(queue->tail, i);
}
120
/*
 * Reclaim everything currently queued by the calling thread.
 *
 * The head snapshot must be taken BEFORE synchronize_rcu(): only entries
 * enqueued before the grace period are guaranteed unreachable by readers.
 * Caller must hold urcu_reclaim_mutex.
 */
static void _rcu_reclaim_barrier_thread(void)
{
	unsigned long head;

	head = reclaim_queue.head;
	synchronize_rcu();
	rcu_reclaim_barrier_queue(&reclaim_queue, head);
}
129
130
131void rcu_reclaim_barrier_thread(void)
132{
133 internal_urcu_lock(&urcu_reclaim_mutex);
134 _rcu_reclaim_barrier_thread();
135 internal_urcu_unlock(&urcu_reclaim_mutex);
136}
137
/*
 * Reclaim queued memory for all registered reclaimer threads.
 *
 * Two-phase protocol: first snapshot every queue's head, then wait for a
 * single shared grace period, then free up to each snapshot. The snapshot
 * must precede synchronize_rcu() so that only entries queued before the
 * grace period are freed.
 */
void rcu_reclaim_barrier(void)
{
	struct reclaimer_registry *index;

	/* No reclaimer ever registered: nothing to do. */
	if (!registry)
		return;

	internal_urcu_lock(&urcu_reclaim_mutex);
	for (index = registry; index < registry + num_reclaimers; index++)
		index->last_head = LOAD_SHARED(index->reclaim_queue->head);
	synchronize_rcu();
	for (index = registry; index < registry + num_reclaimers; index++)
		rcu_reclaim_barrier_queue(index->reclaim_queue,
					  index->last_head);
	internal_urcu_unlock(&urcu_reclaim_mutex);
}
154
155void *thr_reclaim(void *args)
156{
157 for (;;) {
158 if (LOAD_SHARED(exit_reclaim))
159 break;
160 poll(NULL,0,100); /* wait for 100ms */
161 rcu_reclaim_barrier();
162 }
163
164 return NULL;
165}
166
167/*
168 * library wrappers to be used by non-LGPL compatible source code.
169 */
170
/*
 * Queue @p to be freed after the next grace period. Exported wrapper
 * around the static-inline fast path for non-LGPL-compatible callers.
 */
void rcu_reclaim_queue(void *p)
{
	_rcu_reclaim_queue(p);
}
175
983e806f 176static void rcu_add_reclaimer(pthread_t id)
90075a50 177{
983e806f 178 struct reclaimer_registry *oldarray;
90075a50
MD
179
180 if (!registry) {
983e806f
MD
181 alloc_reclaimers = INIT_NUM_THREADS;
182 num_reclaimers = 0;
90075a50 183 registry =
983e806f 184 malloc(sizeof(struct reclaimer_registry) * alloc_reclaimers);
90075a50 185 }
983e806f 186 if (alloc_reclaimers < num_reclaimers + 1) {
90075a50 187 oldarray = registry;
983e806f
MD
188 registry = malloc(sizeof(struct reclaimer_registry)
189 * (alloc_reclaimers << 1));
90075a50 190 memcpy(registry, oldarray,
983e806f
MD
191 sizeof(struct reclaimer_registry) * alloc_reclaimers);
192 alloc_reclaimers <<= 1;
90075a50
MD
193 free(oldarray);
194 }
983e806f
MD
195 registry[num_reclaimers].tid = id;
196 /* reference to the TLS of _this_ reclaimer thread. */
197 registry[num_reclaimers].reclaim_queue = &reclaim_queue;
198 num_reclaimers++;
90075a50
MD
199}
200
201/*
202 * Never shrink (implementation limitation).
203 * This is O(nb threads). Eventually use a hash table.
204 */
983e806f 205static void rcu_remove_reclaimer(pthread_t id)
90075a50 206{
983e806f 207 struct reclaimer_registry *index;
90075a50
MD
208
209 assert(registry != NULL);
983e806f 210 for (index = registry; index < registry + num_reclaimers; index++) {
90075a50 211 if (pthread_equal(index->tid, id)) {
983e806f
MD
212 memcpy(index, &registry[num_reclaimers - 1],
213 sizeof(struct reclaimer_registry));
214 registry[num_reclaimers - 1].tid = 0;
215 registry[num_reclaimers - 1].reclaim_queue = NULL;
216 num_reclaimers--;
90075a50
MD
217 return;
218 }
219 }
220 /* Hrm not found, forgot to register ? */
221 assert(0);
222}
223
224static void start_reclaim_thread(void)
225{
226 int ret;
227
228 ret = pthread_create(&tid_reclaim, NULL, thr_reclaim,
229 NULL);
230 assert(!ret);
231}
232
233static void stop_reclaim_thread(void)
234{
235 int ret;
236 void *tret;
237
238 STORE_SHARED(exit_reclaim, 1);
239 ret = pthread_join(tid_reclaim, &tret);
240 assert(!ret);
241}
242
243void rcu_reclaim_register_thread(void)
244{
983e806f 245 int reclaimers;
90075a50
MD
246
247 internal_urcu_lock(&reclaim_thread_mutex);
248 internal_urcu_lock(&urcu_reclaim_mutex);
90075a50 249 reclaim_queue.q = malloc(sizeof(void *) * RECLAIM_QUEUE_SIZE);
983e806f
MD
250 rcu_add_reclaimer(pthread_self());
251 reclaimers = num_reclaimers;
90075a50
MD
252 internal_urcu_unlock(&urcu_reclaim_mutex);
253
983e806f 254 if (reclaimers == 1)
90075a50
MD
255 start_reclaim_thread();
256 internal_urcu_unlock(&reclaim_thread_mutex);
257}
258
/*
 * Unregister the calling thread: drain its queue (one last barrier,
 * which waits for a grace period, so no reader can still hold any of
 * the queued pointers), free the queue storage, and remove it from the
 * registry. The last reclaimer to leave also stops the reclamation
 * thread.
 *
 * Lock order: reclaim_thread_mutex outside urcu_reclaim_mutex.
 */
void rcu_reclaim_unregister_thread(void)
{
	int reclaimers;

	internal_urcu_lock(&reclaim_thread_mutex);
	internal_urcu_lock(&urcu_reclaim_mutex);
	/* Remove from registry first so the reclamation thread no longer
	 * touches this queue, then drain what is left ourselves. */
	rcu_remove_reclaimer(pthread_self());
	_rcu_reclaim_barrier_thread();
	free(reclaim_queue.q);
	reclaim_queue.q = NULL;
	reclaimers = num_reclaimers;
	internal_urcu_unlock(&urcu_reclaim_mutex);

	if (reclaimers == 0)
		stop_reclaim_thread();
	internal_urcu_unlock(&reclaim_thread_mutex);
}
276
90075a50
MD
277void urcu_reclaim_exit(void)
278{
279 free(registry);
280}
This page took 0.032704 seconds and 4 git commands to generate.