init version
[urcu.git] / urcu.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>

#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Global quiescent period parity */
int urcu_qparity;

int __thread urcu_active_readers[2];

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_data {
	pthread_t tid;
	int *urcu_active_readers;
};

static struct reader_data *reader_data;
static int num_readers, alloc_readers;
static int sig_done;

/*
 * called with urcu_mutex held.
 */
static int switch_next_urcu_qparity(void)
{
	int old_parity = urcu_qparity;
	urcu_qparity = 1 - old_parity;
	return old_parity;
}

static void force_mb_all_threads(void)
{
	struct reader_data *index;
	/*
	 * Ask each thread to execute a mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (!reader_data)
		return;
	sig_done = 0;
	mb();	/* write sig_done before sending the signals */
	for (index = reader_data; index < reader_data + num_readers; index++)
		pthread_kill(index->tid, SIGURCU);
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (sig_done < num_readers)
		barrier();
	mb();	/* read sig_done before returning */
}

void wait_for_quiescent_state(int parity)
{
	struct reader_data *index;

	if (!reader_data)
		return;
	/*
	 * Wait for each thread's urcu_active_readers count for the given
	 * parity to become 0.
	 */
	for (index = reader_data; index < reader_data + num_readers; index++) {
		/*
		 * BUSY-LOOP.
		 */
		while (index->urcu_active_readers[parity] != 0)
			barrier();
	}
	/*
	 * Locally: read index->urcu_active_readers[parity] before freeing old
	 * pointer.
	 * Remote (reader threads): order urcu_qparity update and other
	 * thread's quiescent state counter read.
	 */
	force_mb_all_threads();
}
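
/*
 * Illustrative sketch only (an assumption, not part of this file's
 * interface): the reader-side counterpart that wait_for_quiescent_state()
 * synchronizes against presumably lives in urcu.h.  Assuming readers
 * snapshot urcu_qparity and account for their critical sections in
 * urcu_active_readers[], it could look roughly like the helpers below;
 * the real rcu_read_lock()/rcu_read_unlock() may well differ.
 */
static inline int example_rcu_read_lock(void)
{
	int parity = urcu_qparity;	/* snapshot the current parity */

	urcu_active_readers[parity]++;
	/*
	 * Compiler barrier only; force_mb_all_threads() upgrades it to a
	 * real memory barrier through the SIGURCU handler.
	 */
	barrier();
	return parity;
}

static inline void example_rcu_read_unlock(int parity)
{
	barrier();
	urcu_active_readers[parity]--;	/* use the parity seen at lock time */
}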

/*
 * Return old pointer, OK to free, no more references exist.
 */
void *urcu_publish_content(void **ptr, void *new)
{
	int ret, prev_parity;
	void *oldptr;

	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}

	/*
	 * We can publish the new pointer before we change the current qparity.
	 * Readers seeing the new pointer while being in the previous qparity
	 * window will make us wait until the end of the quiescent state before
	 * we release the unrelated memory area. However, given we hold the
	 * urcu_mutex, we are making sure that no further garbage collection
	 * can occur until we release the mutex, therefore we guarantee that
	 * such a reader will have completed its execution using the new
	 * pointer when the next quiescent state window is over.
	 */
	oldptr = *ptr;
	*ptr = new;
	wmb();		/* Write ptr before changing the qparity */
	/* All threads should read qparity before ptr */
	force_mb_all_threads();
	prev_parity = switch_next_urcu_qparity();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state(prev_parity);
	/*
	 * Deleting old data is ok !
	 */

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
	return oldptr;
}
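
/*
 * Illustrative usage (hypothetical caller code, not part of the library):
 * a writer swaps in a new version of a shared structure and may free the
 * old copy as soon as urcu_publish_content() returns, since no reader from
 * the previous parity window can still reference it.  "struct example_data"
 * and "example_shared" are names made up for this sketch.
 */
struct example_data {
	int value;
};

static struct example_data *example_shared;

static void example_update(struct example_data *new_version)
{
	struct example_data *old;

	old = urcu_publish_content((void **)&example_shared, new_version);
	free(old);	/* no reader can still hold a reference to old */
}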

void urcu_add_reader(pthread_t id)
{
	if (!reader_data) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		reader_data =
			malloc(sizeof(struct reader_data) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		struct reader_data *oldarray;

		oldarray = reader_data;
		reader_data = malloc(sizeof(struct reader_data)
				* (alloc_readers << 1));
		memcpy(reader_data, oldarray,
			sizeof(struct reader_data) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	reader_data[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	reader_data[num_readers].urcu_active_readers = urcu_active_readers;
	num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
	struct reader_data *index;

	assert(reader_data != NULL);
	for (index = reader_data; index < reader_data + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &reader_data[num_readers - 1],
				sizeof(struct reader_data));
			reader_data[num_readers - 1].tid = 0;
			reader_data[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Hrm, not found, forgot to register? */
	assert(0);
}

void urcu_register_thread(void)
{
	pthread_t self = pthread_self();
	int ret;

	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}

	urcu_add_reader(self);

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

void urcu_unregister_thread(void)
{
	pthread_t self = pthread_self();
	int ret;

	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}

	urcu_remove_reader(self);

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
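
/*
 * Illustrative reader thread (hypothetical caller code, not part of the
 * library): a reader registers itself before entering read-side critical
 * sections and unregisters before exiting.  It reuses the example_* helpers
 * and example_shared data sketched above; the real read-side primitives are
 * assumed to come from urcu.h and may have a different interface.
 */
static void *example_reader_thread(void *arg)
{
	struct example_data *local;
	int parity;

	(void) arg;
	urcu_register_thread();

	parity = example_rcu_read_lock();
	local = example_shared;		/* read the currently published pointer */
	if (local)
		printf("read value %d\n", local->value);
	example_rcu_read_unlock(parity);

	urcu_unregister_thread();
	return NULL;
}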

void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	mb();
	atomic_inc(&sig_done);
}

void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	free(reader_data);
}