/*
 * urcu.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid() (_pid)
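
/*
 * RCU_GP_CTR_BIT, RCU_GP_CTR_NEST_MASK and READER_NEST_LEVEL are used
 * below but not defined in this file; they are presumably supplied by
 * the build (e.g. spin -DREADER_NEST_LEVEL=1). The guarded defaults
 * below are plausible values for this byte-sized model, not necessarily
 * the ones used by the original test scripts.
 */
#ifndef READER_NEST_LEVEL
#define READER_NEST_LEVEL 1
#endif
#ifndef RCU_GP_CTR_BIT
#define RCU_GP_CTR_BIT (1 << 7)
#endif
#ifndef RCU_GP_CTR_NEST_MASK
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
#endif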

/*
 * Each process has its own cached copy of each variable. Caches are
 * updated at random times. smp_wmb flushes the local dirty cache lines
 * to memory (writes), smp_rmb refreshes clean cache lines from memory
 * (reads), and smp_mb does both.
 */

#define DECLARE_CACHED_VAR(type, x, v)          \
        type mem_##x = v;                       \
        type cached_##x[NR_PROCS] = v;          \
        bit cache_dirty_##x[NR_PROCS] = 0

#define IS_CACHE_DIRTY(x, id) (cache_dirty_##x[id])

#define READ_CACHED_VAR(x) (cached_##x[get_pid()])

#define WRITE_CACHED_VAR(x, v)                  \
        atomic {                                \
                cached_##x[get_pid()] = v;      \
                cache_dirty_##x[get_pid()] = 1; \
        }
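
/*
 * For example, DECLARE_CACHED_VAR(byte, x, 1) creates mem_x (the memory
 * copy), cached_x[NR_PROCS] (each process's private view) and the
 * cache_dirty_x[] flags. A process only ever reads and writes
 * cached_x[_pid]; its writes become visible to other processes once
 * CACHE_WRITE_TO_MEM copies them to mem_x.
 */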

#define CACHE_WRITE_TO_MEM(x, id)               \
        if                                      \
        :: IS_CACHE_DIRTY(x, id) ->             \
                mem_##x = cached_##x[id];       \
                cache_dirty_##x[id] = 0;        \
        :: else ->                              \
                skip                            \
        fi;

#define CACHE_READ_FROM_MEM(x, id)              \
        if                                      \
        :: !IS_CACHE_DIRTY(x, id) ->            \
                cached_##x[id] = mem_##x;       \
        :: else ->                              \
                skip                            \
        fi;
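
/*
 * Note that a dirty cache line is never refreshed from memory: it holds
 * a local write which has not yet reached mem_##x, and reloading it
 * would lose that store.
 */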

/*
 * Nondeterministically flush the cache to memory (write) or refresh it
 * from memory (read), or do nothing.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)        \
        if                                      \
        :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
        :: 1 -> skip                            \
        fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)       \
        if                                      \
        :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
        :: 1 -> skip                            \
        fi;
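
/*
 * Both branches of these if statements are always executable, so the
 * verifier explores each access both with and without a cache flush or
 * refresh at that point. This nondeterministic cache update is what
 * models out-of-order memory.
 */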

/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barrier() to a smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
                CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
                CACHE_READ_FROM_MEM(generation_ptr, i);
        }
}

inline smp_wmb_pid(i)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
                CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
                CACHE_WRITE_TO_MEM(generation_ptr, i);
        }
}

inline smp_mb_pid(i)
{
        atomic {
#ifndef NO_WMB
                smp_wmb_pid(i);
#endif
#ifndef NO_RMB
                smp_rmb_pid(i);
#endif
                skip;
        }
}

/*
 * Readers execute a simple compiler barrier(); the writer executes a
 * smp_mb() _and_ sends a signal (or IPI) to make every reader execute a
 * smp_mb(). We do not model the whole rendez-vous between readers and
 * writers here; we simply let the writer update each reader's cache
 * remotely.
 */
inline smp_mb(i)
{
        if
        :: get_pid() >= NR_READERS ->
                smp_mb_pid(get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        smp_mb_pid(i);
                        i++;
                :: i >= NR_READERS -> break
                od;
                smp_mb_pid(get_pid());
        :: else -> skip;
        fi;
}
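
/*
 * The smp_mb_pid(get_pid()) calls before and after the loop order the
 * writer's own accesses with respect to the remote cache updates it
 * performs on behalf of each reader.
 */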

#else

inline smp_rmb(i)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
                CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}

inline smp_wmb(i)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
                CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
        }
}

inline smp_mb(i)
{
        atomic {
#ifndef NO_WMB
                smp_wmb(i);
#endif
#ifndef NO_RMB
                smp_rmb(i);
#endif
                skip;
        }
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
/* Note: currently only one reader is modeled. */
DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr, 0);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation = 1;
bit data_access = 0;

bit write_lock = 0;

inline ooo_mem(i)
{
        atomic {
                RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
                RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
                RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
                RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}
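
/*
 * ooo_mem() is inserted between consecutive memory accesses in the
 * processes below; by randomly flushing and refreshing every cached
 * variable, it lets the verifier explore all access orderings the
 * cache model allows.
 */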

#define get_readerid() (get_pid())
#define get_writerid() (get_readerid() + NR_READERS)

inline wait_for_reader(tmp, id, i)
{
        do
        :: 1 ->
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_active_readers_one);
                if
                :: (tmp & RCU_GP_CTR_NEST_MASK)
                   && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
                       & RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
                        smp_mb(i);
#else
                        skip;
#endif
                :: else ->
                        break;
                fi;
        od;
}
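
/*
 * The writer busy-loops on a reader for as long as that reader is inside
 * a read-side critical section (nest count non-zero) started before the
 * current grace-period phase (its RCU_GP_CTR_BIT differs from the one in
 * urcu_gp_ctr). Either condition becoming false means the reader is
 * quiescent with respect to this grace period.
 */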

inline wait_for_quiescent_state(tmp, i, j)
{
        i = 0;
        do
        :: i < NR_READERS ->
                wait_for_reader(tmp, i, j);
                i++
        :: i >= NR_READERS -> break
        od;
}

/* Model the RCU read-side critical section. */

inline urcu_one_read(i, nest_i, tmp, tmp2)
{
        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_active_readers_one);
                ooo_mem(i);
                if
                :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
                        tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
                        ooo_mem(i);
                        WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
                :: else ->
                        WRITE_CACHED_VAR(urcu_active_readers_one, tmp + 1);
                fi;
                ooo_mem(i);
                smp_mb(i);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;
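
        /*
         * The loop above models nested rcu_read_lock(): the outermost
         * lock copies the global urcu_gp_ctr (whose low bits already
         * carry a nest count of 1); nested locks simply increment the
         * nest count.
         */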

        ooo_mem(i);
        read_generation = READ_CACHED_VAR(generation_ptr);
        ooo_mem(i);
        data_access = 1;
        ooo_mem(i);
        data_access = 0;
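
        /*
         * The snapshot of generation_ptr plus the data_access window
         * model a reader dereferencing the RCU-protected pointer. The
         * safety property (checked separately, e.g. by an accompanying
         * LTL claim) is that a reader never accesses data belonging to a
         * generation the writer has already freed.
         */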

        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                ooo_mem(i);
                smp_mb(i);
                ooo_mem(i);
                tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;
        ooo_mem(i);
        //smp_mc(i); /* added */
}

active [NR_READERS] proctype urcu_reader()
{
        byte i, nest_i;
        byte tmp, tmp2;

        assert(get_pid() < NR_PROCS);

end_reader:
        do
        :: 1 ->
                /*
                 * We do not test the reader's progress here, because we
                 * are mainly interested in the writer's progress; the
                 * reader never blocks anyway. Reader and writer progress
                 * must be tested separately, otherwise the writer could
                 * appear to make progress while it is in fact blocked by
                 * an always-progressing reader.
                 */
#ifdef READER_PROGRESS
progress_reader:
#endif
                urcu_one_read(i, nest_i, tmp, tmp2);
        od;
}

/* Model the RCU update process. */

active [NR_WRITERS] proctype urcu_writer()
{
        byte i, j;
        byte tmp;
        byte old_gen;

        assert(get_pid() < NR_PROCS);

        do
        :: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
                ooo_mem(i);
                atomic {
                        old_gen = READ_CACHED_VAR(generation_ptr);
                        WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
                }
                ooo_mem(i);

                do
                :: 1 ->
                        atomic {
                                if
                                :: write_lock == 0 ->
                                        write_lock = 1;
                                        break;
                                :: else ->
                                        skip;
                                fi;
                        }
                od;
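                /*
                 * The atomic test-and-set above implements a simple
                 * spinlock serializing writers (a single writer here,
                 * but kept for generality).
                 */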
                smp_mb(i);
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                ooo_mem(i);
                //smp_mc(i);
                wait_for_quiescent_state(tmp, i, j);
                //smp_mc(i);
#ifndef SINGLE_FLIP
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                //smp_mc(i);
                ooo_mem(i);
                wait_for_quiescent_state(tmp, i, j);
#endif
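                /*
                 * By default the grace-period phase bit is flipped
                 * twice, with a full wait for readers after each flip.
                 * Defining SINGLE_FLIP makes the model perform only one
                 * flip, which can be used to check whether a single
                 * phase change would suffice.
                 */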
                ooo_mem(i);
                smp_mb(i);
                ooo_mem(i);
                write_lock = 0;
                /* free-up step, e.g., kfree(). */
                atomic {
                        last_free_gen = old_gen;
                        free_done = 1;
                }
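                /*
                 * last_free_gen and free_done record which generation
                 * was freed, so a property can relate them to
                 * read_generation/data_access on the reader side.
                 */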
        :: else -> break;
        od;
        /*
         * Since the reader loops forever, let the writer also busy-loop,
         * marking progress, so that writer progress can be tested under
         * weak fairness.
         */
end_writer:
        do
        :: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
                skip;
        od;
}