urcu.git: formal-model/urcu-nosched-model/result-signal-over-writer/testmerge/urcu_free_single_flip.spin.input
#define SINGLE_FLIP

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2


#if (NR_READERS == 1)

#define read_free_race (read_generation[0] == last_free_gen)
#define read_free (free_done && data_access[0])

#elif (NR_READERS == 2)

#define read_free_race (read_generation[0] == last_free_gen || read_generation[1] == last_free_gen)
#define read_free (free_done && (data_access[0] || data_access[1]))

#else

#error "Too many readers"

#endif
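
/*
 * read_free_race is true when a reader's snapshot of generation_ptr equals
 * the generation the writer last freed; read_free is true when a reader
 * still has data_access set after the free completed.  These are the error
 * conditions referenced by the "DEFINES" file included below.
 */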

#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
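/*
 * Bit 7 of the global counter is the grace-period phase bit; bits 0-6 hold
 * the reader nesting count.
 */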

#ifndef READER_NEST_LEVEL
#define READER_NEST_LEVEL 2
#endif

#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define get_pid()	(_pid)

/*
 * Each process has its own copy of the data in its cache.  Caches are
 * updated at random.  smp_wmb and smp_rmb force cache updates (write and
 * read respectively); smp_mb forces both.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bit cache_dirty_##x;
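
/*
 * DECLARE_CACHED_VAR(byte, x) declares the memory copy mem_x, the
 * per-process cached copies cached_x.val[0..NR_PROCS-1], and the dirty
 * bitfield cache_dirty_x (one bit per process).
 */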

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)				\
	atomic {					\
		cached_##x.val[get_pid()] = v;		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}
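
/*
 * READ_CACHED_VAR returns the caller's cached copy; WRITE_CACHED_VAR updates
 * that copy and marks it dirty so that a later write-back (random or forced
 * by a barrier) propagates it to the mem_##x memory copy.
 */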

#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->					\
		skip					\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;	\
	:: else ->			\
		skip			\
	fi;

/*
 * Nondeterministically propagate a cache line to/from memory (or not),
 * modeling random cache write-back and refill.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barriers to smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}

inline smp_wmb_pid(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}

inline smp_mb_pid(i, j)
{
	atomic {
#ifndef NO_WMB
		smp_wmb_pid(i, j);
#endif
#ifndef NO_RMB
		smp_rmb_pid(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(i);
#endif
#endif
	}
}

/*
 * Readers do a simple barrier(); writers do an smp_mb() _and_ send a signal
 * or IPI to make all readers execute an smp_mb().  We do not model the whole
 * rendezvous between readers and writers here; we just let the writer update
 * each reader's cache remotely.
 */
inline smp_mb(i, j)
{
	if
	:: get_pid() >= NR_READERS ->
		smp_mb_pid(get_pid(), j);
		i = 0;
		do
		:: i < NR_READERS ->
			smp_mb_pid(i, j);
			i++;
		:: i >= NR_READERS -> break
		od;
		smp_mb_pid(get_pid(), j);
	:: else -> skip;
	fi;
}

#else

inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

inline smp_mb(i, j)
{
	atomic {
#ifndef NO_WMB
		smp_wmb(i, j);
#endif
#ifndef NO_RMB
		smp_rmb(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(i);
#endif
#endif
	}
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: at most two readers are currently supported */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation[NR_READERS];
bit data_access[NR_READERS];

bit write_lock = 0;

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

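/*
 * ooo_mem() models out-of-order memory: for the calling process it
 * nondeterministically writes each dirty cached variable back to memory and
 * nondeterministically refreshes each clean cached copy from memory.
 */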
inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

#define get_readerid()	(get_pid())
#define get_writerid()	(get_readerid() + NR_READERS)

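/*
 * wait_for_reader() spins until reader "tmp" is either outside any read-side
 * critical section (nesting count is zero) or inside a critical section that
 * started after the current grace-period phase flip (its copy of the phase
 * bit matches urcu_gp_ctr).
 */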
inline wait_for_reader(tmp, tmp2, i, j)
{
	do
	:: 1 ->
		tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
		ooo_mem(i);
		if
		:: (tmp2 & RCU_GP_CTR_NEST_MASK)
			&& ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
				& RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb(i, j);
#else
			ooo_mem(i);
#endif
		:: else ->
			break;
		fi;
	od;
}

inline wait_for_quiescent_state(tmp, tmp2, i, j)
{
	tmp = 0;
	do
	:: tmp < NR_READERS ->
		wait_for_reader(tmp, tmp2, i, j);
		if
		:: (NR_READERS > 1) && (tmp < NR_READERS - 1)
			-> ooo_mem(i);
		:: else
			-> skip;
		fi;
		tmp++
	:: tmp >= NR_READERS -> break
	od;
}

/* Model the RCU read-side critical section. */

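/*
 * Each iteration of the first loop below models one nested rcu_read_lock():
 * the outermost lock snapshots urcu_gp_ctr into the reader's active-readers
 * slot, while nested locks simply increment the nesting count.  The second
 * loop models the matching rcu_read_unlock() calls.
 */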
inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK))
			->
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp2);
		:: else ->
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp + 1);
		fi;
		smp_mb(i, j);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	ooo_mem(i);
	read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
	ooo_mem(i);
	data_access[get_readerid()] = 1;
	ooo_mem(i);
	data_access[get_readerid()] = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb(i, j);
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	ooo_mem(i);
	//smp_mc(i); /* added */
}

active [NR_READERS] proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress.  The reader
		 * never blocks anyway.  Reader and writer progress have to be
		 * tested separately; otherwise we could believe the writer is
		 * making progress when it is in fact blocked by an
		 * always-progressing reader.
		 */
#ifdef READER_PROGRESS
		/* Only test progress of one random reader.  They are all the
		 * same. */
		atomic {
			if
			:: get_readerid() == 0 ->
progress_reader:
				skip;
			:: else ->
				skip;
			fi;
		}
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* Model the RCU update process. */

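/*
 * The writer models one update/free cycle: it bumps generation_ptr (standing
 * in for publishing a new pointer), takes write_lock, flips the grace-period
 * phase bit in urcu_gp_ctr, and waits for all readers to leave the previous
 * phase before "freeing" the old generation.  With SINGLE_FLIP defined, only
 * one flip/wait pass is performed instead of two.
 */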
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2;
	byte old_gen;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);

		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;
		smp_mb(i, j);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		//smp_mc(i);
		wait_for_quiescent_state(tmp, tmp2, i, j);
		//smp_mc(i);
#ifndef SINGLE_FLIP
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		//smp_mc(i);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, tmp2, i, j);
#endif
		smp_mb(i, j);
		write_lock = 0;
		/* free-up step, e.g., kfree(). */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break;
	od;
	/*
	 * Since the reader loops forever, let the writer also busy-loop with
	 * a progress label here so that, under weak fairness, the writer's
	 * progress can be tested.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;
}

/* Declared after the readers and writers so the pid numbering works out. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(generation_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			read_generation[i] = 1;
			data_access[i] = 0;
			i++;
		:: i >= NR_READERS -> break
		od;
		init_done = 1;
	}
}