hash table comment fix.
[urcu.git] / formal-model / urcu-nosched-model / result-signal-over-writer / asserts.spin.input
1
2 #define read_free_race (read_generation[0] == last_free_gen)
3 #define read_free (free_done && data_access[0])
4
5 #define TEST_SIGNAL
6 //#define TEST_SIGNAL_ON_READ
7 #define TEST_SIGNAL_ON_WRITE
8
9 #define RCU_GP_CTR_BIT (1 << 7)
10 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
11
12 #ifndef READER_NEST_LEVEL
13 #define READER_NEST_LEVEL 1
14 #endif
15
16 #define REMOTE_BARRIERS
17 /*
18 * mem.spin: Promela code to validate memory barriers with OOO memory.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
33 *
34 * Copyright (c) 2009 Mathieu Desnoyers
35 */
36
37 /* Promela validation variables. */
38
39 /* specific defines "included" here */
40 /* DEFINES file "included" here */
41
42 /* All signal readers have the same PID and use the same reader variable */
43 #ifdef TEST_SIGNAL_ON_WRITE
44
45 #define NR_READERS 1 /* the writer is also a signal reader */
46 #define NR_WRITERS 1
47
48 #define NR_PROCS 1
49
50 #define get_pid() (0)
51
52 #elif defined(TEST_SIGNAL_ON_READ)
53
54 #define get_pid() ((_pid < 2) -> 0 : 1)
55
56 #define NR_READERS 1
57 #define NR_WRITERS 1
58
59 #define NR_PROCS 2
60
61 #else
62
63 #define get_pid() (_pid)
64
65 #define NR_READERS 1
66 #define NR_WRITERS 1
67
68 #define NR_PROCS 2
69
70 #endif
71
72 #define get_readerid() (get_pid())
73
74 /*
75  * Each process has its own data in cache. Caches are randomly updated.
76 * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
77 * both.
78 */
79
80 typedef per_proc_byte {
81 byte val[NR_PROCS];
82 };
83
84 /* Bitfield has a maximum of 8 procs */
85 typedef per_proc_bit {
86 byte bitfield;
87 };
88
89 #define DECLARE_CACHED_VAR(type, x) \
90 type mem_##x; \
91 per_proc_##type cached_##x; \
92 per_proc_bit cache_dirty_##x;
93
94 #define INIT_CACHED_VAR(x, v, j) \
95 mem_##x = v; \
96 cache_dirty_##x.bitfield = 0; \
97 j = 0; \
98 do \
99 :: j < NR_PROCS -> \
100 cached_##x.val[j] = v; \
101 j++ \
102 :: j >= NR_PROCS -> break \
103 od;
104
105 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))
106
107 #define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])
108
109 #define WRITE_CACHED_VAR(x, v) \
110 atomic { \
111 cached_##x.val[get_pid()] = v; \
112 cache_dirty_##x.bitfield = \
113 cache_dirty_##x.bitfield | (1 << get_pid()); \
114 }
115
116 #define CACHE_WRITE_TO_MEM(x, id) \
117 if \
118 :: IS_CACHE_DIRTY(x, id) -> \
119 mem_##x = cached_##x.val[id]; \
120 cache_dirty_##x.bitfield = \
121 cache_dirty_##x.bitfield & (~(1 << id)); \
122 :: else -> \
123 skip \
124 fi;
125
126 #define CACHE_READ_FROM_MEM(x, id) \
127 if \
128 :: !IS_CACHE_DIRTY(x, id) -> \
129 cached_##x.val[id] = mem_##x;\
130 :: else -> \
131 skip \
132 fi;
133
134 /*
135 * May update other caches if cache is dirty, or not.
136 */
137 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
138 if \
139 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
140 :: 1 -> skip \
141 fi;
142
143 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
144 if \
145 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
146 :: 1 -> skip \
147 fi;
148
149 /*
150 * Remote barriers tests the scheme where a signal (or IPI) is sent to all
151 * reader threads to promote their compiler barrier to a smp_mb().
152 */
153 #ifdef REMOTE_BARRIERS
154
155 inline smp_rmb_pid(i, j)
156 {
157 atomic {
158 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
159 j = 0;
160 do
161 :: j < NR_READERS ->
162 CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
163 j++
164 :: j >= NR_READERS -> break
165 od;
166 CACHE_READ_FROM_MEM(generation_ptr, i);
167 }
168 }
169
170 inline smp_wmb_pid(i, j)
171 {
172 atomic {
173 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
174 j = 0;
175 do
176 :: j < NR_READERS ->
177 CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
178 j++
179 :: j >= NR_READERS -> break
180 od;
181 CACHE_WRITE_TO_MEM(generation_ptr, i);
182 }
183 }
184
185 inline smp_mb_pid(i, j)
186 {
187 atomic {
188 #ifndef NO_WMB
189 smp_wmb_pid(i, j);
190 #endif
191 #ifndef NO_RMB
192 smp_rmb_pid(i, j);
193 #endif
194 #ifdef NO_WMB
195 #ifdef NO_RMB
196 ooo_mem(j);
197 #endif
198 #endif
199 }
200 }
201
202 /*
203 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
204 * signal or IPI to have all readers execute a smp_mb.
205 * We are not modeling the whole rendez-vous between readers and writers here,
206 * we just let the writer update each reader's caches remotely.
207 */
208 inline smp_mb_writer(i, j)
209 {
210 smp_mb_pid(get_pid(), j);
211 i = 0;
212 do
213 :: i < NR_READERS ->
214 smp_mb_pid(i, j);
215 i++;
216 :: i >= NR_READERS -> break
217 od;
218 smp_mb_pid(get_pid(), j);
219 }
220
221 inline smp_mb_reader(i, j)
222 {
223 skip;
224 }
225
226 #else
227
228 inline smp_rmb(i, j)
229 {
230 atomic {
231 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
232 i = 0;
233 do
234 :: i < NR_READERS ->
235 CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
236 i++
237 :: i >= NR_READERS -> break
238 od;
239 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
240 }
241 }
242
243 inline smp_wmb(i, j)
244 {
245 atomic {
246 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
247 i = 0;
248 do
249 :: i < NR_READERS ->
250 CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
251 i++
252 :: i >= NR_READERS -> break
253 od;
254 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
255 }
256 }
257
258 inline smp_mb(i, j)
259 {
260 atomic {
261 #ifndef NO_WMB
262 smp_wmb(i, j);
263 #endif
264 #ifndef NO_RMB
265 smp_rmb(i, j);
266 #endif
267 #ifdef NO_WMB
268 #ifdef NO_RMB
269 ooo_mem(i);
270 #endif
271 #endif
272 }
273 }
274
275 inline smp_mb_writer(i, j)
276 {
277 smp_mb(i, j);
278 }
279
280 inline smp_mb_reader(i, j)
281 {
282 smp_mb(i, j);
283 }
284
285 #endif
286
287 /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
288 DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
289 /* Note ! array sized by NR_READERS (only 1 reader in this configuration) */
290 DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
291 /* pointer generation */
292 DECLARE_CACHED_VAR(byte, generation_ptr);
293
294 byte last_free_gen = 0;
295 bit free_done = 0;
296 byte read_generation[NR_READERS];
297 bit data_access[NR_READERS];
298
299 bit write_lock = 0;
300
301 bit init_done = 0;
302
303 bit sighand_exec = 0;
304
305 inline wait_init_done()
306 {
307 do
308 :: init_done == 0 -> skip;
309 :: else -> break;
310 od;
311 }
312
313 #ifdef TEST_SIGNAL
314
315 inline wait_for_sighand_exec()
316 {
317 sighand_exec = 0;
318 do
319 :: sighand_exec == 0 -> skip;
320 :: else -> break;
321 od;
322 }
323
324 #ifdef TOO_BIG_STATE_SPACE
325 inline wait_for_sighand_exec()
326 {
327 sighand_exec = 0;
328 do
329 :: sighand_exec == 0 -> skip;
330 :: else ->
331 if
332 :: 1 -> break;
333 :: 1 -> sighand_exec = 0;
334 skip;
335 fi;
336 od;
337 }
338 #endif
339
340 #else
341
342 inline wait_for_sighand_exec()
343 {
344 skip;
345 }
346
347 #endif
348
349 #ifdef TEST_SIGNAL_ON_WRITE
350 /* Block on signal handler execution */
351 inline dispatch_sighand_write_exec()
352 {
353 sighand_exec = 1;
354 do
355 :: sighand_exec == 1 ->
356 skip;
357 :: else ->
358 break;
359 od;
360 }
361
362 #else
363
364 inline dispatch_sighand_write_exec()
365 {
366 skip;
367 }
368
369 #endif
370
371 #ifdef TEST_SIGNAL_ON_READ
372 /* Block on signal handler execution */
373 inline dispatch_sighand_read_exec()
374 {
375 sighand_exec = 1;
376 do
377 :: sighand_exec == 1 ->
378 skip;
379 :: else ->
380 break;
381 od;
382 }
383
384 #else
385
386 inline dispatch_sighand_read_exec()
387 {
388 skip;
389 }
390
391 #endif
392
393
394 inline ooo_mem(i)
395 {
396 atomic {
397 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
398 i = 0;
399 do
400 :: i < NR_READERS ->
401 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
402 get_pid());
403 i++
404 :: i >= NR_READERS -> break
405 od;
406 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
407 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
408 i = 0;
409 do
410 :: i < NR_READERS ->
411 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
412 get_pid());
413 i++
414 :: i >= NR_READERS -> break
415 od;
416 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
417 }
418 }
419
420 inline wait_for_reader(tmp, tmp2, i, j)
421 {
422 do
423 :: 1 ->
424 tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
425 ooo_mem(i);
426 dispatch_sighand_write_exec();
427 if
428 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
429 && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
430 & RCU_GP_CTR_BIT) ->
431 #ifndef GEN_ERROR_WRITER_PROGRESS
432 smp_mb_writer(i, j);
433 #else
434 ooo_mem(i);
435 #endif
436 dispatch_sighand_write_exec();
437 :: else ->
438 break;
439 fi;
440 od;
441 }
442
443 inline wait_for_quiescent_state(tmp, tmp2, i, j)
444 {
445 tmp = 0;
446 do
447 :: tmp < NR_READERS ->
448 wait_for_reader(tmp, tmp2, i, j);
449 if
450 :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
451 -> ooo_mem(i);
452 dispatch_sighand_write_exec();
453 :: else
454 -> skip;
455 fi;
456 tmp++
457 :: tmp >= NR_READERS -> break
458 od;
459 }
460
461 /* Model the RCU read-side critical section. */
462
463 #ifndef TEST_SIGNAL_ON_WRITE
464
465 inline urcu_one_read(i, j, nest_i, tmp, tmp2)
466 {
467 nest_i = 0;
468 do
469 :: nest_i < READER_NEST_LEVEL ->
470 ooo_mem(i);
471 dispatch_sighand_read_exec();
472 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
473 ooo_mem(i);
474 dispatch_sighand_read_exec();
475 if
476 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
477 ->
478 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
479 ooo_mem(i);
480 dispatch_sighand_read_exec();
481 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
482 tmp2);
483 :: else ->
484 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
485 tmp + 1);
486 fi;
487 smp_mb_reader(i, j);
488 dispatch_sighand_read_exec();
489 nest_i++;
490 :: nest_i >= READER_NEST_LEVEL -> break;
491 od;
492
493 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
494 data_access[get_readerid()] = 1;
495 data_access[get_readerid()] = 0;
496
497 nest_i = 0;
498 do
499 :: nest_i < READER_NEST_LEVEL ->
500 smp_mb_reader(i, j);
501 dispatch_sighand_read_exec();
502 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
503 ooo_mem(i);
504 dispatch_sighand_read_exec();
505 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
506 nest_i++;
507 :: nest_i >= READER_NEST_LEVEL -> break;
508 od;
509 //ooo_mem(i);
510 //dispatch_sighand_read_exec();
511 //smp_mc(i); /* added */
512 }
513
514 active proctype urcu_reader()
515 {
516 byte i, j, nest_i;
517 byte tmp, tmp2;
518
519 wait_init_done();
520
521 assert(get_pid() < NR_PROCS);
522
523 end_reader:
524 do
525 :: 1 ->
526 /*
527 * We do not test reader's progress here, because we are mainly
528 * interested in writer's progress. The reader never blocks
529 * anyway. We have to test for reader/writer's progress
530 * separately, otherwise we could think the writer is doing
531 * progress when it's blocked by an always progressing reader.
532 */
533 #ifdef READER_PROGRESS
534 progress_reader:
535 #endif
536 urcu_one_read(i, j, nest_i, tmp, tmp2);
537 od;
538 }
539
540 #endif //!TEST_SIGNAL_ON_WRITE
541
542 #ifdef TEST_SIGNAL
543 /* signal handler reader */
544
545 inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
546 {
547 nest_i = 0;
548 do
549 :: nest_i < READER_NEST_LEVEL ->
550 ooo_mem(i);
551 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
552 ooo_mem(i);
553 if
554 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
555 ->
556 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
557 ooo_mem(i);
558 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
559 tmp2);
560 :: else ->
561 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
562 tmp + 1);
563 fi;
564 smp_mb_reader(i, j);
565 nest_i++;
566 :: nest_i >= READER_NEST_LEVEL -> break;
567 od;
568
569 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
570 data_access[get_readerid()] = 1;
571 data_access[get_readerid()] = 0;
572
573 nest_i = 0;
574 do
575 :: nest_i < READER_NEST_LEVEL ->
576 smp_mb_reader(i, j);
577 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
578 ooo_mem(i);
579 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
580 nest_i++;
581 :: nest_i >= READER_NEST_LEVEL -> break;
582 od;
583 //ooo_mem(i);
584 //smp_mc(i); /* added */
585 }
586
587 active proctype urcu_reader_sig()
588 {
589 byte i, j, nest_i;
590 byte tmp, tmp2;
591
592 wait_init_done();
593
594 assert(get_pid() < NR_PROCS);
595
596 end_reader:
597 do
598 :: 1 ->
599 wait_for_sighand_exec();
600 /*
601 * We do not test reader's progress here, because we are mainly
602 * interested in writer's progress. The reader never blocks
603 * anyway. We have to test for reader/writer's progress
604 * separately, otherwise we could think the writer is doing
605 * progress when it's blocked by an always progressing reader.
606 */
607 #ifdef READER_PROGRESS
608 progress_reader:
609 #endif
610 urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
611 od;
612 }
613
614 #endif
615
616 /* Model the RCU update process. */
617
618 active proctype urcu_writer()
619 {
620 byte i, j;
621 byte tmp, tmp2;
622 byte old_gen;
623
624 wait_init_done();
625
626 assert(get_pid() < NR_PROCS);
627
628 do
629 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
630 #ifdef WRITER_PROGRESS
631 progress_writer1:
632 #endif
633 ooo_mem(i);
634 dispatch_sighand_write_exec();
635 atomic {
636 old_gen = READ_CACHED_VAR(generation_ptr);
637 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
638 }
639 ooo_mem(i);
640 dispatch_sighand_write_exec();
641
642 do
643 :: 1 ->
644 atomic {
645 if
646 :: write_lock == 0 ->
647 write_lock = 1;
648 break;
649 :: else ->
650 skip;
651 fi;
652 }
653 od;
654 smp_mb_writer(i, j);
655 dispatch_sighand_write_exec();
656 tmp = READ_CACHED_VAR(urcu_gp_ctr);
657 ooo_mem(i);
658 dispatch_sighand_write_exec();
659 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
660 ooo_mem(i);
661 dispatch_sighand_write_exec();
662 //smp_mc(i);
663 wait_for_quiescent_state(tmp, tmp2, i, j);
664 //smp_mc(i);
665 #ifndef SINGLE_FLIP
666 ooo_mem(i);
667 dispatch_sighand_write_exec();
668 tmp = READ_CACHED_VAR(urcu_gp_ctr);
669 ooo_mem(i);
670 dispatch_sighand_write_exec();
671 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
672 //smp_mc(i);
673 ooo_mem(i);
674 dispatch_sighand_write_exec();
675 wait_for_quiescent_state(tmp, tmp2, i, j);
676 #endif
677 smp_mb_writer(i, j);
678 dispatch_sighand_write_exec();
679 write_lock = 0;
680 /* free-up step, e.g., kfree(). */
681 atomic {
682 last_free_gen = old_gen;
683 free_done = 1;
684 }
685 :: else -> break;
686 od;
687 /*
688 * Given the reader loops infinitely, let the writer also busy-loop
689 * with progress here so, with weak fairness, we can test the
690 * writer's progress.
691 */
692 end_writer:
693 do
694 :: 1 ->
695 #ifdef WRITER_PROGRESS
696 progress_writer2:
697 #endif
698 dispatch_sighand_write_exec();
699 od;
700 }
701
702 /* Leave after the readers and writers so the pid count is ok. */
703 init {
704 byte i, j;
705
706 atomic {
707 INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
708 INIT_CACHED_VAR(generation_ptr, 0, j);
709
710 i = 0;
711 do
712 :: i < NR_READERS ->
713 INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
714 read_generation[i] = 1;
715 data_access[i] = 0;
716 i++;
717 :: i >= NR_READERS -> break
718 od;
719 init_done = 1;
720 }
721 }
This page took 0.044188 seconds and 4 git commands to generate.