Execute sig handler unconditionally
urcu.git: formal-model/urcu/urcu.spin
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

/* All signal readers have the same PID and use the same reader variables. */
#ifdef TEST_SIGNAL_ON_WRITE
#define get_pid()       ((_pid < 1) -> 0 : 1)
#elif defined(TEST_SIGNAL_ON_READ)
#define get_pid()       ((_pid < 2) -> 0 : 1)
#else
#define get_pid()       (_pid)
#endif
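
/*
 * Mapping note (assuming the declaration order below: urcu_reader, then
 * urcu_reader_sig under TEST_SIGNAL, then urcu_writer): with
 * TEST_SIGNAL_ON_WRITE the signal-handler reader shares id 1 with the
 * writer; with TEST_SIGNAL_ON_READ it shares id 0 with the regular reader.
 */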

#define get_readerid()  (get_pid())

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
 * both.
 */

typedef per_proc_byte {
        byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
        byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)     \
        type mem_##x;                   \
        per_proc_##type cached_##x;     \
        per_proc_bit cache_dirty_##x;
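
/*
 * Note: a cached variable x expands to three objects: the memory copy
 * mem_x, one cached copy per process in cached_x, and one per-process
 * dirty bit in cache_dirty_x marking writes not yet flushed to memory.
 */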

#define INIT_CACHED_VAR(x, v, j)        \
        mem_##x = v;                    \
        cache_dirty_##x.bitfield = 0;   \
        j = 0;                          \
        do                              \
        :: j < NR_PROCS ->              \
                cached_##x.val[j] = v;  \
                j++                     \
        :: j >= NR_PROCS -> break       \
        od;

#define IS_CACHE_DIRTY(x, id)   (cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)      (cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)                          \
        atomic {                                        \
                cached_##x.val[get_pid()] = v;          \
                cache_dirty_##x.bitfield =              \
                        cache_dirty_##x.bitfield | (1 << get_pid());    \
        }
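
/*
 * Note: reads and writes only ever touch the calling process' cached copy;
 * memory is reached solely through the flush/fetch macros below, which is
 * what lets writes become visible to other processes out of order.
 */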

#define CACHE_WRITE_TO_MEM(x, id)                       \
        if                                              \
        :: IS_CACHE_DIRTY(x, id) ->                     \
                mem_##x = cached_##x.val[id];           \
                cache_dirty_##x.bitfield =              \
                        cache_dirty_##x.bitfield & (~(1 << id));        \
        :: else ->                                      \
                skip                                    \
        fi;

#define CACHE_READ_FROM_MEM(x, id)              \
        if                                      \
        :: !IS_CACHE_DIRTY(x, id) ->            \
                cached_##x.val[id] = mem_##x;   \
        :: else ->                              \
                skip                            \
        fi;

/*
 * May flush the cache to memory if it is dirty, or do nothing at all.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)        \
        if                                      \
        :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
        :: 1 -> skip                            \
        fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)       \
        if                                      \
        :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
        :: 1 -> skip                            \
        fi;
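
/*
 * Note: both guards being "1", Spin picks either branch nondeterministically,
 * so every subset and ordering of cache flushes and fetches is explored.
 */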

/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barrier to an smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i, j)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
                j = 0;
                do
                :: j < NR_READERS ->
                        CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
                        j++
                :: j >= NR_READERS -> break
                od;
                CACHE_READ_FROM_MEM(generation_ptr, i);
        }
}

inline smp_wmb_pid(i, j)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
                j = 0;
                do
                :: j < NR_READERS ->
                        CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
                        j++
                :: j >= NR_READERS -> break
                od;
                CACHE_WRITE_TO_MEM(generation_ptr, i);
        }
}

inline smp_mb_pid(i, j)
{
        atomic {
#ifndef NO_WMB
                smp_wmb_pid(i, j);
#endif
#ifndef NO_RMB
                smp_rmb_pid(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
                ooo_mem(j);
#endif
#endif
        }
}

/*
 * Readers do a simple barrier(); writers do an smp_mb() _and_ send a signal
 * (or IPI) to make all readers execute an smp_mb().
 * We do not model the whole rendez-vous between readers and writers here;
 * we just let the writer update each reader's cache remotely.
 */
inline smp_mb(i, j)
{
        if
        :: get_pid() >= NR_READERS ->
                smp_mb_pid(get_pid(), j);
                i = 0;
                do
                :: i < NR_READERS ->
                        smp_mb_pid(i, j);
                        i++;
                :: i >= NR_READERS -> break
                od;
                smp_mb_pid(get_pid(), j);
        :: else -> skip;
        fi;
}
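
/*
 * Note: only the writer (get_pid() >= NR_READERS) does anything here: it
 * orders its own accesses, executes a barrier in each reader's context to
 * model the signal/IPI round-trip, then orders its own accesses again. For
 * a reader, smp_mb() under REMOTE_BARRIERS is only a compiler barrier.
 */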

#else

inline smp_rmb(i, j)
{
        atomic {
                CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}

inline smp_wmb(i, j)
{
        atomic {
                CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
        }
}

inline smp_mb(i, j)
{
        atomic {
#ifndef NO_WMB
                smp_wmb(i, j);
#endif
#ifndef NO_RMB
                smp_rmb(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
                ooo_mem(i);
#endif
#endif
        }
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: currently only two readers */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation[NR_READERS];
bit data_access[NR_READERS];

bit write_lock = 0;

bit init_done = 0;

bit sighand_exec = 0;

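/*
 * Roles of the globals above, as used by the model below: write_lock
 * serializes updaters; init_done lets every process wait for init() to
 * finish; sighand_exec is the handshake flag between a thread and the
 * signal-handler reader; read_generation and data_access record which
 * pointer generation each reader saw and when it accessed the data,
 * presumably for the correctness properties defined in the DEFINES file.
 */
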
inline wait_init_done()
{
        do
        :: init_done == 0 -> skip;
        :: else -> break;
        od;
}

#ifdef TEST_SIGNAL

inline wait_for_sighand_exec()
{
        sighand_exec = 0;
        do
        :: sighand_exec == 0 -> skip;
        :: else -> break;
        od;
}

#ifdef TOO_BIG_STATE_SPACE
/*
 * Variant normally left disabled (it blows up the state space): after the
 * handler runs, nondeterministically either proceed or request another
 * handler execution.
 */
inline wait_for_sighand_exec()
{
        sighand_exec = 0;
        do
        :: sighand_exec == 0 -> skip;
        :: else ->
                if
                :: 1 -> break;
                :: 1 -> sighand_exec = 0;
                        skip;
                fi;
        od;
}
#endif

#else

inline wait_for_sighand_exec()
{
        skip;
}

#endif
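
/*
 * Note on the handshake: dispatch_sighand_*_exec() below sets sighand_exec
 * and blocks until it is cleared; the signal-handler reader blocks in
 * wait_for_sighand_exec() until sighand_exec is set, runs one read-side
 * critical section, then clears the flag on its next wait, unblocking the
 * dispatching thread.
 */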

#ifdef TEST_SIGNAL_ON_WRITE
/* Block on signal handler execution */
inline dispatch_sighand_write_exec()
{
        sighand_exec = 1;
        do
        :: sighand_exec == 1 ->
                skip;
        :: else ->
                break;
        od;
}

#else

inline dispatch_sighand_write_exec()
{
        skip;
}

#endif

#ifdef TEST_SIGNAL_ON_READ
/* Block on signal handler execution */
inline dispatch_sighand_read_exec()
{
        sighand_exec = 1;
        do
        :: sighand_exec == 1 ->
                skip;
        :: else ->
                break;
        od;
}

#else

inline dispatch_sighand_read_exec()
{
        skip;
}

#endif
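
/*
 * Note: dispatch_sighand_write_exec() and dispatch_sighand_read_exec() are
 * placed between every pair of steps in the writer and reader respectively,
 * so the modeled signal handler may fire at any point in those code paths.
 * When the corresponding TEST_SIGNAL_ON_* macro is undefined, they reduce
 * to skip.
 */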

inline ooo_mem(i)
{
        atomic {
                RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
                                get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
                RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
                i = 0;
                do
                :: i < NR_READERS ->
                        RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
                                get_pid());
                        i++
                :: i >= NR_READERS -> break
                od;
                RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
        }
}
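
/*
 * Note: ooo_mem() is invoked between every two algorithm steps. Each call
 * may flush any dirty cached variable to memory and fetch any clean one
 * from memory, so Spin explores every ordering of loads and stores that
 * this cache model (and thus a weakly-ordered machine) allows.
 */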

inline wait_for_reader(tmp, tmp2, i, j)
{
        do
        :: 1 ->
                tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
                ooo_mem(i);
                dispatch_sighand_write_exec();
                if
                :: (tmp2 & RCU_GP_CTR_NEST_MASK)
                        && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
                                & RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
                        smp_mb(i, j);
#else
                        ooo_mem(i);
#endif
                        dispatch_sighand_write_exec();
                :: else ->
                        break;
                fi;
        od;
}
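
/*
 * Note: the writer busy-waits while reader "tmp" is inside a read-side
 * critical section (RCU_GP_CTR_NEST_MASK bits non-zero) and still tagged
 * with the previous grace-period phase (its RCU_GP_CTR_BIT differs from
 * the one in urcu_gp_ctr). Readers not in a critical section, or that
 * entered after the phase flip, need not be waited for.
 */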

inline wait_for_quiescent_state(tmp, tmp2, i, j)
{
        tmp = 0;
        do
        :: tmp < NR_READERS ->
                wait_for_reader(tmp, tmp2, i, j);
                if
                :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
                        -> ooo_mem(i);
                        dispatch_sighand_write_exec();
                :: else
                        -> skip;
                fi;
                tmp++
        :: tmp >= NR_READERS -> break
        od;
}
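
/*
 * Note: one grace-period phase completes once each reader, in turn, has
 * been observed quiescent; between readers, memory may be reordered and
 * the modeled signal handler may run.
 */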

/* Model the RCU read-side critical section. */

inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                ooo_mem(i);
                dispatch_sighand_read_exec();
                tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                dispatch_sighand_read_exec();
                if
                :: (!(tmp & RCU_GP_CTR_NEST_MASK))
                        ->
                        tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
                        ooo_mem(i);
                        dispatch_sighand_read_exec();
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                         tmp2);
                :: else ->
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                         tmp + 1);
                fi;
                smp_mb(i, j);
                dispatch_sighand_read_exec();
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;

        read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
        data_access[get_readerid()] = 1;
        data_access[get_readerid()] = 0;

        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                smp_mb(i, j);
                dispatch_sighand_read_exec();
                tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                dispatch_sighand_read_exec();
                WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;
        //ooo_mem(i);
        //dispatch_sighand_read_exec();
        //smp_mc(i); /* added */
}
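
/*
 * Note: the first loop models nested rcu_read_lock() calls: the outermost
 * lock snapshots urcu_gp_ctr into the per-reader counter, while nested ones
 * only increment the nesting count. The generation_ptr read plus the
 * data_access[] pulse model rcu_dereference() and the data access it
 * protects. The second loop models the matching rcu_read_unlock() calls.
 */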

active proctype urcu_reader()
{
        byte i, j, nest_i;
        byte tmp, tmp2;

        wait_init_done();

        assert(get_pid() < NR_PROCS);

end_reader:
        do
        :: 1 ->
                /*
                 * We do not test the reader's progress here, because we are
                 * mainly interested in the writer's progress. The reader
                 * never blocks anyway. We have to test reader and writer
                 * progress separately; otherwise we could think the writer
                 * is making progress when it is in fact blocked by an
                 * always-progressing reader.
                 */
#ifdef READER_PROGRESS
                /* Only test progress of one random reader. They are all the
                 * same. */
                if
                :: get_readerid() == 0 ->
progress_reader:
                        skip;
                fi;
#endif
                urcu_one_read(i, j, nest_i, tmp, tmp2);
        od;
}

#ifdef TEST_SIGNAL
/* signal handler reader */

inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
{
        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                ooo_mem(i);
                tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                if
                :: (!(tmp & RCU_GP_CTR_NEST_MASK))
                        ->
                        tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
                        ooo_mem(i);
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                         tmp2);
                :: else ->
                        WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
                                         tmp + 1);
                fi;
                smp_mb(i, j);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;

        read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
        data_access[get_readerid()] = 1;
        data_access[get_readerid()] = 0;

        nest_i = 0;
        do
        :: nest_i < READER_NEST_LEVEL ->
                smp_mb(i, j);
                tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
                ooo_mem(i);
                WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
                nest_i++;
        :: nest_i >= READER_NEST_LEVEL -> break;
        od;
        //ooo_mem(i);
        //smp_mc(i); /* added */
}
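
/*
 * Note: same as urcu_one_read(), minus the dispatch_sighand_read_exec()
 * calls: the signal handler itself cannot be interrupted by the signal it
 * is handling.
 */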

active proctype urcu_reader_sig()
{
        byte i, j, nest_i;
        byte tmp, tmp2;

        wait_init_done();

        assert(get_pid() < NR_PROCS);

end_reader:
        do
        :: 1 ->
                wait_for_sighand_exec();
                /*
                 * We do not test the reader's progress here, because we are
                 * mainly interested in the writer's progress. The reader
                 * never blocks anyway. We have to test reader and writer
                 * progress separately; otherwise we could think the writer
                 * is making progress when it is in fact blocked by an
                 * always-progressing reader.
                 */
#ifdef READER_PROGRESS
                /* Only test progress of one random reader. They are all the
                 * same. */
                if
                :: get_readerid() == 0 ->
progress_reader:
                        skip;
                fi;
#endif
                urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
        od;
}

#endif

/* Model the RCU update process. */

active proctype urcu_writer()
{
        byte i, j;
        byte tmp, tmp2;
        byte old_gen;

        wait_init_done();

        assert(get_pid() < NR_PROCS);

        do
        :: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
                ooo_mem(i);
                dispatch_sighand_write_exec();
                atomic {
                        old_gen = READ_CACHED_VAR(generation_ptr);
                        WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
                }
                ooo_mem(i);
                dispatch_sighand_write_exec();

                do
                :: 1 ->
                        atomic {
                                if
                                :: write_lock == 0 ->
                                        write_lock = 1;
                                        break;
                                :: else ->
                                        skip;
                                fi;
                        }
                od;
                smp_mb(i, j);
                dispatch_sighand_write_exec();
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                dispatch_sighand_write_exec();
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                ooo_mem(i);
                dispatch_sighand_write_exec();
                //smp_mc(i);
                wait_for_quiescent_state(tmp, tmp2, i, j);
                //smp_mc(i);
#ifndef SINGLE_FLIP
                ooo_mem(i);
                dispatch_sighand_write_exec();
                tmp = READ_CACHED_VAR(urcu_gp_ctr);
                ooo_mem(i);
                dispatch_sighand_write_exec();
                WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
                //smp_mc(i);
                ooo_mem(i);
                dispatch_sighand_write_exec();
                wait_for_quiescent_state(tmp, tmp2, i, j);
#endif
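                /*
                 * Note: without SINGLE_FLIP the writer flips the grace-period
                 * phase bit a second time and waits again, so it tolerates
                 * readers that sampled urcu_gp_ctr just before the first
                 * flip. Defining SINGLE_FLIP models the single-flip variant
                 * for comparison.
                 */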
                smp_mb(i, j);
                dispatch_sighand_write_exec();
                write_lock = 0;
                /* free-up step, e.g., kfree(). */
                atomic {
                        last_free_gen = old_gen;
                        free_done = 1;
                }
        :: else -> break;
        od;
        /*
         * Since the readers loop forever, let the writer also busy-loop
         * with progress here so that, under weak fairness, we can test the
         * writer's progress.
         */
end_writer:
        do
        :: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
                dispatch_sighand_write_exec();
        od;
}

/* init is declared after the readers and writers so the pid count is ok. */
init {
        byte i, j;

        atomic {
                INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
                INIT_CACHED_VAR(generation_ptr, 0, j);

                i = 0;
                do
                :: i < NR_READERS ->
                        INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
                        read_generation[i] = 1;
                        data_access[i] = 0;
                        i++;
                :: i >= NR_READERS -> break
                od;
                init_done = 1;
        }
}