/* Reader nesting depth exercised by the model: urcu_one_read() loops
 * rcu_read_lock/unlock up to this depth. Overrides the #ifndef default
 * of 2 further down in this file. */
1 #define READER_NEST_LEVEL 1
/* Error witness: reader 0's snapshot of generation_ptr equals the
 * generation the writer last freed (last_free_gen is set in the
 * writer's free-up step) -> models a use-after-free race. */
8 #define read_free_race (read_generation[0] == last_free_gen)
/* Error witness: the free completed while reader 0 is inside its
 * data-access window (data_access[0] is set around the access).
 * NOTE(review): free_done is presumably set by the writer's free-up
 * step — not visible in this excerpt, confirm against full model. */
9 #define read_free (free_done && data_access[0])
/* Model configuration: run the signal-handler-in-reader variant.
 * Selects the get_pid() mapping where the reader and its signal
 * handler (urcu_reader_sig) share pid 0, and enables the
 * dispatch_sighand_read_exec() hooks in the read-side path. */
12 #define TEST_SIGNAL_ON_READ
/* Grace-period phase bit of urcu_gp_ctr: the writer flips it with
 * XOR (see urcu_writer: WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT)). */
14 #define RCU_GP_CTR_BIT (1 << 7)
/* Low bits below the phase bit carry the reader nesting count; readers
 * test (tmp & RCU_GP_CTR_NEST_MASK) to detect an outermost lock. */
15 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
17 #ifndef READER_NEST_LEVEL
18 #define READER_NEST_LEVEL 2
/* Enable the remote-barrier scheme: readers use only a compiler
 * barrier, and the writer promotes it to smp_mb() by updating each
 * reader's cache remotely (see the #ifdef REMOTE_BARRIERS section). */
21 #define REMOTE_BARRIERS
23 * mem.spin: Promela code to validate memory barriers with OOO memory.
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2 of the License, or
28 * (at your option) any later version.
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
33 * GNU General Public License for more details.
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
39 * Copyright (c) 2009 Mathieu Desnoyers
42 /* Promela validation variables. */
44 /* specific defines "included" here */
45 /* DEFINES file "included" here */
47 /* All signal readers have the same PID and use the same reader variable */
48 #ifdef TEST_SIGNAL_ON_WRITE
49 #define get_pid() ((_pid < 1) -> 0 : 1)
50 #elif defined(TEST_SIGNAL_ON_READ)
51 #define get_pid() ((_pid < 2) -> 0 : 1)
53 #define get_pid() (_pid)
57 * Each process has its own data in cache. Caches are randomly updated.
58 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
62 typedef per_proc_byte {
66 /* Bitfield has a maximum of 8 procs */
67 typedef per_proc_bit {
71 #define DECLARE_CACHED_VAR(type, x) \
73 per_proc_##type cached_##x; \
74 per_proc_bit cache_dirty_##x;
76 #define INIT_CACHED_VAR(x, v, j) \
78 cache_dirty_##x.bitfield = 0; \
82 cached_##x.val[j] = v; \
84 :: j >= NR_PROCS -> break \
/* True when process `id` has a not-yet-written-back value of x
 * (one dirty bit per process in the bitfield; max 8 procs). */
87 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))
/* Read the current process's cached copy of x (never main memory
 * directly; CACHE_READ_FROM_MEM refreshes the cache). */
89 #define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])
91 #define WRITE_CACHED_VAR(x, v) \
93 cached_##x.val[get_pid()] = v; \
94 cache_dirty_##x.bitfield = \
95 cache_dirty_##x.bitfield | (1 << get_pid()); \
98 #define CACHE_WRITE_TO_MEM(x, id) \
100 :: IS_CACHE_DIRTY(x, id) -> \
101 mem_##x = cached_##x.val[id]; \
102 cache_dirty_##x.bitfield = \
103 cache_dirty_##x.bitfield & (~(1 << id)); \
108 #define CACHE_READ_FROM_MEM(x, id) \
110 :: !IS_CACHE_DIRTY(x, id) -> \
111 cached_##x.val[id] = mem_##x;\
117 * May update other caches if cache is dirty, or not.
119 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
121 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
125 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
127 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
132 * Remote barriers tests the scheme where a signal (or IPI) is sent to all
133 * reader threads to promote their compiler barrier to a smp_mb().
135 #ifdef REMOTE_BARRIERS
137 inline smp_rmb_pid(i, j)
140 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
144 CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
146 :: j >= NR_READERS -> break
148 CACHE_READ_FROM_MEM(generation_ptr, i);
152 inline smp_wmb_pid(i, j)
155 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
159 CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
161 :: j >= NR_READERS -> break
163 CACHE_WRITE_TO_MEM(generation_ptr, i);
167 inline smp_mb_pid(i, j)
185 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
186 * signal or IPI to have all readers execute a smp_mb.
187 * We are not modeling the whole rendez-vous between readers and writers here,
188 * we just let the writer update each reader's caches remotely.
193 :: get_pid() >= NR_READERS ->
194 smp_mb_pid(get_pid(), j);
200 :: i >= NR_READERS -> break
202 smp_mb_pid(get_pid(), j);
212 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
216 CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
218 :: i >= NR_READERS -> break
220 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
227 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
231 CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
233 :: i >= NR_READERS -> break
235 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
258 /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
259 DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
260 /* Note! Currently only two readers are supported. */
261 DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
262 /* pointer generation */
263 DECLARE_CACHED_VAR(byte, generation_ptr);
265 byte last_free_gen = 0;
267 byte read_generation[NR_READERS];
268 bit data_access[NR_READERS];
274 bit sighand_exec = 0;
276 inline wait_init_done()
279 :: init_done == 0 -> skip;
286 inline wait_for_sighand_exec()
290 :: sighand_exec == 0 -> skip;
297 inline wait_for_sighand_exec()
304 #ifdef TEST_SIGNAL_ON_WRITE
305 /* Block on signal handler execution */
306 inline dispatch_sighand_write_exec()
310 :: sighand_exec == 1 ->
319 inline dispatch_sighand_write_exec()
326 #ifdef TEST_SIGNAL_ON_READ
327 /* Block on signal handler execution */
328 inline dispatch_sighand_read_exec()
332 :: sighand_exec == 1 ->
341 inline dispatch_sighand_read_exec()
352 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
356 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
359 :: i >= NR_READERS -> break
361 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
362 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
366 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
369 :: i >= NR_READERS -> break
371 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
/* Reader index == process id under the active get_pid() mapping. */
375 #define get_readerid() (get_pid())
/* Writers are numbered after all readers in the pid space. */
376 #define get_writerid() (get_readerid() + NR_READERS)
378 inline wait_for_reader(tmp, tmp2, i, j)
382 tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
384 dispatch_sighand_write_exec();
386 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
387 && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
389 #ifndef GEN_ERROR_WRITER_PROGRESS
394 dispatch_sighand_write_exec();
401 inline wait_for_quiescent_state(tmp, tmp2, i, j)
405 :: tmp < NR_READERS ->
406 wait_for_reader(tmp, tmp2, i, j);
408 :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
410 dispatch_sighand_write_exec();
415 :: tmp >= NR_READERS -> break
419 /* Model the RCU read-side critical section. */
421 inline urcu_one_read(i, j, nest_i, tmp, tmp2)
425 :: nest_i < READER_NEST_LEVEL ->
427 dispatch_sighand_read_exec();
428 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
430 dispatch_sighand_read_exec();
432 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
434 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
436 dispatch_sighand_read_exec();
437 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
440 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
444 dispatch_sighand_read_exec();
446 :: nest_i >= READER_NEST_LEVEL -> break;
450 dispatch_sighand_read_exec();
451 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
453 dispatch_sighand_read_exec();
454 data_access[get_readerid()] = 1;
456 dispatch_sighand_read_exec();
457 data_access[get_readerid()] = 0;
461 :: nest_i < READER_NEST_LEVEL ->
463 dispatch_sighand_read_exec();
464 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
466 dispatch_sighand_read_exec();
467 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
469 :: nest_i >= READER_NEST_LEVEL -> break;
472 dispatch_sighand_read_exec();
473 //smp_mc(i); /* added */
476 active proctype urcu_reader()
483 assert(get_pid() < NR_PROCS);
489 * We do not test reader's progress here, because we are mainly
490 * interested in writer's progress. The reader never blocks
491 * anyway. We have to test for reader/writer's progress
492 * separately, otherwise we could think the writer is doing
493 * progress when it's blocked by an always progressing reader.
495 #ifdef READER_PROGRESS
496 /* Only test progress of one random reader. They are all the
499 :: get_readerid() == 0 ->
504 urcu_one_read(i, j, nest_i, tmp, tmp2);
509 /* signal handler reader */
511 inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
515 :: nest_i < READER_NEST_LEVEL ->
517 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
520 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
522 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
524 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
527 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
532 :: nest_i >= READER_NEST_LEVEL -> break;
536 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
538 data_access[get_readerid()] = 1;
540 data_access[get_readerid()] = 0;
544 :: nest_i < READER_NEST_LEVEL ->
546 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
548 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
550 :: nest_i >= READER_NEST_LEVEL -> break;
553 //smp_mc(i); /* added */
556 active proctype urcu_reader_sig()
563 assert(get_pid() < NR_PROCS);
568 wait_for_sighand_exec();
570 * We do not test reader's progress here, because we are mainly
571 * interested in writer's progress. The reader never blocks
572 * anyway. We have to test for reader/writer's progress
573 * separately, otherwise we could think the writer is doing
574 * progress when it's blocked by an always progressing reader.
576 #ifdef READER_PROGRESS
577 /* Only test progress of one random reader. They are all the
580 :: get_readerid() == 0 ->
585 urcu_one_read(i, j, nest_i, tmp, tmp2);
591 /* Model the RCU update process. */
593 active proctype urcu_writer()
601 assert(get_pid() < NR_PROCS);
604 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
605 #ifdef WRITER_PROGRESS
609 dispatch_sighand_write_exec();
611 old_gen = READ_CACHED_VAR(generation_ptr);
612 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
615 dispatch_sighand_write_exec();
621 :: write_lock == 0 ->
630 dispatch_sighand_write_exec();
631 tmp = READ_CACHED_VAR(urcu_gp_ctr);
633 dispatch_sighand_write_exec();
634 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
636 dispatch_sighand_write_exec();
638 wait_for_quiescent_state(tmp, tmp2, i, j);
642 dispatch_sighand_write_exec();
643 tmp = READ_CACHED_VAR(urcu_gp_ctr);
645 dispatch_sighand_write_exec();
646 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
649 dispatch_sighand_write_exec();
650 wait_for_quiescent_state(tmp, tmp2, i, j);
653 dispatch_sighand_write_exec();
655 /* free-up step, e.g., kfree(). */
657 last_free_gen = old_gen;
663 * Given the reader loops infinitely, let the writer also busy-loop
664 * with progress here so, with weak fairness, we can test the
670 #ifdef WRITER_PROGRESS
673 dispatch_sighand_write_exec();
677 /* Leave after the readers and writers so the pid count is ok. */
682 INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
683 INIT_CACHED_VAR(generation_ptr, 0, j);
688 INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
689 read_generation[i] = 1;
692 :: i >= NR_READERS -> break