#if (NR_READERS == 1)

#define read_free_race	(read_generation[0] == last_free_gen)
#define read_free	(free_done && data_access[0])

#elif (NR_READERS == 2)

#define read_free_race	(read_generation[0] == last_free_gen \
			 || read_generation[1] == last_free_gen)
#define read_free	(free_done && (data_access[0] || data_access[1]))

#else

#error "Too many readers"

#endif
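/*
 * Added reading note: read_free_race flags an execution where some
 * reader's snapshot of the pointer generation equals the generation
 * that was last freed; read_free flags an execution where a reader is
 * still accessing data after the free step completed. Both predicates
 * are meant to be referenced by the model's correctness claims, which
 * are not shown in this excerpt.
 */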
#define RCU_GP_CTR_BIT		(1 << 7)
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
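/*
 * Added worked example: with RCU_GP_CTR_BIT == 128 and
 * RCU_GP_CTR_NEST_MASK == 127, one urcu_active_readers byte packs a
 * 7-bit nesting count and a 1-bit grace-period phase snapshot. A
 * reader nested two levels deep that snapshotted phase 1 holds
 * 128 | 2 == 130; then 130 & RCU_GP_CTR_NEST_MASK == 2 recovers the
 * nesting count and 130 & RCU_GP_CTR_BIT != 0 recovers the phase.
 */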
#ifndef READER_NEST_LEVEL
#define READER_NEST_LEVEL 2
#endif
#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with out-of-order
 * (OOO) memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */
/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define get_pid()	(_pid)
/*
 * Each process has its own copy of the data in its cache. Caches are
 * updated at random times. smp_wmb and smp_rmb force cache updates
 * (write-back and read, respectively); smp_mb forces both.
 */
typedef per_proc_byte {
	byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs. */
typedef per_proc_bit {
	byte bitfield;
};
#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bit cache_dirty_##x;
#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od
#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])
#define WRITE_CACHED_VAR(x, v)				\
	atomic {					\
		cached_##x.val[get_pid()] = v;		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield |	\
			(1 << get_pid());		\
	}
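/*
 * Added usage sketch (not part of the model; "foo" and the loop index
 * "j" are hypothetical names):
 *
 *	DECLARE_CACHED_VAR(byte, foo);
 *	...
 *	INIT_CACHED_VAR(foo, 0, j);	memory and every cache hold 0
 *	WRITE_CACHED_VAR(foo, 1);	this proc's cache only; marks it dirty
 *	tmp = READ_CACHED_VAR(foo);	reads this proc's cache, not memory
 */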
#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield &	\
			(~(1 << id));			\
	:: else -> skip					\
	fi
#define CACHE_READ_FROM_MEM(x, id)		\
	if					\
	:: !IS_CACHE_DIRTY(x, id) ->		\
		cached_##x.val[id] = mem_##x;	\
	:: else -> skip				\
	fi
/*
 * Nondeterministically propagate a cache entry to or from memory, or
 * leave it alone.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi
#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi
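/*
 * Added note: both branches of the if above are guarded by `1`, so the
 * flush/fetch is a nondeterministic choice; the verifier explores
 * executions where the cache synchronizes at this point as well as
 * executions where it does not.
 */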
/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to
 * all reader threads to promote their compiler barrier to an smp_mb().
 */
#ifdef REMOTE_BARRIERS
inline smp_rmb_pid(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}
inline smp_wmb_pid(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}
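/*
 * Added note: smp_rmb_pid()/smp_wmb_pid() act on CPU "i"'s cache for
 * every modeled shared variable, which is why their bodies must be
 * kept in sync by hand with the DECLARE_CACHED_VAR declarations below.
 */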
inline smp_mb_pid(i, j)
{
	atomic {
		smp_wmb_pid(i, j);
		smp_rmb_pid(i, j);
	}
}
/*
 * Readers do a simple barrier(); writers do an smp_mb() _and_ send a
 * signal (or IPI) to have all readers execute an smp_mb().
 * We do not model the whole rendez-vous between readers and writers
 * here; we simply let the writer update each reader's caches remotely.
 */
inline smp_mb(i, j)
{
	if
	:: get_pid() >= NR_READERS ->
		smp_mb_pid(get_pid(), j);
		i = 0;
		do
		:: i < NR_READERS ->
			smp_mb_pid(i, j);
			i++;
		:: i >= NR_READERS -> break
		od;
		smp_mb_pid(get_pid(), j);
	:: else -> skip;	/* reader side: compiler barrier only */
	fi;
}
#else

inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

#endif
/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init(). */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: currently only two readers are supported. */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* Pointer generation. */
DECLARE_CACHED_VAR(byte, generation_ptr);
byte last_free_gen = 0;
bit free_done = 0;
byte read_generation[NR_READERS];
bit data_access[NR_READERS];

bit write_lock = 0;

bit init_done = 0;
inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}
inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
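/*
 * Added note: ooo_mem() is the model's out-of-order memory step. In
 * one atomic block it gives every dirty cache entry a chance to be
 * written back and every clean entry a chance to be refetched, so
 * calling it between two accesses lets the verifier reorder their
 * globally visible effects, as a weakly-ordered CPU may.
 */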
#define get_readerid()	(get_pid())
#define get_writerid()	(get_readerid() + NR_READERS)
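/*
 * Added worked example: with NR_READERS == 2 the two reader processes
 * are created first and get _pid 0 and 1, so get_readerid() is 0 or 1;
 * the writer is created next with _pid 2. This is also why smp_mb()
 * above distinguishes the writer with get_pid() >= NR_READERS.
 */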
inline wait_for_reader(tmp, tmp2, i, j)
{
	do
	:: 1 ->
		tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
		ooo_mem(i);
		if
		:: (tmp2 & RCU_GP_CTR_NEST_MASK)
			&& ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
				& RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb(i, j);
#else
			ooo_mem(i);
#endif
		:: else ->
			break;
		fi;
	od;
}
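/*
 * Added note on the wait condition above: a nonzero
 * (tmp2 & RCU_GP_CTR_NEST_MASK) means reader "tmp" is inside a
 * read-side critical section, and a set bit in
 * ((tmp2 ^ urcu_gp_ctr) & RCU_GP_CTR_BIT) means that critical section
 * snapshotted the previous grace-period phase. Only this combination
 * forces the writer to keep waiting.
 */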
inline wait_for_quiescent_state(tmp, tmp2, i, j)
{
	tmp = 0;
	do
	:: tmp < NR_READERS ->
		wait_for_reader(tmp, tmp2, i, j);
		if
		:: (NR_READERS > 1) && (tmp < NR_READERS - 1)
			-> ooo_mem(i);
		:: else
			-> skip;
		fi;
		tmp++
	:: tmp >= NR_READERS -> break
	od;
}
/* Model the RCU read-side critical section. */
inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp2);
		:: else ->
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp + 1);
		fi;
		smp_mb(i, j);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	ooo_mem(i);
	read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
	ooo_mem(i);
	data_access[get_readerid()] = 1;
	ooo_mem(i);
	data_access[get_readerid()] = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb(i, j);
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	//smp_mc(i);	/* added */
}
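/*
 * Added note: on the outermost nesting level the reader copies
 * urcu_gp_ctr (phase bit plus nesting count 1) into its
 * urcu_active_readers slot; nested entries only increment the nesting
 * count, and each exit decrements it. The data access itself is
 * modeled by sampling generation_ptr and raising data_access[] around
 * it, which is what the read_free/read_free_race predicates observe.
 */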
active [NR_READERS] proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we
		 * are mainly interested in the writer's progress. The
		 * reader never blocks anyway. We have to test reader and
		 * writer progress separately; otherwise we could think
		 * the writer is making progress when it is in fact
		 * blocked by an always-progressing reader.
		 */
#ifdef READER_PROGRESS
		/* Only test progress of one random reader. They are all the
		 * same. */
		if
		:: get_readerid() == 0 ->
progress_reader:
			skip;
		:: else -> skip;
		fi;
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}
/* Model the RCU update process. */

active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2;
	byte old_gen;

	wait_init_done();

	assert(get_pid() < NR_PROCS);
	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);
		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;
		smp_mb(i, j);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, tmp2, i, j);
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, tmp2, i, j);
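		/*
		 * Added note: the phase bit is flipped twice, with a
		 * full quiescent-state wait after each flip. Two phases
		 * are needed because a reader may sample urcu_gp_ctr
		 * just before a flip and publish it just after; a
		 * single flip-and-wait could miss such a reader's
		 * ongoing critical section.
		 */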
		ooo_mem(i);
		smp_mb(i, j);
		write_lock = 0;
		/* Free-up step, e.g., kfree(). */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break
	od;
	/*
	 * Since the reader loops forever, let the writer also busy-loop
	 * with a progress label here, so that under weak fairness we can
	 * test the writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;
}
/* Leave after the readers and writers so the pid count is ok. */

init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(generation_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			read_generation[i] = 1;
			data_access[i] = 0;
			i++;
		:: i >= NR_READERS -> break
		od;
		init_done = 1;
	}
}
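/*
 * Added verification sketch (the standard Spin workflow; file name and
 * flags may need adjusting):
 *
 *	spin -a mem.spin	generate pan.c from this model
 *	cc -O2 -o pan pan.c	compile the verifier
 *	./pan			exhaustive safety verification
 *
 * The WRITER_PROGRESS/READER_PROGRESS progress labels are checked via
 * non-progress cycle detection: compile pan.c with -DNP and run
 * "./pan -l".
 */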