#define read_free_race	(read_generation[0] == last_free_gen)
#define read_free	(free_done && data_access[0])

#elif (NR_READERS == 2)

#define read_free_race	(read_generation[0] == last_free_gen || read_generation[1] == last_free_gen)
#define read_free	(free_done && (data_access[0] || data_access[1]))

#else
#error "Too many readers"
#endif
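/*
 * A minimal sketch of how these predicates are meant to be consumed
 * (assuming the elided DEFINES file wires them into the verification;
 * the claim names below are illustrative, using Spin 6 ltl syntax):
 *
 *	ltl no_race { [] !read_free_race }	// freed gen never seen live
 *	ltl no_uaf  { [] !read_free }		// no access after free_done
 */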
#define RCU_GP_CTR_BIT		(1 << 7)
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
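/*
 * Worked example of the counter layout: the low 7 bits
 * (RCU_GP_CTR_NEST_MASK) count read-side nesting and bit 7
 * (RCU_GP_CTR_BIT) is the grace-period phase. With urcu_gp_ctr
 * initially 1, an outermost reader snapshots 0x01 and a reader nested
 * three deep holds 0x03; after the writer flips the phase with
 * "urcu_gp_ctr ^ RCU_GP_CTR_BIT", a fresh outermost reader snapshots
 * 0x81, so old and new readers differ in the phase bit alone.
 */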

#ifndef READER_NEST_LEVEL
#define READER_NEST_LEVEL 2
#endif

#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define get_pid()	(_pid)

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read, respectively);
 * smp_mb forces both.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bit cache_dirty_##x;
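/*
 * For reference, DECLARE_CACHED_VAR(byte, foo) expands to:
 *
 *	byte mem_foo;			(the "real" memory cell)
 *	per_proc_byte cached_foo;	(one cached copy per process)
 *	per_proc_bit cache_dirty_foo;	(one dirty bit per process)
 */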

#define INIT_CACHED_VAR(x, v, j)			\
	mem_##x = v;					\
	cache_dirty_##x.bitfield = 0;			\
	j = 0;						\
	do						\
	:: j < NR_PROCS -> cached_##x.val[j] = v; j++	\
	:: j >= NR_PROCS -> break			\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)					\
	atomic {						\
		cached_##x.val[get_pid()] = v;			\
		cache_dirty_##x.bitfield =			\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else -> skip					\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;	\
	:: else -> skip			\
	fi;
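/*
 * Example of the staleness this models: if process 0 does
 * WRITE_CACHED_VAR(x, 1), only cached_x.val[0] changes and dirty bit 0
 * is set; until CACHE_WRITE_TO_MEM(x, 0) flushes it and
 * CACHE_READ_FROM_MEM(x, 1) refetches, process 1's READ_CACHED_VAR(x)
 * still returns the old value from cached_x.val[1].
 */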

/*
 * May or may not propagate the cache line to/from memory, modeling
 * out-of-order completion of memory accesses.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;
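/*
 * This nondeterministic flush is what reorders accesses: if a process
 * stores to a and then to b, the flush of b's cache line may be chosen
 * first, so another process can observe the new b with the old a,
 * unless an smp_wmb() issued between the two stores pushes a to memory
 * before b is even written.
 */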

/*
 * REMOTE_BARRIERS tests the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barrier to an smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[j], i); j++
		:: j >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}

inline smp_wmb_pid(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[j], i); j++
		:: j >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}

/* A full barrier forces both: flush pending writes, then refresh reads. */
inline smp_mb_pid(i, j)
{
	atomic {
		smp_wmb_pid(i, j);
		smp_rmb_pid(i, j);
	}
}

/*
 * Readers do a simple barrier(); writers do an smp_mb() _and_ send a
 * signal or IPI to have all readers execute an smp_mb().
 * We are not modeling the whole rendezvous between readers and writers
 * here; we just let the writer update each reader's caches remotely.
 */
inline smp_mb(i, j)
{
	if
	:: get_pid() >= NR_READERS ->
		smp_mb_pid(get_pid(), j);
		i = 0;
		do
		:: i < NR_READERS -> smp_mb_pid(i, j); i++
		:: i >= NR_READERS -> break
		od;
		smp_mb_pid(get_pid(), j);
	:: else -> skip;
	fi;
}
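/*
 * For instance, with NR_READERS == 2 the writer (pid 2) executes
 * smp_mb_pid(2), then smp_mb_pid(0) and smp_mb_pid(1) on behalf of the
 * readers, then smp_mb_pid(2) again, standing in for the
 * signal/IPI-based rendezvous of the real implementation.
 */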

#else

inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid()); i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid()); i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

inline smp_mb(i, j)
{
	atomic {
		smp_wmb(i, j);
		smp_rmb(i, j);
	}
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: currently only two readers are supported */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation[NR_READERS];
bit data_access[NR_READERS];

bit write_lock = 0;

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid()); i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid()); i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
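/*
 * ooo_mem() is called around every shared-memory access in the model:
 * each call may or may not flush/refetch each tracked variable, which
 * is how Spin gets to explore all the weakly-ordered interleavings.
 */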

#define get_readerid()	(get_pid())
#define get_writerid()	(get_readerid() + NR_READERS)
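/*
 * Pid layout example (pids follow declaration order below): the
 * NR_READERS urcu_reader instances get pids 0..NR_READERS-1, so the
 * single urcu_writer gets pid NR_READERS; hence a process is a reader
 * exactly when get_pid() < NR_READERS.
 */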

inline wait_for_reader(tmp, tmp2, i, j)
{
	do
	:: 1 ->
		tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
		ooo_mem(i);
		if
		:: (tmp2 & RCU_GP_CTR_NEST_MASK)
			&& ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
				& RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb(i, j);
#else
			ooo_mem(i);
#endif
		:: else -> break;
		fi;
	od;
}
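/*
 * Worked example of the staleness test above: suppose the reader's
 * snapshot tmp2 is 0x01 (outermost critical section entered before the
 * flip) and urcu_gp_ctr is now 0x81. The nest bits are non-zero and
 * (0x01 ^ 0x81) & 0x80 != 0, so the writer keeps waiting; a reader
 * showing 0x00 (not nested) or 0x81 (entered after the flip) is
 * quiescent and lets the loop break.
 */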

inline wait_for_quiescent_state(tmp, tmp2, i, j)
{
	tmp = 0;
	do
	:: tmp < NR_READERS ->
		wait_for_reader(tmp, tmp2, i, j);
		if
		:: (NR_READERS > 1) && (tmp < NR_READERS - 1) ->
			ooo_mem(i);
		:: else -> skip;
		fi;
		tmp++
	:: tmp >= NR_READERS -> break
	od;
}

/* Model the RCU read-side critical section. */

inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
			/* Outermost nesting level: snapshot the GP counter. */
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp2);
		:: else ->
			/* Nested: just bump the nesting count. */
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
					 tmp + 1);
		fi;
		smp_mb(i, j);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	ooo_mem(i);
	read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
	ooo_mem(i);
	data_access[get_readerid()] = 1;
	ooo_mem(i);
	data_access[get_readerid()] = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb(i, j);
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
				 tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	ooo_mem(i);
	//smp_mc(i);	/* added */
}
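/*
 * Informal read-side summary (not part of the model): rcu_read_lock
 * copies urcu_gp_ctr into urcu_active_readers[] at the outermost
 * nesting level and merely increments it when nested, then issues a
 * full barrier; rcu_read_unlock issues the barrier first and then
 * decrements. The data access itself sits between the two loops above.
 */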

active [NR_READERS] proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader
		 * never blocks anyway. We have to test reader and writer
		 * progress separately; otherwise we could think the writer
		 * is making progress when it is in fact blocked by an
		 * always-progressing reader.
		 */
#ifdef READER_PROGRESS
		/* Only test progress of one random reader; they are all
		 * the same. */
		if
		:: get_readerid() == 0 ->
progress_reader:
			skip;
		:: else -> skip;
		fi;
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* Model the RCU update process. */

active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2;
	byte old_gen;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		/* Publish a new generation of the data. */
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);

		/* Busy-wait for the update-side lock. */
		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else -> skip;
				fi;
			}
		od;
		smp_mb(i, j);

		/* First grace-period phase: flip and wait. */
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, tmp2, i, j);

		/* Second grace-period phase: flip back and wait again. */
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, tmp2, i, j);
		ooo_mem(i);
		smp_mb(i, j);
		write_lock = 0;

		/* free-up step, e.g., kfree(). */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break;
	od;
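	/*
	 * Note the two flip/wait passes above: a reader may have fetched
	 * urcu_gp_ctr just before a flip and entered its critical section
	 * just after it, so it carries the old phase bit while the writer
	 * already sees the new one. Waiting for a single phase to drain is
	 * therefore not enough; only after both passes may old_gen be freed.
	 */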

	/*
	 * Given that the reader loops infinitely, let the writer also
	 * busy-loop with progress here so that, with weak fairness, we can
	 * test the writer's progress.
	 */
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;
}

/* Leave after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(generation_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			read_generation[i] = 1;
			data_access[i] = 0;
			i++;
		:: i >= NR_READERS -> break
		od;
		init_done = 1;
	}
}
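
/*
 * A typical verification run, assuming the usual Spin workflow and that
 * the "DEFINES"/specific-defines files mentioned above are prepended by
 * the build (the file name below is illustrative):
 *
 *	spin -a mem.spin
 *	cc -DSAFETY -o pan pan.c
 *	./pan
 */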