add missing ooo_mem() to writer model
[urcu.git] / formal-model / urcu / urcu.spin
/*
 * urcu.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid() (_pid)

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
 * both.
 */

#define DECLARE_CACHED_VAR(type, x, v) \
	type mem_##x = v; \
	type cached_##x[NR_PROCS] = v; \
	bit cache_dirty_##x[NR_PROCS] = 0
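
/*
 * For illustration only (x is a hypothetical variable name, not used by
 * the model), DECLARE_CACHED_VAR(byte, x, 0) expands to:
 *
 *	byte mem_x = 0;                  // the memory copy
 *	byte cached_x[NR_PROCS] = 0;     // one cached copy per process
 *	bit cache_dirty_x[NR_PROCS] = 0; // per-process dirty flag
 */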

#define IS_CACHE_DIRTY(x, id) (cache_dirty_##x[id])

#define READ_CACHED_VAR(x) (cached_##x[get_pid()])

#define WRITE_CACHED_VAR(x, v) \
	atomic { \
		cached_##x[get_pid()] = v; \
		cache_dirty_##x[get_pid()] = 1; \
	}
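
/*
 * The store and its dirty-bit marking execute as one indivisible step, so
 * a concurrently modeled cache flush (e.g. a remote smp_mb_pid() under
 * REMOTE_BARRIERS below) can never interleave between the two.
 */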

#define CACHE_WRITE_TO_MEM(x, id) \
	if \
	:: IS_CACHE_DIRTY(x, id) -> \
		mem_##x = cached_##x[id]; \
		cache_dirty_##x[id] = 0; \
	:: else -> \
		skip \
	fi;

#define CACHE_READ_FROM_MEM(x, id) \
	if \
	:: !IS_CACHE_DIRTY(x, id) -> \
		cached_##x[id] = mem_##x; \
	:: else -> \
		skip \
	fi;
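
/*
 * Note that a dirty line is deliberately not refreshed from memory:
 * doing so would discard the process's pending store. A dirty line must
 * first be flushed by CACHE_WRITE_TO_MEM.
 */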

/*
 * Randomly flush a dirty cache line to memory, or refresh a clean line
 * from memory, or do nothing at all.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id) \
	if \
	:: 1 -> CACHE_WRITE_TO_MEM(x, id); \
	:: 1 -> skip \
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id) \
	if \
	:: 1 -> CACHE_READ_FROM_MEM(x, id); \
	:: 1 -> skip \
	fi;
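
/*
 * Both guards of the if above are executable, so Spin nondeterministically
 * explores runs where the flush/fetch happens and runs where it does not;
 * repeated calls are what model out-of-order write-back and prefetch.
 */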

/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barrier to a smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}

inline smp_wmb_pid(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}

inline smp_mb_pid(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb_pid(i);
#endif
#ifndef NO_RMB
		smp_rmb_pid(i);
#endif
		skip;
	}
}
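
/*
 * NO_WMB and NO_RMB are assumed to be supplied at verification time
 * (e.g. spin -DNO_WMB): removing one of the barriers deliberately weakens
 * the model, to check that verification then flags the ordering bug.
 */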

/*
 * Readers do a simple barrier(); writers do a smp_mb() _and_ send a
 * signal or IPI to make all readers execute a smp_mb().
 * We do not model the whole rendezvous between readers and writers here;
 * we just let the writer update each reader's caches remotely.
 */
inline smp_mb(i)
{
	if
	:: get_pid() >= NR_READERS ->
		smp_mb_pid(get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			smp_mb_pid(i);
			i++;
		:: i >= NR_READERS -> break
		od;
		smp_mb_pid(get_pid());
	:: else -> skip;
	fi;
}
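
/*
 * With NR_READERS == 1, a writer-side smp_mb() thus executes
 * smp_mb_pid(writer); smp_mb_pid(0); smp_mb_pid(writer); that is, the
 * remotely triggered reader barrier is bracketed by the writer's own
 * full barriers, while a reader-side smp_mb() reduces to skip.
 */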

#else

inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

inline smp_mb(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb(i);
#endif
#ifndef NO_RMB
		smp_rmb(i);
#endif
		skip;
	}
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
/* Note! Currently only one reader. */
DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr, 0);

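/*
 * Verification instrumentation (not part of the modeled algorithm):
 *	last_free_gen   - generation number of the last reclaimed pointer,
 *	free_done       - set once the writer has performed the free step,
 *	read_generation - generation observed by the reader,
 *	data_access     - set while the reader accesses the data.
 * Correctness claims (assumed to be supplied separately) relate these.
 */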
byte last_free_gen = 0;
bit free_done = 0;
byte read_generation = 1;
bit data_access = 0;

bit write_lock = 0;

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
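
/*
 * ooo_mem() is called between consecutive memory accesses throughout the
 * model; each call lets the verifier nondeterministically flush or fetch
 * every cached variable, so all weakly ordered interleavings are explored.
 * Typical pattern (as used by the reader below):
 *
 *	ooo_mem(i);
 *	tmp = READ_CACHED_VAR(urcu_active_readers_one);
 *	ooo_mem(i);
 */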

#define get_readerid() (get_pid())
#define get_writerid() (get_readerid() + NR_READERS)

inline wait_for_reader(tmp, id, i)
{
	do
	:: 1 ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		if
		:: (tmp & RCU_GP_CTR_NEST_MASK)
			&& ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
				& RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb(i);
#else
			skip;
#endif
		:: else ->
			break;
		fi;
	od;
}
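
/*
 * The condition above reads: the reader is inside a read-side critical
 * section (non-zero nesting count in tmp & RCU_GP_CTR_NEST_MASK) and it
 * snapshotted the previous grace-period phase (its copy of the phase bit
 * differs from the current urcu_gp_ctr phase bit), so the writer must keep
 * waiting. RCU_GP_CTR_BIT and RCU_GP_CTR_NEST_MASK are not defined in this
 * file; they are assumed to be provided at verification time (e.g. via -D).
 */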

inline wait_for_quiescent_state(tmp, i, j)
{
	i = 0;
	do
	:: i < NR_READERS ->
		wait_for_reader(tmp, i, j);
		i++
	:: i >= NR_READERS -> break
	od;
}

/* Model the RCU read-side critical section. */

inline urcu_one_read(i, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
		:: else ->
			WRITE_CACHED_VAR(urcu_active_readers_one, tmp + 1);
		fi;
		ooo_mem(i);
		smp_mb(i);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	ooo_mem(i);
	read_generation = READ_CACHED_VAR(generation_ptr);
	ooo_mem(i);
	data_access = 1;
	ooo_mem(i);
	data_access = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		smp_mb(i);
		ooo_mem(i);
		tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	ooo_mem(i);
	//smp_mc(i); /* added */
}
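
/*
 * urcu_one_read() models one complete reader pass: the first nesting loop
 * plays the role of rcu_read_lock(), the read_generation/data_access
 * window stands for dereferencing and using the RCU-protected pointer,
 * and the second nesting loop plays the role of rcu_read_unlock(). The
 * data_access flag marks the span during which freeing the observed
 * generation would be unsafe.
 */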

active [NR_READERS] proctype urcu_reader()
{
	byte i, nest_i;
	byte tmp, tmp2;

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader
		 * never blocks anyway. We have to test reader and writer
		 * progress separately; otherwise we could believe the writer
		 * is making progress when it is in fact blocked by an
		 * always-progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, nest_i, tmp, tmp2);
	od;
}

/* Model the RCU update process. */

active [NR_WRITERS] proctype urcu_writer()
{
	byte i, j;
	byte tmp;
	byte old_gen;

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);

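		/*
		 * Busy-waiting test-and-set; this stands in for the mutex
		 * that serializes writers (the concrete lock primitive is
		 * not named in this model).
		 */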
		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;
		smp_mb(i);
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		//smp_mc(i);
		wait_for_quiescent_state(tmp, i, j);
		//smp_mc(i);
#ifndef SINGLE_FLIP
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		//smp_mc(i);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, i, j);
#endif
		ooo_mem(i);
		smp_mb(i);
		ooo_mem(i);
		write_lock = 0;
		/* free-up step, e.g., kfree(). */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
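		/*
		 * last_free_gen records which generation was reclaimed and
		 * free_done that reclamation happened; correctness claims
		 * (assumed to be supplied separately, e.g. as LTL formulas)
		 * compare them against read_generation and data_access to
		 * detect a reader using freed data.
		 */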
	:: else -> break;
	od;
	/*
	 * Since the reader loops forever, let the writer also busy-loop
	 * with progress here, so that with weak fairness we can still test
	 * the writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;
}