/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

#define NR_WRITERS 1
#define NR_READERS 3

/* Number of reader and writer processes */
#define NR_PROCS (NR_READERS + NR_WRITERS)

/* Includes reader, writer and init process */
#define MAX_NR_PROCS 5

#define get_pid() (_pid)

/*
 * Grace-period counter layout: the low-order bits count read-side
 * critical section nesting, and one high-order bit flips once per
 * grace-period phase. These constants are used below; the values are
 * assumed to match the canonical urcu.spin model.
 */
#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)

/*
 * Each process has its own cached copy of the data. Caches are updated
 * randomly. smp_wmb forces cache write-back, smp_rmb forces a cache
 * update (read), and smp_mb forces both.
 */

#define DECLARE_CACHED_VAR(type, x, v)		\
	type mem_##x = v;			\
	type cached_##x[NR_PROCS] = v;		\
	bit cache_dirty_##x[NR_PROCS] = 0

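/*
 * For illustration (not part of the model): DECLARE_CACHED_VAR(byte, x, 0)
 * expands to a backing store "mem_x", per-process cached copies
 * "cached_x[NR_PROCS]", and per-process dirty bits "cache_dirty_x[NR_PROCS]".
 */
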
#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x[id])

#define READ_CACHED_VAR(x)	(cached_##x[get_pid()])

#define WRITE_CACHED_VAR(x, v)			\
	atomic {				\
		cached_##x[get_pid()] = v;	\
		cache_dirty_##x[get_pid()] = 1;	\
	}

#define CACHE_WRITE_TO_MEM(x, id)		\
	if					\
	:: IS_CACHE_DIRTY(x, id) ->		\
		mem_##x = cached_##x[id];	\
		cache_dirty_##x[id] = 0;	\
	:: else ->				\
		skip				\
	fi;

#define CACHE_READ_FROM_MEM(x, id)		\
	if					\
	:: !IS_CACHE_DIRTY(x, id) ->		\
		cached_##x[id] = mem_##x;	\
	:: else ->				\
		skip				\
	fi;

/*
 * Nondeterministically write a dirty cache line back to memory (or
 * refresh a clean one from memory), or do nothing. This models a cache
 * that may write back or refill at any time.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

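/*
 * Note: both ":: 1" guards are always executable, so the verifier
 * explores every interleaving of "cache synchronized" vs. "cache stale"
 * at each call site; this nondeterminism is what models out-of-order
 * memory.
 */
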
/*
 * The remote-barriers variant tests the scheme where a signal (or IPI)
 * is sent to all reader threads, promoting their compiler barrier() to
 * a full smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}

inline smp_wmb_pid(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}

inline smp_mb_pid(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb_pid(i);
#endif
#ifndef NO_RMB
		smp_rmb_pid(i);
#endif
		skip;
	}
}

/*
 * Readers do a simple barrier(); writers do a smp_mb() _and_ send a
 * signal (or IPI) to make all readers execute a smp_mb(). We do not
 * model the whole rendezvous between readers and writers here; we
 * simply let the writer update each reader's caches remotely.
 */
inline smp_mb(i)
{
	if
	:: get_pid() >= NR_READERS ->
		smp_mb_pid(get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			smp_mb_pid(i);
			i++;
		:: i >= NR_READERS -> break
		od;
		smp_mb_pid(get_pid());
	:: else -> skip;
	fi;
}

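/*
 * Only the writer (pid >= NR_READERS) takes the first branch: it
 * executes its own barrier, then one barrier on behalf of each reader
 * (the modeled effect of the signal/IPI), then its own barrier again.
 * A reader's smp_mb() is a no-op here, matching the plain barrier() it
 * uses in the remote-barriers scheme.
 */
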
#else

inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

inline smp_mb(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb(i);
#endif
#ifndef NO_RMB
		smp_rmb(i);
#endif
		skip;
	}
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
/* Note: all readers currently share this single counter variable. */
DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr, 0);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation = 1;
bit data_access = 0;

bit write_lock = 0;

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
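
/*
 * ooo_mem() is placed between every pair of memory accesses: at each
 * such point, each cached variable may or may not be flushed to or
 * refreshed from memory, so the verifier sees every ordering a weakly
 * ordered CPU could produce for these variables.
 */
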
#define get_readerid() (get_pid())
#define get_writerid() (get_readerid() + NR_READERS)

inline wait_for_reader(tmp, id, i)
{
	do
	:: 1 ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		if
		:: (tmp & RCU_GP_CTR_NEST_MASK)
			&& ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
				& RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb(i);
#else
			skip;
#endif
		:: else ->
			break;
		fi;
	od;
}
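
/*
 * The writer keeps spinning while the reader is inside a read-side
 * critical section (nest count non-zero) that was started before the
 * current grace-period phase (its snapshot of the phase bit differs
 * from the current urcu_gp_ctr phase bit). Readers that entered after
 * the phase flip, or that are outside a critical section, are ignored.
 */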

inline wait_for_quiescent_state(tmp, i, j)
{
	i = 0;
	do
	:: i < NR_READERS ->
		wait_for_reader(tmp, i, j);
		i++
	:: i >= NR_READERS -> break
	od;
}

/* Model the RCU read-side critical section. */

inline urcu_one_read(i, nest_i, tmp, tmp2, nest)
{
	nest_i = 0;
	do
	:: nest_i < nest ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
		:: else ->
			WRITE_CACHED_VAR(urcu_active_readers_one, tmp + 1);
		fi;
		ooo_mem(i);
		smp_mb(i);
		nest_i++;
	:: nest_i >= nest -> break;
	od;

	ooo_mem(i);
	read_generation = READ_CACHED_VAR(generation_ptr);
	ooo_mem(i);
	data_access = 1;
	ooo_mem(i);
	data_access = 0;

	nest_i = 0;
	do
	:: nest_i < nest ->
		ooo_mem(i);
		smp_mb(i);
		ooo_mem(i);
		tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
		nest_i++;
	:: nest_i >= nest -> break;
	od;
	ooo_mem(i);
	//smp_mc(i); /* added */
}

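/*
 * The first loop models "nest" nested rcu_read_lock() calls: the
 * outermost lock copies urcu_gp_ctr (phase bit plus initial count) into
 * the reader counter, while inner ones just increment the nest count.
 * The body then reads the generation pointer and touches the data, and
 * the second loop models the matching rcu_read_unlock() calls.
 */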
active [NR_READERS] proctype urcu_reader()
{
	byte i, nest_i;
	byte tmp, tmp2;

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we
		 * are mainly interested in the writer's progress. The
		 * reader never blocks anyway. We have to test reader and
		 * writer progress separately; otherwise we could believe
		 * the writer is making progress when it is in fact
		 * blocked by an always-progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, nest_i, tmp, tmp2, 2);
	od;
}

/* Model the RCU update process. */

active [NR_WRITERS] proctype urcu_writer()
{
	byte i, j;
	byte tmp;
	byte old_gen;

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);

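		/*
		 * The atomic generation bump above stands in for
		 * publishing a new version of the protected data (the
		 * rcu_assign_pointer() step in the real code); old_gen
		 * records which version must be freed after a grace
		 * period.
		 */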
		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;
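		/*
		 * The loop above is a test-and-set spinlock on
		 * write_lock, serializing writers. With a single writer
		 * it always succeeds immediately, but it keeps the model
		 * faithful to the multi-writer code.
		 */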
		smp_mb(i);
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		//smp_mc(i);
		wait_for_quiescent_state(tmp, i, j);
		//smp_mc(i);
#ifndef SINGLE_FLIP
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		//smp_mc(i);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, i, j);
#endif
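		/*
		 * Unless SINGLE_FLIP is defined, the writer flips the
		 * phase bit and waits twice. The second flip-and-wait
		 * closes the window where a reader sampled urcu_gp_ctr
		 * just before the first flip but had not yet made its
		 * critical section visible when the writer scanned it;
		 * building with -DSINGLE_FLIP lets the verifier exhibit
		 * that failure case.
		 */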
		ooo_mem(i);
		smp_mb(i);
		ooo_mem(i);
		write_lock = 0;
		/* free-up step, e.g., kfree(). */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break;
	od;
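
	/*
	 * last_free_gen and free_done record which generation was
	 * reclaimed. The intended safety property (checked by LTL claims
	 * kept alongside this model, not in this file) is that no reader
	 * still observes read_generation == last_free_gen while
	 * data_access is set.
	 */
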
	/*
	 * Since the reader loops forever, let the writer also busy-loop
	 * with a progress label here, so that, under weak fairness, we
	 * can still verify the writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;
}