/* formal-model/urcu/result-signal-over-writer/testmerge/urcu_free_no_mb.spin.input */
#define NO_RMB
#define NO_WMB
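
/*
 * This "no mb" variant compiles out both smp_rmb() and smp_wmb(), so
 * smp_mb() degrades to a plain ooo_mem() (see smp_mb_pid() below). As
 * the file name suggests, the point is to let the verifier exhibit the
 * read/free race once the barriers are removed.
 */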

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2


#if (NR_READERS == 1)

#define read_free_race (read_generation[0] == last_free_gen)
#define read_free (free_done && data_access[0])

#elif (NR_READERS == 2)

#define read_free_race (read_generation[0] == last_free_gen || read_generation[1] == last_free_gen)
#define read_free (free_done && (data_access[0] || data_access[1]))

#else

#error "Too many readers"

#endif
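
/*
 * Assumed usage: read_free_race holds when a reader's generation
 * snapshot equals the last freed generation, and read_free holds when
 * a reader still accesses data after the free has completed. The
 * verification claim that references these macros lives outside this
 * file.
 */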

#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
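
/*
 * Counter layout: bits 0-6 (RCU_GP_CTR_NEST_MASK == 0x7f) hold the
 * read-side nesting depth and bit 7 (RCU_GP_CTR_BIT == 0x80) is the
 * grace-period phase. For example, a value of 0x82 means phase 1 with
 * nesting depth 2.
 */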

#ifndef READER_NEST_LEVEL
#define READER_NEST_LEVEL 2
#endif

#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define get_pid() (_pid)

/*
 * Each process has its own copy of the data in its cache. Caches are
 * updated at random. smp_wmb and smp_rmb force cache updates (write and
 * read respectively); smp_mb forces both.
 */
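/*
 * Concretely, each cached variable x is represented by three pieces of
 * state (see the macros below): mem_x (main memory), cached_x.val[p]
 * (one cache entry per process) and cache_dirty_x (one dirty bit per
 * process).
 */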

typedef per_proc_byte {
	byte val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bit cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)					\
	atomic {						\
		cached_##x.val[get_pid()] = v;			\
		cache_dirty_##x.bitfield =			\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->					\
		skip					\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;	\
	:: else ->			\
		skip			\
	fi;
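
/*
 * Illustrative expansion (not part of the model):
 * DECLARE_CACHED_VAR(byte, foo) yields
 *	byte mem_foo;
 *	per_proc_byte cached_foo;
 *	per_proc_bit cache_dirty_foo;
 * READ_CACHED_VAR(foo) then reads cached_foo.val[_pid], while
 * WRITE_CACHED_VAR(foo, v) updates that cache entry and sets the
 * calling process's dirty bit.
 */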

/*
 * May or may not propagate a cache entry to or from memory (and thus,
 * indirectly, to other caches): the choice is non-deterministic.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to
 * all reader threads to promote their compiler barrier to an smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}

inline smp_wmb_pid(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		j = 0;
		do
		:: j < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
			j++
		:: j >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}

inline smp_mb_pid(i, j)
{
	atomic {
#ifndef NO_WMB
		smp_wmb_pid(i, j);
#endif
#ifndef NO_RMB
		smp_rmb_pid(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(j);
#endif
#endif
	}
}

/*
 * Readers do a simple barrier(); writers do an smp_mb() _and_ send a
 * signal (or IPI) to make all readers execute an smp_mb(). We do not
 * model the whole rendez-vous between readers and writers here; we
 * simply let the writer update each reader's cache remotely.
 */
inline smp_mb(i, j)
{
	if
	:: get_pid() >= NR_READERS ->
		smp_mb_pid(get_pid(), j);
		i = 0;
		do
		:: i < NR_READERS ->
			smp_mb_pid(i, j);
			i++;
		:: i >= NR_READERS -> break
		od;
		smp_mb_pid(get_pid(), j);
	:: else -> skip;
	fi;
}
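
/*
 * Note the bracketing above: the writer runs its own barrier, then one
 * barrier per reader, then its own barrier again, so the remote
 * barriers are fenced on the writer side on both ends.
 */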

#else

inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

inline smp_mb(i, j)
{
	atomic {
#ifndef NO_WMB
		smp_wmb(i, j);
#endif
#ifndef NO_RMB
		smp_rmb(i, j);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(i);
#endif
#endif
	}
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: the model currently supports at most two readers. */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation[NR_READERS];
bit data_access[NR_READERS];

bit write_lock = 0;

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
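
/*
 * ooo_mem() models out-of-order memory: it may flush any dirty cached
 * variable to memory and refetch any clean one, in a non-deterministic
 * fashion, so loads and stores to different variables can appear
 * reordered.
 */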

#define get_readerid() (get_pid())
#define get_writerid() (get_readerid() + NR_READERS)

inline wait_for_reader(tmp, tmp2, i, j)
{
	do
	:: 1 ->
		tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
		ooo_mem(i);
		if
		:: (tmp2 & RCU_GP_CTR_NEST_MASK)
			&& ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
				& RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb(i, j);
#else
			ooo_mem(i);
#endif
		:: else ->
			break;
		fi;
	od;
}
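
/*
 * Worked example for the wait condition above, with illustrative
 * values: a reader is waited for only if its counter has a non-zero
 * nesting count (it is inside a critical section) and its phase bit
 * differs from urcu_gp_ctr (it entered before the last flip). With
 * urcu_gp_ctr == 0x81 after a flip, a reader holding 0x01 is waited
 * for, since (0x01 ^ 0x81) & 0x80 != 0, while a reader holding 0x81
 * is not.
 */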

inline wait_for_quiescent_state(tmp, tmp2, i, j)
{
	tmp = 0;
	do
	:: tmp < NR_READERS ->
		wait_for_reader(tmp, tmp2, i, j);
		if
		:: (NR_READERS > 1) && (tmp < NR_READERS - 1)
			-> ooo_mem(i);
		:: else
			-> skip;
		fi;
		tmp++
	:: tmp >= NR_READERS -> break
	od;
}

/* Model the RCU read-side critical section. */
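/*
 * A sketch of the mapping, assuming the usual userspace RCU API: the
 * first loop below models nested rcu_read_lock() calls (the outermost
 * entry copies urcu_gp_ctr, inner ones increment the nesting count),
 * the generation snapshot and the data_access window model the
 * read-side dereference, and the second loop models the matching
 * rcu_read_unlock() calls.
 */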

inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK))
			->
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
				tmp2);
		:: else ->
			WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
				tmp + 1);
		fi;
		smp_mb(i, j);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	ooo_mem(i);
	read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
	ooo_mem(i);
	data_access[get_readerid()] = 1;
	ooo_mem(i);
	data_access[get_readerid()] = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb(i, j);
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	ooo_mem(i);
	//smp_mc(i); /* added */
}

active [NR_READERS] proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here because we are
		 * mainly interested in the writer's progress. The reader
		 * never blocks anyway. Reader and writer progress must be
		 * tested separately, otherwise we could conclude the writer
		 * is making progress while it is in fact blocked by an
		 * always-progressing reader.
		 */
#ifdef READER_PROGRESS
		/*
		 * Only test the progress of one reader; they are all
		 * identical.
		 */
		atomic {
			if
			:: get_readerid() == 0 ->
progress_reader:
				skip;
			:: else ->
				skip;
			fi;
		}
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* Model the RCU update process. */
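/*
 * Outline of the update side as modeled below: publish a new generation
 * (incrementing generation_ptr stands in for pointer replacement), take
 * the write lock, flip the grace-period phase bit in urcu_gp_ctr and
 * wait for pre-existing readers (twice, unless SINGLE_FLIP is defined),
 * then release the lock and record the free of the old generation.
 */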

active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2;
	byte old_gen;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);

		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;
		smp_mb(i, j);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		//smp_mc(i);
		wait_for_quiescent_state(tmp, tmp2, i, j);
		//smp_mc(i);
#ifndef SINGLE_FLIP
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		//smp_mc(i);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, tmp2, i, j);
#endif
		smp_mb(i, j);
		write_lock = 0;
		/* free-up step, e.g., kfree(). */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break;
	od;
	/*
	 * Since the reader loops infinitely, let the writer also busy-loop
	 * through a progress label here so that, under weak fairness, we
	 * can check the writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;
}

/* Declared after the readers and writers so that the pid numbering is correct. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(generation_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			read_generation[i] = 1;
			data_access[i] = 0;
			i++;
		:: i >= NR_READERS -> break
		od;
		init_done = 1;
	}
}