/*
 * mem.spin: Promela code to validate memory barriers with OOO memory
 * and out-of-order instruction scheduling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid()	(_pid)

#define get_readerid()	(get_pid())
/*
 * Produced process control and data flow. Updated after each instruction to
 * show which variables are ready. Uses one-hot bit encoding per variable to
 * save state space. Used as triggers to execute the instructions having those
 * variables as input. Bits are left active to inhibit instruction execution.
 * This scheme makes instruction disabling and dependency fall-back automatic.
 */

#define CONSUME_TOKENS(state, bits, notbits)			\
	((!(state & (notbits))) && (state & (bits)) == (bits))

#define PRODUCE_TOKENS(state, bits)	\
	state = state | (bits);

#define CLEAR_TOKENS(state, bits)	\
	state = state & ~(bits)
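
/*
 * Illustrative sketch, not part of the model. With two hypothetical
 * tokens INSN_A and INSN_B (and a hypothetical START token), a sequence
 * "A; B" where B reads a result of A is expressed as guarded alternatives:
 *
 *	:: CONSUME_TOKENS(state, START, INSN_A) ->
 *		(effect of A)
 *		PRODUCE_TOKENS(state, INSN_A);
 *	:: CONSUME_TOKENS(state, START | INSN_A, INSN_B) ->
 *		(effect of B)
 *		PRODUCE_TOKENS(state, INSN_B);
 *
 * Each instruction fires at most once (its own token is in "notbits")
 * and only after its dependencies fired (their tokens are in "bits").
 * Guards of independent instructions can be enabled at the same time,
 * which is what models out-of-order scheduling. CLEAR_TOKENS resets the
 * state once the whole sequence has retired.
 */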

/*
 * Types of dependency:
 *
 * Data dependency
 *
 * - True dependency, Read-after-Write (RAW)
 *
 * This type of dependency happens when a statement depends on the result of a
 * previous statement. This applies to any statement which needs to read a
 * variable written by a preceding statement.
 *
 * - False dependency, Write-after-Read (WAR)
 *
 * Typically, variable renaming can ensure that this dependency goes away.
 * However, if the statements must read and then write from/to the same
 * variable in the OOO memory model, renaming may be impossible, and therefore
 * this causes a WAR dependency.
 *
 * - Output dependency, Write-after-Write (WAW)
 *
 * Two writes to the same variable in subsequent statements. Variable renaming
 * can ensure this is not needed, but it can be required when writing multiple
 * times to the same OOO mem model variable.
 *
 * Control dependency
 *
 * Execution of a given instruction depends on a previous instruction
 * evaluating in a way that allows its execution. E.g.: branches.
 *
 * Useful considerations for joining dependencies after branch
 *
 * - Pre-dominance
 *
 * "We say box i dominates box j if every path (leading from input to output
 * through the diagram) which passes through box j must also pass through box
 * i. Thus box i dominates box j if box j is subordinate to box i in the
 * program."
 *
 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
 * Another classic algorithm to calculate dominance: Lengauer-Tarjan (in gcc)
 *
 * - Post-dominance
 *
 * Just as pre-dominance, but with the arcs of the data flow inverted, and
 * input vs output exchanged. Therefore, i post-dominating j ensures that
 * every path passing by j will pass by i before reaching the output.
 *
 * Other considerations
 *
 * Note about the "volatile" keyword dependency: the compiler will order
 * volatile accesses so they appear in the right order on a given CPU. They
 * can still be reordered by the CPU instruction scheduling. This therefore
 * cannot be considered a dependency.
 *
 * References:
 *
 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
 * Kaufmann. ISBN 1-55860-698-X.
 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
 * 1-55860-286-0.
 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
 * Morgan Kaufmann. ISBN 1-55860-320-4.
 */
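
/*
 * Illustrative examples (hypothetical statements):
 *
 *	S1: a = b;	S2: c = a;	RAW : S2 reads the a written by S1
 *	S1: a = b;	S2: b = 0;	WAR : S2 overwrites the b read by S1
 *	S1: a = 0;	S2: a = 1;	WAW : both write a
 *
 * In this model, such dependencies arise between the *_CACHED_VAR
 * accesses below and decide which tokens an instruction must consume.
 */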

/*
 * Note about loops and nested calls
 *
 * To keep this model simple, loops expressed in the framework behave as if
 * there were a core synchronizing instruction between loops. To see the
 * effect of loop unrolling, manually unrolling loops is required. Note that
 * if loops end or start with a core synchronizing instruction, the model is
 * appropriate. Nested calls are not supported. A sketch of manual unrolling
 * follows this note.
 */
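
/*
 * Illustrative sketch (hypothetical, not part of the model): a critical
 * section executed twice in a loop,
 *
 *	do
 *	:: again -> (critical section)
 *	:: else -> break
 *	od;
 *
 * is modeled as two unrolled copies of the body with distinct token
 * sets, which is what urcu_one_read does below with the *_UNROLL tokens
 * for its second lock/read/unlock pass.
 */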

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
 * both. See the usage sketch after the cache helper macros below.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

typedef per_proc_bit {
	bit val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bitfield {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bitfield cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)				\
	atomic {					\
		cached_##x.val[get_pid()] = v;		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->					\
		skip					\
	fi;

#define CACHE_READ_FROM_MEM(x, id)		\
	if					\
	:: !IS_CACHE_DIRTY(x, id) ->		\
		cached_##x.val[id] = mem_##x;	\
	:: else ->				\
		skip				\
	fi;

/*
 * May or may not propagate the cache: randomly commits a dirty cached
 * copy to memory, or randomly fetches a clean one from it.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;
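
/*
 * Usage sketch (illustrative only; x stands for any variable declared
 * with DECLARE_CACHED_VAR, other_pid for another process id):
 *
 *	WRITE_CACHED_VAR(x, 1);			update local copy, mark dirty
 *	CACHE_WRITE_TO_MEM(x, get_pid());	commit local copy to mem_x
 *	CACHE_READ_FROM_MEM(x, other_pid);	other process picks up mem_x
 *
 * ooo_mem() below performs the two transfer steps at random points,
 * which is how loads and stores get reordered in this model; the
 * smp_*mb() primitives below force them.
 */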

/* Must consume all prior read tokens. All subsequent reads depend on it. */
inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Must consume all prior write tokens. All subsequent writes depend on it. */
inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/*
 * Synchronization point. Must consume all prior read and write tokens. All
 * subsequent reads and writes depend on it.
 */
inline smp_mb(i, j)
{
	atomic {
		smp_wmb(i, j);
		smp_rmb(i, j);
	}
}
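
/*
 * Illustrative sketch (hypothetical variables flag and data standing
 * for the rcu_ptr and rcu_data variables used below): the classic
 * message-passing pattern maps onto these barriers as
 *
 *	writer:					reader:
 *	WRITE_CACHED_VAR(data, v);		tmp = READ_CACHED_VAR(flag);
 *	smp_wmb(i, j);				smp_rmb(i, j);
 *	WRITE_CACHED_VAR(flag, 1);		v = READ_CACHED_VAR(data);
 *
 * Without the barriers, the RANDOM_CACHE_* steps in ooo_mem() may
 * commit or fetch the two variables in either order.
 */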

#ifdef REMOTE_BARRIERS

bit reader_barrier[NR_READERS];

/*
 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode,
 * because they would add nonexistent core synchronization and would therefore
 * create an incomplete model.
 * Therefore, we model the read-side memory barriers by completely disabling
 * the memory barriers and their dependencies on the read-side. One instruction
 * at a time (in different verification runs), we make a different instruction
 * listen for signals.
 */

#define smp_mb_reader(i, j)

/*
 * Service 0, 1 or many barrier requests.
 */
inline smp_mb_recv(i, j)
{
	do
	:: (reader_barrier[get_readerid()] == 1) ->
		/*
		 * We choose to ignore cycles caused by the writer
		 * busy-looping, waiting for the reader, sending barrier
		 * requests, and the reader always servicing them without
		 * continuing execution.
		 */
progress_ignoring_mb1:
		smp_mb(i, j);
		reader_barrier[get_readerid()] = 0;
	:: 1 ->
		/*
		 * We choose to ignore the writer's non-progress caused by
		 * the reader ignoring the writer's mb() requests.
		 */
progress_ignoring_mb2:
		break;
	od;
}

//#ifdef WRITER_PROGRESS
//#define PROGRESS_LABEL(progressid)
//#else
//#define PROGRESS_LABEL(progressid)
//#endif

#define PROGRESS_LABEL(progressid)	progress_writer_progid_##progressid:

#define smp_mb_send(i, j, progressid)					\
{									\
	smp_mb(i, j);							\
	i = 0;								\
	do								\
	:: i < NR_READERS ->						\
		reader_barrier[i] = 1;					\
		/*							\
		 * Busy-looping waiting for reader barrier handling is of little \
		 * interest, given the reader has the ability to totally ignore \
		 * barrier requests.					\
		 */							\
		do							\
		:: (reader_barrier[i] == 1) ->				\
PROGRESS_LABEL(progressid)						\
			skip;						\
		:: (reader_barrier[i] == 0) -> break;			\
		od;							\
		i++;							\
	:: i >= NR_READERS ->						\
		break							\
	od;								\
	smp_mb(i, j);							\
}
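
/*
 * Sketch of the handshake modeled above (illustrative; "me" stands for
 * get_readerid()):
 *
 *	writer (smp_mb_send)		reader (smp_mb_recv)
 *	reader_barrier[i] = 1;		:: reader_barrier[me] == 1 ->
 *	wait reader_barrier[i] == 0		smp_mb(i, j);
 *						reader_barrier[me] = 0;
 *
 * The reader may also ignore requests forever (the ":: 1 -> break"
 * branch), which is why the writer's busy-wait carries PROGRESS_LABEL
 * and both non-progress cycles are explicitly ignored.
 */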

#else

#define smp_mb_send(i, j, progressid)	smp_mb(i, j)
#define smp_mb_reader	smp_mb
#define smp_mb_recv(i, j)

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: currently only one reader */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* RCU data */
DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);

/* RCU pointer */
#if (SLAB_SIZE == 2)
DECLARE_CACHED_VAR(bit, rcu_ptr);
bit ptr_read_first[NR_READERS];
bit ptr_read_second[NR_READERS];
#else
DECLARE_CACHED_VAR(byte, rcu_ptr);
byte ptr_read_first[NR_READERS];
byte ptr_read_second[NR_READERS];
#endif

bit data_read_first[NR_READERS];
bit data_read_second[NR_READERS];

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/*
 * Bit encoding, urcu_reader :
 */

int _proc_urcu_reader;
#define proc_urcu_reader	_proc_urcu_reader

/* Body of PROCEDURE_READ_LOCK */
#define READ_PROD_A_READ		(1 << 0)
#define READ_PROD_B_IF_TRUE		(1 << 1)
#define READ_PROD_B_IF_FALSE		(1 << 2)
#define READ_PROD_C_IF_TRUE_READ	(1 << 3)

#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)		\
	:: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) -> \
		ooo_mem(i);						\
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  READ_PROD_A_READ << base,	/* RAW, pre-dominant */ \
			  (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) -> \
		if							\
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->			\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base); \
		:: else ->						\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
		fi;							\
	/* IF TRUE */							\
	:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base, \
			  READ_PROD_C_IF_TRUE_READ << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);			\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_C_IF_TRUE_READ	/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
					/* IF_MERGE implies		\
					 * post-dominance */		\
	/* ELSE */							\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_B_IF_FALSE		/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],	\
				 tmp + 1);				\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
					/* IF_MERGE implies		\
					 * post-dominance */		\
	/* ENDIF */							\
	skip

/* Body of PROCEDURE_READ_UNLOCK */
#define READ_PROC_READ_UNLOCK		(1 << 0)

#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken,					\
			  READ_PROC_READ_UNLOCK << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken					\
			  | (READ_PROC_READ_UNLOCK << base),	/* WAR */ \
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	skip


#define READ_PROD_NONE			(1 << 0)

/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
#define READ_LOCK_BASE			1
#define READ_LOCK_OUT			(1 << 5)

#define READ_PROC_FIRST_MB		(1 << 6)

/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
#define READ_LOCK_NESTED_BASE		7
#define READ_LOCK_NESTED_OUT		(1 << 11)

#define READ_PROC_READ_GEN		(1 << 12)
#define READ_PROC_ACCESS_GEN		(1 << 13)

/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
#define READ_UNLOCK_NESTED_BASE		14
#define READ_UNLOCK_NESTED_OUT		(1 << 15)

#define READ_PROC_SECOND_MB		(1 << 16)

/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
#define READ_UNLOCK_BASE		17
#define READ_UNLOCK_OUT			(1 << 18)

/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
#define READ_LOCK_UNROLL_BASE		19
#define READ_LOCK_OUT_UNROLL		(1 << 23)

#define READ_PROC_THIRD_MB		(1 << 24)

#define READ_PROC_READ_GEN_UNROLL	(1 << 25)
#define READ_PROC_ACCESS_GEN_UNROLL	(1 << 26)

#define READ_PROC_FOURTH_MB		(1 << 27)

/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
#define READ_UNLOCK_UNROLL_BASE		28
#define READ_UNLOCK_OUT_UNROLL		(1 << 29)


/* Should not include branches */
#define READ_PROC_ALL_TOKENS		(READ_PROD_NONE			\
					| READ_LOCK_OUT			\
					| READ_PROC_FIRST_MB		\
					| READ_LOCK_NESTED_OUT		\
					| READ_PROC_READ_GEN		\
					| READ_PROC_ACCESS_GEN		\
					| READ_UNLOCK_NESTED_OUT	\
					| READ_PROC_SECOND_MB		\
					| READ_UNLOCK_OUT		\
					| READ_LOCK_OUT_UNROLL		\
					| READ_PROC_THIRD_MB		\
					| READ_PROC_READ_GEN_UNROLL	\
					| READ_PROC_ACCESS_GEN_UNROLL	\
					| READ_PROC_FOURTH_MB		\
					| READ_UNLOCK_OUT_UNROLL)

/* Must clear all tokens, including branches */
#define READ_PROC_ALL_TOKENS_CLEAR	((1 << 30) - 1)
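
/*
 * Example of the resulting bit layout (values, for reference):
 * READ_LOCK_BASE = 1, so within the first PROCEDURE_READ_LOCK expansion
 * READ_PROD_A_READ << READ_LOCK_BASE is bit 1, READ_PROD_B_IF_TRUE bit 2,
 * READ_PROD_B_IF_FALSE bit 3, READ_PROD_C_IF_TRUE_READ bit 4, and the
 * producetoken READ_LOCK_OUT is bit 5. Bits 0 to 29 are thus all distinct,
 * and READ_PROC_ALL_TOKENS_CLEAR ((1 << 30) - 1) wipes every one of them,
 * including the branch tokens that READ_PROC_ALL_TOKENS leaves out.
 */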

inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);

#ifdef NO_MB
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

#ifdef REMOTE_BARRIERS
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

	do
	:: 1 ->

#ifdef REMOTE_BARRIERS
		/*
		 * A signal-based memory barrier will only execute at a
		 * point that appears in program order.
		 */
		if
		:: 1 ->
			atomic {
				if
				:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
						READ_LOCK_OUT | READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
						READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
						READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
						READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
						READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
						READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT,
						READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
						READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL,
						READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
						READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
						0) ->
					goto non_atomic3;
non_atomic3_end:
					skip;
				fi;
			}
		fi;

		goto non_atomic3_skip;
non_atomic3:
		smp_mb_recv(i, j);
		goto non_atomic3_end;
non_atomic3_skip:

#endif /* REMOTE_BARRIERS */

		atomic {
			if
			PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_LOCK_OUT,	/* post-dominant */
					  READ_PROC_FIRST_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);

			PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
					    READ_LOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB,	/* mb() orders reads */
					  READ_PROC_READ_GEN) ->
				ooo_mem(i);
				ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB	/* mb() orders reads */
					  | READ_PROC_READ_GEN,
					  READ_PROC_ACCESS_GEN) ->
				/* smp_read_barrier_depends */
				goto rmb1;
rmb1_end:
				data_read_first[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);

			/*
			 * Note: we remove the nested memory barrier from the
			 * read unlock model, given it is not usually needed.
			 * The implementation keeps the barrier because the
			 * performance impact a branch would add in the common
			 * case does not justify removing it.
			 */

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
					      READ_PROC_FIRST_MB
					      | READ_LOCK_OUT
					      | READ_LOCK_NESTED_OUT,
					      READ_UNLOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_ACCESS_GEN		/* mb() orders reads */
					  | READ_PROC_READ_GEN		/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_LOCK_OUT		/* post-dominant */
					  | READ_LOCK_NESTED_OUT	/* post-dominant */
					  | READ_UNLOCK_NESTED_OUT,
					  READ_PROC_SECOND_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
					      READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT);

			/* Unrolling loop : second consecutive lock */
			/* reading urcu_active_readers, which have been written by
			 * READ_UNLOCK_OUT : RAW */
			PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
					    READ_UNLOCK_OUT		/* RAW */
					    | READ_PROC_SECOND_MB	/* mb() orders reads */
					    | READ_PROC_FIRST_MB	/* mb() orders reads */
					    | READ_LOCK_NESTED_OUT	/* RAW */
					    | READ_LOCK_OUT		/* RAW */
					    | READ_UNLOCK_NESTED_OUT,	/* RAW */
					    READ_LOCK_OUT_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_THIRD_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_READ_GEN_UNROLL) ->
				ooo_mem(i);
				ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL
					  | READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_ACCESS_GEN_UNROLL) ->
				/* smp_read_barrier_depends */
				goto rmb2;
rmb2_end:
				data_read_second[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_ACCESS_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_PROC_THIRD_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_FOURTH_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
					      READ_PROC_FOURTH_MB	/* mb() orders reads */
					      | READ_PROC_THIRD_MB	/* mb() orders reads */
					      | READ_LOCK_OUT_UNROLL	/* RAW */
					      | READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT_UNROLL);
			:: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
	od;
	/*
	 * Dependency between consecutive loops:
	 * RAW dependency on
	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
	 * between loops.
	 * _WHEN THE MB()s are in place_, they add full ordering of the
	 * generation pointer read wrt the active reader count read, which
	 * ensures execution will not spill across loop execution.
	 * However, in the event the mb()s are removed (execution using a
	 * signal handler to promote barrier() -> smp_mb()), nothing prevents
	 * one loop's execution from spilling into the other's.
	 */
	goto end;
rmb1:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb1_end;
rmb2:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb2_end;
end:
	skip;
}


active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader
		 * never blocks anyway. We have to test reader and writer
		 * progress separately, otherwise we could think the writer
		 * is making progress when it is in fact blocked by an
		 * always-progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* no name clash please */
#undef proc_urcu_reader


/* Model the RCU update process. */

/*
 * Bit encoding, urcu_writer :
 * Currently only supports one reader.
 */

int _proc_urcu_writer;
#define proc_urcu_writer	_proc_urcu_writer

#define WRITE_PROD_NONE			(1 << 0)

#define WRITE_DATA			(1 << 1)
#define WRITE_PROC_WMB			(1 << 2)
#define WRITE_XCHG_PTR			(1 << 3)

#define WRITE_PROC_FIRST_MB		(1 << 4)

/* first flip */
#define WRITE_PROC_FIRST_READ_GP	(1 << 5)
#define WRITE_PROC_FIRST_WRITE_GP	(1 << 6)
#define WRITE_PROC_FIRST_WAIT		(1 << 7)
#define WRITE_PROC_FIRST_WAIT_LOOP	(1 << 8)

/* second flip */
#define WRITE_PROC_SECOND_READ_GP	(1 << 9)
#define WRITE_PROC_SECOND_WRITE_GP	(1 << 10)
#define WRITE_PROC_SECOND_WAIT		(1 << 11)
#define WRITE_PROC_SECOND_WAIT_LOOP	(1 << 12)

#define WRITE_PROC_SECOND_MB		(1 << 13)

#define WRITE_FREE			(1 << 14)

#define WRITE_PROC_ALL_TOKENS		(WRITE_PROD_NONE		\
					| WRITE_DATA			\
					| WRITE_PROC_WMB		\
					| WRITE_XCHG_PTR		\
					| WRITE_PROC_FIRST_MB		\
					| WRITE_PROC_FIRST_READ_GP	\
					| WRITE_PROC_FIRST_WRITE_GP	\
					| WRITE_PROC_FIRST_WAIT		\
					| WRITE_PROC_SECOND_READ_GP	\
					| WRITE_PROC_SECOND_WRITE_GP	\
					| WRITE_PROC_SECOND_WAIT	\
					| WRITE_PROC_SECOND_MB		\
					| WRITE_FREE)

#define WRITE_PROC_ALL_TOKENS_CLEAR	((1 << 15) - 1)
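
/*
 * Informal summary of the writer token chain below (in program order):
 *
 *	write new data -> wmb -> xchg rcu_ptr -> mb -> read/flip gp parity
 *	-> wait for reader (loop) -> second read/flip -> second wait
 *	-> mb -> poison old data
 *
 * i.e. the model's equivalent of rcu_assign_pointer() followed by
 * synchronize_rcu() and the free (here: poisoning) of the old slab entry.
 */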

/*
 * Mutexes are implied around writer execution. A single writer at a time.
 */
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2, tmpa;
	byte cur_data = 0, old_data, loop_nr = 0;
	byte cur_gp_val = 0;	/*
				 * Keep a local trace of the current parity so
				 * we do not add spurious dependencies on the
				 * global GP update. Needed to test the single
				 * flip case.
				 */

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (loop_nr < 3) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		loop_nr = loop_nr + 1;

		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);

#ifdef NO_WMB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
#endif

#ifdef NO_MB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
#endif

#ifdef SINGLE_FLIP
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
		/* For single flip, we need to know the current parity */
		cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
#endif

		do :: 1 ->
		atomic {
			if

			:: CONSUME_TOKENS(proc_urcu_writer,
					  WRITE_PROD_NONE,
					  WRITE_DATA) ->
				ooo_mem(i);
				cur_data = (cur_data + 1) % SLAB_SIZE;
				WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);


			:: CONSUME_TOKENS(proc_urcu_writer,
					  WRITE_DATA,
					  WRITE_PROC_WMB) ->
				smp_wmb(i, j);
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);

			:: CONSUME_TOKENS(proc_urcu_writer,
					  WRITE_PROC_WMB,
					  WRITE_XCHG_PTR) ->
				/* rcu_xchg_pointer() */
				atomic {
					old_data = READ_CACHED_VAR(rcu_ptr);
					WRITE_CACHED_VAR(rcu_ptr, cur_data);
				}
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);

			:: CONSUME_TOKENS(proc_urcu_writer,
					  WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
					  WRITE_PROC_FIRST_MB) ->
				goto smp_mb_send1;
smp_mb_send1_end:
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);

			/* first flip */
			:: CONSUME_TOKENS(proc_urcu_writer,
					  WRITE_PROC_FIRST_MB,
					  WRITE_PROC_FIRST_READ_GP) ->
				tmpa = READ_CACHED_VAR(urcu_gp_ctr);
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
			:: CONSUME_TOKENS(proc_urcu_writer,
					  WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
					  | WRITE_PROC_FIRST_READ_GP,
					  WRITE_PROC_FIRST_WRITE_GP) ->
				ooo_mem(i);
				WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);

			:: CONSUME_TOKENS(proc_urcu_writer,
					  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
					  WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
					  WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
				ooo_mem(i);
				/* ONLY WAITING FOR READER 0 */
				tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
#ifndef SINGLE_FLIP
				/*
				 * In normal execution, we always start by
				 * waiting for the even parity.
				 */
				cur_gp_val = RCU_GP_CTR_BIT;
#endif
				if
				:: (tmp2 & RCU_GP_CTR_NEST_MASK)
						&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
					PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
				:: else ->
					PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
				fi;

			:: CONSUME_TOKENS(proc_urcu_writer,
					  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
					  WRITE_PROC_FIRST_WRITE_GP
					  | WRITE_PROC_FIRST_READ_GP
					  | WRITE_PROC_FIRST_WAIT_LOOP
					  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
					  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
					  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
				goto smp_mb_send2;
smp_mb_send2_end:
#else
				ooo_mem(i);
#endif
				/* This instruction loops to WRITE_PROC_FIRST_WAIT */
				CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);

			/* second flip */
			:: CONSUME_TOKENS(proc_urcu_writer,
					  WRITE_PROC_FIRST_WAIT	/* Control dependency : need to branch out of
								 * the loop to execute the next flip (CHECK) */
					  | WRITE_PROC_FIRST_WRITE_GP
					  | WRITE_PROC_FIRST_READ_GP
					  | WRITE_PROC_FIRST_MB,
					  WRITE_PROC_SECOND_READ_GP) ->
				ooo_mem(i);
				tmpa = READ_CACHED_VAR(urcu_gp_ctr);
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
			:: CONSUME_TOKENS(proc_urcu_writer,
					  WRITE_PROC_FIRST_MB
					  | WRITE_PROC_WMB
					  | WRITE_PROC_FIRST_READ_GP
					  | WRITE_PROC_FIRST_WRITE_GP
					  | WRITE_PROC_SECOND_READ_GP,
					  WRITE_PROC_SECOND_WRITE_GP) ->
				ooo_mem(i);
				WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);

			:: CONSUME_TOKENS(proc_urcu_writer,
					  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
					  WRITE_PROC_FIRST_WAIT
					  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
					  WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
				ooo_mem(i);
				/* ONLY WAITING FOR READER 0 */
				tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
				if
				:: (tmp2 & RCU_GP_CTR_NEST_MASK)
						&& ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
					PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
				:: else ->
					PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
				fi;

			:: CONSUME_TOKENS(proc_urcu_writer,
					  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
					  WRITE_PROC_SECOND_WRITE_GP
					  | WRITE_PROC_FIRST_WRITE_GP
					  | WRITE_PROC_SECOND_READ_GP
					  | WRITE_PROC_FIRST_READ_GP
					  | WRITE_PROC_SECOND_WAIT_LOOP
					  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
					  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
					  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
				goto smp_mb_send3;
smp_mb_send3_end:
#else
				ooo_mem(i);
#endif
				/* This instruction loops to WRITE_PROC_SECOND_WAIT */
				CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);


			:: CONSUME_TOKENS(proc_urcu_writer,
					  WRITE_PROC_FIRST_WAIT
					  | WRITE_PROC_SECOND_WAIT
					  | WRITE_PROC_FIRST_READ_GP
					  | WRITE_PROC_SECOND_READ_GP
					  | WRITE_PROC_FIRST_WRITE_GP
					  | WRITE_PROC_SECOND_WRITE_GP
					  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
					  | WRITE_PROC_FIRST_MB,
					  WRITE_PROC_SECOND_MB) ->
				goto smp_mb_send4;
smp_mb_send4_end:
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);

			:: CONSUME_TOKENS(proc_urcu_writer,
					  WRITE_XCHG_PTR
					  | WRITE_PROC_FIRST_WAIT
					  | WRITE_PROC_SECOND_WAIT
					  | WRITE_PROC_WMB	/* No dependency on
								 * WRITE_DATA because we
								 * write to a
								 * different location. */
					  | WRITE_PROC_SECOND_MB
					  | WRITE_PROC_FIRST_MB,
					  WRITE_FREE) ->
				WRITE_CACHED_VAR(rcu_data[old_data], POISON);
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);

			:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
		od;
		/*
		 * Note: the Promela model adds implicit serialization of the
		 * WRITE_FREE instruction. Normally, it would be permitted to
		 * spill over into the next loop execution. Given the
		 * validation checks that the data entry read is poisoned, it
		 * is OK if we do not check "late arriving" memory poisoning.
		 */
	:: else -> break;
	od;
	/*
	 * Given the reader loops infinitely, let the writer also busy-loop
	 * with progress here, so that with weak fairness we can test the
	 * writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
#ifdef READER_PROGRESS
		/*
		 * Make sure we don't block the reader's progress.
		 */
		smp_mb_send(i, j, 5);
#endif
		skip;
	od;

	/* Non-atomic parts of the loop */
	goto end;
smp_mb_send1:
	smp_mb_send(i, j, 1);
	goto smp_mb_send1_end;
#ifndef GEN_ERROR_WRITER_PROGRESS
smp_mb_send2:
	smp_mb_send(i, j, 2);
	goto smp_mb_send2_end;
smp_mb_send3:
	smp_mb_send(i, j, 3);
	goto smp_mb_send3_end;
#endif
smp_mb_send4:
	smp_mb_send(i, j, 4);
	goto smp_mb_send4_end;
end:
	skip;
}

/* no name clash please */
#undef proc_urcu_writer


/* Leave after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(rcu_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			ptr_read_first[i] = 1;
			ptr_read_second[i] = 1;
			data_read_first[i] = WINE;
			data_read_second[i] = WINE;
			i++;
		:: i >= NR_READERS -> break
		od;
		INIT_CACHED_VAR(rcu_data[0], WINE, j);
		i = 1;
		do
		:: i < SLAB_SIZE ->
			INIT_CACHED_VAR(rcu_data[i], POISON, j);
			i++
		:: i >= SLAB_SIZE -> break
		od;

		init_done = 1;
	}
}