#define NO_MB

// Poison value for freed memory
#define POISON 1
// Memory with correct data
#define WINE 0
#define SLAB_SIZE 2

#define read_poison	(data_read_first[0] == POISON || data_read_second[0] == POISON)

#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
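
/*
 * Illustration (added for clarity, not part of the original model): with
 * these values (RCU_GP_CTR_BIT = 128, RCU_GP_CTR_NEST_MASK = 0x7f), a
 * reader counter splits into a nesting count (bits 0-6) and a grace-period
 * parity bit (bit 7). For example, a counter value of 0x83 means parity 1
 * with nesting depth 3.
 */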

//disabled
#define REMOTE_BARRIERS

//#define ARCH_ALPHA
#define ARCH_INTEL
//#define ARCH_POWERPC
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory
 * and out-of-order instruction scheduling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid()	(_pid)

#define get_readerid()	(get_pid())

/*
 * Produced process control and data flow. Updated after each instruction to
 * show which variables are ready. One-hot bit encoding is used per variable
 * to save state space. The bits act as triggers to execute the instructions
 * that take those variables as input; leaving the bits active afterwards
 * inhibits re-execution. This scheme makes instruction disabling and
 * dependency fall-back automatic.
 */

#define CONSUME_TOKENS(state, bits, notbits) \
	((!(state & (notbits))) && (state & (bits)) == (bits))

#define PRODUCE_TOKENS(state, bits) \
	state = state | (bits);

#define CLEAR_TOKENS(state, bits) \
	state = state & ~(bits)

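/*
 * Illustration (added for clarity, not part of the original model): for a
 * hypothetical two-instruction sequence where instruction B reads a value
 * written by instruction A, the token scheme would be used as follows
 * (TOK_A and TOK_B are made-up token names):
 *
 *	#define TOK_A	(1 << 0)
 *	#define TOK_B	(1 << 1)
 *
 *	if
 *	:: CONSUME_TOKENS(state, 0, TOK_A) ->	// A has no input, not done yet
 *		// ... execute A ...
 *		PRODUCE_TOKENS(state, TOK_A);
 *	:: CONSUME_TOKENS(state, TOK_A, TOK_B) -> // B needs A, not done yet
 *		// ... execute B ...
 *		PRODUCE_TOKENS(state, TOK_B);
 *	fi;
 *
 * The verifier explores every interleaving that respects the declared
 * dependencies; CLEAR_TOKENS resets the word so a sequence can run again.
 */
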
/*
 * Types of dependency :
 *
 * Data dependency
 *
 * - True dependency, Read-after-Write (RAW)
 *
 * This type of dependency happens when a statement depends on the result of a
 * previous statement. This applies to any statement which needs to read a
 * variable written by a preceding statement.
 *
 * - False dependency, Write-after-Read (WAR)
 *
 * Typically, variable renaming can ensure that this dependency goes away.
 * However, if a statement must read and then write the same variable in the
 * OOO memory model, renaming may be impossible, and this therefore causes a
 * WAR dependency.
 *
 * - Output dependency, Write-after-Write (WAW)
 *
 * Two writes to the same variable in subsequent statements. Variable renaming
 * can ensure this dependency is not needed, but it can be required when
 * writing multiple times to the same OOO memory model variable.
 *
 * Control dependency
 *
 * Execution of a given instruction depends on a previous instruction
 * evaluating in a way that allows its execution. E.g.: branches.
 *
 * Useful considerations for joining dependencies after a branch
 *
 * - Pre-dominance
 *
 * "We say box i dominates box j if every path (leading from input to output
 * through the diagram) which passes through box j must also pass through box
 * i. Thus box i dominates box j if box j is subordinate to box i in the
 * program."
 *
 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
 * Another classic algorithm for computing dominance: Lengauer-Tarjan (used
 * in gcc).
 *
 * - Post-dominance
 *
 * Same as pre-dominance, but with the arcs of the flow graph inverted and
 * input vs. output exchanged. Therefore, i post-dominating j ensures that
 * every path passing through j will pass through i before reaching the
 * output.
 *
 * Other considerations
 *
 * Note about the "volatile" keyword and dependencies: the compiler will order
 * volatile accesses so they appear in the right order on a given CPU. They
 * can still be reordered by CPU instruction scheduling. This therefore cannot
 * be considered a dependency.
 *
 * References :
 *
 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
 * Kaufmann. ISBN 1-55860-698-X.
 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
 * 1-55860-286-0.
 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
 * Morgan Kaufmann. ISBN 1-55860-320-4.
 */
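
/*
 * Illustration (added for clarity, not part of the original model) of the
 * dependency types on a hypothetical variable x and temporaries r1/r2:
 *
 *	r1 = x;		(1)
 *	x = r2;		(2)  WAR with (1)
 *	x = r1 + 1;	(3)  RAW with (1), WAW with (2)
 *
 * Only (1)->(3) actually carries data; (1)->(2) and (2)->(3) merely
 * constrain the ordering of accesses to the same storage location.
 */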

/*
 * Note about loops and nested calls
 *
 * To keep this model simple, loops expressed in the framework behave as if
 * there were a core synchronizing instruction between iterations. To see the
 * effect of loop unrolling, loops must be unrolled manually. Note that if
 * loops end or start with a core synchronizing instruction, the model is
 * appropriate.
 * Nested calls are not supported.
 */

/*
 * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
 * powerpc, arm) ensure that dependent reads won't be reordered. cf.
 * http://www.linuxjournal.com/article/8212
 */
#ifdef ARCH_ALPHA
#define HAVE_OOO_CACHE_READ
#endif

/*
 * Each process has its own data in its cache. Caches are updated at random
 * times. smp_wmb and smp_rmb force cache updates (write and read,
 * respectively); smp_mb forces both.
 */
161
162typedef per_proc_byte {
163 byte val[NR_PROCS];
164};
165
166typedef per_proc_bit {
167 bit val[NR_PROCS];
168};
169
170/* Bitfield has a maximum of 8 procs */
171typedef per_proc_bitfield {
172 byte bitfield;
173};
174
175#define DECLARE_CACHED_VAR(type, x) \
176 type mem_##x; \
177 per_proc_##type cached_##x; \
178 per_proc_bitfield cache_dirty_##x;
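
/*
 * Illustration (added for clarity, not part of the original model):
 * DECLARE_CACHED_VAR(byte, foo) expands to a backing store plus one cached
 * copy and one dirty flag per process ("foo" is a placeholder name, not a
 * variable of this model):
 *
 *	byte mem_foo;
 *	per_proc_byte cached_foo;
 *	per_proc_bitfield cache_dirty_foo;
 */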

#define INIT_CACHED_VAR(x, v, j) \
	mem_##x = v; \
	cache_dirty_##x.bitfield = 0; \
	j = 0; \
	do \
	:: j < NR_PROCS -> \
		cached_##x.val[j] = v; \
		j++ \
	:: j >= NR_PROCS -> break \
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v) \
	atomic { \
		cached_##x.val[get_pid()] = v; \
		cache_dirty_##x.bitfield = \
			cache_dirty_##x.bitfield | (1 << get_pid()); \
	}

#define CACHE_WRITE_TO_MEM(x, id) \
	if \
	:: IS_CACHE_DIRTY(x, id) -> \
		mem_##x = cached_##x.val[id]; \
		cache_dirty_##x.bitfield = \
			cache_dirty_##x.bitfield & (~(1 << id)); \
	:: else -> \
		skip \
	fi;

#define CACHE_READ_FROM_MEM(x, id) \
	if \
	:: !IS_CACHE_DIRTY(x, id) -> \
		cached_##x.val[id] = mem_##x; \
	:: else -> \
		skip \
	fi;

/*
 * May or may not propagate a dirty cache entry to memory, modelling a
 * random cache write-back.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id) \
	if \
	:: 1 -> CACHE_WRITE_TO_MEM(x, id); \
	:: 1 -> skip \
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id) \
	if \
	:: 1 -> CACHE_READ_FROM_MEM(x, id); \
	:: 1 -> skip \
	fi;
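
/*
 * Note (added for clarity): because both guards of the RANDOM_* macros are
 * "1", the verifier explores both branches at every use, i.e. flushing or
 * refilling the cache entry now or at an arbitrarily later point. This is
 * how arbitrary write-back and refill timing (OOO memory) is modelled.
 */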

/* Must consume all prior read tokens. All subsequent reads depend on it. */
inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Must consume all prior write tokens. All subsequent writes depend on it. */
inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/*
 * Synchronization point. Must consume all prior read and write tokens. All
 * subsequent reads and writes depend on it.
 */
inline smp_mb(i)
{
	atomic {
		smp_wmb(i);
		smp_rmb(i);
	}
}

#ifdef REMOTE_BARRIERS

bit reader_barrier[NR_READERS];

/*
 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode,
 * because they would add nonexistent core synchronization and would
 * therefore yield an incomplete model. We instead model the read-side memory
 * barriers by completely removing the memory barriers and their dependencies
 * from the read side. One at a time (in different verification runs), we
 * make a different instruction listen for barrier signals.
 */

#define smp_mb_reader(i, j)

/*
 * Service 0, 1, or many barrier requests.
 */
inline smp_mb_recv(i, j)
{
	do
	:: (reader_barrier[get_readerid()] == 1) ->
		/*
		 * We choose to ignore cycles caused by the writer
		 * busy-looping: waiting for the reader, sending barrier
		 * requests, while the reader keeps servicing them without
		 * continuing its own execution.
		 */
progress_ignoring_mb1:
		smp_mb(i);
		reader_barrier[get_readerid()] = 0;
	:: 1 ->
		/*
		 * We choose to ignore the writer's non-progress caused by
		 * the reader ignoring the writer's mb() requests.
		 */
progress_ignoring_mb2:
		break;
	od;
}

#define PROGRESS_LABEL(progressid)	progress_writer_progid_##progressid:

#define smp_mb_send(i, j, progressid) \
{ \
	smp_mb(i); \
	i = 0; \
	do \
	:: i < NR_READERS -> \
		reader_barrier[i] = 1; \
		/* \
		 * Busy-looping waiting for reader barrier handling is of \
		 * little interest, given the reader has the ability to \
		 * totally ignore barrier requests. \
		 */ \
		do \
		:: (reader_barrier[i] == 1) -> \
PROGRESS_LABEL(progressid) \
			skip; \
		:: (reader_barrier[i] == 0) -> break; \
		od; \
		i++; \
	:: i >= NR_READERS -> \
		break \
	od; \
	smp_mb(i); \
}
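
/*
 * Illustration (added for clarity, not part of the original model): the
 * writer-side barrier request is a flag handshake with each reader,
 *
 *	writer (smp_mb_send)		reader (smp_mb_recv)
 *	reader_barrier[i] = 1;		reader_barrier[me] == 1 ->
 *	wait reader_barrier[i] == 0		smp_mb(i);
 *	...continue...				reader_barrier[me] = 0;
 *
 * mimicking signal/IPI-based urcu, where the writer waits for each reader
 * to execute a memory barrier in its handler.
 */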

#else

#define smp_mb_send(i, j, progressid)	smp_mb(i)
#define smp_mb_reader(i, j)	smp_mb(i)
#define smp_mb_recv(i, j)

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: currently only one reader */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* RCU data */
DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);

/* RCU pointer */
#if (SLAB_SIZE == 2)
DECLARE_CACHED_VAR(bit, rcu_ptr);
bit ptr_read_first[NR_READERS];
bit ptr_read_second[NR_READERS];
#else
DECLARE_CACHED_VAR(byte, rcu_ptr);
byte ptr_read_first[NR_READERS];
byte ptr_read_second[NR_READERS];
#endif

bit data_read_first[NR_READERS];
bit data_read_second[NR_READERS];

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
#ifdef HAVE_OOO_CACHE_READ
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
#else
		smp_rmb(i);
#endif /* HAVE_OOO_CACHE_READ */
	}
}

/*
 * Bit encoding, urcu_reader :
 */

int _proc_urcu_reader;
#define proc_urcu_reader	_proc_urcu_reader

/* Body of PROCEDURE_READ_LOCK */
#define READ_PROD_A_READ		(1 << 0)
#define READ_PROD_B_IF_TRUE		(1 << 1)
#define READ_PROD_B_IF_FALSE		(1 << 2)
#define READ_PROD_C_IF_TRUE_READ	(1 << 3)

#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken) \
	:: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) -> \
		ooo_mem(i); \
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader, \
			READ_PROD_A_READ << base,	/* RAW, pre-dominant */ \
			(READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) -> \
		if \
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) -> \
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base); \
		:: else -> \
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
		fi; \
	/* IF TRUE */ \
	:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base, \
			READ_PROD_C_IF_TRUE_READ << base) -> \
		ooo_mem(i); \
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader, \
			(READ_PROD_C_IF_TRUE_READ	/* pre-dominant */ \
			| READ_PROD_A_READ) << base,	/* WAR */ \
			producetoken) -> \
		ooo_mem(i); \
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken); \
	/* IF_MERGE implies \
	 * post-dominance */ \
	/* ELSE */ \
	:: CONSUME_TOKENS(proc_urcu_reader, \
			(READ_PROD_B_IF_FALSE		/* pre-dominant */ \
			| READ_PROD_A_READ) << base,	/* WAR */ \
			producetoken) -> \
		ooo_mem(i); \
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], \
			tmp + 1); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken); \
	/* IF_MERGE implies \
	 * post-dominance */ \
	/* ENDIF */ \
	skip
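
/*
 * Illustration (added for clarity, not part of the original model):
 * instantiating the macro with base = READ_LOCK_BASE (1) places its internal
 * tokens at bits 1-4 and expects its output token right after
 * (READ_LOCK_OUT = 1 << 5):
 *
 *	PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
 *
 * A second instantiation with a different base reuses the same body with a
 * disjoint set of token bits.
 */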

/* Body of PROCEDURE_READ_UNLOCK */
#define READ_PROC_READ_UNLOCK		(1 << 0)

#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken) \
	:: CONSUME_TOKENS(proc_urcu_reader, \
			consumetoken, \
			READ_PROC_READ_UNLOCK << base) -> \
		ooo_mem(i); \
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base); \
	:: CONSUME_TOKENS(proc_urcu_reader, \
			consumetoken \
			| (READ_PROC_READ_UNLOCK << base),	/* WAR */ \
			producetoken) -> \
		ooo_mem(i); \
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken); \
	skip


#define READ_PROD_NONE			(1 << 0)

/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
#define READ_LOCK_BASE			1
#define READ_LOCK_OUT			(1 << 5)

#define READ_PROC_FIRST_MB		(1 << 6)

/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
#define READ_LOCK_NESTED_BASE		7
#define READ_LOCK_NESTED_OUT		(1 << 11)

#define READ_PROC_READ_GEN		(1 << 12)
#define READ_PROC_ACCESS_GEN		(1 << 13)

/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
#define READ_UNLOCK_NESTED_BASE		14
#define READ_UNLOCK_NESTED_OUT		(1 << 15)

#define READ_PROC_SECOND_MB		(1 << 16)

/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
#define READ_UNLOCK_BASE		17
#define READ_UNLOCK_OUT			(1 << 18)

/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
#define READ_LOCK_UNROLL_BASE		19
#define READ_LOCK_OUT_UNROLL		(1 << 23)

#define READ_PROC_THIRD_MB		(1 << 24)

#define READ_PROC_READ_GEN_UNROLL	(1 << 25)
#define READ_PROC_ACCESS_GEN_UNROLL	(1 << 26)

#define READ_PROC_FOURTH_MB		(1 << 27)

/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
#define READ_UNLOCK_UNROLL_BASE		28
#define READ_UNLOCK_OUT_UNROLL		(1 << 29)


/* Should not include branches */
#define READ_PROC_ALL_TOKENS		(READ_PROD_NONE \
					| READ_LOCK_OUT \
					| READ_PROC_FIRST_MB \
					| READ_LOCK_NESTED_OUT \
					| READ_PROC_READ_GEN \
					| READ_PROC_ACCESS_GEN \
					| READ_UNLOCK_NESTED_OUT \
					| READ_PROC_SECOND_MB \
					| READ_UNLOCK_OUT \
					| READ_LOCK_OUT_UNROLL \
					| READ_PROC_THIRD_MB \
					| READ_PROC_READ_GEN_UNROLL \
					| READ_PROC_ACCESS_GEN_UNROLL \
					| READ_PROC_FOURTH_MB \
					| READ_UNLOCK_OUT_UNROLL)

/* Must clear all tokens, including branches */
#define READ_PROC_ALL_TOKENS_CLEAR	((1 << 30) - 1)
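
/*
 * Illustration (added for clarity, not part of the original model): the
 * reader token-word layout, as derived from the definitions above:
 *
 *	bit   0     READ_PROD_NONE
 *	bits  1-5   PROCEDURE_READ_LOCK
 *	bit   6     READ_PROC_FIRST_MB
 *	bits  7-11  PROCEDURE_READ_LOCK (nested)
 *	bits 12-13  READ_PROC_READ_GEN, READ_PROC_ACCESS_GEN
 *	bits 14-15  PROCEDURE_READ_UNLOCK (nested)
 *	bit  16     READ_PROC_SECOND_MB
 *	bits 17-18  PROCEDURE_READ_UNLOCK
 *	bits 19-23  PROCEDURE_READ_LOCK (unrolled)
 *	bit  24     READ_PROC_THIRD_MB
 *	bits 25-26  READ_PROC_READ_GEN_UNROLL, READ_PROC_ACCESS_GEN_UNROLL
 *	bit  27     READ_PROC_FOURTH_MB
 *	bits 28-29  PROCEDURE_READ_UNLOCK (unrolled)
 *
 * Hence READ_PROC_ALL_TOKENS_CLEAR = (1 << 30) - 1 covers every token bit.
 */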

inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);

#ifdef NO_MB
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif
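
	/*
	 * Note (added for clarity): producing the memory-barrier tokens up
	 * front marks those barriers as already executed, which removes both
	 * the barrier instructions and the ordering they would enforce from
	 * the model.
	 */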

#ifdef REMOTE_BARRIERS
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

	do
	:: 1 ->

#ifdef REMOTE_BARRIERS
		/*
		 * A signal-based memory barrier can only be serviced at a
		 * point where the instructions already executed form a
		 * prefix of the program order, which is what the token tests
		 * below check.
		 */
		if
		:: 1 ->
			atomic {
				if
				:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
						READ_LOCK_OUT | READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
						READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
						READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
						READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
						READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
						READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT,
						READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
						READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL,
						READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
						READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
						0) ->
					goto non_atomic3;
non_atomic3_end:
					skip;
				fi;
			}
		fi;

		goto non_atomic3_skip;
non_atomic3:
		smp_mb_recv(i, j);
		goto non_atomic3_end;
non_atomic3_skip:

#endif /* REMOTE_BARRIERS */

		atomic {
			if
			PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					READ_LOCK_OUT,		/* post-dominant */
					READ_PROC_FIRST_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);

			PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
					READ_LOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					READ_PROC_FIRST_MB,	/* mb() orders reads */
					READ_PROC_READ_GEN) ->
				ooo_mem(i);
				ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);

			:: CONSUME_TOKENS(proc_urcu_reader,
					READ_PROC_FIRST_MB	/* mb() orders reads */
					| READ_PROC_READ_GEN,
					READ_PROC_ACCESS_GEN) ->
				/* smp_read_barrier_depends */
				goto rmb1;
rmb1_end:
				data_read_first[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);


			/*
			 * Note: we remove the nested memory barrier from the
			 * read-unlock model, given it is not usually needed.
			 * The implementation keeps the barrier because the
			 * performance impact of adding a branch in the common
			 * case does not justify removing it.
			 */

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
					READ_PROC_FIRST_MB
					| READ_LOCK_OUT
					| READ_LOCK_NESTED_OUT,
					READ_UNLOCK_NESTED_OUT);


			:: CONSUME_TOKENS(proc_urcu_reader,
					READ_PROC_ACCESS_GEN		/* mb() orders reads */
					| READ_PROC_READ_GEN		/* mb() orders reads */
					| READ_PROC_FIRST_MB		/* mb() ordered */
					| READ_LOCK_OUT			/* post-dominant */
					| READ_LOCK_NESTED_OUT		/* post-dominant */
					| READ_UNLOCK_NESTED_OUT,
					READ_PROC_SECOND_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
					READ_PROC_SECOND_MB	/* mb() orders reads */
					| READ_PROC_FIRST_MB	/* mb() orders reads */
					| READ_LOCK_NESTED_OUT	/* RAW */
					| READ_LOCK_OUT		/* RAW */
					| READ_UNLOCK_NESTED_OUT,	/* RAW */
					READ_UNLOCK_OUT);

			/* Unrolling loop : second consecutive lock */
			/* reading urcu_active_readers, which have been written by
			 * READ_UNLOCK_OUT : RAW */
			PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
					READ_UNLOCK_OUT		/* RAW */
					| READ_PROC_SECOND_MB	/* mb() orders reads */
					| READ_PROC_FIRST_MB	/* mb() orders reads */
					| READ_LOCK_NESTED_OUT	/* RAW */
					| READ_LOCK_OUT		/* RAW */
					| READ_UNLOCK_NESTED_OUT,	/* RAW */
					READ_LOCK_OUT_UNROLL);


			:: CONSUME_TOKENS(proc_urcu_reader,
					READ_PROC_FIRST_MB	/* mb() ordered */
					| READ_PROC_SECOND_MB	/* mb() ordered */
					| READ_LOCK_OUT_UNROLL	/* post-dominant */
					| READ_LOCK_NESTED_OUT
					| READ_LOCK_OUT
					| READ_UNLOCK_NESTED_OUT
					| READ_UNLOCK_OUT,
					READ_PROC_THIRD_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);

			:: CONSUME_TOKENS(proc_urcu_reader,
					READ_PROC_FIRST_MB	/* mb() orders reads */
					| READ_PROC_SECOND_MB	/* mb() orders reads */
					| READ_PROC_THIRD_MB,	/* mb() orders reads */
					READ_PROC_READ_GEN_UNROLL) ->
				ooo_mem(i);
				ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					READ_PROC_READ_GEN_UNROLL
					| READ_PROC_FIRST_MB	/* mb() orders reads */
					| READ_PROC_SECOND_MB	/* mb() orders reads */
					| READ_PROC_THIRD_MB,	/* mb() orders reads */
					READ_PROC_ACCESS_GEN_UNROLL) ->
				/* smp_read_barrier_depends */
				goto rmb2;
rmb2_end:
				data_read_second[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					READ_PROC_READ_GEN_UNROLL	/* mb() orders reads */
					| READ_PROC_ACCESS_GEN_UNROLL	/* mb() orders reads */
					| READ_PROC_FIRST_MB	/* mb() ordered */
					| READ_PROC_SECOND_MB	/* mb() ordered */
					| READ_PROC_THIRD_MB	/* mb() ordered */
					| READ_LOCK_OUT_UNROLL	/* post-dominant */
					| READ_LOCK_NESTED_OUT
					| READ_LOCK_OUT
					| READ_UNLOCK_NESTED_OUT
					| READ_UNLOCK_OUT,
					READ_PROC_FOURTH_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
					READ_PROC_FOURTH_MB	/* mb() orders reads */
					| READ_PROC_THIRD_MB	/* mb() orders reads */
					| READ_LOCK_OUT_UNROLL	/* RAW */
					| READ_PROC_SECOND_MB	/* mb() orders reads */
					| READ_PROC_FIRST_MB	/* mb() orders reads */
					| READ_LOCK_NESTED_OUT	/* RAW */
					| READ_LOCK_OUT		/* RAW */
					| READ_UNLOCK_NESTED_OUT,	/* RAW */
					READ_UNLOCK_OUT_UNROLL);
			:: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
	od;
	/*
	 * Dependency between consecutive loops :
	 * RAW dependency on
	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
	 * between loops.
	 * _WHEN THE MB()s are in place_, they add full ordering of the
	 * generation pointer read wrt the active reader count read, which
	 * ensures execution will not spill across loop iterations.
	 * However, in the event the mb()s are removed (execution using a
	 * signal handler to promote barrier() -> smp_mb()), nothing prevents
	 * one loop iteration from spilling into the next one's execution.
	 */
	goto end;
rmb1:
#ifndef NO_RMB
	smp_rmb(i);
#else
	ooo_mem(i);
#endif
	goto rmb1_end;
rmb2:
#ifndef NO_RMB
	smp_rmb(i);
#else
	ooo_mem(i);
#endif
	goto rmb2_end;
end:
	skip;
}



active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader
		 * never blocks anyway. We have to test reader and writer
		 * progress separately, otherwise we could think the writer
		 * is making progress when it is in fact blocked by an
		 * always-progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* no name clash please */
#undef proc_urcu_reader


/* Model the RCU update process. */

/*
 * Bit encoding, urcu_writer :
 * Currently only supports one reader.
 */

int _proc_urcu_writer;
#define proc_urcu_writer	_proc_urcu_writer

#define WRITE_PROD_NONE			(1 << 0)

#define WRITE_DATA			(1 << 1)
#define WRITE_PROC_WMB			(1 << 2)
#define WRITE_XCHG_PTR			(1 << 3)

#define WRITE_PROC_FIRST_MB		(1 << 4)

/* first flip */
#define WRITE_PROC_FIRST_READ_GP	(1 << 5)
#define WRITE_PROC_FIRST_WRITE_GP	(1 << 6)
#define WRITE_PROC_FIRST_WAIT		(1 << 7)
#define WRITE_PROC_FIRST_WAIT_LOOP	(1 << 8)

/* second flip */
#define WRITE_PROC_SECOND_READ_GP	(1 << 9)
#define WRITE_PROC_SECOND_WRITE_GP	(1 << 10)
#define WRITE_PROC_SECOND_WAIT		(1 << 11)
#define WRITE_PROC_SECOND_WAIT_LOOP	(1 << 12)

#define WRITE_PROC_SECOND_MB		(1 << 13)

#define WRITE_FREE			(1 << 14)

#define WRITE_PROC_ALL_TOKENS		(WRITE_PROD_NONE \
					| WRITE_DATA \
					| WRITE_PROC_WMB \
					| WRITE_XCHG_PTR \
					| WRITE_PROC_FIRST_MB \
					| WRITE_PROC_FIRST_READ_GP \
					| WRITE_PROC_FIRST_WRITE_GP \
					| WRITE_PROC_FIRST_WAIT \
					| WRITE_PROC_SECOND_READ_GP \
					| WRITE_PROC_SECOND_WRITE_GP \
					| WRITE_PROC_SECOND_WAIT \
					| WRITE_PROC_SECOND_MB \
					| WRITE_FREE)

#define WRITE_PROC_ALL_TOKENS_CLEAR	((1 << 15) - 1)
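
/*
 * Illustration (added for clarity, not part of the original model): writer
 * tokens occupy bits 0-14, so WRITE_PROC_ALL_TOKENS_CLEAR = (1 << 15) - 1
 * clears them all, including the WAIT_LOOP branch bits (8 and 12) that
 * WRITE_PROC_ALL_TOKENS deliberately excludes.
 */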

/*
 * Mutexes are implied around writer execution. A single writer at a time.
 */
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2, tmpa;
	byte cur_data = 0, old_data, loop_nr = 0;
	byte cur_gp_val = 0;	/*
				 * Keep a local trace of the current parity so
				 * we don't add nonexistent dependencies on
				 * the global GP update. Needed to test the
				 * single-flip case.
				 */

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (loop_nr < 3) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		loop_nr = loop_nr + 1;

		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);

#ifdef NO_WMB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
#endif

#ifdef NO_MB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
#endif

#ifdef SINGLE_FLIP
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
		/* For a single flip, we need to know the current parity */
		cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
#endif

		do :: 1 ->
		atomic {
		if

		:: CONSUME_TOKENS(proc_urcu_writer,
				WRITE_PROD_NONE,
				WRITE_DATA) ->
			ooo_mem(i);
			cur_data = (cur_data + 1) % SLAB_SIZE;
			WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);


		:: CONSUME_TOKENS(proc_urcu_writer,
				WRITE_DATA,
				WRITE_PROC_WMB) ->
			smp_wmb(i);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				WRITE_PROC_WMB,
				WRITE_XCHG_PTR) ->
			/* rcu_xchg_pointer() */
			atomic {
				old_data = READ_CACHED_VAR(rcu_ptr);
				WRITE_CACHED_VAR(rcu_ptr, cur_data);
			}
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);

		:: CONSUME_TOKENS(proc_urcu_writer,
				WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
				WRITE_PROC_FIRST_MB) ->
			goto smp_mb_send1;
smp_mb_send1_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);

		/* first flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				WRITE_PROC_FIRST_MB,
				WRITE_PROC_FIRST_READ_GP) ->
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
				| WRITE_PROC_FIRST_READ_GP,
				WRITE_PROC_FIRST_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				//WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
#ifndef SINGLE_FLIP
			/*
			 * In normal execution, we always start by waiting for
			 * the even parity.
			 */
			cur_gp_val = RCU_GP_CTR_BIT;
#endif
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				//WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				WRITE_PROC_FIRST_WRITE_GP
				| WRITE_PROC_FIRST_READ_GP
				| WRITE_PROC_FIRST_WAIT_LOOP
				| WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				| WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send2;
smp_mb_send2_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_FIRST_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);

		/* second flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				WRITE_PROC_FIRST_WAIT	/* Control dependency : need to branch out of
							 * the loop to execute the next flip (CHECK) */
				| WRITE_PROC_FIRST_WRITE_GP
				| WRITE_PROC_FIRST_READ_GP
				| WRITE_PROC_FIRST_MB,
				WRITE_PROC_SECOND_READ_GP) ->
			ooo_mem(i);
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				WRITE_PROC_FIRST_MB
				| WRITE_PROC_WMB
				| WRITE_PROC_FIRST_READ_GP
				| WRITE_PROC_FIRST_WRITE_GP
				| WRITE_PROC_SECOND_READ_GP,
				WRITE_PROC_SECOND_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				//WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				WRITE_PROC_FIRST_WAIT
				| WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				//WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				WRITE_PROC_SECOND_WRITE_GP
				| WRITE_PROC_FIRST_WRITE_GP
				| WRITE_PROC_SECOND_READ_GP
				| WRITE_PROC_FIRST_READ_GP
				| WRITE_PROC_SECOND_WAIT_LOOP
				| WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				| WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send3;
smp_mb_send3_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_SECOND_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);


		:: CONSUME_TOKENS(proc_urcu_writer,
				WRITE_PROC_FIRST_WAIT
				| WRITE_PROC_SECOND_WAIT
				| WRITE_PROC_FIRST_READ_GP
				| WRITE_PROC_SECOND_READ_GP
				| WRITE_PROC_FIRST_WRITE_GP
				| WRITE_PROC_SECOND_WRITE_GP
				| WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				| WRITE_PROC_FIRST_MB,
				WRITE_PROC_SECOND_MB) ->
			goto smp_mb_send4;
smp_mb_send4_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				WRITE_XCHG_PTR
				| WRITE_PROC_FIRST_WAIT
				| WRITE_PROC_SECOND_WAIT
				| WRITE_PROC_WMB	/* No dependency on
							 * WRITE_DATA because we
							 * write to a
							 * different location. */
				| WRITE_PROC_SECOND_MB
				| WRITE_PROC_FIRST_MB,
				WRITE_FREE) ->
			WRITE_CACHED_VAR(rcu_data[old_data], POISON);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);

		:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
			break;
		fi;
		}
		od;
		/*
		 * Note: the Promela model adds implicit serialization of the
		 * WRITE_FREE instruction. Normally, it would be permitted to
		 * spill over into the next loop execution. Given that the
		 * validation checks whether the data entry read is poisoned,
		 * it is OK if we do not check for "late-arriving" memory
		 * poisoning.
		 */
	:: else -> break;
	od;
	/*
	 * Given that the reader loops infinitely, let the writer also
	 * busy-loop with progress here, so that with weak fairness we can
	 * test the writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
#ifdef READER_PROGRESS
		/*
		 * Make sure we don't block the reader's progress.
		 */
		smp_mb_send(i, j, 5);
#endif
		skip;
	od;

	/* Non-atomic parts of the loop */
	goto end;
smp_mb_send1:
	smp_mb_send(i, j, 1);
	goto smp_mb_send1_end;
#ifndef GEN_ERROR_WRITER_PROGRESS
smp_mb_send2:
	smp_mb_send(i, j, 2);
	goto smp_mb_send2_end;
smp_mb_send3:
	smp_mb_send(i, j, 3);
	goto smp_mb_send3_end;
#endif
smp_mb_send4:
	smp_mb_send(i, j, 4);
	goto smp_mb_send4_end;
end:
	skip;
}

/* no name clash please */
#undef proc_urcu_writer


/* Leave after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(rcu_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			ptr_read_first[i] = 1;
			ptr_read_second[i] = 1;
			data_read_first[i] = WINE;
			data_read_second[i] = WINE;
			i++;
		:: i >= NR_READERS -> break
		od;
		INIT_CACHED_VAR(rcu_data[0], WINE, j);
		i = 1;
		do
		:: i < SLAB_SIZE ->
			INIT_CACHED_VAR(rcu_data[i], POISON, j);
			i++
		:: i >= SLAB_SIZE -> break
		od;

		init_done = 1;
	}
}