/* formal-model/results/urcu-controldataflow-no-ipi/result-ipi-urcu_free/urcu_free.spin.input */

// Poison value for freed memory
#define POISON 1
// Memory with correct data
#define WINE 0
#define SLAB_SIZE 2

#define read_poison (data_read_first[0] == POISON || data_read_second[0] == POISON)

#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)

//disabled
#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory
 * and out-of-order instruction scheduling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid() (_pid)

#define get_readerid() (get_pid())

/*
 * Produced process control and data flow. Updated after each instruction to
 * show which variables are ready. Uses one-hot bit encoding per variable to
 * save state space. The bits are used as triggers to execute the instructions
 * that take those variables as input. Leaving bits active inhibits instruction
 * execution. This scheme makes instruction disabling and dependency fall-back
 * automatic.
 */

#define CONSUME_TOKENS(state, bits, notbits)			\
	((!(state & (notbits))) && (state & (bits)) == (bits))

#define PRODUCE_TOKENS(state, bits)	\
	state = state | (bits);

#define CLEAR_TOKENS(state, bits)	\
	state = state & ~(bits)

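/*
 * Illustration only (not part of the model): a hypothetical two-instruction
 * sequence where instruction B has a RAW dependency on instruction A. The
 * token bits TOK_A and TOK_B are made up for this sketch.
 *
 *	#define TOK_A	(1 << 0)
 *	#define TOK_B	(1 << 1)
 *
 *	if
 *	:: CONSUME_TOKENS(state, 0, TOK_A) ->		// A not executed yet
 *		// ... execute A ...
 *		PRODUCE_TOKENS(state, TOK_A);
 *	:: CONSUME_TOKENS(state, TOK_A, TOK_B) ->	// B waits for A (RAW)
 *		// ... execute B ...
 *		PRODUCE_TOKENS(state, TOK_B);
 *	fi;
 *
 * Run repeatedly inside a loop, each alternative fires at most once, in any
 * order compatible with the declared dependencies.
 */
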
/*
 * Types of dependency :
 *
 * Data dependency
 *
 * - True dependency, Read-after-Write (RAW)
 *
 * This type of dependency happens when a statement depends on the result of a
 * previous statement. This applies to any statement which needs to read a
 * variable written by a preceding statement.
 *
 * - False dependency, Write-after-Read (WAR)
 *
 * Typically, variable renaming can ensure that this dependency goes away.
 * However, if the statements must read and then write from/to the same variable
 * in the OOO memory model, renaming may be impossible, and therefore this
 * causes a WAR dependency.
 *
 * - Output dependency, Write-after-Write (WAW)
 *
 * Two writes to the same variable in subsequent statements. Variable renaming
 * can ensure this is not needed, but can be required when writing multiple
 * times to the same OOO mem model variable.
 *
 * Control dependency
 *
 * Execution of a given instruction depends on a previous instruction evaluating
 * in a way that allows its execution. E.g. : branches.
 *
 * Useful considerations for joining dependencies after branch
 *
 * - Pre-dominance
 *
 * "We say box i dominates box j if every path (leading from input to output
 * through the diagram) which passes through box j must also pass through box
 * i. Thus box i dominates box j if box j is subordinate to box i in the
 * program."
 *
 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
 * Another classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
 *
 * - Post-dominance
 *
 * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
 * output exchanged. Therefore, i post-dominating j ensures that every path
 * passing by j will pass by i before reaching the output.
 *
 * Other considerations
 *
 * Note about "volatile" keyword dependency : The compiler will order volatile
 * accesses so they appear in the right order on a given CPU. They can be
 * reordered by the CPU instruction scheduling. This therefore cannot be
 * considered as a dependency.
 *
 * References :
 *
 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
 * Kaufmann. ISBN 1-55860-698-X.
 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
 * 1-55860-286-0.
 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
 * Morgan Kaufmann. ISBN 1-55860-320-4.
 */
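
/*
 * Worked example (illustration only; a, b, c, x and y are hypothetical
 * variables, not part of this model):
 *
 *	x = a + b;	// (1)
 *	y = x;		// (2) RAW on x : (2) must read (1)'s write
 *	x = c;		// (3) WAR on x wrt (2), WAW on x wrt (1)
 *
 * With a branch such as "if (x) y = 0;", the write to y is
 * control-dependent on the read of x.
 */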

/*
 * Note about loops and nested calls
 *
 * To keep this model simple, loops expressed in the framework will behave as
 * if there were a core synchronizing instruction between loops. To see the
 * effect of loop unrolling, manually unrolling loops is required. Note that
 * if loops end or start with a core synchronizing instruction, the model is
 * appropriate. Nested calls are not supported.
 */

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
 * both.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

typedef per_proc_bit {
	bit val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bitfield {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bitfield cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)				\
	atomic {					\
		cached_##x.val[get_pid()] = v;		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

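/*
 * Usage sketch (illustration only): for a variable declared with
 * DECLARE_CACHED_VAR(byte, foo), accesses go through the process-local
 * cache:
 *
 *	tmp = READ_CACHED_VAR(foo);	// reads cached_foo.val[_pid]
 *	WRITE_CACHED_VAR(foo, 1);	// writes the local cache entry and
 *					// sets this process' dirty bit
 *
 * The value only reaches mem_foo once a (random or explicit) cache
 * write-back occurs.
 */
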
#define CACHE_WRITE_TO_MEM(x, id)		\
	if					\
	:: IS_CACHE_DIRTY(x, id) ->		\
		mem_##x = cached_##x.val[id];	\
		cache_dirty_##x.bitfield =	\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->				\
		skip				\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;\
	:: else ->			\
		skip			\
	fi;

/*
 * May randomly commit a dirty cache entry to memory, or refresh a clean
 * entry from memory, or do nothing.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
	if				\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip			\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
	if				\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip			\
	fi;

/* Must consume all prior read tokens. All subsequent reads depend on it. */
inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Must consume all prior write tokens. All subsequent writes depend on it. */
inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Synchronization point. Must consume all prior read and write tokens. All
 * subsequent reads and writes depend on it. */
inline smp_mb(i, j)
{
	atomic {
		smp_wmb(i, j);
		smp_rmb(i, j);
	}
}

#ifdef REMOTE_BARRIERS

bit reader_barrier[NR_READERS];

/*
 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
 * because they would add nonexistent core synchronization and would therefore
 * create an incomplete model.
 * Therefore, we model the read-side memory barriers by completely disabling
 * the memory barriers and their dependencies from the read-side. One at a time
 * (in different verification runs), we make a different instruction listen for
 * signals.
 */

#define smp_mb_reader(i, j)

/*
 * Service 0, 1 or many barrier requests.
 */
inline smp_mb_recv(i, j)
{
	do
	:: (reader_barrier[get_readerid()] == 1) ->
		smp_mb(i, j);
		reader_barrier[get_readerid()] = 0;
	:: 1 ->
		/* We choose to ignore the writer's non-progress caused by the
		 * reader ignoring the writer's mb() requests */
#ifdef WRITER_PROGRESS
progress_writer_from_reader:
#endif
		break;
	od;
}

#ifdef WRITER_PROGRESS
//#define PROGRESS_LABEL(progressid)	progress_writer_progid_##progressid:
#define PROGRESS_LABEL(progressid)
#else
#define PROGRESS_LABEL(progressid)
#endif

#define smp_mb_send(i, j, progressid)					\
{									\
	smp_mb(i, j);							\
	i = 0;								\
	do								\
	:: i < NR_READERS ->						\
		reader_barrier[i] = 1;					\
		/*							\
		 * Busy-looping waiting for reader barrier handling is of little\
		 * interest, given the reader has the ability to totally ignore \
		 * barrier requests.					\
		 */							\
PROGRESS_LABEL(progressid)						\
		do							\
		:: (reader_barrier[i] == 1) -> skip;			\
		:: (reader_barrier[i] == 0) -> break;			\
		od;							\
		i++;							\
	:: i >= NR_READERS ->						\
		break							\
	od;								\
	smp_mb(i, j);							\
}

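/*
 * Handshake sketch: smp_mb_send() raises reader_barrier[i] for each reader
 * and spins until that reader clears it; smp_mb_recv() may service any
 * number of pending requests (executing a full smp_mb() each time) or break
 * out and ignore them, modelling a signal that may or may not be delivered
 * at the chosen instruction.
 */
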
#else

#define smp_mb_send(i, j, progressid) smp_mb(i, j)
#define smp_mb_reader smp_mb
#define smp_mb_recv(i, j)

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: currently only one reader */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* RCU data */
DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);

/* RCU pointer */
#if (SLAB_SIZE == 2)
DECLARE_CACHED_VAR(bit, rcu_ptr);
bit ptr_read_first[NR_READERS];
bit ptr_read_second[NR_READERS];
#else
DECLARE_CACHED_VAR(byte, rcu_ptr);
byte ptr_read_first[NR_READERS];
byte ptr_read_second[NR_READERS];
#endif

bit data_read_first[NR_READERS];
bit data_read_second[NR_READERS];

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

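/*
 * ooo_mem() is inserted between instructions so the verifier explores every
 * interleaving of cache write-backs and refreshes, i.e. the out-of-order
 * memory effects a weakly-ordered CPU could exhibit at that point.
 */
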
/*
 * Bit encoding, urcu_reader :
 */

int _proc_urcu_reader;
#define proc_urcu_reader _proc_urcu_reader

/* Body of PROCEDURE_READ_LOCK */
#define READ_PROD_A_READ		(1 << 0)
#define READ_PROD_B_IF_TRUE		(1 << 1)
#define READ_PROD_B_IF_FALSE		(1 << 2)
#define READ_PROD_C_IF_TRUE_READ	(1 << 3)

#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)		\
	:: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->	\
		ooo_mem(i);						\
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);	\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  READ_PROD_A_READ << base,	/* RAW, pre-dominant */	\
			  (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->	\
		if							\
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->			\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);	\
		:: else ->						\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base);	\
		fi;							\
	/* IF TRUE */							\
	:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,	\
			  READ_PROD_C_IF_TRUE_READ << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);			\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_C_IF_TRUE_READ	/* pre-dominant */	\
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);	\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ELSE */							\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_B_IF_FALSE		/* pre-dominant */	\
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],	\
				 tmp + 1);				\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ENDIF */							\
	skip

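/*
 * The tokens above encode rcu_read_lock(), roughly (C-like sketch, not part
 * of the model):
 *
 *	tmp = urcu_active_readers[id];
 *	if (!(tmp & RCU_GP_CTR_NEST_MASK))
 *		urcu_active_readers[id] = urcu_gp_ctr;	// outermost lock
 *	else
 *		urcu_active_readers[id] = tmp + 1;	// nested lock
 */
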
/* Body of PROCEDURE_READ_UNLOCK */
#define READ_PROC_READ_UNLOCK		(1 << 0)

#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken,					\
			  READ_PROC_READ_UNLOCK << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);	\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken					\
			  | (READ_PROC_READ_UNLOCK << base),	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);	\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	skip


#define READ_PROD_NONE			(1 << 0)

/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
#define READ_LOCK_BASE			1
#define READ_LOCK_OUT			(1 << 5)

#define READ_PROC_FIRST_MB		(1 << 6)

/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
#define READ_LOCK_NESTED_BASE		7
#define READ_LOCK_NESTED_OUT		(1 << 11)

#define READ_PROC_READ_GEN		(1 << 12)
#define READ_PROC_ACCESS_GEN		(1 << 13)

/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
#define READ_UNLOCK_NESTED_BASE		14
#define READ_UNLOCK_NESTED_OUT		(1 << 15)

#define READ_PROC_SECOND_MB		(1 << 16)

/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
#define READ_UNLOCK_BASE		17
#define READ_UNLOCK_OUT			(1 << 18)

/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
#define READ_LOCK_UNROLL_BASE		19
#define READ_LOCK_OUT_UNROLL		(1 << 23)

#define READ_PROC_THIRD_MB		(1 << 24)

#define READ_PROC_READ_GEN_UNROLL	(1 << 25)
#define READ_PROC_ACCESS_GEN_UNROLL	(1 << 26)

#define READ_PROC_FOURTH_MB		(1 << 27)

/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
#define READ_UNLOCK_UNROLL_BASE		28
#define READ_UNLOCK_OUT_UNROLL		(1 << 29)


/* Should not include branches */
#define READ_PROC_ALL_TOKENS		(READ_PROD_NONE			\
					| READ_LOCK_OUT			\
					| READ_PROC_FIRST_MB		\
					| READ_LOCK_NESTED_OUT		\
					| READ_PROC_READ_GEN		\
					| READ_PROC_ACCESS_GEN		\
					| READ_UNLOCK_NESTED_OUT	\
					| READ_PROC_SECOND_MB		\
					| READ_UNLOCK_OUT		\
					| READ_LOCK_OUT_UNROLL		\
					| READ_PROC_THIRD_MB		\
					| READ_PROC_READ_GEN_UNROLL	\
					| READ_PROC_ACCESS_GEN_UNROLL	\
					| READ_PROC_FOURTH_MB		\
					| READ_UNLOCK_OUT_UNROLL)

/* Must clear all tokens, including branches */
#define READ_PROC_ALL_TOKENS_CLEAR	((1 << 30) - 1)

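/*
 * The reader tokens occupy bits 0 to 29, so ((1 << 30) - 1) clears every one
 * of them, including the intermediate branch tokens (e.g.
 * READ_PROD_B_IF_TRUE/READ_PROD_B_IF_FALSE shifted by each base) that
 * READ_PROC_ALL_TOKENS deliberately leaves out.
 */
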
inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);

#ifdef NO_MB
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

#ifdef REMOTE_BARRIERS
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

	do
	:: 1 ->

#ifdef REMOTE_BARRIERS
		/*
		 * The signal-based memory barrier is only serviced at points
		 * where the execution order matches program order.
		 */
		if
		:: 1 ->
			atomic {
				if
				:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
						READ_LOCK_OUT | READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
					|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
						READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
					|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
						READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
					|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
						READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
					|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
						READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
					|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
						READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
					|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT,
						READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
					|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
						READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
					|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL,
						READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
					|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
						READ_UNLOCK_OUT_UNROLL)
					|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
						0) ->
					goto non_atomic3;
non_atomic3_end:
					skip;
				fi;
			}
		:: 1 -> skip;
		fi;

		goto non_atomic3_skip;
non_atomic3:
		smp_mb_recv(i, j);
		goto non_atomic3_end;
non_atomic3_skip:

#endif /* REMOTE_BARRIERS */


		atomic {
			if
			PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_LOCK_OUT,	/* post-dominant */
					  READ_PROC_FIRST_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);

			PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
					    READ_LOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB,	/* mb() orders reads */
					  READ_PROC_READ_GEN) ->
				ooo_mem(i);
				ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB	/* mb() orders reads */
					  | READ_PROC_READ_GEN,
					  READ_PROC_ACCESS_GEN) ->
				/* smp_read_barrier_depends */
				goto rmb1;
rmb1_end:
				data_read_first[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);


			/* Note : we remove the nested memory barrier from the read unlock
			 * model, given it is not usually needed. The implementation keeps
			 * the barrier because the performance impact of adding a branch in
			 * the common case does not justify removing it.
			 */

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
					      READ_PROC_FIRST_MB
					      | READ_LOCK_OUT
					      | READ_LOCK_NESTED_OUT,
					      READ_UNLOCK_NESTED_OUT);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_ACCESS_GEN		/* mb() orders reads */
					  | READ_PROC_READ_GEN		/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_LOCK_OUT		/* post-dominant */
					  | READ_LOCK_NESTED_OUT	/* post-dominant */
					  | READ_UNLOCK_NESTED_OUT,
					  READ_PROC_SECOND_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
					      READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT);

			/* Unrolling loop : second consecutive lock */
			/* reading urcu_active_readers, which have been written by
			 * READ_UNLOCK_OUT : RAW */
			PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
					    READ_UNLOCK_OUT		/* RAW */
					    | READ_PROC_SECOND_MB	/* mb() orders reads */
					    | READ_PROC_FIRST_MB	/* mb() orders reads */
					    | READ_LOCK_NESTED_OUT	/* RAW */
					    | READ_LOCK_OUT		/* RAW */
					    | READ_UNLOCK_NESTED_OUT,	/* RAW */
					    READ_LOCK_OUT_UNROLL);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_THIRD_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_READ_GEN_UNROLL) ->
				ooo_mem(i);
				ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL
					  | READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_ACCESS_GEN_UNROLL) ->
				/* smp_read_barrier_depends */
				goto rmb2;
rmb2_end:
				data_read_second[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_ACCESS_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_PROC_THIRD_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_FOURTH_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
					      READ_PROC_FOURTH_MB	/* mb() orders reads */
					      | READ_PROC_THIRD_MB	/* mb() orders reads */
					      | READ_LOCK_OUT_UNROLL	/* RAW */
					      | READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT_UNROLL);
			:: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
	od;
	/*
	 * Dependency between consecutive loops :
	 * RAW dependency on
	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
	 * between loops.
	 * _WHEN THE MB()s are in place_, they add full ordering of the
	 * generation pointer read wrt the active reader count read, which
	 * ensures execution will not spill across loop executions.
	 * However, in the event mb()s are removed (execution using a signal
	 * handler to promote barrier() -> smp_mb()), nothing prevents one loop
	 * from spilling its execution into the other loop's execution.
	 */
	goto end;
rmb1:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb1_end;
rmb2:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb2_end;
end:
	skip;
}



active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader never
		 * blocks anyway. We have to test reader and writer progress
		 * separately; otherwise we could think the writer is making
		 * progress when it is in fact blocked by an always-progressing
		 * reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* no name clash please */
#undef proc_urcu_reader


/* Model the RCU update process. */

/*
 * Bit encoding, urcu_writer :
 * Currently only supports one reader.
 */

int _proc_urcu_writer;
#define proc_urcu_writer _proc_urcu_writer

#define WRITE_PROD_NONE			(1 << 0)

#define WRITE_DATA			(1 << 1)
#define WRITE_PROC_WMB			(1 << 2)
#define WRITE_XCHG_PTR			(1 << 3)

#define WRITE_PROC_FIRST_MB		(1 << 4)

/* first flip */
#define WRITE_PROC_FIRST_READ_GP	(1 << 5)
#define WRITE_PROC_FIRST_WRITE_GP	(1 << 6)
#define WRITE_PROC_FIRST_WAIT		(1 << 7)
#define WRITE_PROC_FIRST_WAIT_LOOP	(1 << 8)

/* second flip */
#define WRITE_PROC_SECOND_READ_GP	(1 << 9)
#define WRITE_PROC_SECOND_WRITE_GP	(1 << 10)
#define WRITE_PROC_SECOND_WAIT		(1 << 11)
#define WRITE_PROC_SECOND_WAIT_LOOP	(1 << 12)

#define WRITE_PROC_SECOND_MB		(1 << 13)

#define WRITE_FREE			(1 << 14)

#define WRITE_PROC_ALL_TOKENS		(WRITE_PROD_NONE		\
					| WRITE_DATA			\
					| WRITE_PROC_WMB		\
					| WRITE_XCHG_PTR		\
					| WRITE_PROC_FIRST_MB		\
					| WRITE_PROC_FIRST_READ_GP	\
					| WRITE_PROC_FIRST_WRITE_GP	\
					| WRITE_PROC_FIRST_WAIT		\
					| WRITE_PROC_SECOND_READ_GP	\
					| WRITE_PROC_SECOND_WRITE_GP	\
					| WRITE_PROC_SECOND_WAIT	\
					| WRITE_PROC_SECOND_MB		\
					| WRITE_FREE)

#define WRITE_PROC_ALL_TOKENS_CLEAR	((1 << 15) - 1)

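/*
 * The writer tokens occupy bits 0 to 14, so ((1 << 15) - 1) clears all of
 * them, including the WAIT_LOOP branch tokens (bits 8 and 12) that
 * WRITE_PROC_ALL_TOKENS deliberately leaves out.
 */
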
/*
 * Mutexes are implied around writer execution. A single writer at a time.
 */
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2, tmpa;
	byte cur_data = 0, old_data, loop_nr = 0;
	byte cur_gp_val = 0;	/*
				 * Keep a local trace of the current parity so
				 * we don't add nonexistent dependencies on the
				 * global GP update. Needed to test the single
				 * flip case.
				 */

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (loop_nr < 3) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		loop_nr = loop_nr + 1;

		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);

#ifdef NO_WMB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
#endif

#ifdef NO_MB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
#endif

#ifdef SINGLE_FLIP
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
		/* For single flip, we need to know the current parity */
		cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
#endif

		do :: 1 ->
		atomic {
		if

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROD_NONE,
				  WRITE_DATA) ->
			ooo_mem(i);
			cur_data = (cur_data + 1) % SLAB_SIZE;
			WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA,
				  WRITE_PROC_WMB) ->
			smp_wmb(i, j);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_WMB,
				  WRITE_XCHG_PTR) ->
			/* rcu_xchg_pointer() */
			atomic {
				old_data = READ_CACHED_VAR(rcu_ptr);
				WRITE_CACHED_VAR(rcu_ptr, cur_data);
			}
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
				  WRITE_PROC_FIRST_MB) ->
			goto smp_mb_send1;
smp_mb_send1_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);

		/* first flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB,
				  WRITE_PROC_FIRST_READ_GP) ->
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP,
				  WRITE_PROC_FIRST_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
#ifndef SINGLE_FLIP
			/* In normal execution, we always start by waiting for
			 * the even parity.
			 */
			cur_gp_val = RCU_GP_CTR_BIT;
#endif
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send2;
smp_mb_send2_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_FIRST_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);

		/* second flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT		/* Control dependency : need to branch out of
								 * the loop to execute the next flip (CHECK) */
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_READ_GP) ->
			ooo_mem(i);
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB
				  | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP,
				  WRITE_PROC_SECOND_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send3;
smp_mb_send3_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_SECOND_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_MB) ->
			goto smp_mb_send4;
smp_mb_send4_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_WMB	/* No dependency on
							 * WRITE_DATA because we
							 * write to a
							 * different location. */
				  | WRITE_PROC_SECOND_MB
				  | WRITE_PROC_FIRST_MB,
				  WRITE_FREE) ->
			WRITE_CACHED_VAR(rcu_data[old_data], POISON);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);

		:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
			break;
		fi;
		}
		od;
		/*
		 * Note : the Promela model adds implicit serialization of the
		 * WRITE_FREE instruction. Normally, it would be permitted to
		 * spill into the next loop execution. Given that the validation
		 * checks whether the data entry read is poisoned, it is OK if
		 * we do not check "late arriving" memory poisoning.
		 */
	:: else -> break;
	od;
	/*
	 * Given that the reader loops infinitely, let the writer also busy-loop
	 * with progress here so, with weak fairness, we can test the writer's
	 * progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;

	/* Non-atomic parts of the loop */
	goto end;
smp_mb_send1:
	smp_mb_send(i, j, 1);
	goto smp_mb_send1_end;
#ifndef GEN_ERROR_WRITER_PROGRESS
smp_mb_send2:
	smp_mb_send(i, j, 2);
	goto smp_mb_send2_end;
smp_mb_send3:
	smp_mb_send(i, j, 3);
	goto smp_mb_send3_end;
#endif
smp_mb_send4:
	smp_mb_send(i, j, 4);
	goto smp_mb_send4_end;
end:
	skip;
}

/* no name clash please */
#undef proc_urcu_writer


/* Leave after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(rcu_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			ptr_read_first[i] = 1;
			ptr_read_second[i] = 1;
			data_read_first[i] = WINE;
			data_read_second[i] = WINE;
			i++;
		:: i >= NR_READERS -> break
		od;
		INIT_CACHED_VAR(rcu_data[0], WINE, j);
		i = 1;
		do
		:: i < SLAB_SIZE ->
			INIT_CACHED_VAR(rcu_data[i], POISON, j);
			i++
		:: i >= SLAB_SIZE -> break
		od;

		init_done = 1;
	}
}
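
/*
 * Initial state summary: rcu_ptr points at rcu_data[0], which holds WINE,
 * while the remaining slab entries start POISONed. urcu_gp_ctr starts at 1:
 * the nest-count bits (RCU_GP_CTR_NEST_MASK) hold 1 and the parity bit
 * (RCU_GP_CTR_BIT) is clear.
 */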