/*
 * mem.spin: Promela code to validate memory barriers with OOO memory
 * and out-of-order instruction scheduling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid()	(_pid)

#define get_readerid()	(get_pid())
/*
 * Produced process control and data flow. Updated after each instruction to
 * show which variables are ready. Using one-hot bit encoding per variable to
 * save state space. Used as triggers to execute the instructions having those
 * variables as input. Leaving bits active to inhibit instruction execution.
 * This scheme makes instruction disabling and dependency fallback automatic.
 */

#define CONSUME_TOKENS(state, bits, notbits)			\
	((!(state & (notbits))) && (state & (bits)) == (bits))

#define PRODUCE_TOKENS(state, bits)				\
	state = state | (bits);

#define CLEAR_TOKENS(state, bits)				\
	state = state & ~(bits)

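/*
 * Illustrative sketch of how the three token macros combine (not part of
 * the model; TOK_A and TOK_B are hypothetical tokens for a two-instruction
 * sequence where B has a RAW dependency on A):
 *
 *	#define TOK_A	(1 << 0)
 *	#define TOK_B	(1 << 1)
 *	int state = 0;
 *
 *	do
 *	:: CONSUME_TOKENS(state, 0, TOK_A) ->		enabled until A runs
 *		...execute A...
 *		PRODUCE_TOKENS(state, TOK_A);		enables B, disables A
 *	:: CONSUME_TOKENS(state, TOK_A, TOK_B) ->	waits for A, runs once
 *		...execute B...
 *		PRODUCE_TOKENS(state, TOK_B);
 *	:: CONSUME_TOKENS(state, TOK_A | TOK_B, 0) ->
 *		CLEAR_TOKENS(state, TOK_A | TOK_B);	reset for the next pass
 *		break;
 *	od;
 *
 * Leaving TOK_A set after A executes is what both enables B and inhibits a
 * second execution of A; this is the automatic disabling and dependency
 * fallback mentioned above.
 */
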
/*
 * Types of dependency :
 *
 * Data dependency
 *
 * - True dependency, Read-after-Write (RAW)
 *
 * This type of dependency happens when a statement depends on the result of a
 * previous statement. This applies to any statement which needs to read a
 * variable written by a preceding statement.
 *
 * - False dependency, Write-after-Read (WAR)
 *
 * Typically, variable renaming can ensure that this dependency goes away.
 * However, if the statements must read and then write from/to the same
 * variable in the OOO memory model, renaming may be impossible, and therefore
 * this causes a WAR dependency.
 *
 * - Output dependency, Write-after-Write (WAW)
 *
 * Two writes to the same variable in subsequent statements. Variable renaming
 * can ensure this is not needed, but can be required when writing multiple
 * times to the same OOO mem model variable.
 *
 * Control dependency
 *
 * Execution of a given instruction depends on a previous instruction evaluating
 * in a way that allows its execution. E.g. : branches.
 *
 * Useful considerations for joining dependencies after branch
 *
 * - Pre-dominance
 *
 * "We say box i dominates box j if every path (leading from input to output
 * through the diagram) which passes through box j must also pass through box
 * i. Thus box i dominates box j if box j is subordinate to box i in the
 * program."
 *
 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
 * Another classic algorithm for calculating dominance : Lengauer-Tarjan (in gcc)
 *
 * - Post-dominance
 *
 * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
 * output exchanged. Therefore, i post-dominating j ensures that every path
 * passing by j will pass by i before reaching the output.
 *
 * Other considerations
 *
 * Note about "volatile" keyword dependency : The compiler will order volatile
 * accesses so they appear in the right order on a given CPU. They can be
 * reordered by the CPU instruction scheduling. This therefore cannot be
 * considered as a dependency.
 *
 * References :
 *
 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
 * Kaufmann. ISBN 1-55860-698-X.
 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
 * 1-55860-286-0.
 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
 * Morgan Kaufmann. ISBN 1-55860-320-4.
 */

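/*
 * A compact illustration of the dependency types above, written with the
 * cached-variable accessors defined below (sketch only; x and y are
 * hypothetical variables, not part of this model):
 *
 *	tmp = READ_CACHED_VAR(x);
 *	WRITE_CACHED_VAR(y, tmp);	RAW : uses the value read into tmp
 *	WRITE_CACHED_VAR(x, 1);		WAR : overwrites x, read above
 *	WRITE_CACHED_VAR(x, 2);		WAW : overwrites the write just above
 *
 * In the token scheme, each such dependency becomes a token listed in the
 * consumer's "bits" argument, while branches contribute control dependencies
 * through IF_TRUE/IF_FALSE style tokens.
 */
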
/*
 * Note about loops and nested calls
 *
 * To keep this model simple, loops expressed in the framework will behave as if
 * there was a core synchronizing instruction between loops. To see the effect
 * of loop unrolling, manually unrolling loops is required. Note that if loops
 * end or start with a core synchronizing instruction, the model is appropriate.
 * Nested calls are not supported.
 */

/*
 * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
 * powerpc, arm) ensure that dependent reads won't be reordered. cf.
 * http://www.linuxjournal.com/article/8212
 */
#ifdef ARCH_ALPHA
#define HAVE_OOO_CACHE_READ
#endif

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
 * both.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

typedef per_proc_bit {
	bit val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bitfield {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bitfield cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)					\
	atomic {						\
		cached_##x.val[get_pid()] = v;			\
		cache_dirty_##x.bitfield =			\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)				\
	if							\
	:: IS_CACHE_DIRTY(x, id) ->				\
		mem_##x = cached_##x.val[id];			\
		cache_dirty_##x.bitfield =			\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->						\
		skip						\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;	\
	:: else ->			\
		skip			\
	fi;

/*
 * May update other caches if cache is dirty, or not.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

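/*
 * Usage sketch for the cache model above (illustrative only; "foo" is a
 * hypothetical variable, not part of this model):
 *
 *	DECLARE_CACHED_VAR(byte, foo);
 *
 *	WRITE_CACHED_VAR(foo, 1);		writes this CPU's cache, sets dirty bit
 *	CACHE_WRITE_TO_MEM(foo, get_pid());	commits to mem_foo if dirty
 *	CACHE_READ_FROM_MEM(foo, get_pid());	refreshes the cache if not dirty
 *
 * The RANDOM_* variants let the verifier explore, at every call site, both
 * the case where the cache line has been committed/refreshed and the case
 * where it is still stale, which is what models out-of-order memory.
 */
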
/* Must consume all prior read tokens. All subsequent reads depend on it. */
inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Must consume all prior write tokens. All subsequent writes depend on it. */
inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Synchronization point. Must consume all prior read and write tokens. All
 * subsequent reads and writes depend on it. */
inline smp_mb(i)
{
	atomic {
		smp_wmb(i);
		smp_rmb(i);
	}
}

#ifdef REMOTE_BARRIERS

bit reader_barrier[NR_READERS];

/*
 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
 * because they would add nonexistent core synchronization and would therefore
 * create an incomplete model.
 * Therefore, we model the read-side memory barriers by completely disabling
 * the memory barriers and their dependencies on the read-side. One at a time
 * (in different verification runs), we make a different instruction listen for
 * signals.
 */

#define smp_mb_reader(i, j)

/*
 * Service 0, 1 or many barrier requests.
 */
inline smp_mb_recv(i, j)
{
	do
	:: (reader_barrier[get_readerid()] == 1) ->
		/*
		 * We choose to ignore cycles caused by the writer busy-looping,
		 * waiting for the reader and sending barrier requests, while the
		 * reader always services them without continuing execution.
		 */
progress_ignoring_mb1:
		smp_mb(i);
		reader_barrier[get_readerid()] = 0;
	:: 1 ->
		/*
		 * We choose to ignore the writer's non-progress caused by the
		 * reader ignoring the writer's mb() requests.
		 */
progress_ignoring_mb2:
		break;
	od;
}

#define PROGRESS_LABEL(progressid)	progress_writer_progid_##progressid:

#define smp_mb_send(i, j, progressid)					\
{									\
	smp_mb(i);							\
	i = 0;								\
	do								\
	:: i < NR_READERS ->						\
		reader_barrier[i] = 1;					\
		/*							\
		 * Busy-looping waiting for reader barrier handling is of little\
		 * interest, given the reader has the ability to totally ignore	\
		 * barrier requests.					\
		 */							\
		do							\
		:: (reader_barrier[i] == 1) ->				\
PROGRESS_LABEL(progressid)						\
			skip;						\
		:: (reader_barrier[i] == 0) -> break;			\
		od;							\
		i++;							\
	:: i >= NR_READERS ->						\
		break							\
	od;								\
	smp_mb(i);							\
}

#else

#define smp_mb_send(i, j, progressid)	smp_mb(i)
#define smp_mb_reader(i, j)	smp_mb(i)
#define smp_mb_recv(i, j)

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note ! currently only one reader */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* RCU data */
DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);

/* RCU pointer */
#if (SLAB_SIZE == 2)
DECLARE_CACHED_VAR(bit, rcu_ptr);
bit ptr_read_first[NR_READERS];
bit ptr_read_second[NR_READERS];
#else
DECLARE_CACHED_VAR(byte, rcu_ptr);
byte ptr_read_first[NR_READERS];
byte ptr_read_second[NR_READERS];
#endif

bit data_read_first[NR_READERS];
bit data_read_second[NR_READERS];

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
#ifdef HAVE_OOO_CACHE_READ
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
#else
		smp_rmb(i);
#endif /* HAVE_OOO_CACHE_READ */
	}
}

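/*
 * ooo_mem() is placed before each cached-variable access in the processes
 * below: every access may thus observe an arbitrarily stale or arbitrarily
 * fresh view of every variable, bounded only by the barriers. A typical
 * instruction in this model therefore looks like (sketch; SOME_TOKEN is a
 * hypothetical placeholder for the one-hot token the instruction produces):
 *
 *	ooo_mem(i);
 *	tmp = READ_CACHED_VAR(urcu_gp_ctr);
 *	PRODUCE_TOKENS(proc_urcu_reader, SOME_TOKEN);
 */
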
/*
 * Bit encoding, urcu_reader :
 */

int _proc_urcu_reader;
#define proc_urcu_reader	_proc_urcu_reader

/* Body of PROCEDURE_READ_LOCK */
#define READ_PROD_A_READ		(1 << 0)
#define READ_PROD_B_IF_TRUE		(1 << 1)
#define READ_PROD_B_IF_FALSE		(1 << 2)
#define READ_PROD_C_IF_TRUE_READ	(1 << 3)

#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)		\
	:: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) -> \
		ooo_mem(i);						\
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  READ_PROD_A_READ << base,	/* RAW, pre-dominant */ \
			  (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) -> \
		if							\
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->			\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base); \
		:: else ->						\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
		fi;							\
	/* IF TRUE */							\
	:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base, \
			  READ_PROD_C_IF_TRUE_READ << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);			\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_C_IF_TRUE_READ	/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ELSE */							\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_B_IF_FALSE		/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],	\
				 tmp + 1);				\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ENDIF */							\
	skip

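/*
 * Illustrative note (sketch): PROCEDURE_READ_LOCK expands to five guarded
 * options meant to be pasted into an enclosing if...fi. For instance, in the
 * reader below,
 *
 *	PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
 *
 * instantiates the read-lock body over bits 1 to 4 (READ_PROD_A_READ << base
 * through READ_PROD_C_IF_TRUE_READ << base), consuming READ_PROD_NONE and
 * producing READ_LOCK_OUT (bit 5) from whichever branch executes.
 */
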
/* Body of PROCEDURE_READ_UNLOCK */
#define READ_PROC_READ_UNLOCK		(1 << 0)

#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken,					\
			  READ_PROC_READ_UNLOCK << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken					\
			  | (READ_PROC_READ_UNLOCK << base),	/* WAR */ \
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	skip


#define READ_PROD_NONE			(1 << 0)

/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
#define READ_LOCK_BASE			1
#define READ_LOCK_OUT			(1 << 5)

#define READ_PROC_FIRST_MB		(1 << 6)

/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
#define READ_LOCK_NESTED_BASE		7
#define READ_LOCK_NESTED_OUT		(1 << 11)

#define READ_PROC_READ_GEN		(1 << 12)
#define READ_PROC_ACCESS_GEN		(1 << 13)

/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
#define READ_UNLOCK_NESTED_BASE		14
#define READ_UNLOCK_NESTED_OUT		(1 << 15)

#define READ_PROC_SECOND_MB		(1 << 16)

/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
#define READ_UNLOCK_BASE		17
#define READ_UNLOCK_OUT			(1 << 18)

/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
#define READ_LOCK_UNROLL_BASE		19
#define READ_LOCK_OUT_UNROLL		(1 << 23)

#define READ_PROC_THIRD_MB		(1 << 24)

#define READ_PROC_READ_GEN_UNROLL	(1 << 25)
#define READ_PROC_ACCESS_GEN_UNROLL	(1 << 26)

#define READ_PROC_FOURTH_MB		(1 << 27)

/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
#define READ_UNLOCK_UNROLL_BASE		28
#define READ_UNLOCK_OUT_UNROLL		(1 << 29)


/* Should not include branches */
#define READ_PROC_ALL_TOKENS		(READ_PROD_NONE			\
					| READ_LOCK_OUT			\
					| READ_PROC_FIRST_MB		\
					| READ_LOCK_NESTED_OUT		\
					| READ_PROC_READ_GEN		\
					| READ_PROC_ACCESS_GEN		\
					| READ_UNLOCK_NESTED_OUT	\
					| READ_PROC_SECOND_MB		\
					| READ_UNLOCK_OUT		\
					| READ_LOCK_OUT_UNROLL		\
					| READ_PROC_THIRD_MB		\
					| READ_PROC_READ_GEN_UNROLL	\
					| READ_PROC_ACCESS_GEN_UNROLL	\
					| READ_PROC_FOURTH_MB		\
					| READ_UNLOCK_OUT_UNROLL)

/* Must clear all tokens, including branches */
#define READ_PROC_ALL_TOKENS_CLEAR	((1 << 30) - 1)

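/*
 * Resulting reader token layout (sketch derived from the defines above):
 * bit 0 is READ_PROD_NONE; bits 1-5 the first read lock; bit 6 the first
 * mb; bits 7-11 the nested read lock; bits 12-13 the pointer/data reads;
 * bits 14-15 the nested unlock; bit 16 the second mb; bits 17-18 the
 * unlock; bits 19-23 the unrolled lock; bit 24 the third mb; bits 25-26
 * the unrolled reads; bit 27 the fourth mb; bits 28-29 the unrolled
 * unlock. That is 30 bits in total, matching READ_PROC_ALL_TOKENS_CLEAR ==
 * (1 << 30) - 1.
 */
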
inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);

#ifdef NO_MB
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

#ifdef REMOTE_BARRIERS
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

	do
	:: 1 ->

#ifdef REMOTE_BARRIERS
		/*
		 * Signal-based memory barrier will only execute when the
		 * execution order appears in program order.
		 */
		if
		:: 1 ->
			atomic {
				if
				:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
						READ_LOCK_OUT | READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
						READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
						READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
						READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
						READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
						READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT,
						READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
						READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL,
						READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
						READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
						0) ->
					goto non_atomic3;
non_atomic3_end:
					skip;
				fi;
			}
		fi;

		goto non_atomic3_skip;
non_atomic3:
		smp_mb_recv(i, j);
		goto non_atomic3_end;
non_atomic3_skip:

#endif /* REMOTE_BARRIERS */

		atomic {
			if
			PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_LOCK_OUT,	/* post-dominant */
					  READ_PROC_FIRST_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);

			PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
					    READ_LOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB,	/* mb() orders reads */
					  READ_PROC_READ_GEN) ->
				ooo_mem(i);
				ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB	/* mb() orders reads */
					  | READ_PROC_READ_GEN,
					  READ_PROC_ACCESS_GEN) ->
				/* smp_read_barrier_depends */
				goto rmb1;
rmb1_end:
				data_read_first[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);


			/* Note : we remove the nested memory barrier from the read unlock
			 * model, given it is not usually needed. The implementation has the barrier
			 * because the performance impact added by a branch in the common case does not
			 * justify it.
			 */

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
					      READ_PROC_FIRST_MB
					      | READ_LOCK_OUT
					      | READ_LOCK_NESTED_OUT,
					      READ_UNLOCK_NESTED_OUT);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_ACCESS_GEN		/* mb() orders reads */
					  | READ_PROC_READ_GEN		/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_LOCK_OUT		/* post-dominant */
					  | READ_LOCK_NESTED_OUT	/* post-dominant */
					  | READ_UNLOCK_NESTED_OUT,
					  READ_PROC_SECOND_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
					      READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT);

			/* Unrolling loop : second consecutive lock */
			/* reading urcu_active_readers, which has been written by
			 * READ_UNLOCK_OUT : RAW */
			PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
					    READ_UNLOCK_OUT		/* RAW */
					    | READ_PROC_SECOND_MB	/* mb() orders reads */
					    | READ_PROC_FIRST_MB	/* mb() orders reads */
					    | READ_LOCK_NESTED_OUT	/* RAW */
					    | READ_LOCK_OUT		/* RAW */
					    | READ_UNLOCK_NESTED_OUT,	/* RAW */
					    READ_LOCK_OUT_UNROLL);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_THIRD_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_READ_GEN_UNROLL) ->
				ooo_mem(i);
				ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL
					  | READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_ACCESS_GEN_UNROLL) ->
				/* smp_read_barrier_depends */
				goto rmb2;
rmb2_end:
				data_read_second[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_ACCESS_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_PROC_THIRD_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_FOURTH_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
					      READ_PROC_FOURTH_MB	/* mb() orders reads */
					      | READ_PROC_THIRD_MB	/* mb() orders reads */
					      | READ_LOCK_OUT_UNROLL	/* RAW */
					      | READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT_UNROLL);
			:: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
	od;
	/*
	 * Dependency between consecutive loops :
	 * RAW dependency on
	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
	 * between loops.
	 * _WHEN THE MB()s are in place_, they add full ordering of the
	 * generation pointer read wrt the active reader count read, which
	 * ensures execution will not spill across loop execution.
	 * However, in the event mb()s are removed (execution using signal
	 * handler to promote barrier() -> smp_mb()), nothing prevents one loop
	 * from spilling its execution into the other loop's execution.
	 */
	goto end;
rmb1:
#ifndef NO_RMB
	smp_rmb(i);
#else
	ooo_mem(i);
#endif
	goto rmb1_end;
rmb2:
#ifndef NO_RMB
	smp_rmb(i);
#else
	ooo_mem(i);
#endif
	goto rmb2_end;
end:
	skip;
}


active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader never
		 * blocks anyway. We have to test reader and writer progress
		 * separately; otherwise we could think the writer is making
		 * progress when it is in fact blocked by an always-progressing
		 * reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* no name clash please */
#undef proc_urcu_reader


/* Model the RCU update process. */

/*
 * Bit encoding, urcu_writer :
 * Currently only supports one reader.
 */

int _proc_urcu_writer;
#define proc_urcu_writer	_proc_urcu_writer

#define WRITE_PROD_NONE			(1 << 0)

#define WRITE_DATA			(1 << 1)
#define WRITE_PROC_WMB			(1 << 2)
#define WRITE_XCHG_PTR			(1 << 3)

#define WRITE_PROC_FIRST_MB		(1 << 4)

/* first flip */
#define WRITE_PROC_FIRST_READ_GP	(1 << 5)
#define WRITE_PROC_FIRST_WRITE_GP	(1 << 6)
#define WRITE_PROC_FIRST_WAIT		(1 << 7)
#define WRITE_PROC_FIRST_WAIT_LOOP	(1 << 8)

/* second flip */
#define WRITE_PROC_SECOND_READ_GP	(1 << 9)
#define WRITE_PROC_SECOND_WRITE_GP	(1 << 10)
#define WRITE_PROC_SECOND_WAIT		(1 << 11)
#define WRITE_PROC_SECOND_WAIT_LOOP	(1 << 12)

#define WRITE_PROC_SECOND_MB		(1 << 13)

#define WRITE_FREE			(1 << 14)

#define WRITE_PROC_ALL_TOKENS		(WRITE_PROD_NONE		\
					| WRITE_DATA			\
					| WRITE_PROC_WMB		\
					| WRITE_XCHG_PTR		\
					| WRITE_PROC_FIRST_MB		\
					| WRITE_PROC_FIRST_READ_GP	\
					| WRITE_PROC_FIRST_WRITE_GP	\
					| WRITE_PROC_FIRST_WAIT		\
					| WRITE_PROC_SECOND_READ_GP	\
					| WRITE_PROC_SECOND_WRITE_GP	\
					| WRITE_PROC_SECOND_WAIT	\
					| WRITE_PROC_SECOND_MB		\
					| WRITE_FREE)

#define WRITE_PROC_ALL_TOKENS_CLEAR	((1 << 15) - 1)

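/*
 * Writer token layout (sketch derived from the defines above): bit 0 is
 * WRITE_PROD_NONE; bits 1-3 the data write, wmb and pointer exchange; bit 4
 * the first mb; bits 5-8 the first flip and wait; bits 9-12 the second flip
 * and wait; bit 13 the second mb; bit 14 the free. That is 15 bits in total,
 * matching WRITE_PROC_ALL_TOKENS_CLEAR == (1 << 15) - 1.
 */
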
/*
 * Mutexes are implied around writer execution. A single writer at a time.
 */
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2, tmpa;
	byte cur_data = 0, old_data, loop_nr = 0;
	byte cur_gp_val = 0;	/*
				 * Keep a local trace of the current parity so
				 * we don't add nonexistent dependencies on the
				 * global GP update. Needed to test the single
				 * flip case.
				 */

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (loop_nr < 3) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		loop_nr = loop_nr + 1;

		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);

#ifdef NO_WMB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
#endif

#ifdef NO_MB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
#endif

#ifdef SINGLE_FLIP
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
		/* For single flip, we need to know the current parity */
		cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
#endif

		do :: 1 ->
		atomic {
		if

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROD_NONE,
				  WRITE_DATA) ->
			ooo_mem(i);
			cur_data = (cur_data + 1) % SLAB_SIZE;
			WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA,
				  WRITE_PROC_WMB) ->
			smp_wmb(i);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_WMB,
				  WRITE_XCHG_PTR) ->
			/* rcu_xchg_pointer() */
			atomic {
				old_data = READ_CACHED_VAR(rcu_ptr);
				WRITE_CACHED_VAR(rcu_ptr, cur_data);
			}
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
				  WRITE_PROC_FIRST_MB) ->
			goto smp_mb_send1;
smp_mb_send1_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);

		/* first flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB,
				  WRITE_PROC_FIRST_READ_GP) ->
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP,
				  WRITE_PROC_FIRST_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
#ifndef SINGLE_FLIP
			/* In normal execution, we are always starting by
			 * waiting for the even parity.
			 */
			cur_gp_val = RCU_GP_CTR_BIT;
#endif
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
			fi;
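
		/*
		 * Worked sketch of the wait condition above, assuming the
		 * usual encoding (from the DEFINES file, not shown here) where
		 * RCU_GP_CTR_BIT is a single high-order parity bit and
		 * RCU_GP_CTR_NEST_MASK covers the low-order nesting count: the
		 * writer keeps looping while the reader is nested
		 * (tmp2 & RCU_GP_CTR_NEST_MASK != 0) AND still runs in the
		 * parity opposite to cur_gp_val
		 * ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT != 0). A reader with a
		 * zero nesting count, or one that re-entered after the flip
		 * and thus copied the new parity, takes the else branch,
		 * produces WRITE_PROC_FIRST_WAIT, and lets the grace period
		 * advance.
		 */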

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send2;
smp_mb_send2_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_FIRST_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);

		/* second flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT		/* Control dependency : need to branch out of
								 * the loop to execute the next flip (CHECK) */
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_READ_GP) ->
			ooo_mem(i);
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB
				  | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP,
				  WRITE_PROC_SECOND_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send3;
smp_mb_send3_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_SECOND_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_MB) ->
			goto smp_mb_send4;
smp_mb_send4_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_WMB	/* No dependency on
							 * WRITE_DATA because we
							 * write to a
							 * different location. */
				  | WRITE_PROC_SECOND_MB
				  | WRITE_PROC_FIRST_MB,
				  WRITE_FREE) ->
			WRITE_CACHED_VAR(rcu_data[old_data], POISON);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);

		:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
			break;
		fi;
		}
		od;
		/*
		 * Note : the Promela model adds implicit serialization of the
		 * WRITE_FREE instruction. Normally, it would be permitted to
		 * spill over into the next loop execution. Given that the
		 * validation checks that the data entry read is poisoned, it
		 * is OK if we do not check "late arriving" memory poisoning.
		 */
	:: else -> break;
	od;
	/*
	 * Given that the reader loops infinitely, let the writer also
	 * busy-loop with progress here so that, with weak fairness, we can
	 * test the writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
#ifdef READER_PROGRESS
		/*
		 * Make sure we don't block the reader's progress.
		 */
		smp_mb_send(i, j, 5);
#endif
		skip;
	od;

	/* Non-atomic parts of the loop */
	goto end;
smp_mb_send1:
	smp_mb_send(i, j, 1);
	goto smp_mb_send1_end;
#ifndef GEN_ERROR_WRITER_PROGRESS
smp_mb_send2:
	smp_mb_send(i, j, 2);
	goto smp_mb_send2_end;
smp_mb_send3:
	smp_mb_send(i, j, 3);
	goto smp_mb_send3_end;
#endif
smp_mb_send4:
	smp_mb_send(i, j, 4);
	goto smp_mb_send4_end;
end:
	skip;
}

/* no name clash please */
#undef proc_urcu_writer


/* Leave after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(rcu_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			ptr_read_first[i] = 1;
			ptr_read_second[i] = 1;
			data_read_first[i] = WINE;
			data_read_second[i] = WINE;
			i++;
		:: i >= NR_READERS -> break
		od;
		INIT_CACHED_VAR(rcu_data[0], WINE, j);
		i = 1;
		do
		:: i < SLAB_SIZE ->
			INIT_CACHED_VAR(rcu_data[i], POISON, j);
			i++
		:: i >= SLAB_SIZE -> break
		od;

		init_done = 1;
	}
}