
// Poison value for freed memory
#define POISON 1
// Memory with correct data
#define WINE 0
#define SLAB_SIZE 2

#define read_poison	(data_read_first[0] == POISON || data_read_second[0] == POISON)

#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
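
/*
 * Worked example (added for clarity, not part of the original model) : the
 * low 7 bits of a counter snapshot count read-side critical section nesting,
 * while RCU_GP_CTR_BIT holds the grace-period parity. For instance, a reader
 * snapshot of 0x82 (RCU_GP_CTR_BIT | 2) means two nesting levels observed
 * under the flipped parity; (ctr & RCU_GP_CTR_NEST_MASK) extracts the nesting
 * count and (ctr & RCU_GP_CTR_BIT) the parity.
 */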

//disabled
#define REMOTE_BARRIERS

//#define ARCH_ALPHA
#define ARCH_INTEL
//#define ARCH_POWERPC
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory
 * and out-of-order instruction scheduling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid()	(_pid)

#define get_readerid()	(get_pid())

/*
 * Produced process control and data flow. Updated after each instruction to
 * show which variables are ready. Using one-hot bit encoding per variable to
 * save state space. Used as triggers to execute the instructions having those
 * variables as input. Bits are left active to inhibit instruction execution.
 * This scheme makes instruction disabling and dependency fall-back automatic.
 */

#define CONSUME_TOKENS(state, bits, notbits)			\
	((!(state & (notbits))) && (state & (bits)) == (bits))

#define PRODUCE_TOKENS(state, bits)				\
	state = state | (bits);

#define CLEAR_TOKENS(state, bits)				\
	state = state & ~(bits)

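/*
 * Usage sketch (added for clarity; the token names A and B are hypothetical) :
 * with token bits A = (1 << 0) and B = (1 << 1), a guard such as
 *
 *	:: CONSUME_TOKENS(state, A, B) ->
 *		...
 *		PRODUCE_TOKENS(state, B);
 *
 * is executable only once A has been produced and only while B has not, so
 * each modeled instruction fires exactly once, after its dependencies.
 */
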
/*
 * Types of dependency :
 *
 * Data dependency
 *
 * - True dependency, Read-after-Write (RAW)
 *
 * This type of dependency happens when a statement depends on the result of a
 * previous statement. This applies to any statement which needs to read a
 * variable written by a preceding statement.
 *
 * - False dependency, Write-after-Read (WAR)
 *
 * Typically, variable renaming can ensure that this dependency goes away.
 * However, if the statements must read and then write from/to the same
 * variable in the OOO memory model, renaming may be impossible, and therefore
 * this causes a WAR dependency.
 *
 * - Output dependency, Write-after-Write (WAW)
 *
 * Two writes to the same variable in subsequent statements. Variable renaming
 * can ensure this is not needed, but can be required when writing multiple
 * times to the same OOO mem model variable.
 *
 * Control dependency
 *
 * Execution of a given instruction depends on a previous instruction
 * evaluating in a way that allows its execution, e.g. branches.
 *
 * Useful considerations for joining dependencies after branch
 *
 * - Pre-dominance
 *
 * "We say box i dominates box j if every path (leading from input to output
 * through the diagram) which passes through box j must also pass through box
 * i. Thus box i dominates box j if box j is subordinate to box i in the
 * program."
 *
 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
 * Another classic algorithm to compute dominance : Lengauer-Tarjan (in gcc)
 *
 * - Post-dominance
 *
 * Just as pre-dominance, but with the arcs of the data flow inverted, and
 * input vs output exchanged. Therefore, i post-dominating j ensures that
 * every path passing through j will pass through i before reaching the
 * output.
 *
 * Other considerations
 *
 * Note about "volatile" keyword dependency : The compiler will order volatile
 * accesses so they appear in the right order on a given CPU. They can still
 * be reordered by the CPU instruction scheduling. This therefore cannot be
 * considered as a dependency.
 *
 * References :
 *
 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
 * Kaufmann. ISBN 1-55860-698-X.
 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
 * 1-55860-286-0.
 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
 * Morgan Kaufmann. ISBN 1-55860-320-4.
 */
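
/*
 * Minimal illustration (added for clarity, hypothetical variables) of the
 * three data dependency kinds, with a and b memory variables and r1 a
 * register :
 *
 *	r1 = a;		// (1)
 *	b = r1;		// (2) RAW on r1 with (1)
 *	r1 = a + 1;	// (3) WAR on r1 with (2), WAW on r1 with (1)
 */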

/*
 * Note about loops and nested calls
 *
 * To keep this model simple, loops expressed in the framework will behave as
 * if there were a core synchronizing instruction between loops. To see the
 * effect of loop unrolling, manually unrolling loops is required. Note that
 * if loops end or start with a core synchronizing instruction, the model is
 * appropriate. Nested calls are not supported.
 */

/*
 * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
 * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
 * http://www.linuxjournal.com/article/8212
 */
#ifdef ARCH_ALPHA
#define HAVE_OOO_CACHE_READ
#endif

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read, respectively);
 * smp_mb forces both.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

typedef per_proc_bit {
	bit val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bitfield {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bitfield cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)				\
	atomic {					\
		cached_##x.val[get_pid()] = v;		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->					\
		skip					\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;	\
	:: else ->			\
		skip			\
	fi;

/*
 * May randomly propagate a dirty cache entry to memory, or do nothing.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

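/*
 * Usage sketch (added for clarity; variable name foo is hypothetical) : a
 * variable declared with DECLARE_CACHED_VAR(byte, foo) is always accessed
 * through the local cache :
 *
 *	WRITE_CACHED_VAR(foo, 1);		// cache write, marks entry dirty
 *	tmp = READ_CACHED_VAR(foo);		// reads the local cache only
 *	CACHE_WRITE_TO_MEM(foo, get_pid());	// flush to mem_foo if dirty
 *	CACHE_READ_FROM_MEM(foo, get_pid());	// refresh cache if not dirty
 *
 * The RANDOM_* variants let the model checker explore both "flushed" and
 * "not flushed" interleavings, which is how OOO memory is modeled.
 */
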
/* Must consume all prior read tokens. All subsequent reads depend on it. */
inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Must consume all prior write tokens. All subsequent writes depend on it. */
inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/*
 * Synchronization point. Must consume all prior read and write tokens. All
 * subsequent reads and writes depend on it.
 */
inline smp_mb(i)
{
	atomic {
		smp_wmb(i);
		smp_rmb(i);
	}
}

#ifdef REMOTE_BARRIERS

bit reader_barrier[NR_READERS];

/*
 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
 * because they would add nonexistent core synchronization and would therefore
 * create an incomplete model.
 * Therefore, we model the read-side memory barriers by completely disabling
 * the memory barriers and their dependencies from the read-side. One at a
 * time (in different verification runs), we make a different instruction
 * listen for signals.
 */

#define smp_mb_reader(i, j)

/*
 * Service 0, 1 or many barrier requests.
 */
inline smp_mb_recv(i, j)
{
	do
	:: (reader_barrier[get_readerid()] == 1) ->
		/*
		 * We choose to ignore cycles caused by the writer busy-looping,
		 * waiting for the reader, sending barrier requests, while the
		 * reader always services them without continuing execution.
		 */
progress_ignoring_mb1:
		smp_mb(i);
		reader_barrier[get_readerid()] = 0;
	:: 1 ->
		/*
		 * We choose to ignore the writer's non-progress caused by the
		 * reader ignoring the writer's mb() requests.
		 */
progress_ignoring_mb2:
		break;
	od;
}

#define PROGRESS_LABEL(progressid)	progress_writer_progid_##progressid:

#define smp_mb_send(i, j, progressid)					\
{									\
	smp_mb(i);							\
	i = 0;								\
	do								\
	:: i < NR_READERS ->						\
		reader_barrier[i] = 1;					\
		/*							\
		 * Busy-looping waiting for reader barrier handling is	\
		 * of little interest, given the reader has the ability	\
		 * to totally ignore barrier requests.			\
		 */							\
		do							\
		:: (reader_barrier[i] == 1) ->				\
			PROGRESS_LABEL(progressid)			\
			skip;						\
		:: (reader_barrier[i] == 0) -> break;			\
		od;							\
		i++;							\
	:: i >= NR_READERS ->						\
		break							\
	od;								\
	smp_mb(i);							\
}

#else

#define smp_mb_send(i, j, progressid)	smp_mb(i)
#define smp_mb_reader(i, j)	smp_mb(i)
#define smp_mb_recv(i, j)

#endif
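
/*
 * Informal recap (added for clarity) : under REMOTE_BARRIERS, the writer's
 * smp_mb_send() executes a full smp_mb(), raises reader_barrier[i] for each
 * reader and may busy-wait for the reader's smp_mb_recv() to execute its own
 * smp_mb() and clear the flag, mimicking an IPI/signal-based remote barrier.
 * The reader is free to ignore requests, which the progress labels account
 * for.
 */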

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note : currently only one reader */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* RCU data */
DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);

/* RCU pointer */
#if (SLAB_SIZE == 2)
DECLARE_CACHED_VAR(bit, rcu_ptr);
bit ptr_read_first[NR_READERS];
bit ptr_read_second[NR_READERS];
#else
DECLARE_CACHED_VAR(byte, rcu_ptr);
byte ptr_read_first[NR_READERS];
byte ptr_read_second[NR_READERS];
#endif

bit data_read_first[NR_READERS];
bit data_read_second[NR_READERS];

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
#ifdef HAVE_OOO_CACHE_READ
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
#else
		smp_rmb(i);
#endif /* HAVE_OOO_CACHE_READ */
	}
}

/*
 * Bit encoding, urcu_reader :
 */

int _proc_urcu_reader;
#define proc_urcu_reader	_proc_urcu_reader

/* Body of PROCEDURE_READ_LOCK */
#define READ_PROD_A_READ		(1 << 0)
#define READ_PROD_B_IF_TRUE		(1 << 1)
#define READ_PROD_B_IF_FALSE		(1 << 2)
#define READ_PROD_C_IF_TRUE_READ	(1 << 3)

#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)		\
	:: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) -> \
		ooo_mem(i);						\
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  READ_PROD_A_READ << base,	/* RAW, pre-dominant */ \
			  (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) -> \
		if							\
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->			\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base); \
		:: else ->						\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
		fi;							\
	/* IF TRUE */							\
	:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base, \
			  READ_PROD_C_IF_TRUE_READ << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);			\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_C_IF_TRUE_READ	/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ELSE */							\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_B_IF_FALSE		/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],	\
				 tmp + 1);				\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ENDIF */							\
	skip

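/*
 * For reference (informal, added for clarity) : the sequence modeled by
 * PROCEDURE_READ_LOCK corresponds to a rcu_read_lock() of the form
 *
 *	tmp = urcu_active_readers[id];
 *	if (!(tmp & RCU_GP_CTR_NEST_MASK))
 *		urcu_active_readers[id] = urcu_gp_ctr;	// outermost : snapshot GP
 *	else
 *		urcu_active_readers[id] = tmp + 1;	// nested : increment count
 *
 * with each statement guarded by the tokens of the statements it depends on.
 */
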
/* Body of PROCEDURE_READ_UNLOCK */
#define READ_PROC_READ_UNLOCK		(1 << 0)

#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken,					\
			  READ_PROC_READ_UNLOCK << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken					\
			  | (READ_PROC_READ_UNLOCK << base),	/* WAR */ \
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	skip

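/*
 * Likewise (informal, added for clarity), PROCEDURE_READ_UNLOCK models
 *
 *	tmp2 = urcu_active_readers[id];
 *	urcu_active_readers[id] = tmp2 - 1;	// decrement nesting count
 *
 * where the write consumes the counter read as a WAR dependency.
 */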

#define READ_PROD_NONE			(1 << 0)

/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
#define READ_LOCK_BASE			1
#define READ_LOCK_OUT			(1 << 5)

#define READ_PROC_FIRST_MB		(1 << 6)

/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
#define READ_LOCK_NESTED_BASE		7
#define READ_LOCK_NESTED_OUT		(1 << 11)

#define READ_PROC_READ_GEN		(1 << 12)
#define READ_PROC_ACCESS_GEN		(1 << 13)

/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
#define READ_UNLOCK_NESTED_BASE		14
#define READ_UNLOCK_NESTED_OUT		(1 << 15)

#define READ_PROC_SECOND_MB		(1 << 16)

/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
#define READ_UNLOCK_BASE		17
#define READ_UNLOCK_OUT			(1 << 18)

/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
#define READ_LOCK_UNROLL_BASE		19
#define READ_LOCK_OUT_UNROLL		(1 << 23)

#define READ_PROC_THIRD_MB		(1 << 24)

#define READ_PROC_READ_GEN_UNROLL	(1 << 25)
#define READ_PROC_ACCESS_GEN_UNROLL	(1 << 26)

#define READ_PROC_FOURTH_MB		(1 << 27)

/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
#define READ_UNLOCK_UNROLL_BASE		28
#define READ_UNLOCK_OUT_UNROLL		(1 << 29)

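/*
 * Summary of the reader token bit allocation above (added for reference,
 * derived from the defines, not normative) :
 *
 *	bit 0      : READ_PROD_NONE (entry token)
 *	bits 1-5   : PROCEDURE_READ_LOCK
 *	bit 6      : first mb
 *	bits 7-11  : nested PROCEDURE_READ_LOCK
 *	bits 12-13 : pointer read / data access
 *	bits 14-15 : nested PROCEDURE_READ_UNLOCK
 *	bit 16     : second mb
 *	bits 17-18 : PROCEDURE_READ_UNLOCK
 *	bits 19-23 : unrolled PROCEDURE_READ_LOCK
 *	bit 24     : third mb
 *	bits 25-26 : unrolled pointer read / data access
 *	bit 27     : fourth mb
 *	bits 28-29 : unrolled PROCEDURE_READ_UNLOCK
 */
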
/* Should not include branches */
#define READ_PROC_ALL_TOKENS		(READ_PROD_NONE			\
					| READ_LOCK_OUT			\
					| READ_PROC_FIRST_MB		\
					| READ_LOCK_NESTED_OUT		\
					| READ_PROC_READ_GEN		\
					| READ_PROC_ACCESS_GEN		\
					| READ_UNLOCK_NESTED_OUT	\
					| READ_PROC_SECOND_MB		\
					| READ_UNLOCK_OUT		\
					| READ_LOCK_OUT_UNROLL		\
					| READ_PROC_THIRD_MB		\
					| READ_PROC_READ_GEN_UNROLL	\
					| READ_PROC_ACCESS_GEN_UNROLL	\
					| READ_PROC_FOURTH_MB		\
					| READ_UNLOCK_OUT_UNROLL)

/* Must clear all tokens, including branches */
#define READ_PROC_ALL_TOKENS_CLEAR	((1 << 30) - 1)

inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);

#ifdef NO_MB
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

#ifdef REMOTE_BARRIERS
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

	do
	:: 1 ->

#ifdef REMOTE_BARRIERS
		/*
		 * The signal-based memory barrier will only execute when the
		 * execution order appears in program order.
		 */
		if
		:: 1 ->
			atomic {
				if
				:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
						READ_LOCK_OUT | READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
						READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
						READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
						READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
						READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
						READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT,
						READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
						READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL,
						READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
						READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
						0) ->
					goto non_atomic3;
non_atomic3_end:
					skip;
				fi;
			}
		fi;

		goto non_atomic3_skip;
non_atomic3:
		smp_mb_recv(i, j);
		goto non_atomic3_end;
non_atomic3_skip:

#endif /* REMOTE_BARRIERS */

		atomic {
			if
			PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_LOCK_OUT,	/* post-dominant */
					  READ_PROC_FIRST_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);

			PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
					    READ_LOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB,	/* mb() orders reads */
					  READ_PROC_READ_GEN) ->
				ooo_mem(i);
				ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB	/* mb() orders reads */
					  | READ_PROC_READ_GEN,
					  READ_PROC_ACCESS_GEN) ->
				/* smp_read_barrier_depends */
				goto rmb1;
rmb1_end:
				data_read_first[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);


			/*
			 * Note : we remove the nested memory barrier from the
			 * read unlock model, given it is not usually needed.
			 * The implementation keeps the barrier because the
			 * performance impact added by a branch in the common
			 * case does not justify removing it.
			 */

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
					      READ_PROC_FIRST_MB
					      | READ_LOCK_OUT
					      | READ_LOCK_NESTED_OUT,
					      READ_UNLOCK_NESTED_OUT);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_ACCESS_GEN		/* mb() orders reads */
					  | READ_PROC_READ_GEN		/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_LOCK_OUT		/* post-dominant */
					  | READ_LOCK_NESTED_OUT	/* post-dominant */
					  | READ_UNLOCK_NESTED_OUT,
					  READ_PROC_SECOND_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
					      READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT);

			/* Unrolling loop : second consecutive lock */
			/* reading urcu_active_readers, which have been written by
			 * READ_UNLOCK_OUT : RAW */
			PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
					    READ_UNLOCK_OUT		/* RAW */
					    | READ_PROC_SECOND_MB	/* mb() orders reads */
					    | READ_PROC_FIRST_MB	/* mb() orders reads */
					    | READ_LOCK_NESTED_OUT	/* RAW */
					    | READ_LOCK_OUT		/* RAW */
					    | READ_UNLOCK_NESTED_OUT,	/* RAW */
					    READ_LOCK_OUT_UNROLL);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_THIRD_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_READ_GEN_UNROLL) ->
				ooo_mem(i);
				ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL
					  | READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_ACCESS_GEN_UNROLL) ->
				/* smp_read_barrier_depends */
				goto rmb2;
rmb2_end:
				data_read_second[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_ACCESS_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_PROC_THIRD_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_FOURTH_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
					      READ_PROC_FOURTH_MB	/* mb() orders reads */
					      | READ_PROC_THIRD_MB	/* mb() orders reads */
					      | READ_LOCK_OUT_UNROLL	/* RAW */
					      | READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT_UNROLL);
			:: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
	od;
	/*
	 * Dependency between consecutive loops :
	 * RAW dependency on
	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
	 * between loops.
	 * _WHEN THE MB()s are in place_, they add full ordering of the
	 * generation pointer read wrt the active reader count read, which
	 * ensures execution will not spill across loop execution.
	 * However, in the event the mb()s are removed (execution using a
	 * signal handler to promote barrier() -> smp_mb()), nothing prevents
	 * one loop from spilling its execution into the other loop's
	 * execution.
	 */
	goto end;
rmb1:
#ifndef NO_RMB
	smp_rmb(i);
#else
	ooo_mem(i);
#endif
	goto rmb1_end;
rmb2:
#ifndef NO_RMB
	smp_rmb(i);
#else
	ooo_mem(i);
#endif
	goto rmb2_end;
end:
	skip;
}



active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader never
		 * blocks anyway. We have to test for reader/writer progress
		 * separately, otherwise we could think the writer is making
		 * progress when it is in fact blocked by an always progressing
		 * reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* no name clash please */
#undef proc_urcu_reader


/* Model the RCU update process. */

/*
 * Bit encoding, urcu_writer :
 * Currently only supports one reader.
 */

int _proc_urcu_writer;
#define proc_urcu_writer	_proc_urcu_writer

#define WRITE_PROD_NONE			(1 << 0)

#define WRITE_DATA			(1 << 1)
#define WRITE_PROC_WMB			(1 << 2)
#define WRITE_XCHG_PTR			(1 << 3)

#define WRITE_PROC_FIRST_MB		(1 << 4)

/* first flip */
#define WRITE_PROC_FIRST_READ_GP	(1 << 5)
#define WRITE_PROC_FIRST_WRITE_GP	(1 << 6)
#define WRITE_PROC_FIRST_WAIT		(1 << 7)
#define WRITE_PROC_FIRST_WAIT_LOOP	(1 << 8)

/* second flip */
#define WRITE_PROC_SECOND_READ_GP	(1 << 9)
#define WRITE_PROC_SECOND_WRITE_GP	(1 << 10)
#define WRITE_PROC_SECOND_WAIT		(1 << 11)
#define WRITE_PROC_SECOND_WAIT_LOOP	(1 << 12)

#define WRITE_PROC_SECOND_MB		(1 << 13)

#define WRITE_FREE			(1 << 14)

#define WRITE_PROC_ALL_TOKENS		(WRITE_PROD_NONE		\
					| WRITE_DATA			\
					| WRITE_PROC_WMB		\
					| WRITE_XCHG_PTR		\
					| WRITE_PROC_FIRST_MB		\
					| WRITE_PROC_FIRST_READ_GP	\
					| WRITE_PROC_FIRST_WRITE_GP	\
					| WRITE_PROC_FIRST_WAIT		\
					| WRITE_PROC_SECOND_READ_GP	\
					| WRITE_PROC_SECOND_WRITE_GP	\
					| WRITE_PROC_SECOND_WAIT	\
					| WRITE_PROC_SECOND_MB		\
					| WRITE_FREE)

#define WRITE_PROC_ALL_TOKENS_CLEAR	((1 << 15) - 1)
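
/*
 * Summary of the writer token bit allocation above (added for reference,
 * derived from the defines, not normative) : bit 0 entry token; bits 1-3
 * data write, wmb, pointer xchg; bit 4 first mb; bits 5-8 first GP flip and
 * wait; bits 9-12 second GP flip and wait; bit 13 second mb; bit 14 free
 * (poison).
 */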

/*
 * Mutexes are implied around writer execution. A single writer at a time.
 */
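/*
 * Sketch of the update sequence modeled below (informal, added for clarity,
 * derived from the token flow) : write new data, smp_wmb(), xchg the RCU
 * pointer, smp_mb(), flip the grace-period parity and wait for the reader
 * twice, smp_mb(), then poison (free) the old data.
 */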
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2, tmpa;
	byte cur_data = 0, old_data, loop_nr = 0;
	byte cur_gp_val = 0;	/*
				 * Keep a local trace of the current parity so
				 * we don't add non-existing dependencies on
				 * the global GP update. Needed to test the
				 * single flip case.
				 */

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (loop_nr < 3) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		loop_nr = loop_nr + 1;

		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);

#ifdef NO_WMB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
#endif

#ifdef NO_MB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
#endif

#ifdef SINGLE_FLIP
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
		/* For single flip, we need to know the current parity */
		cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
#endif

		do :: 1 ->
		atomic {
		if

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROD_NONE,
				  WRITE_DATA) ->
			ooo_mem(i);
			cur_data = (cur_data + 1) % SLAB_SIZE;
			WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA,
				  WRITE_PROC_WMB) ->
			smp_wmb(i);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_WMB,
				  WRITE_XCHG_PTR) ->
			/* rcu_xchg_pointer() */
			atomic {
				old_data = READ_CACHED_VAR(rcu_ptr);
				WRITE_CACHED_VAR(rcu_ptr, cur_data);
			}
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
				  WRITE_PROC_FIRST_MB) ->
			goto smp_mb_send1;
smp_mb_send1_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);

		/* first flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB,
				  WRITE_PROC_FIRST_READ_GP) ->
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP,
				  WRITE_PROC_FIRST_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
#ifndef SINGLE_FLIP
			/* In normal execution, we are always starting by
			 * waiting for the even parity.
			 */
			cur_gp_val = RCU_GP_CTR_BIT;
#endif
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
			fi;
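
		/*
		 * Worked example of the wait condition above (added for
		 * clarity, values illustrative) : after the first flip, a
		 * reader which entered before the flip may hold tmp2 == 0x01
		 * (nesting 1, old parity) ; with cur_gp_val == 0x80,
		 * (tmp2 & RCU_GP_CTR_NEST_MASK) != 0 and
		 * ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) != 0, so the writer
		 * keeps waiting. A reader which entered after the flip holds
		 * 0x81 : the parity test yields 0 and the writer proceeds.
		 */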

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send2;
smp_mb_send2_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_FIRST_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);

		/* second flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT	/* Control dependency : need to branch out of
							 * the loop to execute the next flip (CHECK) */
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_READ_GP) ->
			ooo_mem(i);
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB
				  | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP,
				  WRITE_PROC_SECOND_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send3;
smp_mb_send3_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_SECOND_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_MB) ->
			goto smp_mb_send4;
smp_mb_send4_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_WMB	/* No dependency on
							 * WRITE_DATA because we
							 * write to a
							 * different location. */
				  | WRITE_PROC_SECOND_MB
				  | WRITE_PROC_FIRST_MB,
				  WRITE_FREE) ->
			WRITE_CACHED_VAR(rcu_data[old_data], POISON);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);

		:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
			break;
		fi;
		}
		od;
		/*
		 * Note : the Promela model adds implicit serialization of the
		 * WRITE_FREE instruction. Normally, it would be permitted to
		 * spill over into the next loop execution. Given the
		 * validation checks that the data entry read is poisoned, it
		 * is ok if we do not check for "late arriving" memory
		 * poisoning.
		 */
	:: else -> break;
	od;
	/*
	 * Given the reader loops infinitely, let the writer also busy-loop
	 * with progress here so, with weak fairness, we can test the writer's
	 * progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
#ifdef READER_PROGRESS
		/*
		 * Make sure we don't block the reader's progress.
		 */
		smp_mb_send(i, j, 5);
#endif
		skip;
	od;

	/* Non-atomic parts of the loop */
	goto end;
smp_mb_send1:
	smp_mb_send(i, j, 1);
	goto smp_mb_send1_end;
#ifndef GEN_ERROR_WRITER_PROGRESS
smp_mb_send2:
	smp_mb_send(i, j, 2);
	goto smp_mb_send2_end;
smp_mb_send3:
	smp_mb_send(i, j, 3);
	goto smp_mb_send3_end;
#endif
smp_mb_send4:
	smp_mb_send(i, j, 4);
	goto smp_mb_send4_end;
end:
	skip;
}

/* no name clash please */
#undef proc_urcu_writer


/* Declared after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(rcu_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			ptr_read_first[i] = 1;
			ptr_read_second[i] = 1;
			data_read_first[i] = WINE;
			data_read_second[i] = WINE;
			i++;
		:: i >= NR_READERS -> break
		od;
		INIT_CACHED_VAR(rcu_data[0], WINE, j);
		i = 1;
		do
		:: i < SLAB_SIZE ->
			INIT_CACHED_VAR(rcu_data[i], POISON, j);
			i++
		:: i >= SLAB_SIZE -> break
		od;

		init_done = 1;
	}
}