/* formal-model/results/urcu-controldataflow-ipi/urcu_progress_writer_error.spin.input */
#define WRITER_PROGRESS
#define GEN_ERROR_WRITER_PROGRESS

// Poison value for freed memory
#define POISON 1
// Memory with correct data
#define WINE 0
#define SLAB_SIZE 2

#define read_poison	(data_read_first[0] == POISON || data_read_second[0] == POISON)

#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)

/* Read-side barriers are modeled as remote (signal/IPI-based) in this run. */
#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory
 * and out-of-order instruction scheduling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid()	(_pid)

#define get_readerid()	(get_pid())

/*
 * Produced process control and data flow. Updated after each instruction to
 * show which variables are ready. Uses one-hot bit encoding per variable to
 * save state space. The bits are used as triggers to execute the instructions
 * having those variables as input, and they are left active to inhibit
 * re-execution of an instruction. This scheme makes instruction disabling and
 * dependency fall-back automatic.
 */

#define CONSUME_TOKENS(state, bits, notbits)			\
	((!(state & (notbits))) && (state & (bits)) == (bits))

#define PRODUCE_TOKENS(state, bits)	\
	state = state | (bits);

#define CLEAR_TOKENS(state, bits)	\
	state = state & ~(bits)

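/*
 * Illustrative sketch (not part of the model) of how the token macros
 * compose. TOKEN_NONE, TOKEN_A and TOKEN_B are hypothetical names:
 *
 *	:: CONSUME_TOKENS(state, TOKEN_NONE, TOKEN_A) ->
 *		...execute instruction A...
 *		PRODUCE_TOKENS(state, TOKEN_A);
 *	:: CONSUME_TOKENS(state, TOKEN_A, TOKEN_B) ->	// RAW on A's output
 *		...execute instruction B...
 *		PRODUCE_TOKENS(state, TOKEN_B);
 *
 * B may only fire after A has produced its token; guards with disjoint
 * token sets may fire in any order, which is how out-of-order instruction
 * scheduling is modeled.
 */
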
/*
 * Types of dependency :
 *
 * Data dependency
 *
 * - True dependency, Read-after-Write (RAW)
 *
 * This type of dependency happens when a statement depends on the result of a
 * previous statement. This applies to any statement which needs to read a
 * variable written by a preceding statement.
 *
 * - False dependency, Write-after-Read (WAR)
 *
 * Typically, variable renaming can ensure that this dependency goes away.
 * However, if the statements must read and then write from/to the same
 * variable in the OOO memory model, renaming may be impossible, and therefore
 * this causes a WAR dependency.
 *
 * - Output dependency, Write-after-Write (WAW)
 *
 * Two writes to the same variable in subsequent statements. Variable renaming
 * can ensure this is not needed, but can be required when writing multiple
 * times to the same OOO mem model variable.
 *
 * Control dependency
 *
 * Execution of a given instruction depends on a previous instruction
 * evaluating in a way that allows its execution. E.g. : branches.
 *
 * Useful considerations for joining dependencies after branch
 *
 * - Pre-dominance
 *
 * "We say box i dominates box j if every path (leading from input to output
 * through the diagram) which passes through box j must also pass through box
 * i. Thus box i dominates box j if box j is subordinate to box i in the
 * program."
 *
 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
 * Another classic algorithm to compute dominance : Lengauer-Tarjan (in gcc)
 *
 * - Post-dominance
 *
 * Just as pre-dominance, but with the arcs of the data flow inverted, and
 * input vs output exchanged. Therefore, i post-dominating j ensures that
 * every path passing through j will pass through i before reaching the
 * output.
 *
 * Other considerations
 *
 * Note about "volatile" keyword dependency : The compiler will order volatile
 * accesses so they appear in the right order on a given CPU. They can still
 * be reordered by the CPU instruction scheduling. This therefore cannot be
 * considered a dependency.
 *
 * References :
 *
 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
 * Kaufmann. ISBN 1-55860-698-X.
 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
 * 1-55860-286-0.
 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
 * Morgan Kaufmann. ISBN 1-55860-320-4.
 */

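/*
 * Illustrative example (not part of the model) of the dependency types
 * above, for hypothetical variables a, b, x and y:
 *
 *	a = x;		(1)
 *	b = a;		(2) RAW : (2) reads the value written by (1)
 *	a = y;		(3) WAR with (2) and WAW with (1), on variable a
 *
 * The token scheme described above encodes exactly these edges between the
 * model's instructions.
 */
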
/*
 * Note about loops and nested calls
 *
 * To keep this model simple, loops expressed in the framework will behave as
 * if there were a core synchronizing instruction between loops. To see the
 * effect of loop unrolling, manually unrolling loops is required. Note that
 * if loops end or start with a core synchronizing instruction, the model is
 * appropriate.
 * Nested calls are not supported.
 */

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read, respectively);
 * smp_mb forces both.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

typedef per_proc_bit {
	bit val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bitfield {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bitfield cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)				\
	atomic {					\
		cached_##x.val[get_pid()] = v;		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->					\
		skip					\
	fi;

#define CACHE_READ_FROM_MEM(x, id)		\
	if					\
	:: !IS_CACHE_DIRTY(x, id) ->		\
		cached_##x.val[id] = mem_##x;	\
	:: else ->				\
		skip				\
	fi;

/*
 * May or may not propagate a dirty cache line to memory, or refresh a clean
 * one from memory: models random cache updates.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

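/*
 * Illustrative sequence (not part of the model) showing how a write
 * propagates, for a hypothetical cached variable x:
 *
 *	WRITE_CACHED_VAR(x, 1);			  -> local cache, dirty bit set
 *	RANDOM_CACHE_WRITE_TO_MEM(x, get_pid());  -> may or may not reach mem_x
 *	CACHE_WRITE_TO_MEM(x, get_pid());	  -> forced write-back to mem_x
 *
 * Another process only observes the new value once its own cache line is
 * refreshed from mem_x, either randomly or forced by an smp_rmb.
 */
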
/* Must consume all prior read tokens. All subsequent reads depend on it. */
inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Must consume all prior write tokens. All subsequent writes depend on it. */
inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Synchronization point. Must consume all prior read and write tokens. All
 * subsequent reads and writes depend on it. */
inline smp_mb(i, j)
{
	atomic {
		smp_wmb(i, j);
		smp_rmb(i, j);
	}
}

#ifdef REMOTE_BARRIERS

bit reader_barrier[NR_READERS];

/*
 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode,
 * because they would add nonexistent core synchronization and would therefore
 * create an incomplete model.
 * Therefore, we model the read-side memory barriers by completely disabling
 * the memory barriers and their dependencies on the read side. One at a time
 * (in different verification runs), we make a different instruction listen
 * for signals.
 */

#define smp_mb_reader(i, j)

/*
 * Service 0, 1 or many barrier requests.
 */
inline smp_mb_recv(i, j)
{
	do
	:: (reader_barrier[get_readerid()] == 1) ->
		/*
		 * We choose to ignore cycles caused by the writer
		 * busy-looping (waiting for the reader, sending barrier
		 * requests) while the reader always services them without
		 * continuing execution.
		 */
progress_ignoring_mb1:
		smp_mb(i, j);
		reader_barrier[get_readerid()] = 0;
	:: 1 ->
		/*
		 * We choose to ignore the writer's non-progress caused by the
		 * reader ignoring the writer's mb() requests.
		 */
progress_ignoring_mb2:
		break;
	od;
}

//#ifdef WRITER_PROGRESS
//#define PROGRESS_LABEL(progressid)
//#else
//#define PROGRESS_LABEL(progressid)
//#endif

#define PROGRESS_LABEL(progressid)	progress_writer_progid_##progressid:

#define smp_mb_send(i, j, progressid)					\
{									\
	smp_mb(i, j);							\
	i = 0;								\
	do								\
	:: i < NR_READERS ->						\
		reader_barrier[i] = 1;					\
		/*							\
		 * Busy-looping waiting for reader barrier handling is	\
		 * of little interest, given the reader has the ability	\
		 * to totally ignore barrier requests.			\
		 */							\
		do							\
		:: (reader_barrier[i] == 1) ->				\
PROGRESS_LABEL(progressid)						\
			skip;						\
		:: (reader_barrier[i] == 0) -> break;			\
		od;							\
		i++;							\
	:: i >= NR_READERS ->						\
		break							\
	od;								\
	smp_mb(i, j);							\
}

#else

#define smp_mb_send(i, j, progressid)	smp_mb(i, j)
#define smp_mb_reader	smp_mb
#define smp_mb_recv(i, j)

#endif

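/*
 * Sketch (not part of the model) of the remote-barrier handshake used under
 * REMOTE_BARRIERS, reduced to one reader:
 *
 *	writer (smp_mb_send)		reader (smp_mb_recv)
 *	reader_barrier[0] = 1;		:: reader_barrier[0] == 1 ->
 *	do					smp_mb(i, j);
 *	:: reader_barrier[0] == 1 ->		reader_barrier[0] = 0;
 *		skip;	// spin		:: 1 ->
 *	:: reader_barrier[0] == 0 ->		break;	// may ignore request
 *		break;
 *	od;
 */
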
/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note ! currently only one reader */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* RCU data */
DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);

/* RCU pointer */
#if (SLAB_SIZE == 2)
DECLARE_CACHED_VAR(bit, rcu_ptr);
bit ptr_read_first[NR_READERS];
bit ptr_read_second[NR_READERS];
#else
DECLARE_CACHED_VAR(byte, rcu_ptr);
byte ptr_read_first[NR_READERS];
byte ptr_read_second[NR_READERS];
#endif

bit data_read_first[NR_READERS];
bit data_read_second[NR_READERS];

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

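/*
 * Simulate out-of-order memory accesses: for every modeled variable,
 * randomly write dirty cache lines back to memory and randomly refresh clean
 * cache lines from memory. Called between instructions.
 */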
inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/*
 * Bit encoding, urcu_reader :
 */

int _proc_urcu_reader;
#define proc_urcu_reader	_proc_urcu_reader

/* Body of PROCEDURE_READ_LOCK */
#define READ_PROD_A_READ		(1 << 0)
#define READ_PROD_B_IF_TRUE		(1 << 1)
#define READ_PROD_B_IF_FALSE		(1 << 2)
#define READ_PROD_C_IF_TRUE_READ	(1 << 3)

#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)		\
	:: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) -> \
		ooo_mem(i);						\
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  READ_PROD_A_READ << base,	/* RAW, pre-dominant */ \
			  (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) -> \
		if							\
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->			\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base); \
		:: else ->						\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
		fi;							\
	/* IF TRUE */							\
	:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base, \
			  READ_PROD_C_IF_TRUE_READ << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);			\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_C_IF_TRUE_READ	/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ELSE */							\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_B_IF_FALSE		/* pre-dominant */ \
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],	\
				 tmp + 1);				\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies						\
	 * post-dominance */						\
	/* ENDIF */							\
	skip

/* Body of PROCEDURE_READ_UNLOCK */
#define READ_PROC_READ_UNLOCK		(1 << 0)

#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken,					\
			  READ_PROC_READ_UNLOCK << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken					\
			  | (READ_PROC_READ_UNLOCK << base),	/* WAR */ \
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	skip


#define READ_PROD_NONE			(1 << 0)

/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
#define READ_LOCK_BASE			1
#define READ_LOCK_OUT			(1 << 5)

#define READ_PROC_FIRST_MB		(1 << 6)

/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
#define READ_LOCK_NESTED_BASE		7
#define READ_LOCK_NESTED_OUT		(1 << 11)

#define READ_PROC_READ_GEN		(1 << 12)
#define READ_PROC_ACCESS_GEN		(1 << 13)

/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
#define READ_UNLOCK_NESTED_BASE		14
#define READ_UNLOCK_NESTED_OUT		(1 << 15)

#define READ_PROC_SECOND_MB		(1 << 16)

/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
#define READ_UNLOCK_BASE		17
#define READ_UNLOCK_OUT			(1 << 18)

/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
#define READ_LOCK_UNROLL_BASE		19
#define READ_LOCK_OUT_UNROLL		(1 << 23)

#define READ_PROC_THIRD_MB		(1 << 24)

#define READ_PROC_READ_GEN_UNROLL	(1 << 25)
#define READ_PROC_ACCESS_GEN_UNROLL	(1 << 26)

#define READ_PROC_FOURTH_MB		(1 << 27)

/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
#define READ_UNLOCK_UNROLL_BASE		28
#define READ_UNLOCK_OUT_UNROLL		(1 << 29)

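/*
 * Summary of the reader token bit layout, derived from the defines above:
 *
 *	bit 0		READ_PROD_NONE
 *	bits 1-5	first read lock
 *	bit 6		first mb
 *	bits 7-11	nested read lock
 *	bits 12-13	pointer read, data access
 *	bits 14-15	nested read unlock
 *	bit 16		second mb
 *	bits 17-18	read unlock
 *	bits 19-23	unrolled read lock
 *	bit 24		third mb
 *	bits 25-26	unrolled pointer read, data access
 *	bit 27		fourth mb
 *	bits 28-29	unrolled read unlock
 */
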
/* Should not include branches */
#define READ_PROC_ALL_TOKENS		(READ_PROD_NONE			\
					| READ_LOCK_OUT			\
					| READ_PROC_FIRST_MB		\
					| READ_LOCK_NESTED_OUT		\
					| READ_PROC_READ_GEN		\
					| READ_PROC_ACCESS_GEN		\
					| READ_UNLOCK_NESTED_OUT	\
					| READ_PROC_SECOND_MB		\
					| READ_UNLOCK_OUT		\
					| READ_LOCK_OUT_UNROLL		\
					| READ_PROC_THIRD_MB		\
					| READ_PROC_READ_GEN_UNROLL	\
					| READ_PROC_ACCESS_GEN_UNROLL	\
					| READ_PROC_FOURTH_MB		\
					| READ_UNLOCK_OUT_UNROLL)

/* Must clear all tokens, including branches */
#define READ_PROC_ALL_TOKENS_CLEAR	((1 << 30) - 1)

inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);

#ifdef NO_MB
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

#ifdef REMOTE_BARRIERS
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

	do
	:: 1 ->

#ifdef REMOTE_BARRIERS
		/*
		 * Signal-based memory barrier will only execute when the
		 * execution order appears in program order.
		 */
		if
		:: 1 ->
			atomic {
				if
				:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
						READ_LOCK_OUT | READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
						READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
						READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
						READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
						READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
						READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT,
						READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
						READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL,
						READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
						READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
						0) ->
					goto non_atomic3;
non_atomic3_end:
					skip;
				fi;
			}
		fi;

		goto non_atomic3_skip;
non_atomic3:
		smp_mb_recv(i, j);
		goto non_atomic3_end;
non_atomic3_skip:

#endif /* REMOTE_BARRIERS */

		atomic {
			if
			PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_LOCK_OUT,	/* post-dominant */
					  READ_PROC_FIRST_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);

			PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
					    READ_LOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB,	/* mb() orders reads */
					  READ_PROC_READ_GEN) ->
				ooo_mem(i);
				ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB	/* mb() orders reads */
					  | READ_PROC_READ_GEN,
					  READ_PROC_ACCESS_GEN) ->
				/* smp_read_barrier_depends */
				goto rmb1;
rmb1_end:
				data_read_first[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);


			/* Note : we remove the nested memory barrier from the read unlock
			 * model, given it is not usually needed. The implementation has the barrier
			 * because the performance impact added by a branch in the common case does not
			 * justify it.
			 */

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
					      READ_PROC_FIRST_MB
					      | READ_LOCK_OUT
					      | READ_LOCK_NESTED_OUT,
					      READ_UNLOCK_NESTED_OUT);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_ACCESS_GEN		/* mb() orders reads */
					  | READ_PROC_READ_GEN		/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_LOCK_OUT		/* post-dominant */
					  | READ_LOCK_NESTED_OUT	/* post-dominant */
					  | READ_UNLOCK_NESTED_OUT,
					  READ_PROC_SECOND_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
					      READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT);

			/* Unrolling loop : second consecutive lock */
			/* reading urcu_active_readers, which have been written by
			 * READ_UNLOCK_OUT : RAW */
			PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
					    READ_UNLOCK_OUT		/* RAW */
					    | READ_PROC_SECOND_MB	/* mb() orders reads */
					    | READ_PROC_FIRST_MB	/* mb() orders reads */
					    | READ_LOCK_NESTED_OUT	/* RAW */
					    | READ_LOCK_OUT		/* RAW */
					    | READ_UNLOCK_NESTED_OUT,	/* RAW */
					    READ_LOCK_OUT_UNROLL);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_THIRD_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_READ_GEN_UNROLL) ->
				ooo_mem(i);
				ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL
					  | READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_ACCESS_GEN_UNROLL) ->
				/* smp_read_barrier_depends */
				goto rmb2;
rmb2_end:
				data_read_second[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_ACCESS_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_PROC_THIRD_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_FOURTH_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
					      READ_PROC_FOURTH_MB	/* mb() orders reads */
					      | READ_PROC_THIRD_MB	/* mb() orders reads */
					      | READ_LOCK_OUT_UNROLL	/* RAW */
					      | READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT_UNROLL);
			:: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
	od;
	/*
	 * Dependency between consecutive loops :
	 * RAW dependency on
	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
	 * between loops.
	 * _WHEN THE MB()s are in place_, they add full ordering of the
	 * generation pointer read wrt the active reader count read, which
	 * ensures execution will not spill across loop execution.
	 * However, in the event the mb()s are removed (execution using a
	 * signal handler to promote barrier() to smp_mb()), nothing prevents
	 * one loop from spilling its execution into another loop's execution.
	 */
	goto end;
rmb1:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb1_end;
rmb2:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb2_end;
end:
	skip;
}


active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader
		 * never blocks anyway. We have to test reader and writer
		 * progress separately, otherwise we could think the writer is
		 * making progress when it is in fact blocked by an
		 * always-progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* no name clash please */
#undef proc_urcu_reader

/* Model the RCU update process. */

/*
 * Bit encoding, urcu_writer :
 * Currently only supports one reader.
 */

int _proc_urcu_writer;
#define proc_urcu_writer	_proc_urcu_writer

#define WRITE_PROD_NONE			(1 << 0)

#define WRITE_DATA			(1 << 1)
#define WRITE_PROC_WMB			(1 << 2)
#define WRITE_XCHG_PTR			(1 << 3)

#define WRITE_PROC_FIRST_MB		(1 << 4)

/* first flip */
#define WRITE_PROC_FIRST_READ_GP	(1 << 5)
#define WRITE_PROC_FIRST_WRITE_GP	(1 << 6)
#define WRITE_PROC_FIRST_WAIT		(1 << 7)
#define WRITE_PROC_FIRST_WAIT_LOOP	(1 << 8)

/* second flip */
#define WRITE_PROC_SECOND_READ_GP	(1 << 9)
#define WRITE_PROC_SECOND_WRITE_GP	(1 << 10)
#define WRITE_PROC_SECOND_WAIT		(1 << 11)
#define WRITE_PROC_SECOND_WAIT_LOOP	(1 << 12)

#define WRITE_PROC_SECOND_MB		(1 << 13)

#define WRITE_FREE			(1 << 14)

#define WRITE_PROC_ALL_TOKENS		(WRITE_PROD_NONE		\
					| WRITE_DATA			\
					| WRITE_PROC_WMB		\
					| WRITE_XCHG_PTR		\
					| WRITE_PROC_FIRST_MB		\
					| WRITE_PROC_FIRST_READ_GP	\
					| WRITE_PROC_FIRST_WRITE_GP	\
					| WRITE_PROC_FIRST_WAIT		\
					| WRITE_PROC_SECOND_READ_GP	\
					| WRITE_PROC_SECOND_WRITE_GP	\
					| WRITE_PROC_SECOND_WAIT	\
					| WRITE_PROC_SECOND_MB		\
					| WRITE_FREE)

#define WRITE_PROC_ALL_TOKENS_CLEAR	((1 << 15) - 1)

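/*
 * Summary of the writer token bit layout, derived from the defines above:
 *
 *	bit 0		WRITE_PROD_NONE
 *	bits 1-3	data write, wmb, pointer exchange
 *	bit 4		first mb
 *	bits 5-8	first flip : read GP, write GP, wait, wait loop
 *	bits 9-12	second flip : read GP, write GP, wait, wait loop
 *	bit 13		second mb
 *	bit 14		free (poison old data)
 */
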
/*
 * Mutexes are implied around writer execution. A single writer at a time.
 */
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2, tmpa;
	byte cur_data = 0, old_data, loop_nr = 0;
	byte cur_gp_val = 0;	/*
				 * Keep a local trace of the current parity so
				 * we don't add non-existing dependencies on
				 * the global GP update. Needed to test the
				 * single flip case.
				 */

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (loop_nr < 3) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		loop_nr = loop_nr + 1;

		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);

#ifdef NO_WMB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
#endif

#ifdef NO_MB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
#endif

#ifdef SINGLE_FLIP
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
		/* For single flip, we need to know the current parity */
		cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
#endif

		do :: 1 ->
		atomic {
		if

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROD_NONE,
				  WRITE_DATA) ->
			ooo_mem(i);
			cur_data = (cur_data + 1) % SLAB_SIZE;
			WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA,
				  WRITE_PROC_WMB) ->
			smp_wmb(i, j);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_WMB,
				  WRITE_XCHG_PTR) ->
			/* rcu_xchg_pointer() */
			atomic {
				old_data = READ_CACHED_VAR(rcu_ptr);
				WRITE_CACHED_VAR(rcu_ptr, cur_data);
			}
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
				  WRITE_PROC_FIRST_MB) ->
			goto smp_mb_send1;
smp_mb_send1_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);

		/* first flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB,
				  WRITE_PROC_FIRST_READ_GP) ->
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP,
				  WRITE_PROC_FIRST_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
#ifndef SINGLE_FLIP
			/* In normal execution, we are always starting by
			 * waiting for the even parity.
			 */
			cur_gp_val = RCU_GP_CTR_BIT;
#endif
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send2;
smp_mb_send2_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_FIRST_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);

		/* second flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT		/* Control dependency : need to branch out of
								 * the loop to execute the next flip (CHECK) */
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_READ_GP) ->
			ooo_mem(i);
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB
				  | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP,
				  WRITE_PROC_SECOND_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send3;
smp_mb_send3_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_SECOND_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_MB) ->
			goto smp_mb_send4;
smp_mb_send4_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_WMB	/* No dependency on
							 * WRITE_DATA because we
							 * write to a different
							 * location. */
				  | WRITE_PROC_SECOND_MB
				  | WRITE_PROC_FIRST_MB,
				  WRITE_FREE) ->
			WRITE_CACHED_VAR(rcu_data[old_data], POISON);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);

		:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
			break;
		fi;
		}
		od;
		/*
		 * Note : the Promela model adds implicit serialization of the
		 * WRITE_FREE instruction. Normally, it would be permitted to
		 * spill over into the next loop execution. Given that the
		 * validation checks whether the data entry that was read is
		 * poisoned, it is ok if we do not check for "late arriving"
		 * memory poisoning.
		 */
	:: else -> break;
	od;
	/*
	 * Given the reader loops infinitely, let the writer also busy-loop
	 * with progress here, so that, under weak fairness, we can test the
	 * writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
#ifdef READER_PROGRESS
		/*
		 * Make sure we don't block the reader's progress.
		 */
		smp_mb_send(i, j, 5);
#endif
		skip;
	od;

	/* Non-atomic parts of the loop */
	goto end;
smp_mb_send1:
	smp_mb_send(i, j, 1);
	goto smp_mb_send1_end;
#ifndef GEN_ERROR_WRITER_PROGRESS
smp_mb_send2:
	smp_mb_send(i, j, 2);
	goto smp_mb_send2_end;
smp_mb_send3:
	smp_mb_send(i, j, 3);
	goto smp_mb_send3_end;
#endif
smp_mb_send4:
	smp_mb_send(i, j, 4);
	goto smp_mb_send4_end;
end:
	skip;
}

/* no name clash please */
#undef proc_urcu_writer


/* Leave after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(rcu_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			ptr_read_first[i] = 1;
			ptr_read_second[i] = 1;
			data_read_first[i] = WINE;
			data_read_second[i] = WINE;
			i++;
		:: i >= NR_READERS -> break
		od;
		INIT_CACHED_VAR(rcu_data[0], WINE, j);
		i = 1;
		do
		:: i < SLAB_SIZE ->
			INIT_CACHED_VAR(rcu_data[i], POISON, j);
			i++
		:: i >= SLAB_SIZE -> break
		od;

		init_done = 1;
	}
}