/* formal-model/results/urcu-controldataflow-no-ipi/result-ipi-urcu_free/urcu_progress_writer_error.spin.input */
#define WRITER_PROGRESS
#define GEN_ERROR_WRITER_PROGRESS

// Poison value for freed memory
#define POISON 1
// Memory with correct data
#define WINE 0
#define SLAB_SIZE 2

#define read_poison	(data_read_first[0] == POISON || data_read_second[0] == POISON)

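/*
 * read_poison is the error condition for this verification run, presumably
 * wired in through the DEFINES / LTL file "included" below: it fires when a
 * reader observes POISON, i.e. dereferences memory the writer has already
 * freed.
 */
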
#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)

/* Read-side memory barriers are modeled as remote (signal-based) barriers. */
#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory
 * and out-of-order instruction scheduling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid()	(_pid)

#define get_readerid()	(get_pid())

/*
 * Produced process control and data flow. Updated after each instruction to
 * show which variables are ready. Uses one-hot bit encoding per variable to
 * save state space. Used as triggers to execute the instructions having those
 * variables as input. Leaving bits active inhibits instruction execution.
 * This scheme makes instruction disabling and dependency fall-back automatic.
 */

#define CONSUME_TOKENS(state, bits, notbits)			\
	((!(state & (notbits))) && (state & (bits)) == (bits))

#define PRODUCE_TOKENS(state, bits)				\
	state = state | (bits);

#define CLEAR_TOKENS(state, bits)				\
	state = state & ~(bits)

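/*
 * Illustration (not part of the model): two hypothetical instructions A
 * and B, where B has a RAW dependency on A, would be scheduled as:
 *
 *	:: CONSUME_TOKENS(state, TOKEN_NONE, TOKEN_A) ->
 *		(execute A)
 *		PRODUCE_TOKENS(state, TOKEN_A);
 *	:: CONSUME_TOKENS(state, TOKEN_NONE | TOKEN_A, TOKEN_B) ->
 *		(execute B, possible only after A's token is produced)
 *		PRODUCE_TOKENS(state, TOKEN_B);
 *
 * TOKEN_NONE, TOKEN_A and TOKEN_B stand for hypothetical one-hot bits,
 * e.g. (1 << 0), (1 << 1), (1 << 2). The second CONSUME_TOKENS argument
 * lists the tokens an instruction requires; the third lists the tokens
 * that must still be absent, its own output included, which prevents
 * re-execution.
 */
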
/*
 * Types of dependency :
 *
 * Data dependency
 *
 * - True dependency, Read-after-Write (RAW)
 *
 * This type of dependency happens when a statement depends on the result of a
 * previous statement. This applies to any statement which needs to read a
 * variable written by a preceding statement.
 *
 * - False dependency, Write-after-Read (WAR)
 *
 * Typically, variable renaming can ensure that this dependency goes away.
 * However, if the statements must read and then write from/to the same
 * variable in the OOO memory model, renaming may be impossible, and therefore
 * this causes a WAR dependency.
 *
 * - Output dependency, Write-after-Write (WAW)
 *
 * Two writes to the same variable in subsequent statements. Variable renaming
 * can ensure this is not needed, but can be required when writing multiple
 * times to the same OOO mem model variable.
 *
 * Control dependency
 *
 * Execution of a given instruction depends on a previous instruction
 * evaluating in a way that allows its execution. E.g. : branches.
 *
 * Useful considerations for joining dependencies after branch
 *
 * - Pre-dominance
 *
 * "We say box i dominates box j if every path (leading from input to output
 * through the diagram) which passes through box j must also pass through box
 * i. Thus box i dominates box j if box j is subordinate to box i in the
 * program."
 *
 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
 * Another classic algorithm to compute dominance: Lengauer-Tarjan (used in gcc)
 *
 * - Post-dominance
 *
 * Just as pre-dominance, but with the arcs of the data flow inverted, and
 * input vs output exchanged. Therefore, i post-dominating j ensures that every
 * path passing through j will pass through i before reaching the output.
 *
 * Other considerations
 *
 * Note about "volatile" keyword dependency : The compiler will order volatile
 * accesses so they appear in the right order on a given CPU. They can still be
 * reordered by the CPU instruction scheduling. This therefore cannot be
 * considered a dependency.
 *
 * References :
 *
 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
 * Kaufmann. ISBN 1-55860-698-X.
 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
 * 1-55860-286-0.
 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
 * Morgan Kaufmann. ISBN 1-55860-320-4.
 */

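/*
 * Illustration (not part of the model), with hypothetical variables
 * a, b, x and y:
 *
 *	a = x; b = a;	RAW : b depends on the value just read into a
 *	b = a; a = y;	WAR : a must not be overwritten before b reads it
 *	a = x; a = y;	WAW : the final value of a must be y
 */
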
/*
 * Note about loops and nested calls
 *
 * To keep this model simple, loops expressed in the framework will behave as
 * if there were a core synchronizing instruction between loops. To see the
 * effect of loop unrolling, manually unrolling loops is required. Note that
 * if loops end or start with a core synchronizing instruction, the model is
 * appropriate. Nested calls are not supported.
 */

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
 * both.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

typedef per_proc_bit {
	bit val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bitfield {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bitfield cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)				\
	atomic {					\
		cached_##x.val[get_pid()] = v;		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->					\
		skip					\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;\
	:: else ->			\
		skip			\
	fi;

/*
 * May nondeterministically commit a dirty cache entry to memory, or refresh
 * a clean entry from memory, modeling random cache activity.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
	if				\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip			\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
	if				\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip			\
	fi;

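/*
 * Illustration (not part of the model): a cached write becomes visible to
 * another process only after a write-back plus that process's refresh:
 *
 *	WRITE_CACHED_VAR(rcu_ptr, 1);		marks proc 0's entry dirty
 *	CACHE_WRITE_TO_MEM(rcu_ptr, 0);		commits it to mem_rcu_ptr
 *	CACHE_READ_FROM_MEM(rcu_ptr, 1);	proc 1 picks up the new value
 *
 * Until then, READ_CACHED_VAR(rcu_ptr) on proc 1 still returns the stale
 * value; this is how the model produces out-of-order memory effects.
 */
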
/* Must consume all prior read tokens. All subsequent reads depend on it. */
inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Must consume all prior write tokens. All subsequent writes depend on it. */
inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/*
 * Synchronization point. Must consume all prior read and write tokens. All
 * subsequent reads and writes depend on it.
 */
inline smp_mb(i, j)
{
	atomic {
		smp_wmb(i, j);
		smp_rmb(i, j);
	}
}

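/*
 * Illustration (not part of the model): making a cached write visible
 * before a subsequent read requires a full barrier in between, e.g.:
 *
 *	WRITE_CACHED_VAR(urcu_gp_ctr, 1);
 *	smp_mb(i, j);	commits all dirty lines, then refreshes clean ones
 *	tmp = READ_CACHED_VAR(urcu_active_readers[0]);
 *
 * Without the barrier, the read may return a stale cached value while the
 * write sits uncommitted in this process's cache.
 */
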
#ifdef REMOTE_BARRIERS

bit reader_barrier[NR_READERS];

/*
 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
 * because they would add non-existent core synchronization and would
 * therefore create an incomplete model.
 * Therefore, we model the read-side memory barriers by completely removing
 * the memory barriers and their dependencies from the read side. One at a
 * time (in different verification runs), we make a different instruction
 * listen for signals.
 */

#define smp_mb_reader(i, j)

/*
 * Service 0, 1 or many barrier requests.
 */
inline smp_mb_recv(i, j)
{
	do
	:: (reader_barrier[get_readerid()] == 1) ->
		smp_mb(i, j);
		reader_barrier[get_readerid()] = 0;
	:: 1 ->
		/*
		 * We choose to ignore the writer's non-progress caused by the
		 * reader ignoring the writer's mb() requests.
		 */
#ifdef WRITER_PROGRESS
progress_writer_from_reader:
#endif
		break;
	od;
}

#ifdef WRITER_PROGRESS
//#define PROGRESS_LABEL(progressid)	progress_writer_progid_##progressid:
#define PROGRESS_LABEL(progressid)
#else
#define PROGRESS_LABEL(progressid)
#endif

#define smp_mb_send(i, j, progressid)						\
{										\
	smp_mb(i, j);								\
	i = 0;									\
	do									\
	:: i < NR_READERS ->							\
		reader_barrier[i] = 1;						\
		/*								\
		 * Busy-looping waiting for reader barrier handling is of little\
		 * interest, given the reader has the ability to totally ignore	\
		 * barrier requests.						\
		 */								\
		PROGRESS_LABEL(progressid)					\
		do								\
		:: (reader_barrier[i] == 1) -> skip;				\
		:: (reader_barrier[i] == 0) -> break;				\
		od;								\
		i++;								\
	:: i >= NR_READERS ->							\
		break								\
	od;									\
	smp_mb(i, j);								\
}

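/*
 * Summary of the handshake modeled above: smp_mb_send() executes a local
 * smp_mb(), raises reader_barrier[i] for each reader, busy-waits until the
 * flag is cleared, then executes a second smp_mb(). smp_mb_recv() may
 * service any number of requests (running smp_mb() and clearing the flag),
 * but may also break out and ignore them, which is why the writer's
 * busy-wait cannot be counted on for progress.
 */
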
#else

#define smp_mb_send(i, j, progressid)	smp_mb(i, j)
#define smp_mb_reader	smp_mb
#define smp_mb_recv(i, j)

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: currently only one reader */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* RCU data */
DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);

/* RCU pointer */
#if (SLAB_SIZE == 2)
DECLARE_CACHED_VAR(bit, rcu_ptr);
bit ptr_read_first[NR_READERS];
bit ptr_read_second[NR_READERS];
#else
DECLARE_CACHED_VAR(byte, rcu_ptr);
byte ptr_read_first[NR_READERS];
byte ptr_read_second[NR_READERS];
#endif

bit data_read_first[NR_READERS];
bit data_read_second[NR_READERS];

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

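/*
 * ooo_mem() nondeterministically commits and refreshes every cached
 * variable. The model invokes it before memory accesses so the verifier
 * explores all orderings an out-of-order memory system could produce
 * between two core-synchronizing instructions.
 */
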
/*
 * Bit encoding, urcu_reader :
 */

int _proc_urcu_reader;
#define proc_urcu_reader	_proc_urcu_reader

/* Body of PROCEDURE_READ_LOCK */
#define READ_PROD_A_READ		(1 << 0)
#define READ_PROD_B_IF_TRUE		(1 << 1)
#define READ_PROD_B_IF_FALSE		(1 << 2)
#define READ_PROD_C_IF_TRUE_READ	(1 << 3)

#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)			\
	:: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->	\
		ooo_mem(i);							\
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);	\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);	\
	:: CONSUME_TOKENS(proc_urcu_reader,					\
			  READ_PROD_A_READ << base,	/* RAW, pre-dominant */	\
			  (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) -> \
		if								\
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->				\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);	\
		:: else ->							\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base);	\
		fi;								\
	/* IF TRUE */								\
	:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,	\
			  READ_PROD_C_IF_TRUE_READ << base) ->			\
		ooo_mem(i);							\
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);				\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);	\
	:: CONSUME_TOKENS(proc_urcu_reader,					\
			  (READ_PROD_C_IF_TRUE_READ	/* pre-dominant */	\
			  | READ_PROD_A_READ) << base,	/* WAR */		\
			  producetoken) ->					\
		ooo_mem(i);							\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);	\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);			\
	/* IF_MERGE implies							\
	 * post-dominance */							\
	/* ELSE */								\
	:: CONSUME_TOKENS(proc_urcu_reader,					\
			  (READ_PROD_B_IF_FALSE		/* pre-dominant */	\
			  | READ_PROD_A_READ) << base,	/* WAR */		\
			  producetoken) ->					\
		ooo_mem(i);							\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],		\
				 tmp + 1);					\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);			\
	/* IF_MERGE implies							\
	 * post-dominance */							\
	/* ENDIF */								\
	skip

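/*
 * In program order, the lock body above performs:
 *
 *	tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
 *	if outermost, i.e. !(tmp & RCU_GP_CTR_NEST_MASK):
 *		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
 *		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);
 *	else:
 *		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp + 1);
 *
 * The token chain only enforces these dependencies; the verifier remains
 * free to interleave the independent steps with other instructions.
 */
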
/* Body of PROCEDURE_READ_UNLOCK */
#define READ_PROC_READ_UNLOCK		(1 << 0)

#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)		\
	:: CONSUME_TOKENS(proc_urcu_reader,					\
			  consumetoken,						\
			  READ_PROC_READ_UNLOCK << base) ->			\
		ooo_mem(i);							\
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);	\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base); \
	:: CONSUME_TOKENS(proc_urcu_reader,					\
			  consumetoken						\
			  | (READ_PROC_READ_UNLOCK << base),	/* WAR */	\
			  producetoken) ->					\
		ooo_mem(i);							\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1); \
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);			\
	skip

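/*
 * In program order, the unlock body above performs:
 *
 *	tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
 *	WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
 *
 * i.e. it decrements the per-reader nesting count.
 */
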
#define READ_PROD_NONE			(1 << 0)

/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
#define READ_LOCK_BASE			1
#define READ_LOCK_OUT			(1 << 5)

#define READ_PROC_FIRST_MB		(1 << 6)

/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
#define READ_LOCK_NESTED_BASE		7
#define READ_LOCK_NESTED_OUT		(1 << 11)

#define READ_PROC_READ_GEN		(1 << 12)
#define READ_PROC_ACCESS_GEN		(1 << 13)

/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
#define READ_UNLOCK_NESTED_BASE		14
#define READ_UNLOCK_NESTED_OUT		(1 << 15)

#define READ_PROC_SECOND_MB		(1 << 16)

/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
#define READ_UNLOCK_BASE		17
#define READ_UNLOCK_OUT			(1 << 18)

/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
#define READ_LOCK_UNROLL_BASE		19
#define READ_LOCK_OUT_UNROLL		(1 << 23)

#define READ_PROC_THIRD_MB		(1 << 24)

#define READ_PROC_READ_GEN_UNROLL	(1 << 25)
#define READ_PROC_ACCESS_GEN_UNROLL	(1 << 26)

#define READ_PROC_FOURTH_MB		(1 << 27)

/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
#define READ_UNLOCK_UNROLL_BASE		28
#define READ_UNLOCK_OUT_UNROLL		(1 << 29)


/* Should not include branches */
#define READ_PROC_ALL_TOKENS		(READ_PROD_NONE			\
					| READ_LOCK_OUT			\
					| READ_PROC_FIRST_MB		\
					| READ_LOCK_NESTED_OUT		\
					| READ_PROC_READ_GEN		\
					| READ_PROC_ACCESS_GEN		\
					| READ_UNLOCK_NESTED_OUT	\
					| READ_PROC_SECOND_MB		\
					| READ_UNLOCK_OUT		\
					| READ_LOCK_OUT_UNROLL		\
					| READ_PROC_THIRD_MB		\
					| READ_PROC_READ_GEN_UNROLL	\
					| READ_PROC_ACCESS_GEN_UNROLL	\
					| READ_PROC_FOURTH_MB		\
					| READ_UNLOCK_OUT_UNROLL)

/* Must clear all tokens, including branches */
#define READ_PROC_ALL_TOKENS_CLEAR	((1 << 30) - 1)

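/*
 * READ_PROC_ALL_TOKENS deliberately omits the per-base branch tokens
 * (READ_PROD_B_IF_TRUE / READ_PROD_B_IF_FALSE), since only one branch of
 * each if/else produces its token. The clear mask ((1 << 30) - 1) covers
 * bits 0 through 29, so the branch tokens are wiped as well before the
 * next loop iteration.
 */
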
inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);

#ifdef NO_MB
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

#ifdef REMOTE_BARRIERS
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

	do
	:: 1 ->

#ifdef REMOTE_BARRIERS
		/*
		 * A signal-based memory barrier may only execute at points
		 * consistent with program order.
		 */
		if
		:: 1 ->
			atomic {
				if
				:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
						READ_LOCK_OUT | READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
						READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
						READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
						READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
						READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
						READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT,
						READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
						READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL,
						READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
						READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
						0) ->
					goto non_atomic3;
non_atomic3_end:
					skip;
				fi;
			}
		:: 1 -> skip;
		fi;

		goto non_atomic3_skip;
non_atomic3:
		smp_mb_recv(i, j);
		goto non_atomic3_end;
non_atomic3_skip:

#endif /* REMOTE_BARRIERS */

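/*
 * Each CONSUME_TOKENS() disjunct above corresponds to one program-order
 * point on the read side: before the outer lock, between lock and nested
 * lock, and so on, up to after the last unlock. Letting smp_mb_recv()
 * execute at exactly one such point per iteration models a signal handler
 * interrupting the reader between two consecutive instructions.
 */
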
		atomic {
			if
			PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_LOCK_OUT,	/* post-dominant */
					  READ_PROC_FIRST_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);

			PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
					    READ_LOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB,	/* mb() orders reads */
					  READ_PROC_READ_GEN) ->
				ooo_mem(i);
				ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB	/* mb() orders reads */
					  | READ_PROC_READ_GEN,
					  READ_PROC_ACCESS_GEN) ->
				/* smp_read_barrier_depends */
				goto rmb1;
rmb1_end:
				data_read_first[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);


			/*
			 * Note: we remove the nested memory barrier from the
			 * read unlock model, given it is not usually needed.
			 * The implementation keeps the barrier unconditionally
			 * because a branch to skip it in the common case would
			 * cost more than it saves.
			 */

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
					      READ_PROC_FIRST_MB
					      | READ_LOCK_OUT
					      | READ_LOCK_NESTED_OUT,
					      READ_UNLOCK_NESTED_OUT);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_ACCESS_GEN		/* mb() orders reads */
					  | READ_PROC_READ_GEN		/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_LOCK_OUT		/* post-dominant */
					  | READ_LOCK_NESTED_OUT	/* post-dominant */
					  | READ_UNLOCK_NESTED_OUT,
					  READ_PROC_SECOND_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
					      READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT);

			/* Unrolling loop : second consecutive lock */
			/*
			 * Reading urcu_active_readers, which has been written
			 * by READ_UNLOCK_OUT : RAW
			 */
			PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
					    READ_UNLOCK_OUT		/* RAW */
					    | READ_PROC_SECOND_MB	/* mb() orders reads */
					    | READ_PROC_FIRST_MB	/* mb() orders reads */
					    | READ_LOCK_NESTED_OUT	/* RAW */
					    | READ_LOCK_OUT		/* RAW */
					    | READ_UNLOCK_NESTED_OUT,	/* RAW */
					    READ_LOCK_OUT_UNROLL);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_THIRD_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_READ_GEN_UNROLL) ->
				ooo_mem(i);
				ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL
					  | READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_ACCESS_GEN_UNROLL) ->
				/* smp_read_barrier_depends */
				goto rmb2;
rmb2_end:
				data_read_second[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_ACCESS_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_PROC_THIRD_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_FOURTH_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
					      READ_PROC_FOURTH_MB	/* mb() orders reads */
					      | READ_PROC_THIRD_MB	/* mb() orders reads */
					      | READ_LOCK_OUT_UNROLL	/* RAW */
					      | READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT_UNROLL);
			:: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
	od;
	/*
	 * Dependency between consecutive loops :
	 * RAW dependency on
	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
	 * between loops.
	 * _WHEN THE MB()s are in place_, they add full ordering of the
	 * generation pointer read wrt the active reader count read, which
	 * ensures execution will not spill across loop execution.
	 * However, in the event mb()s are removed (execution using a signal
	 * handler to promote barrier() to smp_mb()), nothing prevents one
	 * loop iteration from spilling its execution into the next.
	 */
	goto end;
rmb1:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb1_end;
rmb2:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb2_end;
end:
	skip;
}


active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader
		 * never blocks anyway. We have to test reader and writer
		 * progress separately, otherwise we could think the writer
		 * is making progress when it is in fact blocked by an
		 * always-progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* no name clash please */
#undef proc_urcu_reader

/* Model the RCU update process. */

/*
 * Bit encoding, urcu_writer :
 * Currently only supports one reader.
 */

int _proc_urcu_writer;
#define proc_urcu_writer	_proc_urcu_writer

#define WRITE_PROD_NONE			(1 << 0)

#define WRITE_DATA			(1 << 1)
#define WRITE_PROC_WMB			(1 << 2)
#define WRITE_XCHG_PTR			(1 << 3)

#define WRITE_PROC_FIRST_MB		(1 << 4)

/* first flip */
#define WRITE_PROC_FIRST_READ_GP	(1 << 5)
#define WRITE_PROC_FIRST_WRITE_GP	(1 << 6)
#define WRITE_PROC_FIRST_WAIT		(1 << 7)
#define WRITE_PROC_FIRST_WAIT_LOOP	(1 << 8)

/* second flip */
#define WRITE_PROC_SECOND_READ_GP	(1 << 9)
#define WRITE_PROC_SECOND_WRITE_GP	(1 << 10)
#define WRITE_PROC_SECOND_WAIT		(1 << 11)
#define WRITE_PROC_SECOND_WAIT_LOOP	(1 << 12)

#define WRITE_PROC_SECOND_MB		(1 << 13)

#define WRITE_FREE			(1 << 14)

#define WRITE_PROC_ALL_TOKENS		(WRITE_PROD_NONE		\
					| WRITE_DATA			\
					| WRITE_PROC_WMB		\
					| WRITE_XCHG_PTR		\
					| WRITE_PROC_FIRST_MB		\
					| WRITE_PROC_FIRST_READ_GP	\
					| WRITE_PROC_FIRST_WRITE_GP	\
					| WRITE_PROC_FIRST_WAIT		\
					| WRITE_PROC_SECOND_READ_GP	\
					| WRITE_PROC_SECOND_WRITE_GP	\
					| WRITE_PROC_SECOND_WAIT	\
					| WRITE_PROC_SECOND_MB		\
					| WRITE_FREE)

#define WRITE_PROC_ALL_TOKENS_CLEAR	((1 << 15) - 1)

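/*
 * Program-order summary of the writer modeled below: write the new data,
 * wmb, exchange the RCU pointer, mb, then two grace-period flips (read the
 * GP counter, flip its parity bit, then wait while the reader is in a
 * critical section started under the old parity), mb, and finally poison
 * the old data entry.
 */
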
/*
 * Mutexes are implied around writer execution. A single writer at a time.
 */
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2, tmpa;
	byte cur_data = 0, old_data, loop_nr = 0;
	byte cur_gp_val = 0;	/*
				 * Keep a local trace of the current parity so
				 * we do not add non-existent dependencies on
				 * the global GP update. Needed to test the
				 * single flip case.
				 */

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (loop_nr < 3) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		loop_nr = loop_nr + 1;

		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);

#ifdef NO_WMB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
#endif

#ifdef NO_MB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
#endif

#ifdef SINGLE_FLIP
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
		/* For single flip, we need to know the current parity */
		cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
#endif

		do :: 1 ->
		atomic {
		if

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROD_NONE,
				  WRITE_DATA) ->
			ooo_mem(i);
			cur_data = (cur_data + 1) % SLAB_SIZE;
			WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA,
				  WRITE_PROC_WMB) ->
			smp_wmb(i, j);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_WMB,
				  WRITE_XCHG_PTR) ->
			/* rcu_xchg_pointer() */
			atomic {
				old_data = READ_CACHED_VAR(rcu_ptr);
				WRITE_CACHED_VAR(rcu_ptr, cur_data);
			}
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
				  WRITE_PROC_FIRST_MB) ->
			goto smp_mb_send1;
smp_mb_send1_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);

		/* first flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB,
				  WRITE_PROC_FIRST_READ_GP) ->
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP,
				  WRITE_PROC_FIRST_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
#ifndef SINGLE_FLIP
			/*
			 * In normal execution, we always start by waiting for
			 * the even parity.
			 */
			cur_gp_val = RCU_GP_CTR_BIT;
#endif
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
			:: else	->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send2;
smp_mb_send2_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_FIRST_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);

		/* second flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT		/* Control dependency : need to branch out of
								 * the loop to execute the next flip (CHECK) */
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_READ_GP) ->
			ooo_mem(i);
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB
				  | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP,
				  WRITE_PROC_SECOND_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
			:: else	->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send3;
smp_mb_send3_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_SECOND_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_MB) ->
			goto smp_mb_send4;
smp_mb_send4_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_WMB	/* No dependency on
							 * WRITE_DATA because we
							 * write to a
							 * different location. */
				  | WRITE_PROC_SECOND_MB
				  | WRITE_PROC_FIRST_MB,
				  WRITE_FREE) ->
			WRITE_CACHED_VAR(rcu_data[old_data], POISON);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);

		:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
			break;
		fi;
		}
		od;
		/*
		 * Note: the Promela model adds implicit serialization of the
		 * WRITE_FREE instruction. Normally, it would be permitted to
		 * spill into the next loop execution. Given that the
		 * validation checks whether the data entry read is poisoned,
		 * it is OK not to check for "late arriving" memory poisoning.
		 */
	:: else -> break;
	od;
	/*
	 * Given that the reader loops forever, let the writer also busy-loop
	 * with a progress label here so that, under weak fairness, we can
	 * test the writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;

	/* Non-atomic parts of the loop */
	goto end;
smp_mb_send1:
	smp_mb_send(i, j, 1);
	goto smp_mb_send1_end;
#ifndef GEN_ERROR_WRITER_PROGRESS
smp_mb_send2:
	smp_mb_send(i, j, 2);
	goto smp_mb_send2_end;
smp_mb_send3:
	smp_mb_send(i, j, 3);
	goto smp_mb_send3_end;
#endif
smp_mb_send4:
	smp_mb_send(i, j, 4);
	goto smp_mb_send4_end;
end:
	skip;
}

/* no name clash please */
#undef proc_urcu_writer


/* Leave after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(rcu_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			ptr_read_first[i] = 1;
			ptr_read_second[i] = 1;
			data_read_first[i] = WINE;
			data_read_second[i] = WINE;
			i++;
		:: i >= NR_READERS -> break
		od;
		INIT_CACHED_VAR(rcu_data[0], WINE, j);
		i = 1;
		do
		:: i < SLAB_SIZE ->
			INIT_CACHED_VAR(rcu_data[i], POISON, j);
			i++
		:: i >= SLAB_SIZE -> break
		od;

		init_done = 1;
	}
}