#define NO_MB

// Poison value for freed memory
#define POISON 1
// Memory with correct data
#define WINE 0
#define SLAB_SIZE 2

#define read_poison	(data_read_first[0] == POISON || data_read_second[0] == POISON)

#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
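
/*
 * Worked example of the two constants above: RCU_GP_CTR_BIT = 1 << 7 = 0x80
 * is the grace-period parity bit, so RCU_GP_CTR_NEST_MASK = 0x80 - 1 = 0x7f
 * covers the low 7 bits, which hold the read-side nesting count. A reader
 * counter value of 0x81 thus decodes as parity 1, nesting depth 1.
 */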

//disabled
#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory
 * and out-of-order instruction scheduling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid()	(_pid)

#define get_readerid()	(get_pid())
/*
 * Produced process control and data flow. Updated after each instruction to
 * show which variables are ready. Uses one-hot bit encoding per variable to
 * save state space. The tokens act as triggers to execute the instructions
 * having those variables as input. Leaving bits active inhibits instruction
 * execution. This scheme makes instruction disabling and dependency
 * fall-back automatic.
 */

#define CONSUME_TOKENS(state, bits, notbits)			\
	((!(state & (notbits))) && (state & (bits)) == (bits))

#define PRODUCE_TOKENS(state, bits)				\
	state = state | (bits);

#define CLEAR_TOKENS(state, bits)				\
	state = state & ~(bits)

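/*
 * Illustrative sketch (not part of the model): how these macros encode a
 * dependency graph. Assume hypothetical tokens TOK_START, TOK_A and TOK_B,
 * where statement B reads a variable written by statement A (a RAW
 * dependency). Inside an atomic { if ... fi } scheduler, one would write:
 *
 *	:: CONSUME_TOKENS(state, TOK_START, TOK_A) ->
 *		x = 1;				// statement A
 *		PRODUCE_TOKENS(state, TOK_A);
 *	:: CONSUME_TOKENS(state, TOK_A, TOK_B) ->
 *		y = x;				// statement B, needs A's output
 *		PRODUCE_TOKENS(state, TOK_B);
 *
 * B stays disabled until A's token is produced, and A cannot run twice
 * because its own token appears in its "notbits" argument.
 */
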
/*
 * Types of dependency :
 *
 * Data dependency
 *
 * - True dependency, Read-after-Write (RAW)
 *
 * This type of dependency happens when a statement depends on the result of a
 * previous statement. This applies to any statement which needs to read a
 * variable written by a preceding statement.
 *
 * - False dependency, Write-after-Read (WAR)
 *
 * Typically, variable renaming can ensure that this dependency goes away.
 * However, if the statements must read and then write from/to the same variable
 * in the OOO memory model, renaming may be impossible, and therefore this
 * causes a WAR dependency.
 *
 * - Output dependency, Write-after-Write (WAW)
 *
 * Two writes to the same variable in subsequent statements. Variable renaming
 * can normally remove this dependency, but it can be required when writing
 * multiple times to the same OOO mem model variable.
 *
 * Control dependency
 *
 * Execution of a given instruction depends on a previous instruction evaluating
 * in a way that allows its execution, e.g. branches.
 *
 * Useful considerations for joining dependencies after branch
 *
 * - Pre-dominance
 *
 * "We say box i dominates box j if every path (leading from input to output
 * through the diagram) which passes through box j must also pass through box
 * i. Thus box i dominates box j if box j is subordinate to box i in the
 * program."
 *
 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
 * Another classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
 *
 * - Post-dominance
 *
 * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
 * output exchanged. Therefore, i post-dominating j ensures that every path
 * passing by j will pass by i before reaching the output.
 *
 * Other considerations
 *
 * Note about "volatile" keyword dependency : The compiler will order volatile
 * accesses so they appear in the right order on a given CPU. They can be
 * reordered by the CPU instruction scheduling. This therefore cannot be
 * considered as a dependency.
 *
 * References :
 *
 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
 * Kaufmann. ISBN 1-55860-698-X.
 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
 * 1-55860-286-0.
 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
 * Morgan Kaufmann. ISBN 1-55860-320-4.
 */
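
/*
 * Illustrative sketch (not part of the model): the three data dependencies
 * on a shared variable x, written as plain Promela assignments:
 *
 *	x = 1;		// (1)
 *	y = x;		// (2) RAW : (2) reads what (1) wrote
 *	x = 2;		// (3) WAR : (3) must not overtake (2)'s read of x
 *	x = 3;		// (4) WAW : (4) must stay ordered after (3)
 *
 * In the token scheme above, each such arrow becomes a token listed in the
 * consumer's CONSUME_TOKENS() "bits" argument.
 */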

/*
 * Note about loops and nested calls
 *
 * To keep this model simple, loops expressed in the framework will behave as
 * if there were a core synchronizing instruction between loops. To see the
 * effect of loop unrolling, manually unrolling loops is required. Note that
 * if loops end or start with a core synchronizing instruction, the model is
 * appropriate. Nested calls are not supported.
 */

/*
 * Each process has its own data in its cache. Caches are updated at random
 * points in time. smp_wmb and smp_rmb force cache updates (write and read,
 * respectively); smp_mb forces both.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

typedef per_proc_bit {
	bit val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bitfield {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bitfield cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)	\
	mem_##x = v;			\
	cache_dirty_##x.bitfield = 0;	\
	j = 0;				\
	do				\
	:: j < NR_PROCS ->		\
		cached_##x.val[j] = v;	\
		j++			\
	:: j >= NR_PROCS -> break	\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)				\
	atomic {					\
		cached_##x.val[get_pid()] = v;		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->					\
		skip					\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;\
	:: else ->			\
		skip			\
	fi;

/*
 * May commit a dirty cache line to memory, or not: the choice is made
 * nondeterministically, which models random cache write-back.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
	if				\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip			\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
	if				\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip			\
	fi;

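/*
 * Illustrative sketch (not part of the model): lifetime of a cached
 * variable under these macros, for a hypothetical variable foo.
 *
 *	DECLARE_CACHED_VAR(byte, foo);	// mem_foo, cached_foo, cache_dirty_foo
 *
 *	WRITE_CACHED_VAR(foo, 1);	// writes this process's cache copy
 *					// and marks its dirty bit
 *	CACHE_WRITE_TO_MEM(foo, get_pid());	// commits it to mem_foo
 *	CACHE_READ_FROM_MEM(foo, get_pid());	// refreshes a clean copy
 *
 * Other processes keep reading their own stale cached_foo.val[] entry
 * until a (random or barrier-forced) CACHE_READ_FROM_MEM updates it.
 */
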
/* Must consume all prior read tokens. All subsequent reads depend on it. */
inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Must consume all prior write tokens. All subsequent writes depend on it. */
inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/*
 * Synchronization point. Must consume all prior read and write tokens. All
 * subsequent reads and writes depend on it.
 */
inline smp_mb(i, j)
{
	atomic {
		smp_wmb(i, j);
		smp_rmb(i, j);
	}
}

#ifdef REMOTE_BARRIERS

bit reader_barrier[NR_READERS];

/*
 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
 * because they would add nonexistent core synchronization and would therefore
 * create an incomplete model.
 * Therefore, we model the read-side memory barriers by completely disabling
 * the memory barriers and their dependencies from the read-side. One at a
 * time (in different verification runs), we make a different instruction
 * listen for signals.
 */

#define smp_mb_reader(i, j)

/*
 * Service 0, 1 or many barrier requests.
 */
inline smp_mb_recv(i, j)
{
	do
	:: (reader_barrier[get_readerid()] == 1) ->
		smp_mb(i, j);
		reader_barrier[get_readerid()] = 0;
	:: 1 ->
		/*
		 * We choose to ignore the writer's non-progress caused by the
		 * reader ignoring the writer's mb() requests.
		 */
#ifdef WRITER_PROGRESS
progress_writer_from_reader:
#endif
		break;
	od;
}

#ifdef WRITER_PROGRESS
//#define PROGRESS_LABEL(progressid)	progress_writer_progid_##progressid:
#define PROGRESS_LABEL(progressid)
#else
#define PROGRESS_LABEL(progressid)
#endif

#define smp_mb_send(i, j, progressid)					\
{									\
	smp_mb(i, j);							\
	i = 0;								\
	do								\
	:: i < NR_READERS ->						\
		reader_barrier[i] = 1;					\
		/*							\
		 * Busy-looping waiting for reader barrier handling is of little\
		 * interest, given the reader has the ability to totally ignore	\
		 * barrier requests.					\
		 */							\
		PROGRESS_LABEL(progressid)				\
		do							\
		:: (reader_barrier[i] == 1) -> skip;			\
		:: (reader_barrier[i] == 0) -> break;			\
		od;							\
		i++;							\
	:: i >= NR_READERS ->						\
		break							\
	od;								\
	smp_mb(i, j);							\
}

#else

#define smp_mb_send(i, j, progressid)	smp_mb(i, j)
#define smp_mb_reader	smp_mb
#define smp_mb_recv(i, j)

#endif

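/*
 * Illustrative sketch (not part of the model): the REMOTE_BARRIERS
 * handshake defined above. The writer, inside smp_mb_send(), raises
 * reader_barrier[i] and spins until it is cleared; the reader, inside
 * smp_mb_recv(), may either service the request:
 *
 *	:: (reader_barrier[get_readerid()] == 1) ->
 *		smp_mb(i, j);
 *		reader_barrier[get_readerid()] = 0;
 *
 * or ignore it and break out, which models a signal that has not yet been
 * delivered at this point of the reader's execution.
 */
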
/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note! Currently only one reader. */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* RCU data */
DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);

/* RCU pointer */
#if (SLAB_SIZE == 2)
DECLARE_CACHED_VAR(bit, rcu_ptr);
bit ptr_read_first[NR_READERS];
bit ptr_read_second[NR_READERS];
#else
DECLARE_CACHED_VAR(byte, rcu_ptr);
byte ptr_read_first[NR_READERS];
byte ptr_read_second[NR_READERS];
#endif

bit data_read_first[NR_READERS];
bit data_read_second[NR_READERS];

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/*
 * Bit encoding, urcu_reader :
 */

int _proc_urcu_reader;
#define proc_urcu_reader	_proc_urcu_reader

/* Body of PROCEDURE_READ_LOCK */
#define READ_PROD_A_READ		(1 << 0)
#define READ_PROD_B_IF_TRUE		(1 << 1)
#define READ_PROD_B_IF_FALSE		(1 << 2)
#define READ_PROD_C_IF_TRUE_READ	(1 << 3)

#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)		\
	:: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->	\
		ooo_mem(i);						\
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);	\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  READ_PROD_A_READ << base,	/* RAW, pre-dominant */	\
			  (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->	\
		if							\
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->			\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);	\
		:: else ->						\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base);	\
		fi;							\
	/* IF TRUE */							\
	:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,	\
			  READ_PROD_C_IF_TRUE_READ << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);			\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_C_IF_TRUE_READ	/* pre-dominant */	\
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);	\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
						/* IF_MERGE implies	\
						 * post-dominance */	\
	/* ELSE */							\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_B_IF_FALSE		/* pre-dominant */	\
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],	\
				 tmp + 1);				\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
						/* IF_MERGE implies	\
						 * post-dominance */	\
	/* ENDIF */							\
	skip
/* Body of PROCEDURE_READ_UNLOCK */
#define READ_PROC_READ_UNLOCK		(1 << 0)

#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken,					\
			  READ_PROC_READ_UNLOCK << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);	\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken					\
			  | (READ_PROC_READ_UNLOCK << base),	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);	\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	skip


#define READ_PROD_NONE			(1 << 0)

/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
#define READ_LOCK_BASE			1
#define READ_LOCK_OUT			(1 << 5)

#define READ_PROC_FIRST_MB		(1 << 6)

/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
#define READ_LOCK_NESTED_BASE		7
#define READ_LOCK_NESTED_OUT		(1 << 11)

#define READ_PROC_READ_GEN		(1 << 12)
#define READ_PROC_ACCESS_GEN		(1 << 13)

/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
#define READ_UNLOCK_NESTED_BASE		14
#define READ_UNLOCK_NESTED_OUT		(1 << 15)

#define READ_PROC_SECOND_MB		(1 << 16)

/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
#define READ_UNLOCK_BASE		17
#define READ_UNLOCK_OUT			(1 << 18)

/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
#define READ_LOCK_UNROLL_BASE		19
#define READ_LOCK_OUT_UNROLL		(1 << 23)

#define READ_PROC_THIRD_MB		(1 << 24)

#define READ_PROC_READ_GEN_UNROLL	(1 << 25)
#define READ_PROC_ACCESS_GEN_UNROLL	(1 << 26)

#define READ_PROC_FOURTH_MB		(1 << 27)

/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
#define READ_UNLOCK_UNROLL_BASE		28
#define READ_UNLOCK_OUT_UNROLL		(1 << 29)


/* Should not include branches */
#define READ_PROC_ALL_TOKENS		(READ_PROD_NONE			\
					| READ_LOCK_OUT			\
					| READ_PROC_FIRST_MB		\
					| READ_LOCK_NESTED_OUT		\
					| READ_PROC_READ_GEN		\
					| READ_PROC_ACCESS_GEN		\
					| READ_UNLOCK_NESTED_OUT	\
					| READ_PROC_SECOND_MB		\
					| READ_UNLOCK_OUT		\
					| READ_LOCK_OUT_UNROLL		\
					| READ_PROC_THIRD_MB		\
					| READ_PROC_READ_GEN_UNROLL	\
					| READ_PROC_ACCESS_GEN_UNROLL	\
					| READ_PROC_FOURTH_MB		\
					| READ_UNLOCK_OUT_UNROLL)

/* Must clear all tokens, including branches */
#define READ_PROC_ALL_TOKENS_CLEAR	((1 << 30) - 1)

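/*
 * Illustrative summary (not part of the model): the program order encoded
 * by the reader tokens above, covering two unrolled read-side critical
 * sections:
 *
 *	lock -> mb -> nested lock -> read gen -> access gen
 *	     -> nested unlock -> mb -> unlock
 *	     -> lock (unrolled) -> mb -> read gen (unrolled)
 *	     -> access gen (unrolled) -> mb -> unlock (unrolled)
 *
 * Each step only becomes eligible once the tokens listed in its
 * CONSUME_TOKENS() guard have been produced.
 */
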
inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);

#ifdef NO_MB
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

#ifdef REMOTE_BARRIERS
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

	do
	:: 1 ->

#ifdef REMOTE_BARRIERS
		/*
		 * Signal-based memory barrier will only execute when the
		 * execution order appears in program order.
		 */
		if
		:: 1 ->
			atomic {
				if
				:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
						READ_LOCK_OUT | READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
						READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
						READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
						READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
						READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
						READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT,
						READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
						READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL,
						READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
						READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
						0) ->
					goto non_atomic3;
non_atomic3_end:
					skip;
				fi;
			}
		:: 1 -> skip;
		fi;

		goto non_atomic3_skip;
non_atomic3:
		smp_mb_recv(i, j);
		goto non_atomic3_end;
non_atomic3_skip:

#endif /* REMOTE_BARRIERS */

		atomic {
			if
			PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_LOCK_OUT,	/* post-dominant */
					  READ_PROC_FIRST_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);

			PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
					    READ_LOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB,	/* mb() orders reads */
					  READ_PROC_READ_GEN) ->
				ooo_mem(i);
				ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB	/* mb() orders reads */
					  | READ_PROC_READ_GEN,
					  READ_PROC_ACCESS_GEN) ->
				/* smp_read_barrier_depends */
				goto rmb1;
rmb1_end:
				data_read_first[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);


			/*
			 * Note : we remove the nested memory barrier from the read unlock
			 * model, given it is not usually needed. The implementation has the barrier
			 * because the performance impact added by a branch in the common case does not
			 * justify it.
			 */

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
					      READ_PROC_FIRST_MB
					      | READ_LOCK_OUT
					      | READ_LOCK_NESTED_OUT,
					      READ_UNLOCK_NESTED_OUT);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_ACCESS_GEN		/* mb() orders reads */
					  | READ_PROC_READ_GEN		/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_LOCK_OUT		/* post-dominant */
					  | READ_LOCK_NESTED_OUT	/* post-dominant */
					  | READ_UNLOCK_NESTED_OUT,
					  READ_PROC_SECOND_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
					      READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT);

			/* Unrolling loop : second consecutive lock */
			/* reading urcu_active_readers, which have been written by
			 * READ_UNLOCK_OUT : RAW */
			PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
					    READ_UNLOCK_OUT		/* RAW */
					    | READ_PROC_SECOND_MB	/* mb() orders reads */
					    | READ_PROC_FIRST_MB	/* mb() orders reads */
					    | READ_LOCK_NESTED_OUT	/* RAW */
					    | READ_LOCK_OUT		/* RAW */
					    | READ_UNLOCK_NESTED_OUT,	/* RAW */
					    READ_LOCK_OUT_UNROLL);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_THIRD_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_READ_GEN_UNROLL) ->
				ooo_mem(i);
				ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL
					  | READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_ACCESS_GEN_UNROLL) ->
				/* smp_read_barrier_depends */
				goto rmb2;
rmb2_end:
				data_read_second[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_ACCESS_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_PROC_THIRD_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_FOURTH_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
					      READ_PROC_FOURTH_MB	/* mb() orders reads */
					      | READ_PROC_THIRD_MB	/* mb() orders reads */
					      | READ_LOCK_OUT_UNROLL	/* RAW */
					      | READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT_UNROLL);
			:: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
	od;
	/*
	 * Dependency between consecutive loops :
	 * RAW dependency on
	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
	 * between loops.
	 * _WHEN THE MB()s are in place_, they add full ordering of the
	 * generation pointer read wrt active reader count read, which ensures
	 * execution will not spill across loop execution.
	 * However, in the event mb()s are removed (execution using signal
	 * handler to promote barrier() -> smp_mb()), nothing prevents one loop
	 * from spilling its execution into the other loop's execution.
	 */
	goto end;
rmb1:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb1_end;
rmb2:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb2_end;
end:
	skip;
}



active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader never
		 * blocks anyway. We have to test reader and writer progress
		 * separately, otherwise we could think the writer is making
		 * progress when it is in fact blocked by an always-progressing
		 * reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* no name clash please */
#undef proc_urcu_reader


/* Model the RCU update process. */

/*
 * Bit encoding, urcu_writer :
 * Currently only supports one reader.
 */

int _proc_urcu_writer;
#define proc_urcu_writer	_proc_urcu_writer

#define WRITE_PROD_NONE			(1 << 0)

#define WRITE_DATA			(1 << 1)
#define WRITE_PROC_WMB			(1 << 2)
#define WRITE_XCHG_PTR			(1 << 3)

#define WRITE_PROC_FIRST_MB		(1 << 4)

/* first flip */
#define WRITE_PROC_FIRST_READ_GP	(1 << 5)
#define WRITE_PROC_FIRST_WRITE_GP	(1 << 6)
#define WRITE_PROC_FIRST_WAIT		(1 << 7)
#define WRITE_PROC_FIRST_WAIT_LOOP	(1 << 8)

/* second flip */
#define WRITE_PROC_SECOND_READ_GP	(1 << 9)
#define WRITE_PROC_SECOND_WRITE_GP	(1 << 10)
#define WRITE_PROC_SECOND_WAIT		(1 << 11)
#define WRITE_PROC_SECOND_WAIT_LOOP	(1 << 12)

#define WRITE_PROC_SECOND_MB		(1 << 13)

#define WRITE_FREE			(1 << 14)

#define WRITE_PROC_ALL_TOKENS		(WRITE_PROD_NONE		\
					| WRITE_DATA			\
					| WRITE_PROC_WMB		\
					| WRITE_XCHG_PTR		\
					| WRITE_PROC_FIRST_MB		\
					| WRITE_PROC_FIRST_READ_GP	\
					| WRITE_PROC_FIRST_WRITE_GP	\
					| WRITE_PROC_FIRST_WAIT		\
					| WRITE_PROC_SECOND_READ_GP	\
					| WRITE_PROC_SECOND_WRITE_GP	\
					| WRITE_PROC_SECOND_WAIT	\
					| WRITE_PROC_SECOND_MB		\
					| WRITE_FREE)

#define WRITE_PROC_ALL_TOKENS_CLEAR	((1 << 15) - 1)

/*
 * Mutexes are implied around writer execution. A single writer at a time.
 */
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2, tmpa;
	byte cur_data = 0, old_data, loop_nr = 0;
	byte cur_gp_val = 0;	/*
				 * Keep a local trace of the current parity so
				 * we do not add nonexistent dependencies on
				 * the global GP update. Needed to test the
				 * single flip case.
				 */

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (loop_nr < 3) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		loop_nr = loop_nr + 1;

		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);

#ifdef NO_WMB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
#endif

#ifdef NO_MB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
#endif

#ifdef SINGLE_FLIP
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
		/* For single flip, we need to know the current parity */
		cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
#endif

		do :: 1 ->
		atomic {
		if

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROD_NONE,
				  WRITE_DATA) ->
			ooo_mem(i);
			cur_data = (cur_data + 1) % SLAB_SIZE;
			WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA,
				  WRITE_PROC_WMB) ->
			smp_wmb(i, j);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_WMB,
				  WRITE_XCHG_PTR) ->
			/* rcu_xchg_pointer() */
			atomic {
				old_data = READ_CACHED_VAR(rcu_ptr);
				WRITE_CACHED_VAR(rcu_ptr, cur_data);
			}
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
				  WRITE_PROC_FIRST_MB) ->
			goto smp_mb_send1;
smp_mb_send1_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);

		/* first flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB,
				  WRITE_PROC_FIRST_READ_GP) ->
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP,
				  WRITE_PROC_FIRST_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
#ifndef SINGLE_FLIP
			/*
			 * In normal execution, we always start by waiting for
			 * the even parity.
			 */
			cur_gp_val = RCU_GP_CTR_BIT;
#endif
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send2;
smp_mb_send2_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_FIRST_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);

		/* second flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT		/* Control dependency : need to branch out of
								 * the loop to execute the next flip (CHECK) */
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_READ_GP) ->
			ooo_mem(i);
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB
				  | WRITE_PROC_WMB
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP,
				  WRITE_PROC_SECOND_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_WAIT_LOOP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send3;
smp_mb_send3_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_SECOND_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_MB) ->
			goto smp_mb_send4;
smp_mb_send4_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_XCHG_PTR
				  | WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_WMB	/* No dependency on
							 * WRITE_DATA because we
							 * write to a
							 * different location. */
				  | WRITE_PROC_SECOND_MB
				  | WRITE_PROC_FIRST_MB,
				  WRITE_FREE) ->
			WRITE_CACHED_VAR(rcu_data[old_data], POISON);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);

		:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
			break;
		fi;
		}
		od;
		/*
		 * Note : the Promela model adds implicit serialization of the
		 * WRITE_FREE instruction. Normally, it would be permitted to
		 * spill over into the next loop execution. Given that the
		 * validation checks that the data entry read is poisoned, it
		 * is OK if we do not check for "late-arriving" memory
		 * poisoning.
		 */
	:: else -> break;
	od;
	/*
	 * Given that the reader loops infinitely, let the writer also
	 * busy-loop with progress here so that, with weak fairness, we can
	 * test the writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;

	/* Non-atomic parts of the loop */
	goto end;
smp_mb_send1:
	smp_mb_send(i, j, 1);
	goto smp_mb_send1_end;
#ifndef GEN_ERROR_WRITER_PROGRESS
smp_mb_send2:
	smp_mb_send(i, j, 2);
	goto smp_mb_send2_end;
smp_mb_send3:
	smp_mb_send(i, j, 3);
	goto smp_mb_send3_end;
#endif
smp_mb_send4:
	smp_mb_send(i, j, 4);
	goto smp_mb_send4_end;
end:
	skip;
}

/* no name clash please */
#undef proc_urcu_writer


/* Leave after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(rcu_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			ptr_read_first[i] = 1;
			ptr_read_second[i] = 1;
			data_read_first[i] = WINE;
			data_read_second[i] = WINE;
			i++;
		:: i >= NR_READERS -> break
		od;
		INIT_CACHED_VAR(rcu_data[0], WINE, j);
		i = 1;
		do
		:: i < SLAB_SIZE ->
			INIT_CACHED_VAR(rcu_data[i], POISON, j);
			i++
		:: i >= SLAB_SIZE -> break
		od;

		init_done = 1;
	}
}