
// Poison value for freed memory
#define POISON 1
// Memory with correct data
#define WINE 0
#define SLAB_SIZE 2

#define read_poison	(data_read_first[0] == POISON || data_read_second[0] == POISON)

#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)

//disabled
//#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory
 * and out-of-order instruction scheduling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

/* specific defines "included" here */
/* DEFINES file "included" here */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid()	(_pid)

#define get_readerid()	(get_pid())

/*
 * Produced process control and data flow. Updated after each instruction to
 * show which variables are ready. One-hot bit encoding is used per variable
 * to save state space. The tokens act as triggers to execute the instructions
 * having those variables as input. Leaving bits active inhibits instruction
 * execution. This scheme makes instruction disabling and dependency fall-back
 * automatic.
 */

#define CONSUME_TOKENS(state, bits, notbits)			\
	((!(state & (notbits))) && (state & (bits)) == (bits))

#define PRODUCE_TOKENS(state, bits)	\
	state = state | (bits);

#define CLEAR_TOKENS(state, bits)	\
	state = state & ~(bits)
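
/*
 * Illustration (not part of the model): with tokens A = (1 << 0) and
 * B = (1 << 1), after PRODUCE_TOKENS(state, A) the guard
 * CONSUME_TOKENS(state, A, B) is true: A has been produced and B has not
 * been produced yet. Once PRODUCE_TOKENS(state, B) executes, that same
 * guard becomes false, which is how an instruction is kept from running
 * twice within one pass.
 */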

/*
 * Types of dependency:
 *
 * Data dependency
 *
 * - True dependency, Read-after-Write (RAW)
 *
 * This type of dependency happens when a statement depends on the result of a
 * previous statement. This applies to any statement which needs to read a
 * variable written by a preceding statement.
 *
 * - False dependency, Write-after-Read (WAR)
 *
 * Typically, variable renaming can ensure that this dependency goes away.
 * However, if the statements must read and then write from/to the same
 * variable in the OOO memory model, renaming may be impossible, and therefore
 * this causes a WAR dependency.
 *
 * - Output dependency, Write-after-Write (WAW)
 *
 * Two writes to the same variable in subsequent statements. Variable renaming
 * can ensure this is not needed, but can be required when writing multiple
 * times to the same OOO mem model variable.
 *
 * Control dependency
 *
 * Execution of a given instruction depends on a previous instruction
 * evaluating in a way that allows its execution. E.g.: branches.
 *
 * Useful considerations for joining dependencies after branch
 *
 * - Pre-dominance
 *
 * "We say box i dominates box j if every path (leading from input to output
 * through the diagram) which passes through box j must also pass through box
 * i. Thus box i dominates box j if box j is subordinate to box i in the
 * program."
 *
 * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
 * Another classic algorithm to compute dominance: Lengauer-Tarjan (in gcc)
 *
 * - Post-dominance
 *
 * Just as pre-dominance, but with the arcs of the data flow inverted, and
 * input vs output exchanged. Therefore, i post-dominating j ensures that every
 * path passing by j will pass by i before reaching the output.
115 * Other considerations
116 *
117 * Note about "volatile" keyword dependency : The compiler will order volatile
118 * accesses so they appear in the right order on a given CPU. They can be
119 * reordered by the CPU instruction scheduling. This therefore cannot be
120 * considered as a depencency.
121 *
122 * References :
123 *
124 * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
125 * Kaufmann. ISBN 1-55860-698-X.
126 * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
127 * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
128 * 1-55860-286-0.
129 * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
130 * Morgan Kaufmann. ISBN 1-55860-320-4.
131 */
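
/*
 * Small illustration of the three data dependency kinds (not part of the
 * model; plain pseudo-C):
 *
 *	a = x; b = a;	RAW: the second statement reads 'a' written by
 *			the first.
 *	b = a; a = 1;	WAR: the write to 'a' must not overtake the
 *			earlier read of 'a'.
 *	a = 1; a = 2;	WAW: both writes target 'a'; their order
 *			determines the final value.
 */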

/*
 * Note about loops and nested calls
 *
 * To keep this model simple, loops expressed in the framework will behave as
 * if there were a core-synchronizing instruction between loops. To see the
 * effect of loop unrolling, manually unrolling loops is required. Note that if
 * loops end or start with a core-synchronizing instruction, the model is
 * appropriate. Nested calls are not supported.
 */

/*
 * Each process has its own data in its cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read, respectively);
 * smp_mb forces both.
 */

typedef per_proc_byte {
	byte val[NR_PROCS];
};

typedef per_proc_bit {
	bit val[NR_PROCS];
};

/* Bitfield has a maximum of 8 procs */
typedef per_proc_bitfield {
	byte bitfield;
};

#define DECLARE_CACHED_VAR(type, x)	\
	type mem_##x;			\
	per_proc_##type cached_##x;	\
	per_proc_bitfield cache_dirty_##x;

#define INIT_CACHED_VAR(x, v, j)		\
	mem_##x = v;				\
	cache_dirty_##x.bitfield = 0;		\
	j = 0;					\
	do					\
	:: j < NR_PROCS ->			\
		cached_##x.val[j] = v;		\
		j++				\
	:: j >= NR_PROCS -> break		\
	od;

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x.bitfield & (1 << id))

#define READ_CACHED_VAR(x)	(cached_##x.val[get_pid()])

#define WRITE_CACHED_VAR(x, v)				\
	atomic {					\
		cached_##x.val[get_pid()] = v;		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield | (1 << get_pid());	\
	}

#define CACHE_WRITE_TO_MEM(x, id)			\
	if						\
	:: IS_CACHE_DIRTY(x, id) ->			\
		mem_##x = cached_##x.val[id];		\
		cache_dirty_##x.bitfield =		\
			cache_dirty_##x.bitfield & (~(1 << id));	\
	:: else ->					\
		skip					\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x.val[id] = mem_##x;\
	:: else ->			\
		skip			\
	fi;

/*
 * Randomly flush the cache to memory: a dirty cache line may or may not be
 * written back, modeling an unpredictable cache update.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
	if				\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip			\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
	if				\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip			\
	fi;
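
/*
 * Usage sketch (illustrative only): DECLARE_CACHED_VAR(byte, x) creates the
 * backing store mem_x plus one cached copy per process. A process writes with
 * WRITE_CACHED_VAR(x, v), which only touches its own cache and marks it
 * dirty; the value becomes visible to other processes once CACHE_WRITE_TO_MEM
 * (or a random flush) copies it to mem_x and another process performs
 * CACHE_READ_FROM_MEM.
 */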
221
/* Must consume all prior read tokens. All subsequent reads depend on it. */
inline smp_rmb(i, j)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/* Must consume all prior write tokens. All subsequent writes depend on it. */
inline smp_wmb(i, j)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/*
 * Synchronization point. Must consume all prior read and write tokens. All
 * subsequent reads and writes depend on it.
 */
inline smp_mb(i, j)
{
	atomic {
		smp_wmb(i, j);
		smp_rmb(i, j);
	}
}
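
/*
 * In this model, smp_mb is simply wmb followed by rmb executed atomically:
 * it flushes every dirty cached variable to memory and then refreshes every
 * clean cached copy from memory, which is the strongest ordering the cache
 * model can express.
 */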
277
#ifdef REMOTE_BARRIERS

bit reader_barrier[NR_READERS];

/*
 * We cannot leave the barrier dependencies in place in REMOTE_BARRIERS mode
 * because they would add nonexistent core synchronization and would therefore
 * create an incomplete model.
 * Therefore, we model the read-side memory barriers by completely disabling
 * the memory barriers and their dependencies from the read-side. One at a
 * time (in different verification runs), we make a different instruction
 * listen for signals.
 */

#define smp_mb_reader(i, j)

/*
 * Service 0, 1 or many barrier requests.
 */
inline smp_mb_recv(i, j)
{
	do
	:: (reader_barrier[get_readerid()] == 1) ->
		smp_mb(i, j);
		reader_barrier[get_readerid()] = 0;
	:: 1 ->
		/*
		 * Busy-looping while waiting for other barrier requests is not
		 * considered non-progress.
		 */
#ifdef READER_PROGRESS
progress_reader2:
#endif
#ifdef WRITER_PROGRESS
//progress_writer_from_reader1:
#endif
		skip;
	:: 1 ->
		/*
		 * We choose to ignore the writer's non-progress caused by the
		 * reader ignoring the writer's mb() requests.
		 */
#ifdef WRITER_PROGRESS
//progress_writer_from_reader2:
#endif
		break;
	od;
}

#ifdef WRITER_PROGRESS
#define PROGRESS_LABEL(progressid)	progress_writer_progid_##progressid:
#else
#define PROGRESS_LABEL(progressid)
#endif

#define smp_mb_send(i, j, progressid)					\
{									\
	smp_mb(i, j);							\
	i = 0;								\
	do								\
	:: i < NR_READERS ->						\
		reader_barrier[i] = 1;					\
		/*							\
		 * Busy-looping waiting for reader barrier handling is of little\
		 * interest, given the reader has the ability to totally ignore	\
		 * barrier requests.					\
		 */							\
		PROGRESS_LABEL(progressid)				\
		do							\
		:: (reader_barrier[i] == 1) -> skip;			\
		:: (reader_barrier[i] == 0) -> break;			\
		od;							\
		i++;							\
	:: i >= NR_READERS ->						\
		break							\
	od;								\
	smp_mb(i, j);							\
}

#else

#define smp_mb_send(i, j, progressid)	smp_mb(i, j)
#define smp_mb_reader	smp_mb
#define smp_mb_recv(i, j)

#endif

/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
/* Note: currently only one reader */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
/* RCU data */
DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);

/* RCU pointer */
#if (SLAB_SIZE == 2)
DECLARE_CACHED_VAR(bit, rcu_ptr);
bit ptr_read_first[NR_READERS];
bit ptr_read_second[NR_READERS];
#else
DECLARE_CACHED_VAR(byte, rcu_ptr);
byte ptr_read_first[NR_READERS];
byte ptr_read_second[NR_READERS];
#endif

bit data_read_first[NR_READERS];
bit data_read_second[NR_READERS];

bit init_done = 0;

inline wait_init_done()
{
	do
	:: init_done == 0 -> skip;
	:: else -> break;
	od;
}
393
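/*
 * ooo_mem models out-of-order memory: at any instruction-scheduling point it
 * may randomly flush dirty cached variables to memory and randomly refresh
 * clean cached copies from memory, so any interleaving of cache/memory
 * traffic that the barriers do not forbid can be explored by the verifier.
 */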
inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
				get_pid());
			i++
		:: i >= NR_READERS -> break
		od;
		RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
		i = 0;
		do
		:: i < SLAB_SIZE ->
			RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
			i++
		:: i >= SLAB_SIZE -> break
		od;
	}
}

/*
 * Bit encoding, urcu_reader:
 */

int _proc_urcu_reader;
#define proc_urcu_reader	_proc_urcu_reader

/* Body of PROCEDURE_READ_LOCK */
#define READ_PROD_A_READ		(1 << 0)
#define READ_PROD_B_IF_TRUE		(1 << 1)
#define READ_PROD_B_IF_FALSE		(1 << 2)
#define READ_PROD_C_IF_TRUE_READ	(1 << 3)

#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)		\
	:: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->	\
		ooo_mem(i);						\
		tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);	\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  READ_PROD_A_READ << base,	/* RAW, pre-dominant */	\
			  (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->	\
		if							\
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->			\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);	\
		:: else ->						\
			PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base);	\
		fi;							\
	/* IF TRUE */							\
	:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,	\
			  READ_PROD_C_IF_TRUE_READ << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);			\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_C_IF_TRUE_READ	/* pre-dominant */	\
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);	\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies		\
	 * post-dominance */		\
	/* ELSE */							\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  (READ_PROD_B_IF_FALSE		/* pre-dominant */	\
			  | READ_PROD_A_READ) << base,	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],	\
				 tmp + 1);				\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	/* IF_MERGE implies		\
	 * post-dominance */		\
	/* ENDIF */							\
	skip
489
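/*
 * For orientation (a sketch, not normative): the body above corresponds to a
 * read-lock of the general shape
 *
 *	tmp = urcu_active_readers[id];
 *	if (!(tmp & RCU_GP_CTR_NEST_MASK))
 *		urcu_active_readers[id] = urcu_gp_ctr;	// outermost: snapshot GP counter
 *	else
 *		urcu_active_readers[id] = tmp + 1;	// nested: increment count
 *
 * decomposed into token-guarded steps so the verifier can reorder them in
 * every way the declared dependencies allow.
 */
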
/* Body of PROCEDURE_READ_UNLOCK */
#define READ_PROC_READ_UNLOCK		(1 << 0)

#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken,					\
			  READ_PROC_READ_UNLOCK << base) ->		\
		ooo_mem(i);						\
		tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);	\
		PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);	\
	:: CONSUME_TOKENS(proc_urcu_reader,				\
			  consumetoken					\
			  | (READ_PROC_READ_UNLOCK << base),	/* WAR */	\
			  producetoken) ->				\
		ooo_mem(i);						\
		WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);	\
		PRODUCE_TOKENS(proc_urcu_reader, producetoken);		\
	skip
508
509
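/*
 * Again a sketch only: this models a read-unlock of the shape
 *
 *	tmp2 = urcu_active_readers[id];
 *	urcu_active_readers[id] = tmp2 - 1;	// decrement nesting count
 *
 * with the read and the write split into separate reorderable steps.
 */
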
#define READ_PROD_NONE			(1 << 0)

/* PROCEDURE_READ_LOCK base = 1 : bits 1 to 5 */
#define READ_LOCK_BASE			1
#define READ_LOCK_OUT			(1 << 5)

#define READ_PROC_FIRST_MB		(1 << 6)

/* PROCEDURE_READ_LOCK (NESTED) base = 7 : bits 7 to 11 */
#define READ_LOCK_NESTED_BASE		7
#define READ_LOCK_NESTED_OUT		(1 << 11)

#define READ_PROC_READ_GEN		(1 << 12)
#define READ_PROC_ACCESS_GEN		(1 << 13)

/* PROCEDURE_READ_UNLOCK (NESTED) base = 14 : bits 14 to 15 */
#define READ_UNLOCK_NESTED_BASE		14
#define READ_UNLOCK_NESTED_OUT		(1 << 15)

#define READ_PROC_SECOND_MB		(1 << 16)

/* PROCEDURE_READ_UNLOCK base = 17 : bits 17 to 18 */
#define READ_UNLOCK_BASE		17
#define READ_UNLOCK_OUT			(1 << 18)

/* PROCEDURE_READ_LOCK_UNROLL base = 19 : bits 19 to 23 */
#define READ_LOCK_UNROLL_BASE		19
#define READ_LOCK_OUT_UNROLL		(1 << 23)

#define READ_PROC_THIRD_MB		(1 << 24)

#define READ_PROC_READ_GEN_UNROLL	(1 << 25)
#define READ_PROC_ACCESS_GEN_UNROLL	(1 << 26)

#define READ_PROC_FOURTH_MB		(1 << 27)

/* PROCEDURE_READ_UNLOCK_UNROLL base = 28 : bits 28 to 29 */
#define READ_UNLOCK_UNROLL_BASE		28
#define READ_UNLOCK_OUT_UNROLL		(1 << 29)

550
/* Should not include branches */
#define READ_PROC_ALL_TOKENS		(READ_PROD_NONE			\
					| READ_LOCK_OUT			\
					| READ_PROC_FIRST_MB		\
					| READ_LOCK_NESTED_OUT		\
					| READ_PROC_READ_GEN		\
					| READ_PROC_ACCESS_GEN		\
					| READ_UNLOCK_NESTED_OUT	\
					| READ_PROC_SECOND_MB		\
					| READ_UNLOCK_OUT		\
					| READ_LOCK_OUT_UNROLL		\
					| READ_PROC_THIRD_MB		\
					| READ_PROC_READ_GEN_UNROLL	\
					| READ_PROC_ACCESS_GEN_UNROLL	\
					| READ_PROC_FOURTH_MB		\
					| READ_UNLOCK_OUT_UNROLL)

/* Must clear all tokens, including branches */
#define READ_PROC_ALL_TOKENS_CLEAR	((1 << 30) - 1)
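
/*
 * Arithmetic note: the highest reader token is READ_UNLOCK_OUT_UNROLL at
 * bit 29, so the mask (1 << 30) - 1 covers bits 0 through 29. Unlike
 * READ_PROC_ALL_TOKENS, it also includes the intermediate branch tokens
 * (READ_PROD_B_IF_TRUE and friends) produced inside the lock procedures.
 */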

inline urcu_one_read(i, j, nest_i, tmp, tmp2)
{
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);

#ifdef NO_MB
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

#ifdef REMOTE_BARRIERS
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
	PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
#endif

	do
	:: 1 ->

#ifdef REMOTE_BARRIERS
		/*
		 * The signal-based memory barrier is only allowed to execute
		 * at points where the tokens produced so far form a prefix of
		 * program order, i.e. between instructions as they appear in
		 * program order.
		 */
		if
		:: 1 ->
			atomic {
				if
				:: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
						READ_LOCK_OUT | READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
						READ_LOCK_NESTED_OUT
						| READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
						READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
						READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
						READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
						READ_UNLOCK_OUT
						| READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT,
						READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
						READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL,
						READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
						| READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
						READ_UNLOCK_OUT_UNROLL)
				|| CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
						| READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
						| READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
						| READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
						0) ->
					goto non_atomic3;
non_atomic3_end:
					skip;
				fi;
			}
		:: 1 -> skip;
		fi;

		goto non_atomic3_skip;
non_atomic3:
		smp_mb_recv(i, j);
		goto non_atomic3_end;
non_atomic3_skip:

#endif /* REMOTE_BARRIERS */
679
		atomic {
			if
			PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_LOCK_OUT,	/* post-dominant */
					  READ_PROC_FIRST_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);

			PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
					    READ_LOCK_NESTED_OUT);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB,	/* mb() orders reads */
					  READ_PROC_READ_GEN) ->
				ooo_mem(i);
				ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB	/* mb() orders reads */
					  | READ_PROC_READ_GEN,
					  READ_PROC_ACCESS_GEN) ->
				/* smp_read_barrier_depends */
				goto rmb1;
rmb1_end:
				data_read_first[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);


			/*
			 * Note: we remove the nested memory barrier from the
			 * read-unlock model, since it is not usually needed.
			 * The implementation keeps the barrier because
			 * removing it would require a branch, and the branch's
			 * performance impact in the common case does not
			 * justify removing the barrier.
			 */

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
					      READ_PROC_FIRST_MB
					      | READ_LOCK_OUT
					      | READ_LOCK_NESTED_OUT,
					      READ_UNLOCK_NESTED_OUT);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_ACCESS_GEN		/* mb() orders reads */
					  | READ_PROC_READ_GEN		/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_LOCK_OUT		/* post-dominant */
					  | READ_LOCK_NESTED_OUT	/* post-dominant */
					  | READ_UNLOCK_NESTED_OUT,
					  READ_PROC_SECOND_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
					      READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT);

			/* Unrolling loop : second consecutive lock */
			/*
			 * Reading urcu_active_readers, which has been written
			 * by READ_UNLOCK_OUT : RAW
			 */
			PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
					    READ_UNLOCK_OUT		/* RAW */
					    | READ_PROC_SECOND_MB	/* mb() orders reads */
					    | READ_PROC_FIRST_MB	/* mb() orders reads */
					    | READ_LOCK_NESTED_OUT	/* RAW */
					    | READ_LOCK_OUT		/* RAW */
					    | READ_UNLOCK_NESTED_OUT,	/* RAW */
					    READ_LOCK_OUT_UNROLL);


			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_THIRD_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_READ_GEN_UNROLL) ->
				ooo_mem(i);
				ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL
					  | READ_PROC_FIRST_MB		/* mb() orders reads */
					  | READ_PROC_SECOND_MB		/* mb() orders reads */
					  | READ_PROC_THIRD_MB,		/* mb() orders reads */
					  READ_PROC_ACCESS_GEN_UNROLL) ->
				/* smp_read_barrier_depends */
				goto rmb2;
rmb2_end:
				data_read_second[get_readerid()] =
					READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);

			:: CONSUME_TOKENS(proc_urcu_reader,
					  READ_PROC_READ_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_ACCESS_GEN_UNROLL	/* mb() orders reads */
					  | READ_PROC_FIRST_MB		/* mb() ordered */
					  | READ_PROC_SECOND_MB		/* mb() ordered */
					  | READ_PROC_THIRD_MB		/* mb() ordered */
					  | READ_LOCK_OUT_UNROLL	/* post-dominant */
					  | READ_LOCK_NESTED_OUT
					  | READ_LOCK_OUT
					  | READ_UNLOCK_NESTED_OUT
					  | READ_UNLOCK_OUT,
					  READ_PROC_FOURTH_MB) ->
				smp_mb_reader(i, j);
				PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);

			PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
					      READ_PROC_FOURTH_MB	/* mb() orders reads */
					      | READ_PROC_THIRD_MB	/* mb() orders reads */
					      | READ_LOCK_OUT_UNROLL	/* RAW */
					      | READ_PROC_SECOND_MB	/* mb() orders reads */
					      | READ_PROC_FIRST_MB	/* mb() orders reads */
					      | READ_LOCK_NESTED_OUT	/* RAW */
					      | READ_LOCK_OUT		/* RAW */
					      | READ_UNLOCK_NESTED_OUT,	/* RAW */
					      READ_UNLOCK_OUT_UNROLL);
			:: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
				CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
				break;
			fi;
		}
	od;
	/*
	 * Dependency between consecutive loops:
	 * RAW dependency on
	 * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
	 * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
	 * between loops.
	 * _WHEN THE MB()s are in place_, they add full ordering of the
	 * generation pointer read wrt the active reader count read, which
	 * ensures execution will not spill across loop execution.
	 * However, in the event the mb()s are removed (execution using a
	 * signal handler to promote barrier() -> smp_mb()), nothing prevents
	 * one loop from spilling its execution into the other loop's
	 * execution.
	 */
	goto end;
rmb1:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb1_end;
rmb2:
#ifndef NO_RMB
	smp_rmb(i, j);
#else
	ooo_mem(i);
#endif
	goto rmb2_end;
end:
	skip;
}



active proctype urcu_reader()
{
	byte i, j, nest_i;
	byte tmp, tmp2;

	wait_init_done();

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader never
		 * blocks anyway. We have to test for reader/writer progress
		 * separately; otherwise we could think the writer is making
		 * progress when it is in fact blocked by an always-progressing
		 * reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, j, nest_i, tmp, tmp2);
	od;
}

/* no name clash please */
#undef proc_urcu_reader


/* Model the RCU update process. */

/*
 * Bit encoding, urcu_writer:
 * Currently only supports one reader.
 */

int _proc_urcu_writer;
#define proc_urcu_writer	_proc_urcu_writer

#define WRITE_PROD_NONE			(1 << 0)

#define WRITE_PROC_FIRST_MB		(1 << 1)

/* first flip */
#define WRITE_PROC_FIRST_READ_GP	(1 << 2)
#define WRITE_PROC_FIRST_WRITE_GP	(1 << 3)
#define WRITE_PROC_FIRST_WAIT		(1 << 4)
#define WRITE_PROC_FIRST_WAIT_LOOP	(1 << 5)

/* second flip */
#define WRITE_PROC_SECOND_READ_GP	(1 << 6)
#define WRITE_PROC_SECOND_WRITE_GP	(1 << 7)
#define WRITE_PROC_SECOND_WAIT		(1 << 8)
#define WRITE_PROC_SECOND_WAIT_LOOP	(1 << 9)

#define WRITE_PROC_SECOND_MB		(1 << 10)

#define WRITE_PROC_ALL_TOKENS		(WRITE_PROD_NONE		\
					| WRITE_PROC_FIRST_MB		\
					| WRITE_PROC_FIRST_READ_GP	\
					| WRITE_PROC_FIRST_WRITE_GP	\
					| WRITE_PROC_FIRST_WAIT		\
					| WRITE_PROC_SECOND_READ_GP	\
					| WRITE_PROC_SECOND_WRITE_GP	\
					| WRITE_PROC_SECOND_WAIT	\
					| WRITE_PROC_SECOND_MB)

#define WRITE_PROC_ALL_TOKENS_CLEAR	((1 << 11) - 1)

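/*
 * Here the highest writer token is WRITE_PROC_SECOND_MB at bit 10, so
 * (1 << 11) - 1 clears bits 0 through 10, including the WAIT_LOOP branch
 * tokens that WRITE_PROC_ALL_TOKENS deliberately leaves out.
 */
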
/*
 * Mutexes are implied around writer execution. A single writer at a time.
 */
active proctype urcu_writer()
{
	byte i, j;
	byte tmp, tmp2, tmpa;
	byte cur_data = 0, old_data, loop_nr = 0;
	byte cur_gp_val = 0;	/*
				 * Keep a local trace of the current parity so
				 * we do not add nonexistent dependencies on
				 * the global GP update. Needed to test the
				 * single flip case.
				 */

	wait_init_done();

	assert(get_pid() < NR_PROCS);

	do
	:: (loop_nr < 4) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		loop_nr = loop_nr + 1;

		/* TODO : add instruction scheduling to this code path to test
		 * missing wmb effect. */
		/* smp_wmb() ensures order of the following instructions */
		/* malloc */
		cur_data = (cur_data + 1) % SLAB_SIZE;
		ooo_mem(i);
		WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
#ifndef NO_WMB
		smp_wmb(i, j);
#else
		ooo_mem(i);
#endif
		/* rcu_xchg_pointer() */
		atomic {
			old_data = READ_CACHED_VAR(rcu_ptr);
			WRITE_CACHED_VAR(rcu_ptr, cur_data);
		}
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);

#ifdef NO_MB
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
#endif

#ifdef SINGLE_FLIP
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
		PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
		/* For single flip, we need to know the current parity */
		cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
#endif

		do :: 1 ->
		atomic {
		if
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROD_NONE,
				  WRITE_PROC_FIRST_MB) ->
			goto smp_mb_send1;
smp_mb_send1_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);

		/* first flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB,
				  WRITE_PROC_FIRST_READ_GP) ->
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB | WRITE_PROC_FIRST_READ_GP,
				  WRITE_PROC_FIRST_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
#ifndef SINGLE_FLIP
			/*
			 * In normal execution, we always start by waiting for
			 * the even parity.
			 */
			cur_gp_val = RCU_GP_CTR_BIT;
#endif
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WAIT_LOOP
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send2;
smp_mb_send2_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_FIRST_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);

		/* second flip */
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT		/*
								 * Control dependency : need to branch out of
								 * the loop to execute the next flip (CHECK)
								 */
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_READ_GP) ->
			ooo_mem(i);
			tmpa = READ_CACHED_VAR(urcu_gp_ctr);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_MB
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP,
				  WRITE_PROC_SECOND_WRITE_GP) ->
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
			ooo_mem(i);
			/* ONLY WAITING FOR READER 0 */
			tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
			if
			:: (tmp2 & RCU_GP_CTR_NEST_MASK)
					&& ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
			:: else ->
				PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
			fi;

		:: CONSUME_TOKENS(proc_urcu_writer,
				  //WRITE_PROC_FIRST_WRITE_GP	/* TEST ADDING SYNC CORE */
				  WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_WAIT_LOOP
				  | WRITE_PROC_FIRST_MB,	/* can be reordered before/after flips */
				  0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			goto smp_mb_send3;
smp_mb_send3_end:
#else
			ooo_mem(i);
#endif
			/* This instruction loops to WRITE_PROC_SECOND_WAIT */
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);


		:: CONSUME_TOKENS(proc_urcu_writer,
				  WRITE_PROC_FIRST_WAIT
				  | WRITE_PROC_SECOND_WAIT
				  | WRITE_PROC_FIRST_READ_GP
				  | WRITE_PROC_SECOND_READ_GP
				  | WRITE_PROC_FIRST_WRITE_GP
				  | WRITE_PROC_SECOND_WRITE_GP
				  | WRITE_PROC_FIRST_MB,
				  WRITE_PROC_SECOND_MB) ->
			goto smp_mb_send4;
smp_mb_send4_end:
			PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);

		:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
			CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
			break;
		fi;
		}
		od;

		WRITE_CACHED_VAR(rcu_data[old_data], POISON);

	:: else -> break;
	od;
	/*
	 * Given that the reader loops infinitely, let the writer also
	 * busy-loop with a progress label here so that, under weak fairness,
	 * we can test the writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;

	/* Non-atomic parts of the loop */
	goto end;
smp_mb_send1:
	smp_mb_send(i, j, 1);
	goto smp_mb_send1_end;
#ifndef GEN_ERROR_WRITER_PROGRESS
smp_mb_send2:
	smp_mb_send(i, j, 2);
	goto smp_mb_send2_end;
smp_mb_send3:
	smp_mb_send(i, j, 3);
	goto smp_mb_send3_end;
#endif
smp_mb_send4:
	smp_mb_send(i, j, 4);
	goto smp_mb_send4_end;
end:
	skip;
}

/* no name clash please */
#undef proc_urcu_writer


/* Leave init after the readers and writers so the pid count is ok. */
init {
	byte i, j;

	atomic {
		INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
		INIT_CACHED_VAR(rcu_ptr, 0, j);

		i = 0;
		do
		:: i < NR_READERS ->
			INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
			ptr_read_first[i] = 1;
			ptr_read_second[i] = 1;
			data_read_first[i] = WINE;
			data_read_second[i] = WINE;
			i++;
		:: i >= NR_READERS -> break
		od;
		INIT_CACHED_VAR(rcu_data[0], WINE, j);
		i = 1;
		do
		:: i < SLAB_SIZE ->
			INIT_CACHED_VAR(rcu_data[i], POISON, j);
			i++
		:: i >= SLAB_SIZE -> break
		od;

		init_done = 1;
	}
}
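
/*
 * Typical verification workflow (a sketch; the exact flags used for each
 * variant come from the Makefiles accompanying this results directory):
 *
 *	spin -a asserts.spin.input	# generate the pan.c verifier
 *					# (add e.g. -DNO_MB here to select
 *					# a model variant)
 *	cc -DSAFETY -o pan pan.c	# compile the verifier
 *	./pan				# run the verification
 *
 * The read_poison predicate defined at the top is the property of interest:
 * it flags a reader observing freed (POISON) data under the modeled ordering.
 */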