* output exchanged. Therefore, i post-dominating j ensures that every path
* passing by j will pass by i before reaching the output.
*
+ * Prefetch and speculative execution
+ *
+ * If an instruction depends on the result of a previous branch, but it does not
+ * have side-effects, it can be executed before the branch result is known.
+ * However, it must be restarted if a core-synchronizing instruction is issued.
+ * Note that instructions which depend on the speculative instruction result
+ * but that have side-effects must depend on the branch completion in addition
+ * to the speculatively executed instruction.
+ *
* Other considerations
*
* Note about "volatile" keyword dependency : The compiler will order volatile
* Nested calls are not supported.
*/
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (Intel,
+ * PowerPC, ARM) ensure that dependent reads won't be reordered. cf.
+ * http://www.linuxjournal.com/article/8212
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
/*
* Each process have its own data in cache. Caches are randomly updated.
* smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
byte val[NR_PROCS];
};
-/* Bitfield has a maximum of 8 procs */
typedef per_proc_bit {
+ bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
byte bitfield;
};
#define DECLARE_CACHED_VAR(type, x) \
type mem_##x; \
per_proc_##type cached_##x; \
- per_proc_bit cache_dirty_##x;
+ per_proc_bitfield cache_dirty_##x;
#define INIT_CACHED_VAR(x, v, j) \
mem_##x = v; \
fi;
/* Must consume all prior read tokens. All subsequent reads depend on it. */
-inline smp_rmb(i, j)
+inline smp_rmb(i)
{
atomic {
CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
}
/* Must consume all prior write tokens. All subsequent writes depend on it. */
-inline smp_wmb(i, j)
+inline smp_wmb(i)
{
atomic {
CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
/* Synchronization point. Must consume all prior read and write tokens. All
* subsequent reads and writes depend on it. */
-inline smp_mb(i, j)
+inline smp_mb(i)
{
atomic {
- smp_wmb(i, j);
- smp_rmb(i, j);
+ smp_wmb(i);
+ smp_rmb(i);
}
}
{
do
:: (reader_barrier[get_readerid()] == 1) ->
- smp_mb(i, j);
+ /*
+ * We choose to ignore cycles caused by writer busy-looping,
+ * waiting for the reader, sending barrier requests, and the
+ * reader always services them without continuing execution.
+ */
+progress_ignoring_mb1:
+ smp_mb(i);
reader_barrier[get_readerid()] = 0;
:: 1 ->
- /*
- * Busy-looping waiting for other barrier requests is not considered as
- * non-progress.
- */
-#ifdef READER_PROGRESS
-progress_reader2:
-#endif
-#ifdef WRITER_PROGRESS
-//progress_writer_from_reader1:
-#endif
- skip;
- :: 1 ->
- /* We choose to ignore writer's non-progress caused from the
- * reader ignoring the writer's mb() requests */
-#ifdef WRITER_PROGRESS
-//progress_writer_from_reader2:
-#endif
+ /*
+ * We choose to ignore writer's non-progress caused by the
+ * reader ignoring the writer's mb() requests.
+ */
+progress_ignoring_mb2:
break;
od;
}
-#ifdef WRITER_PROGRESS
#define PROGRESS_LABEL(progressid) progress_writer_progid_##progressid:
-#else
-#define PROGRESS_LABEL(progressid)
-#endif
#define smp_mb_send(i, j, progressid) \
{ \
- smp_mb(i, j); \
+ smp_mb(i); \
i = 0; \
do \
:: i < NR_READERS -> \
* interest, given the reader has the ability to totally ignore \
* barrier requests. \
*/ \
-PROGRESS_LABEL(progressid) \
do \
- :: (reader_barrier[i] == 1) -> skip; \
+ :: (reader_barrier[i] == 1) -> \
+PROGRESS_LABEL(progressid) \
+ skip; \
:: (reader_barrier[i] == 0) -> break; \
od; \
i++; \
:: i >= NR_READERS -> \
break \
od; \
- smp_mb(i, j); \
+ smp_mb(i); \
}
#else
-#define smp_mb_send(i, j, progressid) smp_mb(i, j)
-#define smp_mb_reader smp_mb
+#define smp_mb_send(i, j, progressid) smp_mb(i)
+#define smp_mb_reader(i, j) smp_mb(i)
#define smp_mb_recv(i, j)
#endif
/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
-/* Note ! currently only two readers */
+/* Note! Currently only one reader. */
DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
DECLARE_CACHED_VAR(byte, rcu_ptr);
-/* RCU data */
-DECLARE_CACHED_VAR(byte, rcu_data[2]);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
-byte ptr_read[NR_READERS];
-byte data_read[NR_READERS];
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
bit init_done = 0;
i++
:: i >= SLAB_SIZE -> break
od;
+#ifdef HAVE_OOO_CACHE_READ
RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
i = 0;
do
i++
:: i >= SLAB_SIZE -> break
od;
+#else
+ smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
}
}
#define READ_PROD_B_IF_FALSE (1 << 2)
#define READ_PROD_C_IF_TRUE_READ (1 << 3)
-#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken) \
- :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) -> \
+#define PROCEDURE_READ_LOCK(base, consumetoken, consumetoken2, producetoken) \
+ :: CONSUME_TOKENS(proc_urcu_reader, (consumetoken | consumetoken2), READ_PROD_A_READ << base) -> \
ooo_mem(i); \
tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base); \
PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
fi; \
/* IF TRUE */ \
- :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base, \
+ :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, /* prefetch */ \
READ_PROD_C_IF_TRUE_READ << base) -> \
ooo_mem(i); \
tmp2 = READ_CACHED_VAR(urcu_gp_ctr); \
PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base); \
:: CONSUME_TOKENS(proc_urcu_reader, \
- (READ_PROD_C_IF_TRUE_READ /* pre-dominant */ \
+ (READ_PROD_B_IF_TRUE \
+ | READ_PROD_C_IF_TRUE_READ /* pre-dominant */ \
| READ_PROD_A_READ) << base, /* WAR */ \
producetoken) -> \
ooo_mem(i); \
consumetoken, \
READ_PROC_READ_UNLOCK << base) -> \
ooo_mem(i); \
- tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
+ tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]); \
PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base); \
:: CONSUME_TOKENS(proc_urcu_reader, \
consumetoken \
| (READ_PROC_READ_UNLOCK << base), /* WAR */ \
producetoken) -> \
ooo_mem(i); \
- WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1); \
+ WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp - 1); \
PRODUCE_TOKENS(proc_urcu_reader, producetoken); \
skip
skip;
fi;
}
- :: 1 -> skip;
fi;
goto non_atomic3_skip;
atomic {
if
- PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+ PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, 0, READ_LOCK_OUT);
:: CONSUME_TOKENS(proc_urcu_reader,
READ_LOCK_OUT, /* post-dominant */
smp_mb_reader(i, j);
PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
- PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+ PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB, READ_LOCK_OUT,
READ_LOCK_NESTED_OUT);
:: CONSUME_TOKENS(proc_urcu_reader,
READ_PROC_FIRST_MB, /* mb() orders reads */
READ_PROC_READ_GEN) ->
ooo_mem(i);
- ptr_read[get_readerid()] =
- READ_CACHED_VAR(rcu_ptr);
+ ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
:: CONSUME_TOKENS(proc_urcu_reader,
/* smp_read_barrier_depends */
goto rmb1;
rmb1_end:
- data_read[get_readerid()] =
- READ_CACHED_VAR(rcu_data[ptr_read[get_readerid()]]);
+ data_read_first[get_readerid()] =
+ READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
/* reading urcu_active_readers, which have been written by
* READ_UNLOCK_OUT : RAW */
PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
- READ_UNLOCK_OUT /* RAW */
- | READ_PROC_SECOND_MB /* mb() orders reads */
- | READ_PROC_FIRST_MB /* mb() orders reads */
- | READ_LOCK_NESTED_OUT /* RAW */
+ READ_PROC_SECOND_MB /* mb() orders reads */
+ | READ_PROC_FIRST_MB, /* mb() orders reads */
+ READ_LOCK_NESTED_OUT /* RAW */
| READ_LOCK_OUT /* RAW */
- | READ_UNLOCK_NESTED_OUT, /* RAW */
+ | READ_UNLOCK_NESTED_OUT /* RAW */
+ | READ_UNLOCK_OUT, /* RAW */
READ_LOCK_OUT_UNROLL);
| READ_PROC_THIRD_MB, /* mb() orders reads */
READ_PROC_READ_GEN_UNROLL) ->
ooo_mem(i);
- ptr_read[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+ ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
:: CONSUME_TOKENS(proc_urcu_reader,
/* smp_read_barrier_depends */
goto rmb2;
rmb2_end:
- data_read[get_readerid()] =
- READ_CACHED_VAR(rcu_data[ptr_read[get_readerid()]]);
+ data_read_second[get_readerid()] =
+ READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
:: CONSUME_TOKENS(proc_urcu_reader,
goto end;
rmb1:
#ifndef NO_RMB
- smp_rmb(i, j);
+ smp_rmb(i);
#else
- ooo_mem();
+ ooo_mem(i);
#endif
goto rmb1_end;
rmb2:
#ifndef NO_RMB
- smp_rmb(i, j);
+ smp_rmb(i);
#else
- ooo_mem();
+ ooo_mem(i);
#endif
goto rmb2_end;
end:
#define WRITE_PROD_NONE (1 << 0)
-#define WRITE_PROC_FIRST_MB (1 << 1)
+#define WRITE_DATA (1 << 1)
+#define WRITE_PROC_WMB (1 << 2)
+#define WRITE_XCHG_PTR (1 << 3)
+
+#define WRITE_PROC_FIRST_MB (1 << 4)
/* first flip */
-#define WRITE_PROC_FIRST_READ_GP (1 << 2)
-#define WRITE_PROC_FIRST_WRITE_GP (1 << 3)
-#define WRITE_PROC_FIRST_WAIT (1 << 4)
-#define WRITE_PROC_FIRST_WAIT_LOOP (1 << 5)
+#define WRITE_PROC_FIRST_READ_GP (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP (1 << 6)
+#define WRITE_PROC_FIRST_WAIT (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP (1 << 8)
/* second flip */
-#define WRITE_PROC_SECOND_READ_GP (1 << 6)
-#define WRITE_PROC_SECOND_WRITE_GP (1 << 7)
-#define WRITE_PROC_SECOND_WAIT (1 << 8)
-#define WRITE_PROC_SECOND_WAIT_LOOP (1 << 9)
+#define WRITE_PROC_SECOND_READ_GP (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP (1 << 10)
+#define WRITE_PROC_SECOND_WAIT (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP (1 << 12)
+
+#define WRITE_PROC_SECOND_MB (1 << 13)
-#define WRITE_PROC_SECOND_MB (1 << 10)
+#define WRITE_FREE (1 << 14)
#define WRITE_PROC_ALL_TOKENS (WRITE_PROD_NONE \
+ | WRITE_DATA \
+ | WRITE_PROC_WMB \
+ | WRITE_XCHG_PTR \
| WRITE_PROC_FIRST_MB \
| WRITE_PROC_FIRST_READ_GP \
| WRITE_PROC_FIRST_WRITE_GP \
| WRITE_PROC_SECOND_READ_GP \
| WRITE_PROC_SECOND_WRITE_GP \
| WRITE_PROC_SECOND_WAIT \
- | WRITE_PROC_SECOND_MB)
+ | WRITE_PROC_SECOND_MB \
+ | WRITE_FREE)
-#define WRITE_PROC_ALL_TOKENS_CLEAR ((1 << 11) - 1)
+#define WRITE_PROC_ALL_TOKENS_CLEAR ((1 << 15) - 1)
/*
* Mutexes are implied around writer execution. A single writer at a time.
#endif
loop_nr = loop_nr + 1;
- /* TODO : add instruction scheduling to this code path to test
- * missing wmb effect. */
- /* smp_wmb() ensures order of the following instructions */
- /* malloc */
- cur_data = (cur_data + 1) % SLAB_SIZE;
- ooo_mem(i);
- WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
-#ifndef NO_WMB
- smp_wmb(i, j);
-#else
- ooo_mem(i);
-#endif
- old_data = READ_CACHED_VAR(rcu_ptr);
- ooo_mem(i);
- WRITE_CACHED_VAR(rcu_ptr, cur_data); /* rcu_assign_pointer() */
- ooo_mem(i);
-
-
PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+#ifdef NO_WMB
+ PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
#ifdef NO_MB
PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
do :: 1 ->
atomic {
if
+
:: CONSUME_TOKENS(proc_urcu_writer,
WRITE_PROD_NONE,
+ WRITE_DATA) ->
+ ooo_mem(i);
+ cur_data = (cur_data + 1) % SLAB_SIZE;
+ WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+ PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+ :: CONSUME_TOKENS(proc_urcu_writer,
+ WRITE_DATA,
+ WRITE_PROC_WMB) ->
+ smp_wmb(i);
+ PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+ :: CONSUME_TOKENS(proc_urcu_writer,
+ WRITE_PROC_WMB,
+ WRITE_XCHG_PTR) ->
+ /* rcu_xchg_pointer() */
+ atomic {
+ old_data = READ_CACHED_VAR(rcu_ptr);
+ WRITE_CACHED_VAR(rcu_ptr, cur_data);
+ }
+ PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+ :: CONSUME_TOKENS(proc_urcu_writer,
+ WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
WRITE_PROC_FIRST_MB) ->
goto smp_mb_send1;
smp_mb_send1_end:
tmpa = READ_CACHED_VAR(urcu_gp_ctr);
PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
:: CONSUME_TOKENS(proc_urcu_writer,
- WRITE_PROC_FIRST_MB | WRITE_PROC_FIRST_READ_GP,
+ WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+ | WRITE_PROC_FIRST_READ_GP,
WRITE_PROC_FIRST_WRITE_GP) ->
ooo_mem(i);
WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
:: CONSUME_TOKENS(proc_urcu_writer,
- //WRITE_PROC_FIRST_WRITE_GP /* TEST ADDING SYNC CORE */
+ //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
WRITE_PROC_FIRST_MB, /* can be reordered before/after flips */
WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
ooo_mem(i);
+ //smp_mb(i); /* TEST */
/* ONLY WAITING FOR READER 0 */
tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
#ifndef SINGLE_FLIP
WRITE_PROC_FIRST_WRITE_GP
| WRITE_PROC_FIRST_READ_GP
| WRITE_PROC_FIRST_WAIT_LOOP
+ | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
| WRITE_PROC_FIRST_MB, /* can be reordered before/after flips */
0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
goto smp_mb_send2;
smp_mb_send2_end:
+ /* The memory barrier will invalidate the
+ * second read done as prefetching. Note that all
+ * instructions with side-effects depending on
+ * WRITE_PROC_SECOND_READ_GP should also depend on
+ * completion of this busy-waiting loop. */
+ CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
#else
ooo_mem(i);
#endif
/* second flip */
:: CONSUME_TOKENS(proc_urcu_writer,
- WRITE_PROC_FIRST_WAIT /* Control dependency : need to branch out of
- * the loop to execute the next flip (CHECK) */
- | WRITE_PROC_FIRST_WRITE_GP
+ //WRITE_PROC_FIRST_WAIT | //test /* no dependency. Could pre-fetch, no side-effect. */
+ WRITE_PROC_FIRST_WRITE_GP
| WRITE_PROC_FIRST_READ_GP
| WRITE_PROC_FIRST_MB,
WRITE_PROC_SECOND_READ_GP) ->
ooo_mem(i);
+ //smp_mb(i); /* TEST */
tmpa = READ_CACHED_VAR(urcu_gp_ctr);
PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
:: CONSUME_TOKENS(proc_urcu_writer,
- WRITE_PROC_FIRST_MB
+ WRITE_PROC_FIRST_WAIT /* dependency on first wait, because this
+ * instruction has globally observable
+ * side-effects.
+ */
+ | WRITE_PROC_FIRST_MB
+ | WRITE_PROC_WMB
| WRITE_PROC_FIRST_READ_GP
| WRITE_PROC_FIRST_WRITE_GP
| WRITE_PROC_SECOND_READ_GP,
PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
:: CONSUME_TOKENS(proc_urcu_writer,
- //WRITE_PROC_FIRST_WRITE_GP /* TEST ADDING SYNC CORE */
+ //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
WRITE_PROC_FIRST_WAIT
| WRITE_PROC_FIRST_MB, /* can be reordered before/after flips */
WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
ooo_mem(i);
+ //smp_mb(i); /* TEST */
/* ONLY WAITING FOR READER 0 */
tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
if
fi;
:: CONSUME_TOKENS(proc_urcu_writer,
- //WRITE_PROC_FIRST_WRITE_GP /* TEST ADDING SYNC CORE */
+ //WRITE_PROC_FIRST_WRITE_GP | /* TEST ADDING SYNC CORE */
WRITE_PROC_SECOND_WRITE_GP
| WRITE_PROC_FIRST_WRITE_GP
| WRITE_PROC_SECOND_READ_GP
| WRITE_PROC_FIRST_READ_GP
| WRITE_PROC_SECOND_WAIT_LOOP
+ | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
| WRITE_PROC_FIRST_MB, /* can be reordered before/after flips */
0) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
| WRITE_PROC_SECOND_READ_GP
| WRITE_PROC_FIRST_WRITE_GP
| WRITE_PROC_SECOND_WRITE_GP
+ | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
| WRITE_PROC_FIRST_MB,
WRITE_PROC_SECOND_MB) ->
goto smp_mb_send4;
smp_mb_send4_end:
PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+ :: CONSUME_TOKENS(proc_urcu_writer,
+ WRITE_XCHG_PTR
+ | WRITE_PROC_FIRST_WAIT
+ | WRITE_PROC_SECOND_WAIT
+ | WRITE_PROC_WMB /* No dependency on
+ * WRITE_DATA because we
+ * write to a
+ * different location. */
+ | WRITE_PROC_SECOND_MB
+ | WRITE_PROC_FIRST_MB,
+ WRITE_FREE) ->
+ WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+ PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
:: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
break;
fi;
}
od;
-
- WRITE_CACHED_VAR(rcu_data[old_data], POISON);
-
+ /*
+ * Note : Promela model adds implicit serialization of the
+ * WRITE_FREE instruction. Normally, it would be permitted to
+ * spill on the next loop execution. Given the validation we do
+ * checks for the data entry read to be poisoned, it's ok if
+ * we do not check "late arriving" memory poisoning.
+ */
:: else -> break;
od;
/*
:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+ /*
+ * Make sure we don't block the reader's progress.
+ */
+ smp_mb_send(i, j, 5);
#endif
skip;
od;
do
:: i < NR_READERS ->
INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
- data_read[i] = 0;
+ ptr_read_first[i] = 1;
+ ptr_read_second[i] = 1;
+ data_read_first[i] = WINE;
+ data_read_second[i] = WINE;
i++;
:: i >= NR_READERS -> break
od;