X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=formal-model%2Furcu-controldataflow%2Furcu.spin;h=3956d75ecd20567f2b8fe1064c0f44bf2070f0b3;hb=abbe7e27151039f5f56b7130f0043f703862e99c;hp=5c377f78879d87bb85505acc7bdb045571b4067a;hpb=a60dadc52fed020644e44b346c438a3ccd3a5026;p=urcu.git diff --git a/formal-model/urcu-controldataflow/urcu.spin b/formal-model/urcu-controldataflow/urcu.spin index 5c377f7..3956d75 100644 --- a/formal-model/urcu-controldataflow/urcu.spin +++ b/formal-model/urcu-controldataflow/urcu.spin @@ -213,7 +213,14 @@ inline smp_rmb(i, j) i++ :: i >= NR_READERS -> break od; - CACHE_READ_FROM_MEM(generation_ptr, get_pid()); + CACHE_READ_FROM_MEM(rcu_ptr, get_pid()); + i = 0; + do + :: i < SLAB_SIZE -> + CACHE_READ_FROM_MEM(rcu_data[i], get_pid()); + i++ + :: i >= SLAB_SIZE -> break + od; } } @@ -229,7 +236,14 @@ inline smp_wmb(i, j) i++ :: i >= NR_READERS -> break od; - CACHE_WRITE_TO_MEM(generation_ptr, get_pid()); + CACHE_WRITE_TO_MEM(rcu_ptr, get_pid()); + i = 0; + do + :: i < SLAB_SIZE -> + CACHE_WRITE_TO_MEM(rcu_data[i], get_pid()); + i++ + :: i >= SLAB_SIZE -> break + od; } } @@ -243,7 +257,6 @@ inline smp_mb(i, j) } } - #ifdef REMOTE_BARRIERS bit reader_barrier[NR_READERS]; @@ -269,55 +282,80 @@ inline smp_mb_recv(i, j) :: (reader_barrier[get_readerid()] == 1) -> smp_mb(i, j); reader_barrier[get_readerid()] = 0; - :: 1 -> skip; - :: 1 -> break; + :: 1 -> + /* + * Busy-looping waiting for other barrier requests is not considered as + * non-progress. + */ +#ifdef READER_PROGRESS +progress_reader2: +#endif +#ifdef WRITER_PROGRESS +//progress_writer_from_reader1: +#endif + skip; + :: 1 -> + /* We choose to ignore writer's non-progress caused from the + * reader ignoring the writer's mb() requests */ +#ifdef WRITER_PROGRESS +//progress_writer_from_reader2: +#endif + break; od; } -inline smp_mb_send(i, j) -{ - smp_mb(i, j); - i = 0; - do - :: i < NR_READERS -> - reader_barrier[i] = 1; - do - :: (reader_barrier[i] == 1) -> skip; - :: (reader_barrier[i] == 0) -> break; - od; - i++; - :: i >= NR_READERS -> - break - od; - smp_mb(i, j); +#ifdef WRITER_PROGRESS +#define PROGRESS_LABEL(progressid) progress_writer_progid_##progressid: +#else +#define PROGRESS_LABEL(progressid) +#endif + +#define smp_mb_send(i, j, progressid) \ +{ \ + smp_mb(i, j); \ + i = 0; \ + do \ + :: i < NR_READERS -> \ + reader_barrier[i] = 1; \ + /* \ + * Busy-looping waiting for reader barrier handling is of little\ + * interest, given the reader has the ability to totally ignore \ + * barrier requests. \ + */ \ +PROGRESS_LABEL(progressid) \ + do \ + :: (reader_barrier[i] == 1) -> skip; \ + :: (reader_barrier[i] == 0) -> break; \ + od; \ + i++; \ + :: i >= NR_READERS -> \ + break \ + od; \ + smp_mb(i, j); \ } #else -#define smp_mb_send smp_mb +#define smp_mb_send(i, j, progressid) smp_mb(i, j) #define smp_mb_reader smp_mb #define smp_mb_recv(i, j) #endif -/* Keep in sync manually with smp_rmb, wmp_wmb, ooo_mem and init() */ +/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */ DECLARE_CACHED_VAR(byte, urcu_gp_ctr); /* Note ! 
currently only two readers */ DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]); -/* pointer generation */ -DECLARE_CACHED_VAR(byte, generation_ptr); - -byte last_free_gen = 0; -bit free_done = 0; -byte read_generation[NR_READERS]; -bit data_access[NR_READERS]; +/* RCU pointer */ +DECLARE_CACHED_VAR(byte, rcu_ptr); +/* RCU data */ +DECLARE_CACHED_VAR(byte, rcu_data[SLAB_SIZE]); -bit write_lock = 0; +byte ptr_read[NR_READERS]; +byte data_read[NR_READERS]; bit init_done = 0; -bit sighand_exec = 0; - inline wait_init_done() { do @@ -338,7 +376,14 @@ inline ooo_mem(i) i++ :: i >= NR_READERS -> break od; - RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid()); + RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid()); + i = 0; + do + :: i < SLAB_SIZE -> + RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid()); + i++ + :: i >= SLAB_SIZE -> break + od; RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid()); i = 0; do @@ -348,7 +393,14 @@ inline ooo_mem(i) i++ :: i >= NR_READERS -> break od; - RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid()); + RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid()); + i = 0; + do + :: i < SLAB_SIZE -> + RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid()); + i++ + :: i >= SLAB_SIZE -> break + od; } } @@ -441,30 +493,32 @@ int _proc_urcu_reader; #define READ_LOCK_NESTED_OUT (1 << 11) #define READ_PROC_READ_GEN (1 << 12) +#define READ_PROC_ACCESS_GEN (1 << 13) -/* PROCEDURE_READ_UNLOCK (NESTED) base = << 13 : 13 to 14 */ -#define READ_UNLOCK_NESTED_BASE 13 -#define READ_UNLOCK_NESTED_OUT (1 << 14) +/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */ +#define READ_UNLOCK_NESTED_BASE 14 +#define READ_UNLOCK_NESTED_OUT (1 << 15) -#define READ_PROC_SECOND_MB (1 << 15) +#define READ_PROC_SECOND_MB (1 << 16) -/* PROCEDURE_READ_UNLOCK base = << 16 : 16 to 17 */ -#define READ_UNLOCK_BASE 16 -#define READ_UNLOCK_OUT (1 << 17) +/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */ +#define READ_UNLOCK_BASE 17 +#define READ_UNLOCK_OUT (1 << 18) -/* PROCEDURE_READ_LOCK_UNROLL base = << 18 : 18 to 22 */ -#define READ_LOCK_UNROLL_BASE 18 -#define READ_LOCK_OUT_UNROLL (1 << 22) +/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */ +#define READ_LOCK_UNROLL_BASE 19 +#define READ_LOCK_OUT_UNROLL (1 << 23) -#define READ_PROC_THIRD_MB (1 << 23) +#define READ_PROC_THIRD_MB (1 << 24) -#define READ_PROC_READ_GEN_UNROLL (1 << 24) +#define READ_PROC_READ_GEN_UNROLL (1 << 25) +#define READ_PROC_ACCESS_GEN_UNROLL (1 << 26) -#define READ_PROC_FOURTH_MB (1 << 25) +#define READ_PROC_FOURTH_MB (1 << 27) -/* PROCEDURE_READ_UNLOCK_UNROLL base = << 26 : 26 to 27 */ -#define READ_UNLOCK_UNROLL_BASE 26 -#define READ_UNLOCK_OUT_UNROLL (1 << 27) +/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */ +#define READ_UNLOCK_UNROLL_BASE 28 +#define READ_UNLOCK_OUT_UNROLL (1 << 29) /* Should not include branches */ @@ -473,17 +527,19 @@ int _proc_urcu_reader; | READ_PROC_FIRST_MB \ | READ_LOCK_NESTED_OUT \ | READ_PROC_READ_GEN \ + | READ_PROC_ACCESS_GEN \ | READ_UNLOCK_NESTED_OUT \ | READ_PROC_SECOND_MB \ | READ_UNLOCK_OUT \ | READ_LOCK_OUT_UNROLL \ | READ_PROC_THIRD_MB \ | READ_PROC_READ_GEN_UNROLL \ + | READ_PROC_ACCESS_GEN_UNROLL \ | READ_PROC_FOURTH_MB \ | READ_UNLOCK_OUT_UNROLL) /* Must clear all tokens, including branches */ -#define READ_PROC_ALL_TOKENS_CLEAR ((1 << 28) - 1) +#define READ_PROC_ALL_TOKENS_CLEAR ((1 << 30) - 1) inline urcu_one_read(i, j, nest_i, tmp, tmp2) { @@ -517,50 +573,66 @@ inline urcu_one_read(i, j, nest_i, tmp, tmp2) if :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE, 
READ_LOCK_OUT | READ_LOCK_NESTED_OUT - | READ_PROC_READ_GEN | READ_UNLOCK_NESTED_OUT + | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL - | READ_PROC_READ_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) + | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT, READ_LOCK_NESTED_OUT - | READ_PROC_READ_GEN | READ_UNLOCK_NESTED_OUT + | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL - | READ_PROC_READ_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) + | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT, - READ_PROC_READ_GEN | READ_UNLOCK_NESTED_OUT + READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL - | READ_PROC_READ_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) + | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN, + READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT + | READ_UNLOCK_OUT + | READ_LOCK_OUT_UNROLL + | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) + || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT + | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN, READ_UNLOCK_NESTED_OUT | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL - | READ_PROC_READ_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) + | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT - | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_UNLOCK_NESTED_OUT, + | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN + | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT, READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL - | READ_PROC_READ_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) + | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT - | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_UNLOCK_NESTED_OUT + | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN + | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT | READ_UNLOCK_OUT, READ_LOCK_OUT_UNROLL - | READ_PROC_READ_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) + | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT - | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_UNLOCK_NESTED_OUT + | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN + | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL, - READ_PROC_READ_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) + READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT - | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_UNLOCK_NESTED_OUT + | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN + | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL | READ_PROC_READ_GEN_UNROLL, + READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL) + || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT + | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN + | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT + | READ_UNLOCK_OUT | 
READ_LOCK_OUT_UNROLL + | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL, READ_UNLOCK_OUT_UNROLL) || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT - | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_UNLOCK_NESTED_OUT + | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL - | READ_PROC_READ_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL, + | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL, 0) -> goto non_atomic3; non_atomic3_end: @@ -572,7 +644,7 @@ non_atomic3_end: goto non_atomic3_skip; non_atomic3: - smp_mb_recv(i, j); + smp_mb_recv(i, j); goto non_atomic3_end; non_atomic3_skip: @@ -595,12 +667,22 @@ non_atomic3_skip: READ_PROC_FIRST_MB, /* mb() orders reads */ READ_PROC_READ_GEN) -> ooo_mem(i); - read_generation[get_readerid()] = - READ_CACHED_VAR(generation_ptr); - goto non_atomic; -non_atomic_end: + ptr_read[get_readerid()] = + READ_CACHED_VAR(rcu_ptr); PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN); + :: CONSUME_TOKENS(proc_urcu_reader, + READ_PROC_FIRST_MB /* mb() orders reads */ + | READ_PROC_READ_GEN, + READ_PROC_ACCESS_GEN) -> + /* smp_read_barrier_depends */ + goto rmb1; +rmb1_end: + data_read[get_readerid()] = + READ_CACHED_VAR(rcu_data[ptr_read[get_readerid()]]); + PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN); + + /* Note : we remove the nested memory barrier from the read unlock * model, given it is not usually needed. The implementation has the barrier * because the performance impact added by a branch in the common case does not @@ -615,7 +697,8 @@ non_atomic_end: :: CONSUME_TOKENS(proc_urcu_reader, - READ_PROC_READ_GEN /* mb() orders reads */ + READ_PROC_ACCESS_GEN /* mb() orders reads */ + | READ_PROC_READ_GEN /* mb() orders reads */ | READ_PROC_FIRST_MB /* mb() ordered */ | READ_LOCK_OUT /* post-dominant */ | READ_LOCK_NESTED_OUT /* post-dominant */ @@ -663,14 +746,25 @@ non_atomic_end: | READ_PROC_THIRD_MB, /* mb() orders reads */ READ_PROC_READ_GEN_UNROLL) -> ooo_mem(i); - read_generation[get_readerid()] = - READ_CACHED_VAR(generation_ptr); - goto non_atomic2; -non_atomic2_end: + ptr_read[get_readerid()] = READ_CACHED_VAR(rcu_ptr); PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL); + :: CONSUME_TOKENS(proc_urcu_reader, + READ_PROC_READ_GEN_UNROLL + | READ_PROC_FIRST_MB /* mb() orders reads */ + | READ_PROC_SECOND_MB /* mb() orders reads */ + | READ_PROC_THIRD_MB, /* mb() orders reads */ + READ_PROC_ACCESS_GEN_UNROLL) -> + /* smp_read_barrier_depends */ + goto rmb2; +rmb2_end: + data_read[get_readerid()] = + READ_CACHED_VAR(rcu_data[ptr_read[get_readerid()]]); + PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL); + :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL /* mb() orders reads */ + | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */ | READ_PROC_FIRST_MB /* mb() ordered */ | READ_PROC_SECOND_MB /* mb() ordered */ | READ_PROC_THIRD_MB /* mb() ordered */ @@ -713,14 +807,20 @@ non_atomic2_end: * to spill its execution on other loop's execution. 
*/ goto end; -non_atomic: - data_access[get_readerid()] = 1; - data_access[get_readerid()] = 0; - goto non_atomic_end; -non_atomic2: - data_access[get_readerid()] = 1; - data_access[get_readerid()] = 0; - goto non_atomic2_end; +rmb1: +#ifndef NO_RMB + smp_rmb(i, j); +#else + ooo_mem(); +#endif + goto rmb1_end; +rmb2: +#ifndef NO_RMB + smp_rmb(i, j); +#else + ooo_mem(); +#endif + goto rmb2_end; end: skip; } @@ -797,40 +897,48 @@ int _proc_urcu_writer; #define WRITE_PROC_ALL_TOKENS_CLEAR ((1 << 11) - 1) +/* + * Mutexes are implied around writer execution. A single writer at a time. + */ active proctype urcu_writer() { byte i, j; byte tmp, tmp2, tmpa; - byte old_gen; + byte cur_data = 0, old_data, loop_nr = 0; + byte cur_gp_val = 0; /* + * Keep a local trace of the current parity so + * we don't add non-existing dependencies on the global + * GP update. Needed to test single flip case. + */ wait_init_done(); assert(get_pid() < NR_PROCS); do - :: (READ_CACHED_VAR(generation_ptr) < 5) -> + :: (loop_nr < 3) -> #ifdef WRITER_PROGRESS progress_writer1: #endif + loop_nr = loop_nr + 1; + + /* TODO : add instruction scheduling to this code path to test + * missing wmb effect. */ + /* smp_wmb() ensures order of the following instructions */ + /* malloc */ + cur_data = (cur_data + 1) % SLAB_SIZE; + ooo_mem(i); + WRITE_CACHED_VAR(rcu_data[cur_data], WINE); +#ifndef NO_WMB + smp_wmb(i, j); +#else + ooo_mem(i); +#endif + old_data = READ_CACHED_VAR(rcu_ptr); ooo_mem(i); - atomic { - old_gen = READ_CACHED_VAR(generation_ptr); - WRITE_CACHED_VAR(generation_ptr, old_gen + 1); - } + WRITE_CACHED_VAR(rcu_ptr, cur_data); /* rcu_assign_pointer() */ ooo_mem(i); - do - :: 1 -> - atomic { - if - :: write_lock == 0 -> - write_lock = 1; - break; - :: else -> - skip; - fi; - } - od; PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE); @@ -843,13 +951,18 @@ progress_writer1: PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP); PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP); PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT); + /* For single flip, we need to know the current parity */ + cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT; #endif - do + do :: 1 -> + atomic { + if :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROD_NONE, WRITE_PROC_FIRST_MB) -> - smp_mb_send(i, j); + goto smp_mb_send1; +smp_mb_send1_end: PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB); /* first flip */ @@ -872,9 +985,15 @@ progress_writer1: ooo_mem(i); /* ONLY WAITING FOR READER 0 */ tmp2 = READ_CACHED_VAR(urcu_active_readers[0]); +#ifndef SINGLE_FLIP + /* In normal execution, we are always starting by + * waiting for the even parity. 
+ */ + cur_gp_val = RCU_GP_CTR_BIT; +#endif if :: (tmp2 & RCU_GP_CTR_NEST_MASK) - && ((tmp2 ^ RCU_GP_CTR_BIT) & RCU_GP_CTR_BIT) -> + && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) -> PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP); :: else -> PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT); @@ -888,7 +1007,8 @@ progress_writer1: | WRITE_PROC_FIRST_MB, /* can be reordered before/after flips */ 0) -> #ifndef GEN_ERROR_WRITER_PROGRESS - smp_mb_send(i, j); + goto smp_mb_send2; +smp_mb_send2_end: #else ooo_mem(i); #endif @@ -903,7 +1023,6 @@ progress_writer1: | WRITE_PROC_FIRST_READ_GP | WRITE_PROC_FIRST_MB, WRITE_PROC_SECOND_READ_GP) -> - //smp_mb_send(i, j); //TEST ooo_mem(i); tmpa = READ_CACHED_VAR(urcu_gp_ctr); PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP); @@ -943,7 +1062,8 @@ progress_writer1: | WRITE_PROC_FIRST_MB, /* can be reordered before/after flips */ 0) -> #ifndef GEN_ERROR_WRITER_PROGRESS - smp_mb_send(i, j); + goto smp_mb_send3; +smp_mb_send3_end: #else ooo_mem(i); #endif @@ -960,20 +1080,19 @@ progress_writer1: | WRITE_PROC_SECOND_WRITE_GP | WRITE_PROC_FIRST_MB, WRITE_PROC_SECOND_MB) -> - smp_mb_send(i, j); + goto smp_mb_send4; +smp_mb_send4_end: PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB); :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) -> CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR); break; + fi; + } od; - write_lock = 0; - /* free-up step, e.g., kfree(). */ - atomic { - last_free_gen = old_gen; - free_done = 1; - } + WRITE_CACHED_VAR(rcu_data[old_data], POISON); + :: else -> break; od; /* @@ -989,6 +1108,25 @@ progress_writer2: #endif skip; od; + + /* Non-atomic parts of the loop */ + goto end; +smp_mb_send1: + smp_mb_send(i, j, 1); + goto smp_mb_send1_end; +#ifndef GEN_ERROR_WRITER_PROGRESS +smp_mb_send2: + smp_mb_send(i, j, 2); + goto smp_mb_send2_end; +smp_mb_send3: + smp_mb_send(i, j, 3); + goto smp_mb_send3_end; +#endif +smp_mb_send4: + smp_mb_send(i, j, 4); + goto smp_mb_send4_end; +end: + skip; } /* no name clash please */ @@ -1001,17 +1139,25 @@ init { atomic { INIT_CACHED_VAR(urcu_gp_ctr, 1, j); - INIT_CACHED_VAR(generation_ptr, 0, j); + INIT_CACHED_VAR(rcu_ptr, 0, j); i = 0; do :: i < NR_READERS -> INIT_CACHED_VAR(urcu_active_readers[i], 0, j); - read_generation[i] = 1; - data_access[i] = 0; + data_read[i] = 0; i++; :: i >= NR_READERS -> break od; + INIT_CACHED_VAR(rcu_data[0], WINE, j); + i = 1; + do + :: i < SLAB_SIZE -> + INIT_CACHED_VAR(rcu_data[i], POISON, j); + i++ + :: i >= SLAB_SIZE -> break + od; + init_done = 1; } }
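
Note on context not shown in the hunks above: the new slab-based data path relies on identifiers (SLAB_SIZE, WINE, POISON, RCU_GP_CTR_BIT, RCU_GP_CTR_NEST_MASK) and on the DECLARE_CACHED_VAR()/READ_CACHED_VAR()/WRITE_CACHED_VAR() cache-modelling primitives whose definitions live in parts of urcu.spin and its companion headers that this diff does not touch. The Promela sketch below is only an orientation aid with hypothetical values, not the repository's actual definitions; the authoritative ones are in the full file at the hb commit referenced in the X-Git-Url header.

/*
 * Hypothetical stand-ins for identifiers the hunks above depend on but
 * whose definitions are outside the shown context.  Values are chosen
 * for illustration only; see the full urcu.spin for the real ones.
 */
#define NR_READERS 2		/* the model notes it currently handles two readers */
#define SLAB_SIZE 2		/* one live rcu_data[] slot plus one being reclaimed */
#define WINE 1			/* content of a valid rcu_data[] slot */
#define POISON 0		/* content written by the writer's free-up step */

#define RCU_GP_CTR_BIT (1 << 7)				/* grace-period parity bit */
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)	/* nesting-count bits */

/*
 * A typical way to exercise the model (standard Spin usage, not taken
 * from this repository's Makefile):
 *
 *   spin -a urcu.spin && cc -O2 -DSAFETY -o pan pan.c && ./pan -v -c1 -w19
 *
 * For the READER_PROGRESS / WRITER_PROGRESS liveness variants, pass the
 * define to spin (e.g. spin -a -DWRITER_PROGRESS urcu.spin), compile
 * pan.c with -DNP, and run ./pan -l to search for non-progress cycles.
 */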