X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=formal-model%2Furcu%2Furcu.spin;h=f4d3ea78ade593788bf9d97710e7c8bda5ace2ff;hb=a570e118a8f9ed38d5eaebf8d21b65d2b6f5cf40;hp=cc84e51d09d623b330664a0e8a7ca2fbb8ff97ef;hpb=60a1db9d10aaca98e79a5126f168a37d00151845;p=urcu.git

diff --git a/formal-model/urcu/urcu.spin b/formal-model/urcu/urcu.spin
index cc84e51..f4d3ea7 100644
--- a/formal-model/urcu/urcu.spin
+++ b/formal-model/urcu/urcu.spin
@@ -80,6 +80,68 @@
 	:: 1 -> skip \
 	fi;
 
+/*
+ * Remote barriers tests the scheme where a signal (or IPI) is sent to all
+ * reader threads to promote their compiler barrier to a smp_mb().
+ */
+#ifdef REMOTE_BARRIERS
+
+inline smp_rmb_pid(i)
+{
+	atomic {
+		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
+		CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
+		CACHE_READ_FROM_MEM(generation_ptr, i);
+	}
+}
+
+inline smp_wmb_pid(i)
+{
+	atomic {
+		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
+		CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
+		CACHE_WRITE_TO_MEM(generation_ptr, i);
+	}
+}
+
+inline smp_mb_pid(i)
+{
+	atomic {
+#ifndef NO_WMB
+		smp_wmb_pid(i);
+#endif
+#ifndef NO_RMB
+		smp_rmb_pid(i);
+#endif
+		skip;
+	}
+}
+
+/*
+ * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
+ * signal or IPI to have all readers execute a smp_mb.
+ * We are not modeling the whole rendez-vous between readers and writers here,
+ * we just let the writer update each reader's caches remotely.
+ */
+inline smp_mb(i)
+{
+	if
+	:: get_pid() >= NR_READERS ->
+		smp_mb_pid(get_pid());
+		i = 0;
+		do
+		:: i < NR_READERS ->
+			smp_mb_pid(i);
+			i++;
+		:: i >= NR_READERS -> break
+		od;
+		smp_mb_pid(get_pid());
+	:: else -> skip;
+	fi;
+}
+
+#else
+
 inline smp_rmb(i)
 {
 	atomic {
@@ -111,6 +173,8 @@ inline smp_mb(i)
 	}
 }
 
+#endif
+
 /* Keep in sync manually with smp_rmb, wmp_wmb and ooo_mem */
 DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
 /* Note ! currently only one reader */
@@ -123,6 +187,8 @@ bit free_done = 0;
 byte read_generation = 1;
 bit data_access = 0;
 
+bit write_lock = 0;
+
 inline ooo_mem(i)
 {
 	atomic {
@@ -142,16 +208,23 @@ inline ooo_mem(i)
 
 inline wait_for_reader(tmp, id, i)
 {
-	tmp = READ_CACHED_VAR(urcu_active_readers_one);
-	ooo_mem(i);
 	do
-	:: (tmp & RCU_GP_CTR_NEST_MASK) && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
-		& RCU_GP_CTR_BIT)
-		->
-		ooo_mem(i);
+	:: 1 ->
+		ooo_mem(i);
+		tmp = READ_CACHED_VAR(urcu_active_readers_one);
+		ooo_mem(i);
+		if
+		:: (tmp & RCU_GP_CTR_NEST_MASK)
+			&& ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
+			& RCU_GP_CTR_BIT) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+			smp_mb(i);
+#else
 			skip;
-	:: else	->
+#endif
+		:: else -> break;
+		fi;
 	od;
 }
 
@@ -168,42 +241,76 @@ inline wait_for_quiescent_state(tmp, i, j)
 
 /* Model the RCU read-side critical section.
  */
-active [NR_READERS] proctype urcu_reader()
+inline urcu_one_read(i, nest_i, tmp, tmp2)
 {
-	byte i;
-	byte tmp, tmp2;
-
-	assert(get_pid() < NR_PROCS);
-
-	ooo_mem(i);
-	tmp = READ_CACHED_VAR(urcu_active_readers_one);
-	ooo_mem(i);
-	if
-	:: (!(tmp & RCU_GP_CTR_NEST_MASK))
-		->
-		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
+	nest_i = 0;
+	do
+	:: nest_i < READER_NEST_LEVEL ->
 		ooo_mem(i);
-		WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
-	:: else	->
-		WRITE_CACHED_VAR(urcu_active_readers_one, tmp + 1);
-	fi;
+		tmp = READ_CACHED_VAR(urcu_active_readers_one);
+		ooo_mem(i);
+		if
+		:: (!(tmp & RCU_GP_CTR_NEST_MASK))
+			->
+			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
+			ooo_mem(i);
+			WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
+		:: else	->
+			WRITE_CACHED_VAR(urcu_active_readers_one,
+					 tmp + 1);
+		fi;
+		ooo_mem(i);
+		smp_mb(i);
+		nest_i++;
+	:: nest_i >= READER_NEST_LEVEL -> break;
+	od;
+
 	ooo_mem(i);
-	smp_mb(i);
 	read_generation = READ_CACHED_VAR(generation_ptr);
 	ooo_mem(i);
 	data_access = 1;
 	ooo_mem(i);
 	data_access = 0;
-	ooo_mem(i);
-	smp_mb(i);
-	ooo_mem(i);
-	tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
-	ooo_mem(i);
-	WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
+
+	nest_i = 0;
+	do
+	:: nest_i < READER_NEST_LEVEL ->
+		ooo_mem(i);
+		smp_mb(i);
+		ooo_mem(i);
+		tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
+		ooo_mem(i);
+		WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
+		nest_i++;
+	:: nest_i >= READER_NEST_LEVEL -> break;
+	od;
 	ooo_mem(i);
 	//smp_mc(i);	/* added */
 }
 
+active [NR_READERS] proctype urcu_reader()
+{
+	byte i, nest_i;
+	byte tmp, tmp2;
+
+	assert(get_pid() < NR_PROCS);
+
+end_reader:
+	do
+	:: 1 ->
+		/*
+		 * We do not test reader's progress here, because we are mainly
+		 * interested in writer's progress. The reader never blocks
+		 * anyway. We have to test for reader/writer's progress
+		 * separately, otherwise we could think the writer is doing
+		 * progress when it's blocked by an always progressing reader.
+		 */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+		urcu_one_read(i, nest_i, tmp, tmp2);
+	od;
+}
 
 /* Model the RCU update process.
  */
@@ -215,35 +322,70 @@ active [NR_WRITERS] proctype urcu_writer()
 
 	assert(get_pid() < NR_PROCS);
 
-	ooo_mem(i);
-	atomic {
-		old_gen = READ_CACHED_VAR(generation_ptr);
-		WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
-	}
-	ooo_mem(i);
+	do
+	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+		ooo_mem(i);
+		atomic {
+			old_gen = READ_CACHED_VAR(generation_ptr);
+			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
+		}
+		ooo_mem(i);
 
-	smp_mb(i);
-	ooo_mem(i);
-	tmp = READ_CACHED_VAR(urcu_gp_ctr);
-	ooo_mem(i);
-	WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
-	ooo_mem(i);
-	//smp_mc(i);
-	wait_for_quiescent_state(tmp, i, j);
-	//smp_mc(i);
-	ooo_mem(i);
-	tmp = READ_CACHED_VAR(urcu_gp_ctr);
-	ooo_mem(i);
-	WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
-	//smp_mc(i);
-	ooo_mem(i);
-	wait_for_quiescent_state(tmp, i, j);
-	ooo_mem(i);
-	smp_mb(i);
-	/* free-up step, e.g., kfree().
-	 */
-	ooo_mem(i);
-	atomic {
-		last_free_gen = old_gen;
-		free_done = 1;
-	}
+		do
+		:: 1 ->
+			atomic {
+				if
+				:: write_lock == 0 ->
+					write_lock = 1;
+					break;
+				:: else ->
+					skip;
+				fi;
+			}
+		od;
+		smp_mb(i);
+		ooo_mem(i);
+		tmp = READ_CACHED_VAR(urcu_gp_ctr);
+		ooo_mem(i);
+		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
+		ooo_mem(i);
+		//smp_mc(i);
+		wait_for_quiescent_state(tmp, i, j);
+		//smp_mc(i);
+#ifndef SINGLE_FLIP
+		ooo_mem(i);
+		tmp = READ_CACHED_VAR(urcu_gp_ctr);
+		ooo_mem(i);
+		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
+		//smp_mc(i);
+		ooo_mem(i);
+		wait_for_quiescent_state(tmp, i, j);
+#endif
+		ooo_mem(i);
+		smp_mb(i);
+		ooo_mem(i);
+		write_lock = 0;
+		/* free-up step, e.g., kfree(). */
+		atomic {
+			last_free_gen = old_gen;
+			free_done = 1;
+		}
+	:: else -> break;
+	od;
+	/*
+	 * Given the reader loops infinitely, let the writer also busy-loop
+	 * with progress here so, with weak fairness, we can test the
+	 * writer's progress.
+	 */
+end_writer:
+	do
+	:: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+		skip;
+	od;
 }
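
The REMOTE_BARRIERS block added above replaces a real signal/IPI rendez-vous with something much cheaper to verify: the writer simply runs the memory barrier on behalf of every reader PID, bracketed by its own barriers. The following standalone Promela sketch of that idea is editorial and not part of urcu.spin; mem_x, cached_x, cache_dirty_x, reader and writer are illustrative stand-ins for the per-process cache model the real file builds with DECLARE_CACHED_VAR, CACHE_READ_FROM_MEM and CACHE_WRITE_TO_MEM.

/*
 * Editorial sketch -- NOT part of urcu.spin.  Illustrates the
 * REMOTE_BARRIERS idea: the writer runs the barrier for each reader PID
 * instead of modeling a real signal/IPI rendez-vous.
 */
#define NR_READERS	2
#define NR_PROCS	(NR_READERS + 1)
#define get_pid()	(_pid)

byte mem_x = 0;			/* "memory" copy of x */
byte cached_x[NR_PROCS];	/* per-process "cache" copies */
bit cache_dirty_x[NR_PROCS];	/* caches holding a newer value */

inline cache_write_to_mem(p)	/* write-back half of the barrier */
{
	if
	:: cache_dirty_x[p] ->
		mem_x = cached_x[p];
		cache_dirty_x[p] = 0;
	:: else -> skip;
	fi;
}

inline cache_read_from_mem(p)	/* refresh half of the barrier */
{
	if
	:: !cache_dirty_x[p] ->
		cached_x[p] = mem_x;
	:: else -> skip;
	fi;
}

inline smp_mb_pid(p)		/* full barrier applied to process p */
{
	atomic {
		cache_write_to_mem(p);
		cache_read_from_mem(p);
	}
}

active [NR_READERS] proctype reader()
{
	skip;	/* readers only issue a local compiler barrier: no-op here */
}

active proctype writer()
{
	byte p;

	atomic {
		cached_x[get_pid()] = 1;	/* store sits in the cache */
		cache_dirty_x[get_pid()] = 1;
	}
	smp_mb_pid(get_pid());		/* writer's own barrier */
	p = 0;
	do				/* "send the IPI": run the barrier */
	:: p < NR_READERS ->		/* on behalf of each reader PID */
		smp_mb_pid(p);
		p++;
	:: p >= NR_READERS -> break
	od;
	smp_mb_pid(get_pid());		/* writer's own barrier, again */
	p = 0;
	do
	:: p < NR_READERS ->
		assert(cached_x[p] == 1);	/* every reader sees the store */
		p++;
	:: p >= NR_READERS -> break
	od;
}

As in the patch's smp_mb(), the sweep over reader PIDs is bracketed by the writer's own barrier so the writer's store is globally visible before and after the remote flushes.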
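
The new urcu_one_read() above enters and leaves the read-side critical section READER_NEST_LEVEL times: only the outermost entry copies urcu_gp_ctr into the per-reader word, nested entries just add one to the nest count kept in the low-order bits, and each exit subtracts one. The sketch below isolates that arithmetic; it is editorial, the RCU_GP_CTR_BIT value is assumed, and plain globals stand in for the cached variables of the real model.

/*
 * Editorial sketch -- NOT part of urcu.spin.  The nesting idiom used by
 * urcu_one_read(): only the outermost entry snapshots the global counter,
 * nested entries bump the nest count in the low-order bits, and each exit
 * decrements it.
 */
#define RCU_GP_CTR_BIT		(1 << 7)	/* assumed value */
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
#define READER_NEST_LEVEL	2

byte urcu_gp_ctr = 1;		/* low bits already carry one count unit */
byte urcu_active_readers_one = 0;

active proctype nesting_reader()
{
	byte nest_i = 0;
	byte tmp;

	do	/* enter the critical section READER_NEST_LEVEL times */
	:: nest_i < READER_NEST_LEVEL ->
		tmp = urcu_active_readers_one;
		if
		:: !(tmp & RCU_GP_CTR_NEST_MASK) ->
			/* outermost: snapshot the global counter */
			urcu_active_readers_one = urcu_gp_ctr;
		:: else ->
			/* nested: bump the nest count only */
			urcu_active_readers_one = tmp + 1;
		fi;
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	/* the nest field now equals the nesting depth */
	assert((urcu_active_readers_one & RCU_GP_CTR_NEST_MASK)
		== READER_NEST_LEVEL);

	nest_i = 0;
	do	/* leave the critical section the same number of times */
	:: nest_i < READER_NEST_LEVEL ->
		urcu_active_readers_one = urcu_active_readers_one - 1;
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	/* fully nested out again */
	assert(!(urcu_active_readers_one & RCU_GP_CTR_NEST_MASK));
}

The writer's wait_for_reader() above only treats a reader as delaying the grace period while this nest field is non-zero and the phase bit disagrees with the global counter, which is what its (tmp & RCU_GP_CTR_NEST_MASK) && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr)) & RCU_GP_CTR_BIT) test expresses.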
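
The writer above now serializes grace periods with a write_lock flag, acquired by a busy-wait test-and-set inside an atomic block and released after the free-up step. Below is a minimal standalone sketch of that locking idiom with an assertion that mutual exclusion holds; lock_user and owners are illustrative names, not taken from the model.

/*
 * Editorial sketch -- NOT part of urcu.spin.  The busy-wait lock the patch
 * wraps around the writer's grace-period machinery: the atomic block makes
 * the test-and-set indivisible, and the enclosing do loop retries until
 * the lock is observed free.
 */
bit write_lock = 0;
byte owners = 0;		/* how many processes are inside the lock */

active [2] proctype lock_user()
{
	/* spin until the test-and-set succeeds, as urcu_writer() does */
	do
	:: 1 ->
		atomic {
			if
			:: write_lock == 0 ->
				write_lock = 1;
				break;
			:: else ->
				skip;
			fi;
		}
	od;
	owners++;
	assert(owners == 1);	/* mutual exclusion holds */
	owners--;
	write_lock = 0;
}

A more compact Promela idiom would block rather than spin, e.g. atomic { write_lock == 0 -> write_lock = 1 }; the patch models the busy-wait explicitly. With the new progress_writer labels, writer liveness is typically checked with Spin's non-progress-cycle search (pan compiled with -DNP and run with -l), adding -f for the weak fairness that the end_writer comment relies on.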