X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=formal-model%2Furcu%2Furcu.spin;h=324ee3945bd21507b11b49d9ab151230dcf0f0ed;hb=a5b558b0c4655e98f7d8f43b900b6e3350a74f86;hp=903782b352ee6a5f8a1d58af29b78fbfea46f355;hpb=d4e437ba8e99a9cd38c4ccb1c243427935c8f293;p=urcu.git

diff --git a/formal-model/urcu/urcu.spin b/formal-model/urcu/urcu.spin
index 903782b..324ee39 100644
--- a/formal-model/urcu/urcu.spin
+++ b/formal-model/urcu/urcu.spin
@@ -29,7 +29,7 @@
 
 /*
  * Each process have its own data in cache. Caches are randomly updated.
- * smp_wmb and smp_rmb forces cache updates (write and read), wmb_mb forces
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
  * both.
  */
 
@@ -80,6 +80,72 @@
 	:: 1 -> skip \
 	fi;
 
+/*
+ * Remote barriers tests the scheme where a signal (or IPI) is sent to all
+ * reader threads to promote their compiler barrier to a smp_mb().
+ */
+#ifdef REMOTE_BARRIERS
+
+inline smp_rmb_pid(i)
+{
+	atomic {
+		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
+		CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
+		CACHE_READ_FROM_MEM(generation_ptr, i);
+	}
+}
+
+inline smp_wmb_pid(i)
+{
+	atomic {
+		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
+		CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
+		CACHE_WRITE_TO_MEM(generation_ptr, i);
+	}
+}
+
+inline smp_mb_pid(i)
+{
+	atomic {
+#ifndef NO_WMB
+		smp_wmb_pid(i);
+#endif
+#ifndef NO_RMB
+		smp_rmb_pid(i);
+#endif
+#ifdef NO_WMB
+#ifdef NO_RMB
+		ooo_mem(i);
+#endif
+#endif
+	}
+}
+
+/*
+ * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
+ * signal or IPI to have all readers execute a smp_mb.
+ * We are not modeling the whole rendez-vous between readers and writers here,
+ * we just let the writer update each reader's caches remotely.
+ */
+inline smp_mb(i)
+{
+	if
+	:: get_pid() >= NR_READERS ->
+		smp_mb_pid(get_pid());
+		i = 0;
+		do
+		:: i < NR_READERS ->
+			smp_mb_pid(i);
+			i++;
+		:: i >= NR_READERS -> break
+		od;
+		smp_mb_pid(get_pid());
+	:: else -> skip;
+	fi;
+}
+
+#else
+
 inline smp_rmb(i)
 {
 	atomic {
@@ -107,10 +173,16 @@ inline smp_mb(i)
 #ifndef NO_RMB
 		smp_rmb(i);
 #endif
-		skip;
+#ifdef NO_WMB
+#ifdef NO_RMB
+		ooo_mem(i);
+#endif
+#endif
 	}
 }
+#endif
+
 /* Keep in sync manually with smp_rmb, wmp_wmb and ooo_mem */
 DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
 /* Note ! currently only one reader */
 DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
@@ -146,8 +218,8 @@ inline wait_for_reader(tmp, id, i)
 {
 	do
 	:: 1 ->
-		ooo_mem(i);
 		tmp = READ_CACHED_VAR(urcu_active_readers_one);
+		ooo_mem(i);
 		if
 		:: (tmp & RCU_GP_CTR_NEST_MASK) &&
 			((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
@@ -155,7 +227,7 @@ inline wait_for_reader(tmp, id, i)
 #ifndef GEN_ERROR_WRITER_PROGRESS
 			smp_mb(i);
 #else
-			skip;
+			ooo_mem(i);
 #endif
 		:: else ->
 			break;
@@ -169,6 +241,12 @@ inline wait_for_quiescent_state(tmp, i, j)
 	do
 	:: i < NR_READERS ->
 		wait_for_reader(tmp, i, j);
+		if
+		:: (NR_READERS > 1) && (i < NR_READERS - 1)
+			-> ooo_mem(j);
+		:: else
+			-> skip;
+		fi;
 		i++
 	:: i >= NR_READERS -> break
 	od;
@@ -176,6 +254,50 @@ inline wait_for_quiescent_state(tmp, i, j)
 
 /* Model the RCU read-side critical section.
  */
+inline urcu_one_read(i, nest_i, tmp, tmp2)
+{
+	nest_i = 0;
+	do
+	:: nest_i < READER_NEST_LEVEL ->
+		ooo_mem(i);
+		tmp = READ_CACHED_VAR(urcu_active_readers_one);
+		ooo_mem(i);
+		if
+		:: (!(tmp & RCU_GP_CTR_NEST_MASK))
+			->
+			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
+			ooo_mem(i);
+			WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
+		:: else ->
+			WRITE_CACHED_VAR(urcu_active_readers_one,
+				tmp + 1);
+		fi;
+		smp_mb(i);
+		nest_i++;
+	:: nest_i >= READER_NEST_LEVEL -> break;
+	od;
+
+	ooo_mem(i);
+	read_generation = READ_CACHED_VAR(generation_ptr);
+	ooo_mem(i);
+	data_access = 1;
+	ooo_mem(i);
+	data_access = 0;
+
+	nest_i = 0;
+	do
+	:: nest_i < READER_NEST_LEVEL ->
+		smp_mb(i);
+		tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
+		ooo_mem(i);
+		WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
+		nest_i++;
+	:: nest_i >= READER_NEST_LEVEL -> break;
+	od;
+	ooo_mem(i);
+	//smp_mc(i); /* added */
+}
+
 active [NR_READERS] proctype urcu_reader()
 {
 	byte i, nest_i;
@@ -196,53 +318,10 @@ end_reader:
 #ifdef READER_PROGRESS
 progress_reader:
 #endif
-		nest_i = 0;
-		do
-		:: nest_i < READER_NEST_LEVEL ->
-			ooo_mem(i);
-			tmp = READ_CACHED_VAR(urcu_active_readers_one);
-			ooo_mem(i);
-			if
-			:: (!(tmp & RCU_GP_CTR_NEST_MASK))
-				->
-				tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
-				ooo_mem(i);
-				WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
-			:: else ->
-				WRITE_CACHED_VAR(urcu_active_readers_one,
-					tmp + 1);
-			fi;
-			ooo_mem(i);
-			smp_mb(i);
-			nest_i++;
-		:: nest_i >= READER_NEST_LEVEL -> break;
-		od;
-
-		ooo_mem(i);
-		read_generation = READ_CACHED_VAR(generation_ptr);
-		ooo_mem(i);
-		data_access = 1;
-		ooo_mem(i);
-		data_access = 0;
-
-		nest_i = 0;
-		do
-		:: nest_i < READER_NEST_LEVEL ->
-			ooo_mem(i);
-			smp_mb(i);
-			ooo_mem(i);
-			tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
-			ooo_mem(i);
-			WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
-			nest_i++;
-		:: nest_i >= READER_NEST_LEVEL -> break;
-		od;
-		ooo_mem(i);
-		//smp_mc(i); /* added */
+		urcu_one_read(i, nest_i, tmp, tmp2);
 	od;
 }
 
-
 /* Model the RCU update process. */
 
 active [NR_WRITERS] proctype urcu_writer()
@@ -278,7 +357,6 @@ progress_writer1:
 		}
 	od;
 	smp_mb(i);
-	ooo_mem(i);
 	tmp = READ_CACHED_VAR(urcu_gp_ctr);
 	ooo_mem(i);
 	WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
@@ -295,9 +373,7 @@ progress_writer1:
 	ooo_mem(i);
 	wait_for_quiescent_state(tmp, i, j);
 #endif
-	ooo_mem(i);
 	smp_mb(i);
-	ooo_mem(i);
 	write_lock = 0;
 	/* free-up step, e.g., kfree(). */
 	atomic {
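
The urcu_one_read() inline added by this patch models the library's nestable read-side critical section: on the outermost lock the reader snapshots the global grace-period counter into its per-reader word, nested calls only increment the low-order nest count, and unlock decrements it again. The C fragment below is a minimal sketch of that logic, not the actual liburcu code: the constants, the __thread variable and the *_sketch function names are invented for illustration, and barrier() stands for the compiler barrier that the REMOTE_BARRIERS scheme lets the writer promote to a full smp_mb().

/*
 * Minimal C sketch of the read-side path that urcu_one_read() models.
 * Names and constant values are placeholders for this example only.
 */
#define RCU_GP_COUNT		(1UL << 0)
#define RCU_GP_CTR_BIT		(1UL << 8)	/* parity bit; position arbitrary here */
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

#define barrier()	__asm__ __volatile__("" : : : "memory")

static unsigned long rcu_gp_ctr = RCU_GP_COUNT;		/* flipped by the writer */
static __thread unsigned long rcu_active_readers;	/* one word per reader */

static inline void rcu_read_lock_sketch(void)
{
	unsigned long tmp = rcu_active_readers;

	if (!(tmp & RCU_GP_CTR_NEST_MASK))
		/* Outermost nesting: snapshot the global counter (parity + count of 1). */
		rcu_active_readers = rcu_gp_ctr;
	else
		/* Nested call: only bump the low-order nesting count. */
		rcu_active_readers = tmp + 1;
	barrier();	/* promoted to smp_mb() by the writer's signal/IPI */
}

static inline void rcu_read_unlock_sketch(void)
{
	barrier();	/* promoted to smp_mb() by the writer's signal/IPI */
	rcu_active_readers--;
}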
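The REMOTE_BARRIERS comments in the patch describe a scheme in which readers only pay a compiler barrier while the writer sends a signal (or IPI) so that every reader executes a real smp_mb(); the model shortcuts the rendez-vous by letting the writer flush each reader's cache directly through smp_mb_pid(). As a rough user-space illustration of what such a rendez-vous could look like, the sketch below uses a signal handler that issues a fence and acknowledges the writer. All names (force_mb_all_readers, mb_ack, reader_tids) are invented for the example; the real liburcu implementation differs.

/*
 * Rough user-space illustration of the signal-based barrier promotion
 * abstracted by REMOTE_BARRIERS.  Invented names, not liburcu code.
 */
#include <pthread.h>
#include <signal.h>
#include <stdatomic.h>

#define NR_READERS	2

static pthread_t reader_tids[NR_READERS];	/* filled in at thread creation */
static atomic_int mb_ack;

static void mb_signal_handler(int sig)
{
	(void)sig;
	atomic_thread_fence(memory_order_seq_cst);	/* the promoted smp_mb() */
	atomic_fetch_add(&mb_ack, 1);			/* acknowledge to the writer */
}

static void setup_mb_signal(void)
{
	struct sigaction sa = { .sa_handler = mb_signal_handler };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);
}

/* Writer side: make every reader (and the writer itself) issue a full barrier. */
static void force_mb_all_readers(void)
{
	int i;

	atomic_thread_fence(memory_order_seq_cst);	/* writer's own smp_mb() */
	atomic_store(&mb_ack, 0);
	for (i = 0; i < NR_READERS; i++)
		pthread_kill(reader_tids[i], SIGUSR1);
	while (atomic_load(&mb_ack) < NR_READERS)
		;					/* wait for every reader's fence */
	atomic_thread_fence(memory_order_seq_cst);	/* writer's own smp_mb() */
}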