/*
 * urcu.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

#define NR_READERS 1
#define NR_WRITERS 2

#define NR_PROCS 3

#define get_pid()	(_pid)
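
/*
 * Grace-period counter layout. Assumed values, mirroring the counter layout
 * of urcu.c: the low-order bits hold the read-side nesting count and a
 * single high-order bit flips on each grace-period phase.
 */
#define RCU_GP_CTR_BIT		(1 << 7)		/* assumed phase bit */
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)	/* nesting-count mask */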

/*
 * Each process has its own copy of the data in its cache. Caches are
 * randomly flushed to and refreshed from memory. smp_wmb and smp_rmb force
 * cache write-back and refresh, respectively; smp_mb forces both.
 */

#define DECLARE_CACHED_VAR(type, x, v)		\
	type mem_##x = v;			\
	type cached_##x[NR_PROCS] = v;		\
	bit cache_dirty_##x[NR_PROCS] = 0

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x[id])

#define READ_CACHED_VAR(x)	(cached_##x[get_pid()])

#define WRITE_CACHED_VAR(x, v)			\
	atomic {				\
		cached_##x[get_pid()] = v;	\
		cache_dirty_##x[get_pid()] = 1;	\
	}
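
/*
 * Example: after WRITE_CACHED_VAR(generation_ptr, 1) by process 0,
 * cached_generation_ptr[0] == 1 and its dirty bit is set, but
 * mem_generation_ptr and the other processes' caches keep the old value
 * until process 0's line is flushed by CACHE_WRITE_TO_MEM(generation_ptr, 0)
 * and the other processes refresh via CACHE_READ_FROM_MEM.
 */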

#define CACHE_WRITE_TO_MEM(x, id)		\
	if					\
	:: IS_CACHE_DIRTY(x, id) ->		\
		mem_##x = cached_##x[id];	\
		cache_dirty_##x[id] = 0;	\
	:: else ->				\
		skip				\
	fi;

#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x[id] = mem_##x;\
	:: else ->			\
		skip			\
	fi;

/*
 * Nondeterministically flush the cache line to memory (if dirty), or do
 * nothing; this models random write-back timing.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

/* Likewise, nondeterministically refresh a clean cache line from memory. */
#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

inline smp_mb(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb(i);
#endif
#ifndef NO_RMB
		smp_rmb(i);
#endif
		skip;
	}
}
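
/*
 * The NO_WMB and NO_RMB compile-time switches weaken smp_mb to a single
 * barrier flavor, so the model can be verified with deliberately broken
 * barriers to check which ones the algorithm actually requires.
 */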

/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
/* Note: currently only one reader is modeled */
DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr, 0);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation = 1;
bit data_access = 0;

bit write_lock = 0;

inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one,
					  get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one,
					   get_pid());
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}
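
/*
 * ooo_mem is invoked between every pair of memory accesses below: by letting
 * each variable be flushed or refreshed (or not) at every step, it models the
 * reordering an out-of-order memory subsystem may perform between barriers.
 */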

#define get_readerid()	(get_pid())
#define get_writerid()	(get_readerid() + NR_READERS)

inline wait_for_reader(tmp, id, i)
{
	tmp = READ_CACHED_VAR(urcu_active_readers_one);
	ooo_mem(i);
	do
	:: (tmp & RCU_GP_CTR_NEST_MASK) && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
			& RCU_GP_CTR_BIT) ->
		ooo_mem(i);
		skip;
	:: else ->
		break;
	od;
}
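
/*
 * A reader must be waited for when its counter snapshot (tmp) shows a
 * non-zero nesting count and a phase bit that differs from the current
 * urcu_gp_ctr: it entered its read-side critical section before the last
 * phase flip and may still hold references to the old data.
 */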

inline wait_for_quiescent_state(tmp, i, j)
{
	i = 0;
	do
	:: i < NR_READERS ->
		wait_for_reader(tmp, i, j);
		i++
	:: i >= NR_READERS -> break
	od;
}

/* Model the RCU read-side critical section. */

active [NR_READERS] proctype urcu_reader()
{
	byte i;
	byte tmp, tmp2;

	assert(get_pid() < NR_PROCS);

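	/*
	 * rcu_read_lock(): if this is the outermost nesting level, load the
	 * current global counter (phase bit included); otherwise just
	 * increment the nesting count.
	 */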
	ooo_mem(i);
	tmp = READ_CACHED_VAR(urcu_active_readers_one);
	ooo_mem(i);
	if
	:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
		tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
	:: else ->
		WRITE_CACHED_VAR(urcu_active_readers_one, tmp + 1);
	fi;
	ooo_mem(i);
	smp_mb(i);
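	/*
	 * Critical section body: record which pointer generation this reader
	 * observes and bracket the data access with the data_access flag, so
	 * correctness properties can relate reads to the freed generation.
	 */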
	read_generation = READ_CACHED_VAR(generation_ptr);
	ooo_mem(i);
	data_access = 1;
	ooo_mem(i);
	data_access = 0;
	ooo_mem(i);
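	/* rcu_read_unlock(): decrement the nesting count. */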
	smp_mb(i);
	ooo_mem(i);
	tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
	ooo_mem(i);
	WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
	ooo_mem(i);
	//smp_mc(i);	/* added */
}


/* Model the RCU update process. */

active [NR_WRITERS] proctype urcu_writer()
{
	byte i, j;
	byte tmp;
	byte old_gen;

	assert(get_pid() < NR_PROCS);

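	/*
	 * Publish a new pointer generation: atomically bump generation_ptr
	 * and remember the generation being replaced, which is the one this
	 * writer will free once a grace period has elapsed.
	 */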
	ooo_mem(i);
	atomic {
		old_gen = READ_CACHED_VAR(generation_ptr);
		WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
	}
	ooo_mem(i);

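	/* Acquire the writer mutex with an atomic busy-wait test-and-set. */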
	do
	:: 1 ->
		atomic {
			if
			:: write_lock == 0 ->
				write_lock = 1;
				break;
			:: else ->
				skip;
			fi;
		}
	od;
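
	/*
	 * synchronize_rcu(): flip the grace-period phase bit and wait for all
	 * ongoing readers; the flip-and-wait is performed twice so that
	 * readers which began just before the first flip are also waited for.
	 */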
	smp_mb(i);
	ooo_mem(i);
	tmp = READ_CACHED_VAR(urcu_gp_ctr);
	ooo_mem(i);
	WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
	ooo_mem(i);
	//smp_mc(i);
	wait_for_quiescent_state(tmp, i, j);
	//smp_mc(i);
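	/*
	 * Compiling with -DSINGLE_FLIP drops the second flip-and-wait, which
	 * lets the verifier demonstrate that a single flip is not sufficient.
	 */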
#ifndef SINGLE_FLIP
	ooo_mem(i);
	tmp = READ_CACHED_VAR(urcu_gp_ctr);
	ooo_mem(i);
	WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
	//smp_mc(i);
	ooo_mem(i);
	wait_for_quiescent_state(tmp, i, j);
#endif
	ooo_mem(i);
	smp_mb(i);
	ooo_mem(i);
	write_lock = 0;
	/* free-up step, e.g., kfree(). */
	atomic {
		last_free_gen = old_gen;
		free_done = 1;
	}
}