#define WRITER_PROGRESS
#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
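/*
 * Layout of the global counter: the low-order bits (RCU_GP_CTR_NEST_MASK)
 * hold the read-side nesting count, while RCU_GP_CTR_BIT is the
 * grace-period phase bit that the writer flips.
 */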

#define read_free_race (read_generation == last_free_gen)
#define read_free (free_done && data_access)
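/*
 * Error propositions, presumably referenced by the LTL claims supplied to
 * Spin together with this model: a race exists when the reader accesses
 * data from the generation the writer last freed.
 */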

#ifndef READER_NEST_LEVEL
#define READER_NEST_LEVEL 2
#endif
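/*
 * Number of nested rcu_read_lock()/rcu_read_unlock() pairs taken by each
 * reader per iteration (see urcu_one_read() below).
 */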

#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid() (_pid)

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read respectively);
 * smp_mb forces both.
 */

#define DECLARE_CACHED_VAR(type, x, v)	\
	type mem_##x = v;		\
	type cached_##x[NR_PROCS] = v;	\
	bit cache_dirty_##x[NR_PROCS] = 0

#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x[id])

#define READ_CACHED_VAR(x)	(cached_##x[get_pid()])

#define WRITE_CACHED_VAR(x, v)			\
	atomic {				\
		cached_##x[get_pid()] = v;	\
		cache_dirty_##x[get_pid()] = 1;	\
	}

#define CACHE_WRITE_TO_MEM(x, id)		\
	if					\
	:: IS_CACHE_DIRTY(x, id) ->		\
		mem_##x = cached_##x[id];	\
		cache_dirty_##x[id] = 0;	\
	:: else ->				\
		skip				\
	fi;

#define CACHE_READ_FROM_MEM(x, id)		\
	if					\
	:: !IS_CACHE_DIRTY(x, id) ->		\
		cached_##x[id] = mem_##x;	\
	:: else ->				\
		skip				\
	fi;

/*
 * Nondeterministically flush the cache to memory when it is dirty, or
 * leave it as-is.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip				\
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id)	\
	if					\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip				\
	fi;
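
/*
 * Both alternatives above are always executable, so the verifier explores
 * each choice, modeling a cache that may or may not be synchronized with
 * memory at any given point.
 */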

/*
 * Remote barriers test the scheme where a signal (or IPI) is sent to all
 * reader threads to promote their compiler barrier to a smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}

inline smp_wmb_pid(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}

inline smp_mb_pid(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb_pid(i);
#endif
#ifndef NO_RMB
		smp_rmb_pid(i);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(i);
#endif
#endif
	}
}

/*
 * Readers do a simple barrier(); writers do a smp_mb() _and_ send a signal
 * or IPI to make all readers execute a smp_mb().
 * We do not model the whole rendezvous between readers and writers here;
 * we just let the writer update each reader's caches remotely.
 */
inline smp_mb(i)
{
	if
	:: get_pid() >= NR_READERS ->
		smp_mb_pid(get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			smp_mb_pid(i);
			i++;
		:: i >= NR_READERS -> break
		od;
		smp_mb_pid(get_pid());
	:: else -> skip;
	fi;
}

#else

inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

inline smp_mb(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb(i);
#endif
#ifndef NO_RMB
		smp_rmb(i);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(i);
#endif
#endif
	}
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
/* Note: currently only one reader */
DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr, 0);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation = 1;
bit data_access = 0;

bit write_lock = 0;

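/*
 * ooo_mem() models out-of-order (weakly-ordered) memory: at each call,
 * every shared variable may or may not be flushed to memory and/or
 * refetched from it, in any combination.
 */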
inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one,
					  get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one,
					   get_pid());
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

#define get_readerid() (get_pid())
#define get_writerid() (get_readerid() + NR_READERS)

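/*
 * The writer busy-waits as long as the reader is within a read-side
 * critical section (non-zero nesting count) whose snapshot of the phase
 * bit differs from the current urcu_gp_ctr, i.e. a critical section that
 * may predate the grace period.
 */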
inline wait_for_reader(tmp, id, i)
{
	do
	:: 1 ->
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		if
		:: (tmp & RCU_GP_CTR_NEST_MASK)
			&& ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
				& RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb(i);
#else
			ooo_mem(i);
#endif
		:: else ->
			break;
		fi;
	od;
}

inline wait_for_quiescent_state(tmp, i, j)
{
	i = 0;
	do
	:: i < NR_READERS ->
		wait_for_reader(tmp, i, j);
		if
		:: (NR_READERS > 1) && (i < NR_READERS - 1)
			-> ooo_mem(j);
		:: else
			-> skip;
		fi;
		i++
	:: i >= NR_READERS -> break
	od;
}

/* Model the RCU read-side critical section. */

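/*
 * urcu_one_read() models READER_NEST_LEVEL nested rcu_read_lock() calls,
 * a single read-side data access, then the matching rcu_read_unlock()
 * calls. Only the outermost lock snapshots the global counter; nested
 * locks merely increment the nesting count.
 */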
inline urcu_one_read(i, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK))
			->
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
		:: else ->
			WRITE_CACHED_VAR(urcu_active_readers_one,
					 tmp + 1);
		fi;
		smp_mb(i);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	ooo_mem(i);
	read_generation = READ_CACHED_VAR(generation_ptr);
	ooo_mem(i);
	data_access = 1;
	ooo_mem(i);
	data_access = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb(i);
		tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	ooo_mem(i);
	//smp_mc(i); /* added */
}

active [NR_READERS] proctype urcu_reader()
{
	byte i, nest_i;
	byte tmp, tmp2;

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader
		 * never blocks anyway. Reader and writer progress must be
		 * tested separately, otherwise the writer could appear to
		 * make progress while it is in fact blocked by an
		 * always-progressing reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, nest_i, tmp, tmp2);
	od;
}

/* Model the RCU update process. */

active [NR_WRITERS] proctype urcu_writer()
{
	byte i, j;
	byte tmp;
	byte old_gen;

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
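		/*
		 * Atomically publish a new generation of the pointer. This
		 * models the updater installing a new version of the data
		 * (the rcu_assign_pointer() step in userspace RCU).
		 */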
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);

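		/*
		 * Busy-wait to acquire the writer lock, which models the
		 * mutex serializing updaters in userspace RCU.
		 */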
		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;
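		/*
		 * Start a grace period: flip the phase bit in urcu_gp_ctr,
		 * then wait until no reader remains in a pre-existing
		 * read-side critical section.
		 */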
		smp_mb(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		//smp_mc(i);
		wait_for_quiescent_state(tmp, i, j);
		//smp_mc(i);
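		/*
		 * Second phase: unless SINGLE_FLIP is defined, flip the phase
		 * bit again and wait once more, so that readers which began
		 * in either phase are waited for (two-phase grace period).
		 */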
#ifndef SINGLE_FLIP
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		//smp_mc(i);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, i, j);
#endif
		smp_mb(i);
		write_lock = 0;
		/* free-up step, e.g., kfree(). */
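		/*
		 * Record the freed generation so that the read_free_race
		 * proposition can flag a reader still accessing it.
		 */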
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break;
	od;
	/*
	 * Since the reader loops forever, let the writer also busy-loop
	 * with a progress label here so that, under weak fairness, the
	 * writer's progress can be tested.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;
}