urcu.git: formal-model/ooomem/mem.spin

/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid() (_pid)

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read, respectively);
 * smp_mb forces both.
 */

#define DECLARE_CACHED_VAR(type, x, v) \
        type mem_##x = v; \
        type cached_##x[NR_PROCS] = v; \
        bit cache_dirty_##x[NR_PROCS] = 0

#define IS_CACHE_DIRTY(x, id) (cache_dirty_##x[id])

#define READ_CACHED_VAR(x) (cached_##x[get_pid()])

#define WRITE_CACHED_VAR(x, v) \
        atomic { \
                cached_##x[get_pid()] = v; \
                cache_dirty_##x[get_pid()] = 1; \
        }
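
/*
 * Illustration only (not part of the model): with NR_PROCS == 2, the
 * macros above expand roughly as follows for the "alpha" variable
 * declared further down:
 *
 *   DECLARE_CACHED_VAR(byte, alpha, 0);
 *     -> byte mem_alpha = 0;
 *        byte cached_alpha[2] = 0;
 *        bit cache_dirty_alpha[2] = 0;
 *
 *   WRITE_CACHED_VAR(alpha, 4);
 *     -> atomic {
 *                cached_alpha[_pid] = 4;
 *                cache_dirty_alpha[_pid] = 1;
 *        }
 *
 * A write therefore only touches the writer's own cache line; it reaches
 * mem_alpha only when CACHE_WRITE_TO_MEM flushes the dirty line.
 */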

#define CACHE_WRITE_TO_MEM(x, id) \
        if \
        :: IS_CACHE_DIRTY(x, id) -> \
                mem_##x = cached_##x[id]; \
                cache_dirty_##x[id] = 0; \
        :: else -> \
                skip \
        fi;

#define CACHE_READ_FROM_MEM(x, id) \
        if \
        :: !IS_CACHE_DIRTY(x, id) -> \
                cached_##x[id] = mem_##x; \
        :: else -> \
                skip \
        fi;

/*
 * Randomly propagate cached data to/from memory: a dirty cache line may
 * or may not be written back to memory, a clean one may or may not be
 * refreshed from memory.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id) \
        if \
        :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
        :: 1 -> skip \
        fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id) \
        if \
        :: 1 -> CACHE_READ_FROM_MEM(x, id); \
        :: 1 -> skip \
        fi;
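
/*
 * Both guards in the if statements above are "1", so the selection is
 * nondeterministic: the verifier explores both alternatives, i.e. every
 * possible pattern of cache flushes/refreshes between statements.
 */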

inline smp_rmb()
{
        atomic {
                CACHE_READ_FROM_MEM(alpha, get_pid());
                CACHE_READ_FROM_MEM(beta, get_pid());
        }
}

inline smp_wmb()
{
        atomic {
                CACHE_WRITE_TO_MEM(alpha, get_pid());
                CACHE_WRITE_TO_MEM(beta, get_pid());
        }
}

inline smp_mb()
{
        atomic {
                smp_wmb();
                smp_rmb();
        }
}

/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem */
DECLARE_CACHED_VAR(byte, alpha, 0);
DECLARE_CACHED_VAR(byte, beta, 0);

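/*
 * ooo_mem() models out-of-order memory: invoked between program
 * statements, it lets each cached variable be flushed to and/or
 * refreshed from memory (or neither) at that point, nondeterministically.
 * Per the comment above, every variable declared with DECLARE_CACHED_VAR
 * must be listed here.
 */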
inline ooo_mem()
{
        atomic {
                RANDOM_CACHE_WRITE_TO_MEM(alpha, get_pid());
                RANDOM_CACHE_WRITE_TO_MEM(beta, get_pid());
                RANDOM_CACHE_READ_FROM_MEM(alpha, get_pid());
                RANDOM_CACHE_READ_FROM_MEM(beta, get_pid());
        }
}

#define get_readerid() (get_pid())

byte first_read[NR_READERS];
byte second_read[NR_READERS];

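/*
 * Reader side of the message-passing litmus test: read beta, then (unless
 * NO_RMB is defined) issue a read barrier, then read alpha. The assertion
 * below is the safety encoding of the LTL property quoted in the comment:
 * if the reader saw the writer's beta value (6), it must also see the
 * writer's alpha value (4).
 */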
active [NR_READERS] proctype test_reader()
{
        assert(get_pid() < NR_PROCS);

        ooo_mem();
        first_read[get_readerid()] = READ_CACHED_VAR(beta);
        ooo_mem();
#ifndef NO_RMB
        smp_rmb();
        ooo_mem();
#endif
        second_read[get_readerid()] = READ_CACHED_VAR(alpha);
        ooo_mem();
        // test : [] (first_read == 6 -> <> second_read == 4)
        assert(first_read[get_readerid()] != 6
                || second_read[get_readerid()] == 4);
}

#define get_writerid() (get_readerid() + NR_READERS)

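/*
 * Writer side of the test: write alpha = 4, then (unless NO_WMB is
 * defined) issue a write barrier, then write beta = 6. Without the
 * barrier, beta may reach memory before alpha, which is what the
 * reader's assertion is meant to catch.
 */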
active [NR_WRITERS] proctype test_writer()
{
        byte i;

        assert(get_pid() < NR_PROCS);

        ooo_mem();
        WRITE_CACHED_VAR(alpha, 4);
        ooo_mem();
#ifndef NO_WMB
        smp_wmb();
        ooo_mem();
#endif
        WRITE_CACHED_VAR(beta, 6);
        ooo_mem();
}
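
/*
 * Typical usage sketch (assuming the Spin model checker is installed;
 * the exact compiler invocation is a suggestion, not part of this model):
 *
 *   spin -a mem.spin        # generate the pan.c verifier
 *   cc -o pan pan.c         # compile it
 *   ./pan                   # run; the assertion should hold
 *
 * Re-running with "spin -a -DNO_RMB mem.spin" or "spin -a -DNO_WMB
 * mem.spin" removes the corresponding barrier and should let the
 * verifier find an assertion violation.
 */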