uatomic/x86: Remove redundant memory barriers
[urcu.git] / src / compat_arch.c
1 // SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
2 //
3 // SPDX-License-Identifier: LGPL-2.1-or-later
4
5 /*
6 * Userspace RCU library - architecture compatibility checks
7 */
8
9 #include <urcu/arch.h>
10
11 #ifdef URCU_ARCH_X86_NO_CAS
12
13 #include <stdio.h>
14 #include <pthread.h>
15 #include <signal.h>
16 #include <urcu/assert.h>
17 #include <urcu/uatomic.h>
18
19 /*
20 * Using attribute "weak" for __rcu_cas_avail and
21 * __urcu_x86_compat_mutex. Those are globally visible by the entire
22 * program, even though many shared objects may have their own version.
23 * The first version that gets loaded will be used by the entire
24 * program (executable and all shared objects).
25 */
26
27 /*
28 * It does not really matter if the constructor is called before using
29 * the library, as long as the caller checks if __rcu_cas_avail < 0 and calls
30 * compat_arch_init() explicitly if needed.
31 */
32 int __attribute__((constructor)) __rcu_cas_init(void);
33
34 /*
35 * -1: unknown
36 * 1: available
37 * 0: unavailable
38 */
39 __attribute__((weak))
40 int __rcu_cas_avail = -1;
41
42 __attribute__((weak))
43 pthread_mutex_t __urcu_x86_compat_mutex = PTHREAD_MUTEX_INITIALIZER;
44
45 /*
46 * get_eflags/set_eflags/compare_and_swap_is_available imported from glibc
47 * 2.3.5. linuxthreads/sysdeps/i386/pt-machine.h.
48 */
49
/*
 * Read the current value of the 32-bit x86 EFLAGS register by pushing
 * it on the stack (pushfl) and popping it into a general register.
 * Imported from glibc 2.3.5 pt-machine.h (see comment above).
 */
static int get_eflags (void)
{
	int res;
	__asm__ __volatile__ ("pushfl; popl %0" : "=r" (res) : );
	return res;
}
56
/*
 * Write 'newflags' into the 32-bit x86 EFLAGS register (pushl/popfl).
 * The "cc" clobber tells the compiler the condition codes are changed.
 */
static void set_eflags (int newflags)
{
	__asm__ __volatile__ ("pushl %0; popfl" : : "r" (newflags) : "cc");
}
61
/*
 * Detect whether the CPU supports the cmpxchg instruction, using the
 * classic glibc 2.3.5 pt-machine.h probe: only i486 and later CPUs can
 * toggle the AC (alignment check) bit, EFLAGS bit 18 (0x40000), and
 * those same CPUs are the ones that implement cmpxchg.
 *
 * Returns nonzero when cmpxchg is available, 0 on a 386.
 */
static int compare_and_swap_is_available (void)
{
	int saved_flags, toggled;

	saved_flags = get_eflags ();
	/* Attempt to flip the AC bit. */
	set_eflags (saved_flags ^ 0x40000);
	/* Read back and check whether the bit actually changed. */
	toggled = (get_eflags () ^ saved_flags) & 0x40000;
	/* Put EFLAGS back the way we found it. */
	set_eflags (saved_flags);
	return toggled != 0;
}
76
77 static void mutex_lock_signal_save(pthread_mutex_t *mutex, sigset_t *oldmask)
78 {
79 sigset_t newmask;
80 int ret;
81
82 /* Disable signals */
83 ret = sigfillset(&newmask);
84 urcu_posix_assert(!ret);
85 ret = pthread_sigmask(SIG_BLOCK, &newmask, oldmask);
86 urcu_posix_assert(!ret);
87 ret = pthread_mutex_lock(&__urcu_x86_compat_mutex);
88 urcu_posix_assert(!ret);
89 }
90
91 static void mutex_lock_signal_restore(pthread_mutex_t *mutex, sigset_t *oldmask)
92 {
93 int ret;
94
95 ret = pthread_mutex_unlock(&__urcu_x86_compat_mutex);
96 urcu_posix_assert(!ret);
97 ret = pthread_sigmask(SIG_SETMASK, oldmask, NULL);
98 urcu_posix_assert(!ret);
99 }
100
101 unsigned long _compat_uatomic_set(void *addr, unsigned long _new, int len)
102 {
103 sigset_t mask;
104 unsigned long result;
105
106 mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
107 switch (len) {
108 case 1:
109 *(unsigned char *)addr = (unsigned char)_new;
110 result = *(unsigned char *)addr;
111 break;
112 case 2:
113 *(unsigned short *)addr = (unsigned short)_new;
114 result = *(unsigned short *)addr;
115 break;
116 case 4:
117 *(unsigned int *)addr = (unsigned int)_new;
118 result = *(unsigned int *)addr;
119 break;
120 default:
121 /*
122 * generate an illegal instruction. Cannot catch this with
123 * linker tricks when optimizations are disabled.
124 */
125 result = 0;
126 __asm__ __volatile__("ud2");
127 }
128 mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
129 return result;
130 }
131
132 unsigned long _compat_uatomic_xchg(void *addr, unsigned long _new, int len)
133 {
134 sigset_t mask;
135 unsigned long retval;
136
137 mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
138 switch (len) {
139 case 1:
140 retval = *(unsigned char *)addr;
141 *(unsigned char *)addr = (unsigned char)_new;
142 break;
143 case 2:
144 retval = *(unsigned short *)addr;
145 *(unsigned short *)addr = (unsigned short)_new;
146 break;
147 case 4:
148 retval = *(unsigned int *)addr;
149 *(unsigned int *)addr = (unsigned int)_new;
150 break;
151 default:
152 /*
153 * generate an illegal instruction. Cannot catch this with
154 * linker tricks when optimizations are disabled.
155 */
156 retval = 0; /* silence gcc warnings */
157 __asm__ __volatile__("ud2");
158 }
159 mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
160 return retval;
161 }
162
163 unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
164 unsigned long _new, int len)
165 {
166 unsigned long retval;
167 sigset_t mask;
168
169 mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
170 switch (len) {
171 case 1:
172 {
173 unsigned char result = *(unsigned char *)addr;
174 if (result == (unsigned char)old)
175 *(unsigned char *)addr = (unsigned char)_new;
176 retval = result;
177 break;
178 }
179 case 2:
180 {
181 unsigned short result = *(unsigned short *)addr;
182 if (result == (unsigned short)old)
183 *(unsigned short *)addr = (unsigned short)_new;
184 retval = result;
185 break;
186 }
187 case 4:
188 {
189 unsigned int result = *(unsigned int *)addr;
190 if (result == (unsigned int)old)
191 *(unsigned int *)addr = (unsigned int)_new;
192 retval = result;
193 break;
194 }
195 default:
196 /*
197 * generate an illegal instruction. Cannot catch this with
198 * linker tricks when optimizations are disabled.
199 */
200 retval = 0; /* silence gcc warnings */
201 __asm__ __volatile__("ud2");
202 }
203 mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
204 return retval;
205 }
206
207 void _compat_uatomic_or(void *addr, unsigned long v, int len)
208 {
209 sigset_t mask;
210
211 mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
212 switch (len) {
213 case 1:
214 *(unsigned char *)addr |= (unsigned char)v;
215 break;
216 case 2:
217 *(unsigned short *)addr |= (unsigned short)v;
218 break;
219 case 4:
220 *(unsigned int *)addr |= (unsigned int)v;
221 break;
222 default:
223 /*
224 * generate an illegal instruction. Cannot catch this with
225 * linker tricks when optimizations are disabled.
226 */
227 __asm__ __volatile__("ud2");
228 }
229 mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
230 }
231
232 void _compat_uatomic_and(void *addr, unsigned long v, int len)
233 {
234 sigset_t mask;
235
236 mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
237 switch (len) {
238 case 1:
239 *(unsigned char *)addr &= (unsigned char)v;
240 break;
241 case 2:
242 *(unsigned short *)addr &= (unsigned short)v;
243 break;
244 case 4:
245 *(unsigned int *)addr &= (unsigned int)v;
246 break;
247 default:
248 /*
249 * generate an illegal instruction. Cannot catch this with
250 * linker tricks when optimizations are disabled.
251 */
252 __asm__ __volatile__("ud2");
253 }
254 mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
255 }
256
257 unsigned long _compat_uatomic_add_return(void *addr, unsigned long v, int len)
258 {
259 sigset_t mask;
260 unsigned long result;
261
262 mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
263 switch (len) {
264 case 1:
265 *(unsigned char *)addr += (unsigned char)v;
266 result = *(unsigned char *)addr;
267 break;
268 case 2:
269 *(unsigned short *)addr += (unsigned short)v;
270 result = *(unsigned short *)addr;
271 break;
272 case 4:
273 *(unsigned int *)addr += (unsigned int)v;
274 result = *(unsigned int *)addr;
275 break;
276 default:
277 /*
278 * generate an illegal instruction. Cannot catch this with
279 * linker tricks when optimizations are disabled.
280 */
281 result = 0; /* silence gcc warnings */
282 __asm__ __volatile__("ud2");
283 }
284 mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
285 return result;
286 }
287
288 int __rcu_cas_init(void)
289 {
290 if (__rcu_cas_avail < 0)
291 __rcu_cas_avail = compare_and_swap_is_available();
292 return __rcu_cas_avail;
293 }
294 #endif
This page took 0.03343 seconds and 4 git commands to generate.