uatomic: add uatomic_and
/*
 * compat_arch_x86.c
 *
 * Userspace RCU library - x86 compatibility checks
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <urcu/uatomic_arch.h>

/*
 * It does not really matter whether the constructor is called before the
 * library is used, as long as the caller checks whether __rcu_cas_avail < 0
 * and calls compat_arch_init() explicitly if needed.
 */
int __attribute__((constructor)) __rcu_cas_init(void);

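/*
 * Illustrative sketch only: a caller that may run before the constructor
 * (e.g. from another constructor) can apply the check described above
 * itself.  The function name below is hypothetical.
 *
 *        static void early_uatomic_check(void)
 *        {
 *                if (__rcu_cas_avail < 0)
 *                        (void) __rcu_cas_init();
 *        }
 */
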
/*
 * -1: unknown
 *  1: available
 *  0: unavailable
 */
int __rcu_cas_avail = -1;

static pthread_mutex_t compat_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * get_eflags/set_eflags/compare_and_swap_is_available imported from glibc
 * 2.3.5. linuxthreads/sysdeps/i386/pt-machine.h.
 */

static int get_eflags (void)
{
        int res;
        __asm__ __volatile__ ("pushfl; popl %0" : "=r" (res) : );
        return res;
}

static void set_eflags (int newflags)
{
        __asm__ __volatile__ ("pushl %0; popfl" : : "r" (newflags) : "cc");
}

static int compare_and_swap_is_available (void)
{
        int oldflags = get_eflags ();
        int changed;
        /* Flip the AC (alignment check) flag, bit 18 (0x40000) of EFLAGS. */
        set_eflags (oldflags ^ 0x40000);
        /* See if the bit changed. */
        changed = (get_eflags () ^ oldflags) & 0x40000;
        /* Restore EFLAGS. */
        set_eflags (oldflags);
        /* If the AC flag did not change, it's a 386 and it lacks cmpxchg.
           Otherwise, it's a 486 or above and it has cmpxchg. */
        return changed != 0;
}

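/*
 * The helpers below take compat_mutex with all signals blocked, presumably
 * so that a signal handler issuing a compat atomic operation cannot
 * interrupt the critical section and deadlock on a mutex the thread
 * already holds.
 */
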
static void mutex_lock_signal_save(pthread_mutex_t *mutex, sigset_t *oldmask)
{
        sigset_t newmask;
        int ret;

        /* Disable signals */
        ret = sigfillset(&newmask);
        assert(!ret);
        ret = pthread_sigmask(SIG_SETMASK, &newmask, oldmask);
        assert(!ret);
        ret = pthread_mutex_lock(&compat_mutex);
        assert(!ret);
}

static void mutex_lock_signal_restore(pthread_mutex_t *mutex, sigset_t *oldmask)
{
        int ret;

        ret = pthread_mutex_unlock(&compat_mutex);
        assert(!ret);
        ret = pthread_sigmask(SIG_SETMASK, oldmask, NULL);
        assert(!ret);
}

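/*
 * Fallback implementations of the uatomic primitives for CPUs lacking
 * cmpxchg (i386).  Atomicity is emulated by taking compat_mutex around a
 * plain read-modify-write, so these operations are only atomic with
 * respect to accesses that also go through the compat functions.
 */
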
unsigned long _compat_uatomic_set(void *addr, unsigned long _new, int len)
{
        sigset_t mask;
        unsigned long result;

        mutex_lock_signal_save(&compat_mutex, &mask);
        switch (len) {
        case 1:
                *(unsigned char *)addr = (unsigned char)_new;
                result = *(unsigned char *)addr;
                break;
        case 2:
                *(unsigned short *)addr = (unsigned short)_new;
                result = *(unsigned short *)addr;
                break;
        case 4:
                *(unsigned int *)addr = (unsigned int)_new;
                result = *(unsigned int *)addr;
                break;
        default:
                /*
                 * generate an illegal instruction. Cannot catch this with
                 * linker tricks when optimizations are disabled.
                 */
                __asm__ __volatile__("ud2");
        }
        mutex_lock_signal_restore(&compat_mutex, &mask);
        return _new;
}

unsigned long _compat_uatomic_xchg(void *addr, unsigned long _new, int len)
{
        sigset_t mask;
        unsigned long retval;

        mutex_lock_signal_save(&compat_mutex, &mask);
        switch (len) {
        case 1:
                retval = *(unsigned char *)addr;
                *(unsigned char *)addr = (unsigned char)_new;
                break;
        case 2:
                retval = *(unsigned short *)addr;
                *(unsigned short *)addr = (unsigned short)_new;
                break;
        case 4:
                retval = *(unsigned int *)addr;
                *(unsigned int *)addr = (unsigned int)_new;
                break;
        default:
                /*
                 * generate an illegal instruction. Cannot catch this with
                 * linker tricks when optimizations are disabled.
                 */
                __asm__ __volatile__("ud2");
        }
        mutex_lock_signal_restore(&compat_mutex, &mask);
        return retval;
}

unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
                                      unsigned long _new, int len)
{
        unsigned long retval;
        sigset_t mask;

        mutex_lock_signal_save(&compat_mutex, &mask);
        switch (len) {
        case 1:
        {
                unsigned char result = *(unsigned char *)addr;
                if (result == (unsigned char)old)
                        *(unsigned char *)addr = (unsigned char)_new;
                retval = result;
                break;
        }
        case 2:
        {
                unsigned short result = *(unsigned short *)addr;
                if (result == (unsigned short)old)
                        *(unsigned short *)addr = (unsigned short)_new;
                retval = result;
                break;
        }
        case 4:
        {
                unsigned int result = *(unsigned int *)addr;
                if (result == (unsigned int)old)
                        *(unsigned int *)addr = (unsigned int)_new;
                retval = result;
                break;
        }
        default:
                /*
                 * generate an illegal instruction. Cannot catch this with
                 * linker tricks when optimizations are disabled.
                 */
                __asm__ __volatile__("ud2");
        }
        mutex_lock_signal_restore(&compat_mutex, &mask);
        return retval;
}

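/*
 * Illustrative sketch (a hypothetical wrapper, not the dispatch macro used
 * by urcu/uatomic_arch.h) of how the availability flag can steer a caller
 * between the native instruction and the fallback above:
 *
 *        unsigned long cmpxchg_ulong(unsigned long *addr, unsigned long old,
 *                                    unsigned long _new)
 *        {
 *                if (__rcu_cas_avail < 0)
 *                        (void) __rcu_cas_init();
 *                if (!__rcu_cas_avail)
 *                        return _compat_uatomic_cmpxchg(addr, old, _new,
 *                                                       sizeof(*addr));
 *                return uatomic_cmpxchg(addr, old, _new);
 *        }
 */
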
void _compat_uatomic_or(void *addr, unsigned long v, int len)
{
        sigset_t mask;

        mutex_lock_signal_save(&compat_mutex, &mask);
        switch (len) {
        case 1:
                *(unsigned char *)addr |= (unsigned char)v;
                break;
        case 2:
                *(unsigned short *)addr |= (unsigned short)v;
                break;
        case 4:
                *(unsigned int *)addr |= (unsigned int)v;
                break;
        default:
                /*
                 * generate an illegal instruction. Cannot catch this with
                 * linker tricks when optimizations are disabled.
                 */
                __asm__ __volatile__("ud2");
        }
        mutex_lock_signal_restore(&compat_mutex, &mask);
}

void _compat_uatomic_and(void *addr, unsigned long v, int len)
{
        sigset_t mask;

        mutex_lock_signal_save(&compat_mutex, &mask);
        switch (len) {
        case 1:
                *(unsigned char *)addr &= (unsigned char)v;
                break;
        case 2:
                *(unsigned short *)addr &= (unsigned short)v;
                break;
        case 4:
                *(unsigned int *)addr &= (unsigned int)v;
                break;
        default:
                /*
                 * generate an illegal instruction. Cannot catch this with
                 * linker tricks when optimizations are disabled.
                 */
                __asm__ __volatile__("ud2");
        }
        mutex_lock_signal_restore(&compat_mutex, &mask);
}

unsigned long _compat_uatomic_add_return(void *addr, unsigned long v, int len)
{
        sigset_t mask;
        unsigned long result;

        mutex_lock_signal_save(&compat_mutex, &mask);
        switch (len) {
        case 1:
                *(unsigned char *)addr += (unsigned char)v;
                result = *(unsigned char *)addr;
                break;
        case 2:
                *(unsigned short *)addr += (unsigned short)v;
                result = *(unsigned short *)addr;
                break;
        case 4:
                *(unsigned int *)addr += (unsigned int)v;
                result = *(unsigned int *)addr;
                break;
        default:
                /*
                 * generate an illegal instruction. Cannot catch this with
                 * linker tricks when optimizations are disabled.
                 */
                __asm__ __volatile__("ud2");
        }
        mutex_lock_signal_restore(&compat_mutex, &mask);
        return result;
}

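/*
 * Runs at library load time via the constructor attribute on the
 * declaration near the top of this file; it is idempotent, so an explicit
 * call (as described there) is harmless.
 */
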
int __rcu_cas_init(void)
{
        if (__rcu_cas_avail < 0)
                __rcu_cas_avail = compare_and_swap_is_available();
        return __rcu_cas_avail;
}