#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009      Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef __SIZEOF_LONG__
#if defined(__x86_64__) || defined(__amd64__)
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

struct __uatomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __uatomic_dummy *)(x))

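/*
 * Note (interpretation, not from the original source): __hp() casts the
 * target address to a pointer to an oversized dummy struct so that the
 * "m" operands in the asm below cover the whole underlying object,
 * whatever the actual operand size is.
 */
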
#define _uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
#define _uatomic_read(addr)	LOAD_SHARED(*(addr))

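/*
 * Note (interpretation, not from the original source): on x86, aligned
 * loads and stores up to the machine word size are atomic, so plain
 * LOAD_SHARED()/STORE_SHARED() volatile accesses are sufficient for
 * _uatomic_read()/_uatomic_set(); no lock prefix is needed.
 */
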
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
				unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	      \
						sizeof(*(addr))))

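/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical): the canonical retry loop built on _uatomic_cmpxchg().
 * cmpxchg compares against, and returns through, the accumulator register
 * (the "+a" constraint above), so the caller learns the value actually
 * observed in memory and can tell whether the swap happened.
 */
static inline unsigned long __uatomic_example_fetch_max(unsigned long *addr,
							unsigned long v)
{
	unsigned long old = _uatomic_read(addr);

	for (;;) {
		unsigned long seen;

		if (old >= v)
			return old;	/* current value already larger */
		seen = _uatomic_cmpxchg(addr, old, v);
		if (seen == old)
			return old;	/* swap succeeded */
		old = seen;		/* lost a race: retry with fresh value */
	}
}
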
/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))

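/*
 * Illustrative sketch (not part of the original header; names are
 * hypothetical): a minimal test-and-set spinlock built on _uatomic_xchg().
 * xchg with a memory operand is implicitly locked on x86, and a plain
 * store has release semantics there, so _uatomic_set() suffices to unlock.
 */
static inline void __uatomic_example_spin_lock(unsigned int *lock)
{
	while (_uatomic_xchg(lock, 1))
		;	/* spin until the previous value reads back as 0 */
}

static inline void __uatomic_example_spin_unlock(unsigned int *lock)
{
	_uatomic_set(lock, 0);
}
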
/* uatomic_add_return, uatomic_sub_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				   int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_add_return(addr, v)					      \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		      \
						(unsigned long)(v),	      \
						sizeof(*(addr))))

#define _uatomic_sub_return(addr, v)	_uatomic_add_return((addr), -(v))

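/*
 * Illustrative sketch (not part of the original header; the name is
 * hypothetical): xadd leaves the pre-add value in its register operand,
 * so __uatomic_add_return() adds val once more to yield add-and-fetch
 * semantics. A typical use is drawing unique, monotonically increasing
 * IDs:
 */
static inline unsigned long __uatomic_example_next_id(unsigned long *ctr)
{
	return _uatomic_add_return(ctr, 1);	/* first caller gets 1 */
}
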
/* uatomic_add, uatomic_sub */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_add(addr, v)						      \
	(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))

#define _uatomic_sub(addr, v)	_uatomic_add((addr), -(v))


/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))

/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))

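/*
 * Illustrative sketch (not part of the original header; names are
 * hypothetical): _uatomic_inc()/_uatomic_dec() return nothing, so a
 * reference count that must detect its 0 transition pairs the increment
 * with _uatomic_add_return(..., -1) on the release side:
 */
static inline void __uatomic_example_ref_get(unsigned long *refcount)
{
	_uatomic_inc(refcount);
}

static inline int __uatomic_example_ref_put(unsigned long *refcount)
{
	/* non-zero when the last reference was just dropped */
	return _uatomic_add_return(refcount, -1) == 0;
}
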
#if ((BITS_PER_LONG != 64) && defined(CONFIG_URCU_COMPAT_ARCH))
extern int __urcu_cas_avail;
extern int __urcu_cas_init(void);

#define UATOMIC_COMPAT(insn)						      \
	((likely(__urcu_cas_avail > 0))					      \
	? (_uatomic_##insn)						      \
	: ((unlikely(__urcu_cas_avail < 0)				      \
		? ((__urcu_cas_init() > 0)				      \
			? (_uatomic_##insn)				      \
			: (compat_uatomic_##insn))			      \
		: (compat_uatomic_##insn))))

extern unsigned long _compat_uatomic_set(void *addr,
					 unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)					      \
	((__typeof__(*(addr))) _compat_uatomic_set((addr),		      \
						(unsigned long)(_new),	      \
						sizeof(*(addr))))


extern unsigned long _compat_uatomic_xchg(void *addr,
					  unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)					      \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),		      \
						(unsigned long)(_new),	      \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					     unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),		      \
						(unsigned long)(old),	      \
						(unsigned long)(_new),	      \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_add_return(void *addr,
						unsigned long v, int len);
#define compat_uatomic_add_return(addr, v)				      \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	      \
						(unsigned long)(v),	      \
						sizeof(*(addr))))

#define compat_uatomic_sub_return(addr, v)				      \
	compat_uatomic_add_return((addr), -(v))
#define compat_uatomic_add(addr, v)					      \
	((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_sub(addr, v)					      \
	((void)compat_uatomic_sub_return((addr), (v)))
#define compat_uatomic_inc(addr)					      \
	(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)					      \
	(compat_uatomic_sub((addr), 1))

#else
#define UATOMIC_COMPAT(insn)	(_uatomic_##insn)
#endif

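/*
 * Note on the dispatch above (interpretation, not from the original
 * source): __urcu_cas_avail is tri-state. It is negative until
 * __urcu_cas_init() has run, positive when the CPU provides cmpxchg,
 * and zero otherwise, in which case the extern compat_uatomic_*()
 * fallbacks are used. Only the first caller pays for initialization;
 * afterwards the fast path is a single likely() branch selecting the
 * native _uatomic_*() implementation.
 */
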
/* Read is atomic even in compat mode */
#define uatomic_read(addr)	_uatomic_read(addr)

#define uatomic_set(addr, v)			\
	UATOMIC_COMPAT(set(addr, v))
#define uatomic_cmpxchg(addr, old, _new)	\
	UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
	UATOMIC_COMPAT(xchg(addr, v))
#define uatomic_add_return(addr, v)		\
	UATOMIC_COMPAT(add_return(addr, v))
#define uatomic_sub_return(addr, v)		\
	UATOMIC_COMPAT(sub_return(addr, v))
#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))
#define uatomic_sub(addr, v)	UATOMIC_COMPAT(sub(addr, v))
#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))
#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))

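/*
 * Illustrative usage (not part of the original header; the variable name
 * is hypothetical). The public, type-generic entry points above accept
 * 1-, 2-, 4- and, on 64-bit builds, 8-byte objects:
 *
 *	static unsigned long count;
 *
 *	uatomic_set(&count, 0UL);
 *	uatomic_inc(&count);				(count == 1)
 *	if (uatomic_cmpxchg(&count, 1UL, 5UL) == 1UL)
 *		...					(swap succeeded)
 *	uatomic_sub(&count, 2);				(count == 3)
 *	assert(uatomic_read(&count) == 3);
 */
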
#ifdef __cplusplus
}
#endif

#endif /* _URCU_ARCH_UATOMIC_X86_H */