X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=include%2Furcu%2Fuatomic%2Fgeneric.h;h=ed655bb8def13a5120990d049eb12199ee47376a;hp=c3762b0780b1f80c93c566012d8b904bd1984dbc;hb=HEAD;hpb=6fa8b4f80f1d2efbc90fc6d2a5fb0dc1d7fd2a19

diff --git a/include/urcu/uatomic/generic.h b/include/urcu/uatomic/generic.h
index c3762b0..ed655bb 100644
--- a/include/urcu/uatomic/generic.h
+++ b/include/urcu/uatomic/generic.h
@@ -1,22 +1,15 @@
+// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
+// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
+// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
+// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers
+// SPDX-FileCopyrightText: 2010 Paolo Bonzini
+//
+// SPDX-License-Identifier: LicenseRef-Boehm-GC
+
 #ifndef _URCU_UATOMIC_GENERIC_H
 #define _URCU_UATOMIC_GENERIC_H
 
 /*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- * Copyright (c) 2010 Paolo Bonzini
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
  * Code inspired from libuatomic_ops-1.2, inherited in part from the
  * Boehm-Demers-Weiser conservative garbage collector.
  */
@@ -33,24 +26,125 @@ extern "C" {
 #define uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
 #endif
 
+/*
+ * Can be overridden by the architecture.
+ *
+ * What needs to be emitted _before_ the `operation' with memory ordering `mo'.
+ */
+#ifndef _cmm_compat_c11_smp_mb__before_mo
+# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb()
+#endif
+
+/*
+ * Can be overridden by the architecture.
+ *
+ * What needs to be emitted _after_ the `operation' with memory ordering `mo'.
+ */
+#ifndef _cmm_compat_c11_smp_mb__after_mo
+# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb()
+#endif
+
+#define uatomic_load_store_return_op(op, addr, v, mo) \
+	__extension__ \
+	({ \
+		_cmm_compat_c11_smp_mb__before_mo(op, mo); \
+		__typeof__(*(addr)) _value = op(addr, v); \
+		_cmm_compat_c11_smp_mb__after_mo(op, mo); \
+		\
+		_value; \
+	})
+
+#define uatomic_load_store_op(op, addr, v, mo) \
+	do { \
+		_cmm_compat_c11_smp_mb__before_mo(op, mo); \
+		op(addr, v); \
+		_cmm_compat_c11_smp_mb__after_mo(op, mo); \
+	} while (0)
+
+#define uatomic_store(addr, v, mo) \
+	do { \
+		_cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo); \
+		uatomic_set(addr, v); \
+		_cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo); \
+	} while (0)
+
+#define uatomic_and_mo(addr, v, mo) \
+	uatomic_load_store_op(uatomic_and, addr, v, mo)
+
+#define uatomic_or_mo(addr, v, mo) \
+	uatomic_load_store_op(uatomic_or, addr, v, mo)
+
+#define uatomic_add_mo(addr, v, mo) \
+	uatomic_load_store_op(uatomic_add, addr, v, mo)
+
+#define uatomic_sub_mo(addr, v, mo) \
+	uatomic_load_store_op(uatomic_sub, addr, v, mo)
+
+#define uatomic_inc_mo(addr, mo) \
+	uatomic_load_store_op(uatomic_add, addr, 1, mo)
+
+#define uatomic_dec_mo(addr, mo) \
+	uatomic_load_store_op(uatomic_add, addr, -1, mo)
+/*
+ * NOTE: We cannot simply do switch (_value == (old) ? mos : mof), otherwise
+ * the compiler emits a -Wduplicated-cond warning.
+ */
+#define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \
+	__extension__ \
+	({ \
+		_cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \
+		__typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
+							new); \
+		\
+		if (_value == (old)) { \
+			_cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \
+		} else { \
+			_cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \
+		} \
+		_value; \
+	})
+
+#define uatomic_xchg_mo(addr, v, mo) \
+	uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)
+
+#define uatomic_add_return_mo(addr, v, mo) \
+	uatomic_load_store_return_op(uatomic_add_return, addr, v, mo)
+
+#define uatomic_sub_return_mo(addr, v, mo) \
+	uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)
+
 #ifndef uatomic_read
 #define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
 #endif
 
+#define uatomic_load(addr, mo) \
+	__extension__ \
+	({ \
+		_cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo); \
+		__typeof__(*(addr)) _rcu_value = uatomic_read(addr); \
+		_cmm_compat_c11_smp_mb__after_mo(uatomic_read, mo); \
+		\
+		_rcu_value; \
+	})
+
 #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
-static inline __attribute__((always_inline, __noreturn__))
+#ifdef ILLEGAL_INSTR
+static inline __attribute__((always_inline))
 void _uatomic_link_error(void)
 {
-#ifdef ILLEGAL_INSTR
 	/*
 	 * generate an illegal instruction. Cannot catch this with
 	 * linker tricks when optimizations are disabled.
 	 */
 	__asm__ __volatile__(ILLEGAL_INSTR);
+}
 #else
+static inline __attribute__((always_inline, __noreturn__))
+void _uatomic_link_error(void)
+{
 	__builtin_trap();
-#endif
 }
+#endif
 
 #else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
 extern void _uatomic_link_error(void);
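
The two `_cmm_compat_c11_smp_mb__*_mo' hooks introduced by this patch default to a full cmm_smp_mb() for every operation and memory order. The `#ifndef' guards let an architecture header map each order to a cheaper fence by defining the hooks before this file is included. The following is a hypothetical override for an imaginary weakly ordered architecture, not liburcu's actual per-arch code; it assumes liburcu's `enum cmm_memorder' constants (CMM_RELAXED ... CMM_SEQ_CST_FENCE) and cmm_smp_mb() from <urcu/arch.h> are in scope.

/*
 * Hypothetical architecture override (illustration only): emit a full
 * barrier before an operation only for orderings with release semantics.
 * An arch header would define this before generic.h is parsed, so the
 * #ifndef fallback above is skipped.
 */
#define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \
	do { \
		switch (mo) { \
		case CMM_RELEASE: \
		case CMM_ACQ_REL: \
		case CMM_SEQ_CST: \
		case CMM_SEQ_CST_FENCE: \
			cmm_smp_mb(); \
			break; \
		default: \
			/* relaxed/consume/acquire: nothing needed before */ \
			break; \
		} \
	} while (0)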
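
From the caller's side, the new `mo' parameter requests only the ordering a given access actually needs. Below is a minimal message-passing sketch using the uatomic_store()/uatomic_load() accessors defined above; it assumes liburcu's CMM_RELAXED/CMM_RELEASE/CMM_ACQUIRE constants and caa_cpu_relax() from <urcu/arch.h>, and the variable and function names are illustrative only.

#include <urcu/arch.h>		/* caa_cpu_relax() */
#include <urcu/uatomic.h>	/* uatomic_load(), uatomic_store(), CMM_* */

static int data;	/* payload published by the producer */
static int ready;	/* publication flag guarding the payload */

static void producer(void)
{
	uatomic_store(&data, 42, CMM_RELAXED);
	/* Release: the payload store is ordered before the flag store. */
	uatomic_store(&ready, 1, CMM_RELEASE);
}

static int consumer(void)
{
	/* Acquire: pairs with the release store, making the payload visible. */
	while (!uatomic_load(&ready, CMM_ACQUIRE))
		caa_cpu_relax();
	return uatomic_load(&data, CMM_RELAXED);	/* reads 42 */
}

With the generic fallback above, every order degrades to cmm_smp_mb(); architectures that override the _cmm_compat_c11_smp_mb__*_mo hooks can emit nothing for CMM_RELAXED and lighter fences for acquire/release.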
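
uatomic_cmpxchg_mo() takes separate success (`mos') and failure (`mof') orders, which is why its expansion needs the if/else on `_value': the barrier emitted after the compare-and-exchange depends on whether it succeeded. A sketch of where that distinction matters, under the same assumptions as the previous example (not the library's lock implementation):

static int lock_word;	/* 0 = free, 1 = held */

static int trylock(void)
{
	/*
	 * Success needs acquire so the critical section cannot float
	 * above the lock acquisition; failure needs no ordering at all.
	 * uatomic_cmpxchg_mo() returns the old value, so 0 means we won.
	 */
	return uatomic_cmpxchg_mo(&lock_word, 0, 1,
				CMM_ACQUIRE, CMM_RELAXED) == 0;
}

static void unlock(void)
{
	/* Release: critical-section stores complete before the lock is freed. */
	uatomic_store(&lock_word, 0, CMM_RELEASE);
}

With the generic fallback both paths still emit a full barrier; the success/failure split only pays off on architectures whose hooks emit weaker fences for CMM_RELAXED.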