#ifndef _URCU_BP_STATIC_H
#define _URCU_BP_STATIC_H

/*
 * urcu-bp-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/tls-compat.h>
/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

#ifdef __cplusplus
extern "C" {
#endif
#ifdef DEBUG_RCU
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif
#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/*
 * Updates without RCU_MB are much slower. Account this in
 * the delay.
 */
/* maximum sleep delay, in us */
#define MAX_SLEEP 50

extern unsigned int yield_active;
extern DECLARE_URCU_TLS(unsigned int, rand_yield);

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
			usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
			usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
{
	URCU_TLS(rand_yield) = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif
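/*
 * Usage note (illustrative, not upstream text): building with
 * -DDEBUG_YIELD turns the calls above into randomized sleeps of up to
 * MAX_SLEEP us at the read-side and write-side yield points, which helps
 * expose ordering bugs under stress; each thread should seed its
 * rand_yield state via debug_yield_init() first. Without DEBUG_YIELD
 * these functions compile to nothing.
 */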
/*
 * The trick here is that the bit position of RCU_GP_CTR_PHASE must be a
 * multiple of 8, so the lower-order bits can be covered by a full 8-bit,
 * 16-bit or 32-bit bitmask.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use a number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_PHASE	(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)
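/*
 * Worked example (illustrative, not in the original header): with a
 * 64-bit long, sizeof(long) << 2 == 32, so RCU_GP_CTR_PHASE == 1UL << 32
 * and RCU_GP_CTR_NEST_MASK == 0xffffffff. The low 32 bits of a reader's
 * ctr then hold its read-side nesting count and bit 32 holds the
 * grace-period phase snapshot; with a 32-bit long the split is 16/16.
 */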
/*
 * Used internally by _rcu_read_lock.
 */
extern void rcu_bp_register(void);
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long rcu_gp_ctr;
struct rcu_reader {
	/* Data used by both reader and synchronize_rcu() */
	long ctr;
	/* Data used for registry */
	struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
	pthread_t tid;
	int alloc;	/* registry entry allocated */
};
/*
 * Bulletproof version keeps a pointer to a registry not part of the TLS.
 * Adds a pointer dereference on the read-side, but won't require the
 * reader thread to unregister before it exits.
 */
extern DECLARE_URCU_TLS(struct rcu_reader *, rcu_reader);
static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = CMM_LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
}
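/*
 * Illustrative reading of the test above (added example, not upstream
 * text): with a 64-bit long, a reader snapshot v == 0x100000001UL has a
 * nonzero nesting count (low bits == 1) and a phase bit that differs
 * from an rcu_gp_ctr whose phase bit is clear, so that reader still
 * blocks the old grace period. A reader with a zero nesting count, or
 * one whose phase matches rcu_gp_ctr, is not waited for.
 */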
static inline void _rcu_read_lock(void)
{
	long tmp;

	/* Check if registered */
	if (caa_unlikely(!URCU_TLS(rcu_reader)))
		rcu_bp_register();

	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
	tmp = URCU_TLS(rcu_reader)->ctr;
	/*
	 * rcu_gp_ctr is
	 *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
	 */
	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr,
			_CMM_LOAD_SHARED(rcu_gp_ctr));
		/*
		 * Set active readers count for outermost nesting level before
		 * accessing the pointer.
		 */
		cmm_smp_mb();
	} else {
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr,
			tmp + RCU_GP_COUNT);
	}
}
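/*
 * Example trace (illustrative, not upstream text): starting from
 * ctr == 0, the outermost _rcu_read_lock() snapshots rcu_gp_ctr
 * (nesting count 1 plus the current phase bit) into ctr and issues
 * cmm_smp_mb() so the count is visible before any protected pointer is
 * read. A nested _rcu_read_lock() takes the else branch and only adds
 * RCU_GP_COUNT, leaving the phase bits untouched and needing no barrier.
 */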
static inline void _rcu_read_unlock(void)
{
	/*
	 * Finish using rcu before decrementing the nesting count.
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr,
		URCU_TLS(rcu_reader)->ctr - RCU_GP_COUNT);
	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
}
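/*
 * Typical read-side usage (sketch using the public urcu-bp wrappers
 * rather than these _rcu_* internals; "gp" and "use()" are hypothetical):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock();
 *
 * No explicit registration call is needed with the bulletproof flavor:
 * _rcu_read_lock() registers the calling thread on first use.
 */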
#ifdef __cplusplus
}
#endif

#endif /* _URCU_BP_STATIC_H */