#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#include <stdlib.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("" : : : "memory")

/* x86 32/64 specific */
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.  Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
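
/*
 * Usage sketch (illustrative only; `stop` and poll_until_stopped() are
 * hypothetical, not part of this header).  Without ACCESS_ONCE(), the
 * compiler may hoist the load of `stop` out of the loop and spin forever
 * on a stale value:
 *
 *	extern int stop;
 *
 *	void poll_until_stopped(void)
 *	{
 *		while (!ACCESS_ONCE(stop))
 *			;	(forces one fresh load per iteration)
 *	}
 */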

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p) ({				\
	typeof(p) _________p1 = ACCESS_ONCE(p);		\
	smp_read_barrier_depends();			\
	(_________p1);					\
})
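
/*
 * Read-side usage sketch (struct foo, gp and do_something() are
 * hypothetical placeholders for illustration):
 *
 *	struct foo { int a; };
 *	struct foo *gp;
 *
 *	void reader_step(void)
 *	{
 *		struct foo *p;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(gp);
 *		if (p)
 *			do_something(p->a);
 *		rcu_read_unlock();
 *	}
 */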

#define SIGURCU SIGUSR1

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			sched_yield();
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			sched_yield();
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

/*
 * Limiting the nesting level to 256 to keep instructions small in the read
 * fast-path.
 */
#define RCU_GP_COUNT		(1U << 0)
#define RCU_GP_CTR_BIT		(1U << 8)
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
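
/*
 * Resulting layout of a reader's urcu_active_readers word (illustrative
 * summary of the definitions above):
 *
 *	bits 0-7: read-side critical section nesting count, incremented
 *	          by RCU_GP_COUNT on rcu_read_lock() and decremented on
 *	          rcu_read_unlock()
 *	bit 8:    grace-period phase bit (RCU_GP_CTR_BIT), snapshotted
 *	          from urcu_gp_ctr by the outermost rcu_read_lock()
 *
 * For example, a reader nested two levels deep while urcu_gp_ctr has the
 * phase bit set holds the value 0x102.
 */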

/* Global quiescent period counter with low-order bits unused. */
extern int urcu_gp_ctr;

extern int __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(int *value)
{
	int v;

	if (value == NULL)
		return 0;
	debug_yield_write();
	v = ACCESS_ONCE(*value);
	debug_yield_write();
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
}
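
/*
 * Worked example: suppose urcu_gp_ctr currently has the phase bit set.
 * A reader counter of 0x002 (nesting level 2, phase bit clear) means the
 * reader entered its critical section before the phase flip, so
 * rcu_old_gp_ongoing() returns nonzero and the caller on the update side
 * must keep waiting.  A counter of 0x000 (not in a read-side critical
 * section) or 0x102 (entered after the flip) yields zero.
 */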

static inline void rcu_read_lock(void)
{
	int tmp;

	debug_yield_read();
	tmp = urcu_active_readers;
	debug_yield_read();
	if (!(tmp & RCU_GP_CTR_NEST_MASK))
		urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
	debug_yield_read();
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	barrier();
	debug_yield_read();
}

static inline void rcu_read_unlock(void)
{
	debug_yield_read();
	barrier();
	debug_yield_read();
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers -= RCU_GP_COUNT;
	debug_yield_read();
}

extern void *urcu_publish_content(void **ptr, void *new);

/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
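
/*
 * End-to-end usage sketch (struct foo, gp, reader() and writer_update()
 * are hypothetical; error handling omitted).  This assumes
 * urcu_publish_content() returns the replaced pointer only once
 * pre-existing readers can no longer hold a reference to it, so the
 * writer may then free it:
 *
 *	struct foo { int a; };
 *	static struct foo *gp;
 *
 *	void *reader(void *arg)
 *	{
 *		struct foo *p;
 *
 *		urcu_register_thread();
 *		rcu_read_lock();
 *		p = rcu_dereference(gp);
 *		if (p)
 *			do_something(p->a);
 *		rcu_read_unlock();
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 *
 *	void writer_update(int a)
 *	{
 *		struct foo *new = malloc(sizeof(*new));
 *		struct foo *old;
 *
 *		new->a = a;
 *		old = urcu_publish_content((void **)&gp, new);
 *		free(old);
 *	}
 */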

#endif /* _URCU_H */