From 6cf3827cf5809fbcee555fb08286ad756be42dad Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Thu, 18 Nov 2010 21:44:59 -0500
Subject: [PATCH] LOAD_SHARED and STORE_SHARED should have CMM_ prefix

Signed-off-by: Mathieu Desnoyers
---
 urcu-bp-static.h        |  8 ++++----
 urcu-bp.c               |  4 ++--
 urcu-defer.c            | 32 ++++++++++++++++----------------
 urcu-pointer-static.h   |  4 ++--
 urcu-qsbr-static.h      |  8 ++++----
 urcu-qsbr.c             | 12 ++++++------
 urcu-static.h           | 10 +++++-----
 urcu.c                  | 12 ++++++------
 urcu/compiler.h         |  6 +++---
 urcu/system.h           | 12 ++++++------
 urcu/uatomic_arch_x86.h |  2 +-
 urcu/uatomic_generic.h  |  4 ++--
 urcu/wfqueue-static.h   |  6 +++---
 urcu/wfstack-static.h   |  6 +++---
 14 files changed, 63 insertions(+), 63 deletions(-)

diff --git a/urcu-bp-static.h b/urcu-bp-static.h
index 2049ee6..14c6cfe 100644
--- a/urcu-bp-static.h
+++ b/urcu-bp-static.h
@@ -162,7 +162,7 @@ static inline int rcu_old_gp_ongoing(long *value)
          * Make sure both tests below are done on the same version of *value
          * to insure consistency.
          */
-        v = CAA_LOAD_SHARED(*value);
+        v = CMM_LOAD_SHARED(*value);
         return (v & RCU_GP_CTR_NEST_MASK) &&
                 ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 }
@@ -182,14 +182,14 @@ static inline void _rcu_read_lock(void)
          *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
          */
         if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-                _CAA_STORE_SHARED(rcu_reader->ctr, _CAA_LOAD_SHARED(rcu_gp_ctr));
+                _CMM_STORE_SHARED(rcu_reader->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
                 /*
                  * Set active readers count for outermost nesting level before
                  * accessing the pointer.
                  */
                 cmm_smp_mb();
         } else {
-                _CAA_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
+                _CMM_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
         }
 }
 
@@ -199,7 +199,7 @@ static inline void _rcu_read_unlock(void)
          * Finish using rcu before decrementing the pointer.
          */
         cmm_smp_mb();
-        _CAA_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
+        _CMM_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
         cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
 }
 
diff --git a/urcu-bp.c b/urcu-bp.c
index 136f97d..62d2704 100644
--- a/urcu-bp.c
+++ b/urcu-bp.c
@@ -123,13 +123,13 @@ void update_counter_and_wait(void)
         struct rcu_reader *index, *tmp;
 
         /* Switch parity: 0 -> 1, 1 -> 0 */
-        CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+        CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
 
         /*
          * Must commit qparity update to memory before waiting for other parity
          * quiescent state. Failure to do so could result in the writer waiting
          * forever while new readers are always accessing data (no progress).
-         * Ensured by CAA_STORE_SHARED and CAA_LOAD_SHARED.
+         * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
          */
 
         /*
diff --git a/urcu-defer.c b/urcu-defer.c
index 796e7e1..28bb18c 100644
--- a/urcu-defer.c
+++ b/urcu-defer.c
@@ -110,7 +110,7 @@ static unsigned long rcu_defer_num_callbacks(void)
 
         mutex_lock(&rcu_defer_mutex);
         cds_list_for_each_entry(index, &registry, list) {
-                head = CAA_LOAD_SHARED(index->head);
+                head = CMM_LOAD_SHARED(index->head);
                 num_items += head - index->tail;
         }
         mutex_unlock(&rcu_defer_mutex);
@@ -153,21 +153,21 @@ static void rcu_defer_barrier_queue(struct defer_queue *queue,
 
         for (i = queue->tail; i != head;) {
                 cmm_smp_rmb();       /* read head before q[]. */
-                p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                 if (unlikely(DQ_IS_FCT_BIT(p))) {
                         DQ_CLEAR_FCT_BIT(p);
                         queue->last_fct_out = p;
-                        p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                        p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                 } else if (unlikely(p == DQ_FCT_MARK)) {
-                        p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                        p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                         queue->last_fct_out = p;
-                        p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                        p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                 }
                 fct = queue->last_fct_out;
                 fct(p);
         }
         cmm_smp_mb();   /* push tail after having used q[] */
-        CAA_STORE_SHARED(queue->tail, i);
+        CMM_STORE_SHARED(queue->tail, i);
 }
 
 static void _rcu_defer_barrier_thread(void)
@@ -212,7 +212,7 @@ void rcu_defer_barrier(void)
 
         mutex_lock(&rcu_defer_mutex);
         cds_list_for_each_entry(index, &registry, list) {
-                index->last_head = CAA_LOAD_SHARED(index->head);
+                index->last_head = CMM_LOAD_SHARED(index->head);
                 num_items += index->last_head - index->tail;
         }
         if (likely(!num_items)) {
@@ -241,7 +241,7 @@ void _defer_rcu(void (*fct)(void *p), void *p)
          * thread.
          */
         head = defer_queue.head;
-        tail = CAA_LOAD_SHARED(defer_queue.tail);
+        tail = CMM_LOAD_SHARED(defer_queue.tail);
 
         /*
          * If queue is full, or reached threshold. Empty queue ourself.
@@ -250,7 +250,7 @@ void _defer_rcu(void (*fct)(void *p), void *p)
         if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
                 assert(head - tail <= DEFER_QUEUE_SIZE);
                 rcu_defer_barrier_thread();
-                assert(head - CAA_LOAD_SHARED(defer_queue.tail) == 0);
+                assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
         }
 
         if (unlikely(defer_queue.last_fct_in != fct)) {
@@ -261,13 +261,13 @@ void _defer_rcu(void (*fct)(void *p), void *p)
                          * marker, write DQ_FCT_MARK followed by the function
                          * pointer.
                          */
-                        _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                        _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                         DQ_FCT_MARK);
-                        _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                        _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                         fct);
                 } else {
                         DQ_SET_FCT_BIT(fct);
-                        _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                        _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                         fct);
                 }
         } else {
@@ -276,16 +276,16 @@ void _defer_rcu(void (*fct)(void *p), void *p)
                          * If the data to encode is not aligned or the marker,
                          * write DQ_FCT_MARK followed by the function pointer.
                          */
-                        _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                        _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                         DQ_FCT_MARK);
-                        _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                        _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                         fct);
                 }
         }
-        _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
+        _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
         cmm_smp_wmb();  /* Publish new pointer before head */
                         /* Write q[] before head. */
-        CAA_STORE_SHARED(defer_queue.head, head);
+        CMM_STORE_SHARED(defer_queue.head, head);
         cmm_smp_mb();   /* Write queue head before read futex */
         /*
          * Wake-up any waiting defer thread.
diff --git a/urcu-pointer-static.h b/urcu-pointer-static.h
index 5a1e0e4..b644486 100644
--- a/urcu-pointer-static.h
+++ b/urcu-pointer-static.h
@@ -49,7 +49,7 @@ extern "C" {
  * Inserts memory barriers on architectures that require them (currently only
  * Alpha) and documents which pointers are protected by RCU.
  *
- * The compiler memory barrier in CAA_LOAD_SHARED() ensures that value-speculative
+ * The compiler memory barrier in CMM_LOAD_SHARED() ensures that value-speculative
  * optimizations (e.g. VSS: Value Speculation Scheduling) does not perform the
  * data read before the pointer read by speculating the value of the pointer.
  * Correct ordering is ensured because the pointer is read as a volatile access.
@@ -62,7 +62,7 @@ extern "C" {
  */
 
 #define _rcu_dereference(p)     ({ \
-                typeof(p) _________p1 = CAA_LOAD_SHARED(p); \
+                typeof(p) _________p1 = CMM_LOAD_SHARED(p); \
                 cmm_smp_read_barrier_depends(); \
                 (_________p1); \
                 })
diff --git a/urcu-qsbr-static.h b/urcu-qsbr-static.h
index da4a782..e0b12be 100644
--- a/urcu-qsbr-static.h
+++ b/urcu-qsbr-static.h
@@ -159,7 +159,7 @@ static inline int rcu_gp_ongoing(unsigned long *ctr)
 {
         unsigned long v;
 
-        v = CAA_LOAD_SHARED(*ctr);
+        v = CMM_LOAD_SHARED(*ctr);
         return v && (v != rcu_gp_ctr);
 }
 
@@ -175,7 +175,7 @@ static inline void _rcu_read_unlock(void)
 static inline void _rcu_quiescent_state(void)
 {
         cmm_smp_mb();
-        _CAA_STORE_SHARED(rcu_reader.ctr, _CAA_LOAD_SHARED(rcu_gp_ctr));
+        _CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
         cmm_smp_mb();   /* write rcu_reader.ctr before read futex */
         wake_up_gp();
         cmm_smp_mb();
@@ -184,7 +184,7 @@ static inline void _rcu_quiescent_state(void)
 static inline void _rcu_thread_offline(void)
 {
         cmm_smp_mb();
-        CAA_STORE_SHARED(rcu_reader.ctr, 0);
+        CMM_STORE_SHARED(rcu_reader.ctr, 0);
         cmm_smp_mb();   /* write rcu_reader.ctr before read futex */
         wake_up_gp();
         cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
@@ -193,7 +193,7 @@ static inline void _rcu_thread_offline(void)
 static inline void _rcu_thread_online(void)
 {
         cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
-        _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+        _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
         cmm_smp_mb();
 }
 
diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index 607e6ae..69effd5 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -114,10 +114,10 @@ static void update_counter_and_wait(void)
 
 #if (CAA_BITS_PER_LONG < 64)
         /* Switch parity: 0 -> 1, 1 -> 0 */
-        CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+        CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
 #else   /* !(CAA_BITS_PER_LONG < 64) */
         /* Increment current G.P. */
-        CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+        CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
 #endif  /* !(CAA_BITS_PER_LONG < 64) */
 
         /*
@@ -198,7 +198,7 @@ void synchronize_rcu(void)
          * threads registered as readers.
          */
         if (was_online)
-                CAA_STORE_SHARED(rcu_reader.ctr, 0);
+                CMM_STORE_SHARED(rcu_reader.ctr, 0);
 
         mutex_lock(&rcu_gp_lock);
 
@@ -238,7 +238,7 @@ out:
          * freed.
          */
         if (was_online)
-                _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+                _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
         cmm_smp_mb();
 }
 #else /* !(CAA_BITS_PER_LONG < 64) */
@@ -255,7 +255,7 @@ void synchronize_rcu(void)
          */
         cmm_smp_mb();
         if (was_online)
-                CAA_STORE_SHARED(rcu_reader.ctr, 0);
+                CMM_STORE_SHARED(rcu_reader.ctr, 0);
 
         mutex_lock(&rcu_gp_lock);
         if (cds_list_empty(&registry))
@@ -265,7 +265,7 @@ out:
         mutex_unlock(&rcu_gp_lock);
 
         if (was_online)
-                _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+                _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
         cmm_smp_mb();
 }
 #endif /* !(CAA_BITS_PER_LONG < 64) */
diff --git a/urcu-static.h b/urcu-static.h
index 58dfabb..18e4826 100644
--- a/urcu-static.h
+++ b/urcu-static.h
@@ -250,7 +250,7 @@ static inline int rcu_gp_ongoing(unsigned long *ctr)
          * Make sure both tests below are done on the same version of *value
          * to insure consistency.
          */
-        v = CAA_LOAD_SHARED(*ctr);
+        v = CMM_LOAD_SHARED(*ctr);
         return (v & RCU_GP_CTR_NEST_MASK) &&
                 ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 }
@@ -266,14 +266,14 @@ static inline void _rcu_read_lock(void)
          *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
          */
         if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-                _CAA_STORE_SHARED(rcu_reader.ctr, _CAA_LOAD_SHARED(rcu_gp_ctr));
+                _CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
                 /*
                  * Set active readers count for outermost nesting level before
                  * accessing the pointer. See smp_mb_master().
                  */
                 smp_mb_slave(RCU_MB_GROUP);
         } else {
-                _CAA_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
+                _CMM_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
         }
 }
 
@@ -288,12 +288,12 @@ static inline void _rcu_read_unlock(void)
          */
         if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
                 smp_mb_slave(RCU_MB_GROUP);
-                _CAA_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+                _CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
                 /* write rcu_reader.ctr before read futex */
                 smp_mb_slave(RCU_MB_GROUP);
                 wake_up_gp();
         } else {
-                _CAA_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+                _CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
         }
         cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
 }
diff --git a/urcu.c b/urcu.c
index 5a9c2f0..e529ac0 100644
--- a/urcu.c
+++ b/urcu.c
@@ -99,9 +99,9 @@ static void mutex_lock(pthread_mutex_t *mutex)
                         perror("Error in pthread mutex lock");
                         exit(-1);
                 }
-                if (CAA_LOAD_SHARED(rcu_reader.need_mb)) {
+                if (CMM_LOAD_SHARED(rcu_reader.need_mb)) {
                         cmm_smp_mb();
-                        _CAA_STORE_SHARED(rcu_reader.need_mb, 0);
+                        _CMM_STORE_SHARED(rcu_reader.need_mb, 0);
                         cmm_smp_mb();
                 }
                 poll(NULL,0,10);
@@ -155,7 +155,7 @@ static void force_mb_all_readers(void)
          * cache flush is enforced.
          */
         cds_list_for_each_entry(index, &registry, node) {
-                CAA_STORE_SHARED(index->need_mb, 1);
+                CMM_STORE_SHARED(index->need_mb, 1);
                 pthread_kill(index->tid, SIGRCU);
         }
         /*
@@ -172,7 +172,7 @@ static void force_mb_all_readers(void)
          * the Linux Test Project (LTP).
          */
         cds_list_for_each_entry(index, &registry, node) {
-                while (CAA_LOAD_SHARED(index->need_mb)) {
+                while (CMM_LOAD_SHARED(index->need_mb)) {
                         pthread_kill(index->tid, SIGRCU);
                         poll(NULL, 0, 1);
                 }
         }
@@ -205,7 +205,7 @@ void update_counter_and_wait(void)
         struct rcu_reader *index, *tmp;
 
         /* Switch parity: 0 -> 1, 1 -> 0 */
-        CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+        CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
 
         /*
          * Must commit rcu_gp_ctr update to memory before waiting for quiescent
@@ -384,7 +384,7 @@ static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
          * executed on.
          */
         cmm_smp_mb();
-        _CAA_STORE_SHARED(rcu_reader.need_mb, 0);
+        _CMM_STORE_SHARED(rcu_reader.need_mb, 0);
         cmm_smp_mb();
 }
 
diff --git a/urcu/compiler.h b/urcu/compiler.h
index ca32deb..64d12d3 100644
--- a/urcu/compiler.h
+++ b/urcu/compiler.h
@@ -28,16 +28,16 @@
 /*
  * Instruct the compiler to perform only a single access to a variable
  * (prohibits merging and refetching). The compiler is also forbidden to reorder
- * successive instances of CAA_ACCESS_ONCE(), but only when the compiler is aware of
+ * successive instances of CMM_ACCESS_ONCE(), but only when the compiler is aware of
  * particular ordering. Compiler ordering can be ensured, for example, by
- * putting two CAA_ACCESS_ONCE() in separate C statements.
+ * putting two CMM_ACCESS_ONCE() in separate C statements.
  *
  * This macro does absolutely -nothing- to prevent the CPU from reordering,
  * merging, or refetching absolutely anything at any time. Its main intended
  * use is to mediate communication between process-level code and irq/NMI
  * handlers, all running on the same CPU.
  */
-#define CAA_ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define CMM_ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
 #ifndef max
 #define max(a,b) ((a)>(b)?(a):(b))
diff --git a/urcu/system.h b/urcu/system.h
index e018606..86ee6b1 100644
--- a/urcu/system.h
+++ b/urcu/system.h
@@ -24,29 +24,29 @@
 /*
  * Identify a shared load. A cmm_smp_rmc() or cmm_smp_mc() should come before the load.
  */
-#define _CAA_LOAD_SHARED(p) CAA_ACCESS_ONCE(p)
+#define _CMM_LOAD_SHARED(p) CMM_ACCESS_ONCE(p)
 
 /*
  * Load a data from shared memory, doing a cache flush if required.
  */
-#define CAA_LOAD_SHARED(p) \
+#define CMM_LOAD_SHARED(p) \
         ({ \
                 cmm_smp_rmc(); \
-                _CAA_LOAD_SHARED(p); \
+                _CMM_LOAD_SHARED(p); \
         })
 
 /*
  * Identify a shared store. A cmm_smp_wmc() or cmm_smp_mc() should follow the store.
  */
-#define _CAA_STORE_SHARED(x, v) ({ CAA_ACCESS_ONCE(x) = (v); })
+#define _CMM_STORE_SHARED(x, v) ({ CMM_ACCESS_ONCE(x) = (v); })
 
 /*
  * Store v into x, where x is located in shared memory. Performs the required
  * cache flush after writing. Returns v.
  */
-#define CAA_STORE_SHARED(x, v) \
+#define CMM_STORE_SHARED(x, v) \
         ({ \
-                typeof(x) _v = _CAA_STORE_SHARED(x, v); \
+                typeof(x) _v = _CMM_STORE_SHARED(x, v); \
                 cmm_smp_wmc(); \
                 _v; \
         })
diff --git a/urcu/uatomic_arch_x86.h b/urcu/uatomic_arch_x86.h
index ceb7a17..aed513b 100644
--- a/urcu/uatomic_arch_x86.h
+++ b/urcu/uatomic_arch_x86.h
@@ -39,7 +39,7 @@ struct __uatomic_dummy {
 };
 #define __hp(x) ((struct __uatomic_dummy *)(x))
 
-#define _uatomic_set(addr, v) CAA_STORE_SHARED(*(addr), (v))
+#define _uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
 
 /* cmpxchg */
 
diff --git a/urcu/uatomic_generic.h b/urcu/uatomic_generic.h
index ced6191..347e73f 100644
--- a/urcu/uatomic_generic.h
+++ b/urcu/uatomic_generic.h
@@ -29,11 +29,11 @@ extern "C" {
 #endif
 
 #ifndef uatomic_set
-#define uatomic_set(addr, v) CAA_STORE_SHARED(*(addr), (v))
+#define uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
 #endif
 
 #ifndef uatomic_read
-#define uatomic_read(addr) CAA_LOAD_SHARED(*(addr))
+#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
 #endif
 
 #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
diff --git a/urcu/wfqueue-static.h b/urcu/wfqueue-static.h
index 4839c47..30d6e96 100644
--- a/urcu/wfqueue-static.h
+++ b/urcu/wfqueue-static.h
@@ -79,7 +79,7 @@ void _cds_wfq_enqueue(struct cds_wfq_queue *q, struct cds_wfq_node *node)
          * that the queue is being appended to. The following store will append
          * "node" to the queue from a dequeuer perspective.
          */
-        CAA_STORE_SHARED(*old_tail, node);
+        CMM_STORE_SHARED(*old_tail, node);
 }
 
 /*
@@ -99,14 +99,14 @@ ___cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
         /*
          * Queue is empty if it only contains the dummy node.
          */
-        if (q->head == &q->dummy && CAA_LOAD_SHARED(q->tail) == &q->dummy.next)
+        if (q->head == &q->dummy && CMM_LOAD_SHARED(q->tail) == &q->dummy.next)
                 return NULL;
         node = q->head;
 
         /*
          * Adaptative busy-looping waiting for enqueuer to complete enqueue.
          */
-        while ((next = CAA_LOAD_SHARED(node->next)) == NULL) {
+        while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
                 if (++attempt >= WFQ_ADAPT_ATTEMPTS) {
                         poll(NULL, 0, WFQ_WAIT);        /* Wait for 10ms */
                         attempt = 0;
diff --git a/urcu/wfstack-static.h b/urcu/wfstack-static.h
index cecdde1..eed83da 100644
--- a/urcu/wfstack-static.h
+++ b/urcu/wfstack-static.h
@@ -67,7 +67,7 @@ void _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node)
          * At this point, dequeuers see a NULL node->next, they should busy-wait
          * until node->next is set to old_head.
          */
-        CAA_STORE_SHARED(node->next, old_head);
+        CMM_STORE_SHARED(node->next, old_head);
 }
 
 /*
@@ -80,13 +80,13 @@ ___cds_wfs_pop_blocking(struct cds_wfs_stack *s)
         int attempt = 0;
 
 retry:
-        head = CAA_LOAD_SHARED(s->head);
+        head = CMM_LOAD_SHARED(s->head);
         if (head == CDS_WF_STACK_END)
                 return NULL;
         /*
          * Adaptative busy-looping waiting for push to complete.
          */
-        while ((next = CAA_LOAD_SHARED(head->next)) == NULL) {
+        while ((next = CMM_LOAD_SHARED(head->next)) == NULL) {
                 if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
                         poll(NULL, 0, CDS_WFS_WAIT);    /* Wait for 10ms */
                         attempt = 0;
-- 
2.34.1
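
Usage sketch (illustration only, not part of the patch): after this rename,
CMM_STORE_SHARED() pairs a volatile store with cmm_smp_wmc() and
CMM_LOAD_SHARED() pairs a volatile load with cmm_smp_rmc(), as defined in
urcu/system.h above. The program below, including its file name and the
"done" flag, is hypothetical; it only shows one thread publishing a flag and
another polling it through the renamed accessors.

/* cmm_flag_example.c: hypothetical demo of the CMM_ prefixed accessors. */
#include <poll.h>
#include <pthread.h>
#include <stdio.h>

#include <urcu/arch.h>          /* cmm_* barrier/coherency primitives */
#include <urcu/system.h>        /* CMM_LOAD_SHARED(), CMM_STORE_SHARED() */

static int done;

static void *worker(void *arg)
{
        (void) arg;
        /* Publish the flag; CMM_STORE_SHARED() issues cmm_smp_wmc() after the store. */
        CMM_STORE_SHARED(done, 1);
        return NULL;
}

int main(void)
{
        pthread_t tid;

        if (pthread_create(&tid, NULL, worker, NULL))
                return 1;
        /* CMM_LOAD_SHARED() forbids merging or refetching the load across iterations. */
        while (!CMM_LOAD_SHARED(done))
                (void) poll(NULL, 0, 1);        /* wait 1ms between polls */
        pthread_join(tid, NULL);
        printf("worker done\n");
        return 0;
}

With the urcu headers installed, something like "cc -pthread cmm_flag_example.c"
should be enough to build it; the accessors are header-only macros, so no
library needs to be linked for this particular example.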