projects
/
urcu.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
urcu: move busy-wait code and name it ___cds_wfq_node_sync_next()
[urcu.git]
/
urcu-defer-impl.h
diff --git
a/urcu-defer-impl.h
b/urcu-defer-impl.h
index 4d1ca5e4e63085f4dbe24f7284361e9879e46e4b..a7d0b2f7c45aa0a1054d70aac989293b63c87466 100644
(file)
--- a/
urcu-defer-impl.h
+++ b/
urcu-defer-impl.h
@@
-48,6
+48,8
@@
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/system.h>
+#include <urcu/tls-compat.h>
+#include "urcu-die.h"
/*
* Number of entries in the per-thread defer queue. Must be power of 2.
/*
* Number of entries in the per-thread defer queue. Must be power of 2.
@@
-130,7
+132,7
@@
static int32_t defer_thread_stop;
* Written to only by each individual deferer. Read by both the deferer and
* the reclamation tread.
*/
* Written to only by each individual deferer. Read by both the deferer and
* the reclamation tread.
*/
-static
struct defer_queue __thread defer_queue
;
+static
DEFINE_URCU_TLS(struct defer_queue, defer_queue)
;
static CDS_LIST_HEAD(registry_defer);
static pthread_t tid_defer;
static CDS_LIST_HEAD(registry_defer);
static pthread_t tid_defer;
@@
-140,17
+142,12
@@
static void mutex_lock_defer(pthread_mutex_t *mutex)
#ifndef DISTRUST_SIGNALS_EXTREME
ret = pthread_mutex_lock(mutex);
#ifndef DISTRUST_SIGNALS_EXTREME
ret = pthread_mutex_lock(mutex);
- if (ret) {
- perror("Error in pthread mutex lock");
- exit(-1);
- }
+ if (ret)
+ urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
while ((ret = pthread_mutex_trylock(mutex)) != 0) {
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
while ((ret = pthread_mutex_trylock(mutex)) != 0) {
- if (ret != EBUSY && ret != EINTR) {
- printf("ret = %d, errno = %d\n", ret, errno);
- perror("Error in pthread mutex lock");
- exit(-1);
- }
+ if (ret != EBUSY && ret != EINTR)
+ urcu_die(ret);
poll(NULL,0,10);
}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
poll(NULL,0,10);
}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
@@
-245,12
+242,12
@@
static void _rcu_defer_barrier_thread(void)
{
unsigned long head, num_items;
{
unsigned long head, num_items;
- head =
defer_queue
.head;
- num_items = head -
defer_queue
.tail;
+ head =
URCU_TLS(defer_queue)
.head;
+ num_items = head -
URCU_TLS(defer_queue)
.tail;
if (caa_unlikely(!num_items))
return;
synchronize_rcu();
if (caa_unlikely(!num_items))
return;
synchronize_rcu();
- rcu_defer_barrier_queue(&
defer_queue
, head);
+ rcu_defer_barrier_queue(&
URCU_TLS(defer_queue)
, head);
}
void rcu_defer_barrier_thread(void)
}
void rcu_defer_barrier_thread(void)
@@
-311,8
+308,8
@@
void _defer_rcu(void (*fct)(void *p), void *p)
* Head is only modified by ourself. Tail can be modified by reclamation
* thread.
*/
* Head is only modified by ourself. Tail can be modified by reclamation
* thread.
*/
- head =
defer_queue
.head;
- tail = CMM_LOAD_SHARED(
defer_queue
.tail);
+ head =
URCU_TLS(defer_queue)
.head;
+ tail = CMM_LOAD_SHARED(
URCU_TLS(defer_queue)
.tail);
/*
* If queue is full, or reached threshold. Empty queue ourself.
/*
* If queue is full, or reached threshold. Empty queue ourself.
@@
-321,7
+318,7
@@
void _defer_rcu(void (*fct)(void *p), void *p)
if (caa_unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
assert(head - tail <= DEFER_QUEUE_SIZE);
rcu_defer_barrier_thread();
if (caa_unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
assert(head - tail <= DEFER_QUEUE_SIZE);
rcu_defer_barrier_thread();
- assert(head - CMM_LOAD_SHARED(
defer_queue
.tail) == 0);
+ assert(head - CMM_LOAD_SHARED(
URCU_TLS(defer_queue)
.tail) == 0);
}
/*
}
/*
@@
-340,25
+337,25
@@
void _defer_rcu(void (*fct)(void *p), void *p)
* Decode: see the comments before 'struct defer_queue'
* or the code in rcu_defer_barrier_queue().
*/
* Decode: see the comments before 'struct defer_queue'
* or the code in rcu_defer_barrier_queue().
*/
- if (caa_unlikely(
defer_queue
.last_fct_in != fct
+ if (caa_unlikely(
URCU_TLS(defer_queue)
.last_fct_in != fct
|| DQ_IS_FCT_BIT(p)
|| p == DQ_FCT_MARK)) {
|| DQ_IS_FCT_BIT(p)
|| p == DQ_FCT_MARK)) {
-
defer_queue
.last_fct_in = fct;
+
URCU_TLS(defer_queue)
.last_fct_in = fct;
if (caa_unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
if (caa_unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
- _CMM_STORE_SHARED(
defer_queue
.q[head++ & DEFER_QUEUE_MASK],
+ _CMM_STORE_SHARED(
URCU_TLS(defer_queue)
.q[head++ & DEFER_QUEUE_MASK],
DQ_FCT_MARK);
DQ_FCT_MARK);
- _CMM_STORE_SHARED(
defer_queue
.q[head++ & DEFER_QUEUE_MASK],
+ _CMM_STORE_SHARED(
URCU_TLS(defer_queue)
.q[head++ & DEFER_QUEUE_MASK],
fct);
} else {
DQ_SET_FCT_BIT(fct);
fct);
} else {
DQ_SET_FCT_BIT(fct);
- _CMM_STORE_SHARED(
defer_queue
.q[head++ & DEFER_QUEUE_MASK],
+ _CMM_STORE_SHARED(
URCU_TLS(defer_queue)
.q[head++ & DEFER_QUEUE_MASK],
fct);
}
}
fct);
}
}
- _CMM_STORE_SHARED(
defer_queue
.q[head++ & DEFER_QUEUE_MASK], p);
+ _CMM_STORE_SHARED(
URCU_TLS(defer_queue)
.q[head++ & DEFER_QUEUE_MASK], p);
cmm_smp_wmb(); /* Publish new pointer before head */
/* Write q[] before head. */
cmm_smp_wmb(); /* Publish new pointer before head */
/* Write q[] before head. */
- CMM_STORE_SHARED(
defer_queue
.head, head);
+ CMM_STORE_SHARED(
URCU_TLS(defer_queue)
.head, head);
cmm_smp_mb(); /* Write queue head before read futex */
/*
* Wake-up any waiting defer thread.
cmm_smp_mb(); /* Write queue head before read futex */
/*
* Wake-up any waiting defer thread.
@@
-422,16
+419,16
@@
int rcu_defer_register_thread(void)
{
int was_empty;
{
int was_empty;
- assert(
defer_queue
.last_head == 0);
- assert(
defer_queue
.q == NULL);
-
defer_queue
.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
- if (!
defer_queue
.q)
+ assert(
URCU_TLS(defer_queue)
.last_head == 0);
+ assert(
URCU_TLS(defer_queue)
.q == NULL);
+
URCU_TLS(defer_queue)
.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
+ if (!
URCU_TLS(defer_queue)
.q)
return -ENOMEM;
mutex_lock_defer(&defer_thread_mutex);
mutex_lock_defer(&rcu_defer_mutex);
was_empty = cds_list_empty(&registry_defer);
return -ENOMEM;
mutex_lock_defer(&defer_thread_mutex);
mutex_lock_defer(&rcu_defer_mutex);
was_empty = cds_list_empty(&registry_defer);
- cds_list_add(&
defer_queue
.list, &registry_defer);
+ cds_list_add(&
URCU_TLS(defer_queue)
.list, &registry_defer);
mutex_unlock(&rcu_defer_mutex);
if (was_empty)
mutex_unlock(&rcu_defer_mutex);
if (was_empty)
@@
-446,10
+443,10
@@
void rcu_defer_unregister_thread(void)
mutex_lock_defer(&defer_thread_mutex);
mutex_lock_defer(&rcu_defer_mutex);
mutex_lock_defer(&defer_thread_mutex);
mutex_lock_defer(&rcu_defer_mutex);
- cds_list_del(&
defer_queue
.list);
+ cds_list_del(&
URCU_TLS(defer_queue)
.list);
_rcu_defer_barrier_thread();
_rcu_defer_barrier_thread();
- free(
defer_queue
.q);
-
defer_queue
.q = NULL;
+ free(
URCU_TLS(defer_queue)
.q);
+
URCU_TLS(defer_queue)
.q = NULL;
is_empty = cds_list_empty(&registry_defer);
mutex_unlock(&rcu_defer_mutex);
is_empty = cds_list_empty(&registry_defer);
mutex_unlock(&rcu_defer_mutex);
This page took
0.031106 seconds
and
4
git commands to generate.