Remove parameter from rcu_read_lock()
diff --git a/urcu.c b/urcu.c
index 08fb75dd0feae13421647f7d84615bee6d88654f..31cdf6fc08b61c14e0a8f47d93154c0d54599ccc 100644
--- a/urcu.c
+++ b/urcu.c
@@ -19,10 +19,10 @@
 
 pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-/* Global quiescent period parity */
-int urcu_qparity;
+/* Global grace period counter */
+int urcu_gp_ctr;
 
-int __thread urcu_active_readers[2];
+int __thread urcu_active_readers;
 
 /* Thread IDs of registered readers */
 #define INIT_NUM_THREADS 4
@@ -32,6 +32,11 @@ struct reader_data {
 	int *urcu_active_readers;
 };
 
+#ifdef DEBUG_YIELD
+unsigned int yield_active;
+unsigned int __thread rand_yield;
+#endif
+
 static struct reader_data *reader_data;
 static int num_readers, alloc_readers;
 static int sig_done;
@@ -60,11 +65,9 @@ void internal_urcu_unlock(void)
 /*
  * called with urcu_mutex held.
  */
-static int switch_next_urcu_qparity(void)
+static void switch_next_urcu_qparity(void)
 {
-	int old_parity = urcu_qparity;
-	urcu_qparity = 1 - old_parity;
-	return old_parity;
+	urcu_gp_ctr ^= RCU_GP_CTR_BIT;
 }
 
 static void force_mb_all_threads(void)
@@ -76,20 +79,27 @@ static void force_mb_all_threads(void)
 	 */
 	if (!reader_data)
 		return;
+	debug_yield_write();
 	sig_done = 0;
+	debug_yield_write();
 	mb();	/* write sig_done before sending the signals */
-	for (index = reader_data; index < reader_data + num_readers; index++)
+	debug_yield_write();
+	for (index = reader_data; index < reader_data + num_readers; index++) {
 		pthread_kill(index->tid, SIGURCU);
+		debug_yield_write();
+	}
 	/*
 	 * Wait for sighandler (and thus mb()) to execute on every thread.
 	 * BUSY-LOOP.
 	 */
 	while (sig_done < num_readers)
 		barrier();
+	debug_yield_write();
 	mb();	/* read sig_done before ending the barrier */
+	debug_yield_write();
 }
 
-void wait_for_quiescent_state(int parity)
+void wait_for_quiescent_state(void)
 {
 	struct reader_data *index;
 
@@ -101,7 +111,7 @@ void wait_for_quiescent_state(int parity)
 		/*
 		 * BUSY-LOOP.
 		 */
-		while (index->urcu_active_readers[parity] != 0)
+		while (rcu_old_gp_ongoing(index->urcu_active_readers))
 			barrier();
 	}
 	/*
@@ -115,25 +125,30 @@ void wait_for_quiescent_state(int parity)
 
 static void switch_qparity(void)
 {
-	int prev_parity;
-
 	/* All threads should read qparity before accessing data structure. */
 	/* Write ptr before changing the qparity */
 	force_mb_all_threads();
-	prev_parity = switch_next_urcu_qparity();
+	debug_yield_write();
+	switch_next_urcu_qparity();
+	debug_yield_write();
 
 	/*
 	 * Wait for previous parity to be empty of readers.
 	 */
-	wait_for_quiescent_state(prev_parity);
+	wait_for_quiescent_state();
 }
 
 void synchronize_rcu(void)
 {
+	debug_yield_write();
 	internal_urcu_lock();
+	debug_yield_write();
 	switch_qparity();
+	debug_yield_write();
 	switch_qparity();
+	debug_yield_write();
 	internal_urcu_lock();
+	debug_yield_write();
 }
 
 /*
@@ -144,7 +159,9 @@ void *urcu_publish_content(void **ptr, void *new)
 {
 	void *oldptr;
 
+	debug_yield_write();
 	internal_urcu_lock();
+	debug_yield_write();
 	/*
 	 * We can publish the new pointer before we change the current qparity.
 	 * Readers seeing the new pointer while being in the previous qparity
@@ -156,11 +173,16 @@ void *urcu_publish_content(void **ptr, void *new)
 	 * when the next quiescent state window will be over.
 	 */
 	oldptr = *ptr;
+	debug_yield_write();
 	*ptr = new;
+	debug_yield_write();
 
 	switch_qparity();
+	debug_yield_write();
 	switch_qparity();
+	debug_yield_write();
 	internal_urcu_unlock();
+	debug_yield_write();
 
 	return oldptr;
 }
@@ -186,7 +208,7 @@ void urcu_add_reader(pthread_t id)
 	}
 	reader_data[num_readers].tid = id;
 	/* reference to the TLS of _this_ reader thread. */
-	reader_data[num_readers].urcu_active_readers = urcu_active_readers;
+	reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
 	num_readers++;
 }
 
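
The reader-side counterpart of this change lives in urcu.h, which is not part of this blobdiff. The sketch below is an illustration of the scheme only, not the header from this commit: the position of the phase bit, the names RCU_GP_COUNT and RCU_GP_CTR_NEST_MASK, and the exact function bodies are assumptions, while urcu_gp_ctr, urcu_active_readers, RCU_GP_CTR_BIT and rcu_old_gp_ongoing() are the identifiers the diff itself references. The idea: urcu_gp_ctr carries a grace-period phase bit plus room for a nesting count, rcu_read_lock() (now parameter-free) snapshots it into the per-thread urcu_active_readers, and the writer's wait_for_quiescent_state() spins with rcu_old_gp_ongoing() until no reader still holds a snapshot taken under the previous phase.

/* Sketch only -- assumed constants and bodies, not the urcu.h of this commit. */
#define barrier()		asm volatile("" : : : "memory")
#define RCU_GP_COUNT		(1U << 0)		/* nesting count increment (assumed name) */
#define RCU_GP_CTR_BIT		(1U << 8)		/* grace period phase bit (position assumed) */
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)	/* low bits hold the nesting count (assumed name) */

extern int urcu_gp_ctr;
extern int __thread urcu_active_readers;

static inline void rcu_read_lock(void)		/* no parity parameter anymore */
{
	/* Outermost critical section: snapshot the global counter, which
	 * carries the current phase bit.  Nested calls only bump the count. */
	if (!(urcu_active_readers & RCU_GP_CTR_NEST_MASK))
		urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
	else
		urcu_active_readers += RCU_GP_COUNT;
	barrier();	/* compiler barrier; cross-thread ordering comes from force_mb_all_threads() */
}

static inline void rcu_read_unlock(void)
{
	barrier();	/* finish reading RCU-protected data before dropping the count */
	urcu_active_readers -= RCU_GP_COUNT;
}

/* A reader blocks the previous grace period only if it is inside a critical
 * section (non-zero nesting count) that was entered under the other value of
 * the phase bit. */
static inline int rcu_old_gp_ongoing(int *value)
{
	int v;

	if (value == NULL)
		return 0;
	v = *value;	/* the real code would use a volatile/once access here */
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}

Note that barrier() is only a compiler barrier; as the force_mb_all_threads() changes above show, the cross-thread memory ordering is provided by the SIGURCU-based mb() broadcast rather than by fences in the read-side fast path.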