Fix urcu-bp: don't move registry
[urcu.git] / urcu-bp.c
index 4b3cf01801764c9c132c525876e2cf40ec5895d8..190a5693dff5aaa8ee683faa18f1eeecca9658db 100644
--- a/urcu-bp.c
+++ b/urcu-bp.c
@@ -66,34 +66,25 @@ void *mremap_wrapper(void *old_address, size_t old_size,
 #define MREMAP_FIXED   2
 
 /*
- * mremap wrapper for non-Linux systems. Maps a RW, anonymous private mapping.
+ * mremap wrapper for non-Linux systems, which does not allow MREMAP_MAYMOVE.
  * This is not generic.
 */
 static
 void *mremap_wrapper(void *old_address, size_t old_size,
                size_t new_size, int flags)
 {
-       void *new_address;
-
-       assert(flags & MREMAP_MAYMOVE);
-       assert(!(flags & MREMAP_FIXED));
-       new_address = mmap(old_address, new_size,
-                          PROT_READ | PROT_WRITE,
-                          MAP_ANONYMOUS | MAP_PRIVATE,
-                          -1, 0);
-       if (new_address == MAP_FAILED)
-               return MAP_FAILED;
-       if (old_address) {
-               memcpy(new_address, old_address, old_size);
-               munmap(old_address, old_size);
-       }
-       return new_address;
+       assert(!(flags & MREMAP_MAYMOVE));
+
+       return MAP_FAILED;
 }
 #endif
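
On Linux, expand_arena() further down relies on mremap() growing a mapping in place when MREMAP_MAYMOVE is withheld; on other systems the stub above simply reports failure. A minimal sketch of the resulting calling pattern (grow_or_remap() and its parameters are hypothetical; the fallback mmap() path mirrors what expand_arena() does below):

#include <stdlib.h>
#include <sys/mman.h>

static void *grow_or_remap(void *chunk, size_t old_len, size_t new_len)
{
	void *p;

	/* No MREMAP_MAYMOVE: fail rather than relocate the mapping. */
	p = mremap_wrapper(chunk, old_len, new_len, 0);
	if (p != MAP_FAILED)
		return p;	/* Grown in place. */
	/* Could not grow in place: map a brand new chunk instead. */
	p = mmap(NULL, new_len, PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		abort();
	return p;
}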
 
 /* Sleep delay in us */
 #define RCU_SLEEP_DELAY                1000
-#define ARENA_INIT_ALLOC       16
+#define INIT_NR_THREADS                8
+#define ARENA_INIT_ALLOC               \
+       sizeof(struct registry_chunk)   \
+       + INIT_NR_THREADS * sizeof(struct rcu_reader)
 
 /*
  * Active attempts to check for reader Q.S. before calling sleep().
@@ -109,13 +100,7 @@ unsigned int rcu_yield_active;
 DEFINE_URCU_TLS(unsigned int, rcu_rand_yield);
 #endif
 
-/*
- * Global grace period counter.
- * Contains the current RCU_GP_CTR_PHASE.
- * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
- * Written to only by writer with mutex taken. Read by both writer and readers.
- */
-long rcu_gp_ctr = RCU_GP_COUNT;
+struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };
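
struct rcu_gp itself is declared in the urcu headers rather than in this file. A sketch of the layout this initializer assumes, carrying over the documentation of the removed rcu_gp_ctr global it replaces:

/* Sketch only; the real definition lives in the urcu static headers. */
struct rcu_gp {
	/*
	 * Global grace period counter. Contains the current
	 * RCU_GP_CTR_PHASE, plus an RCU_GP_COUNT of 1 to accelerate
	 * the reader fast path. Written to only by the writer with
	 * the mutex taken; read by both writer and readers.
	 */
	long ctr;
};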
 
 /*
  * Pointer to registry elements. Written to only by each individual reader. Read
@@ -125,13 +110,20 @@ DEFINE_URCU_TLS(struct rcu_reader *, rcu_reader);
 
 static CDS_LIST_HEAD(registry);
 
+struct registry_chunk {
+       size_t data_len;                /* data length */
+       size_t used;                    /* data used */
+       struct cds_list_head node;      /* chunk_list node */
+       char data[];
+};
+
 struct registry_arena {
-       void *p;
-       size_t len;
-       size_t used;
+       struct cds_list_head chunk_list;
 };
 
-static struct registry_arena registry_arena;
+static struct registry_arena registry_arena = {
+       .chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
+};
 
 /* Saved fork signal mask, protected by rcu_gp_lock */
 static sigset_t saved_fork_signal_mask;
@@ -164,25 +156,44 @@ static void mutex_unlock(pthread_mutex_t *mutex)
                urcu_die(ret);
 }
 
-static void wait_for_readers(void)
+static void wait_for_readers(struct cds_list_head *input_readers,
+                       struct cds_list_head *cur_snap_readers,
+                       struct cds_list_head *qsreaders)
 {
-       CDS_LIST_HEAD(qsreaders);
        int wait_loops = 0;
        struct rcu_reader *index, *tmp;
 
        /*
         * Wait for each thread URCU_TLS(rcu_reader).ctr to either
         * indicate quiescence (not nested), or observe the current
-        * rcu_gp_ctr value.
+        * rcu_gp.ctr value.
         */
        for (;;) {
                wait_loops++;
-               cds_list_for_each_entry_safe(index, tmp, &registry, node) {
-                       if (!rcu_old_gp_ongoing(&index->ctr))
-                               cds_list_move(&index->node, &qsreaders);
+               cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
+                       switch (rcu_reader_state(&index->ctr)) {
+                       case RCU_READER_ACTIVE_CURRENT:
+                               if (cur_snap_readers) {
+                                       cds_list_move(&index->node,
+                                               cur_snap_readers);
+                                       break;
+                               }
+                               /* Fall-through */
+                       case RCU_READER_INACTIVE:
+                               cds_list_move(&index->node, qsreaders);
+                               break;
+                       case RCU_READER_ACTIVE_OLD:
+                               /*
+                                * Old snapshot. Leaving node in
+                                * input_readers will make us busy-loop
+                                * until the snapshot becomes current or
+                                * the reader becomes inactive.
+                                */
+                               break;
+                       }
                }
 
-               if (cds_list_empty(&registry)) {
+               if (cds_list_empty(input_readers)) {
                        break;
                } else {
                        if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
@@ -191,18 +202,18 @@ static void wait_for_readers(void)
                                caa_cpu_relax();
                }
        }
-       /* put back the reader list in the registry */
-       cds_list_splice(&qsreaders, &registry);
 }
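
The three reader states and rcu_reader_state() come from the urcu-bp static headers, not from this file. A sketch of the classification the loop above depends on, assuming ctr keeps a nesting count in RCU_GP_CTR_NEST_MASK and the grace period parity in RCU_GP_CTR_PHASE (the same encoding the removed rcu_old_gp_ongoing() test relied on):

enum rcu_state {
	RCU_READER_ACTIVE_CURRENT,	/* Nested, observed current rcu_gp.ctr phase. */
	RCU_READER_ACTIVE_OLD,		/* Nested, still on the previous phase. */
	RCU_READER_INACTIVE,		/* Not in a read-side critical section. */
};

/* Sketch; the real helper lives in the urcu static headers. */
static enum rcu_state rcu_reader_state(long *ctr)
{
	long v = CMM_LOAD_SHARED(*ctr);

	if (!(v & RCU_GP_CTR_NEST_MASK))
		return RCU_READER_INACTIVE;
	if (!((v ^ rcu_gp.ctr) & RCU_GP_CTR_PHASE))
		return RCU_READER_ACTIVE_CURRENT;
	return RCU_READER_ACTIVE_OLD;
}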
 
 void synchronize_rcu(void)
 {
+       CDS_LIST_HEAD(cur_snap_readers);
+       CDS_LIST_HEAD(qsreaders);
        sigset_t newmask, oldmask;
        int ret;
 
-       ret = sigemptyset(&newmask);
+       ret = sigfillset(&newmask);
        assert(!ret);
-       ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
+       ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
        assert(!ret);
 
        mutex_lock(&rcu_gp_lock);
@@ -221,7 +232,7 @@ void synchronize_rcu(void)
        /*
         * Wait for readers to observe original parity or be quiescent.
         */
-       wait_for_readers();
+       wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
 
        /*
         * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
@@ -231,7 +242,7 @@ void synchronize_rcu(void)
        cmm_smp_mb();
 
        /* Switch parity: 0 -> 1, 1 -> 0 */
-       CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+       CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);
 
        /*
         * Must commit qparity update to memory before waiting for other parity
@@ -250,7 +261,12 @@ void synchronize_rcu(void)
        /*
         * Wait for readers to observe new parity or be quiescent.
         */
-       wait_for_readers();
+       wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
+
+       /*
+        * Put quiescent reader list back into registry.
+        */
+       cds_list_splice(&qsreaders, &registry);
 
        /*
         * Finish waiting for reader threads before letting the old ptr being
@@ -277,81 +293,156 @@ void rcu_read_unlock(void)
        _rcu_read_unlock();
 }
 
+int rcu_read_ongoing(void)
+{
+       return _rcu_read_ongoing();
+}
+
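_rcu_read_ongoing() is defined in the urcu-bp static header, not shown in this diff. A plausible sketch, assuming it tests the nesting bits of the caller's per-thread counter (urcu-bp auto-registers threads on first use, hence the registration check):

static inline int _rcu_read_ongoing(void)
{
	/* Sketch only; the real helper lives in the static header. */
	if (caa_unlikely(!URCU_TLS(rcu_reader)))
		rcu_bp_register();
	return URCU_TLS(rcu_reader)->ctr & RCU_GP_CTR_NEST_MASK;
}
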
 /*
- * only grow for now.
+ * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
+ * Else, try expanding the last chunk. If this fails, allocate a new
+ * chunk twice as big as the last chunk.
+ * Memory used by chunks _never_ moves. A chunk could theoretically be
+ * freed when all "used" slots are released, but we don't do it at this
+ * point.
  */
-static void resize_arena(struct registry_arena *arena, size_t len)
+static
+void expand_arena(struct registry_arena *arena)
 {
-       void *new_arena;
-
-       if (!arena->p)
-               new_arena = mmap(arena->p, len,
-                                PROT_READ | PROT_WRITE,
-                                MAP_ANONYMOUS | MAP_PRIVATE,
-                                -1, 0);
-       else
-               new_arena = mremap_wrapper(arena->p, arena->len,
-                                       len, MREMAP_MAYMOVE);
-       assert(new_arena != MAP_FAILED);
+       struct registry_chunk *new_chunk, *last_chunk;
+       size_t old_chunk_len, new_chunk_len;
+
+       /* No chunk. */
+       if (cds_list_empty(&arena->chunk_list)) {
+               assert(ARENA_INIT_ALLOC >=
+                       sizeof(struct registry_chunk)
+                       + sizeof(struct rcu_reader));
+               new_chunk_len = ARENA_INIT_ALLOC;
+               new_chunk = mmap(NULL, new_chunk_len,
+                       PROT_READ | PROT_WRITE,
+                       MAP_ANONYMOUS | MAP_PRIVATE,
+                       -1, 0);
+               if (new_chunk == MAP_FAILED)
+                       abort();
+               bzero(new_chunk, new_chunk_len);
+               new_chunk->data_len =
+                       new_chunk_len - sizeof(struct registry_chunk);
+               cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
+               return;         /* We're done. */
+       }
 
-       /*
-        * re-used the same region ?
-        */
-       if (new_arena == arena->p)
-               return;
+       /* Try expanding last chunk. */
+       last_chunk = cds_list_entry(arena->chunk_list.prev,
+               struct registry_chunk, node);
+       old_chunk_len =
+               last_chunk->data_len + sizeof(struct registry_chunk);
+       new_chunk_len = old_chunk_len << 1;
+
+       /* Don't allow memory mapping to move, just expand. */
+       new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
+               new_chunk_len, 0);
+       if (new_chunk != MAP_FAILED) {
+               /* Should not have moved. */
+               assert(new_chunk == last_chunk);
+               bzero((char *) last_chunk + old_chunk_len,
+                       new_chunk_len - old_chunk_len);
+               last_chunk->data_len =
+                       new_chunk_len - sizeof(struct registry_chunk);
+               return;         /* We're done. */
+       }
 
-       bzero(new_arena + arena->len, len - arena->len);
-       arena->p = new_arena;
+       /* Remap did not succeed, we need to add a new chunk. */
+       new_chunk = mmap(NULL, new_chunk_len,
+               PROT_READ | PROT_WRITE,
+               MAP_ANONYMOUS | MAP_PRIVATE,
+               -1, 0);
+       if (new_chunk == MAP_FAILED)
+               abort();
+       bzero(new_chunk, new_chunk_len);
+       new_chunk->data_len =
+               new_chunk_len - sizeof(struct registry_chunk);
+       cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
 }
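
With INIT_NR_THREADS at 8, the initial chunk holds eight struct rcu_reader slots after its header, and every later chunk doubles the previous total size, so capacity grows geometrically. An illustrative computation (the 32- and 64-byte sizes are hypothetical; the real values depend on the ABI):

#include <stdio.h>

int main(void)
{
	size_t hdr = 32;		/* Stand-in for sizeof(struct registry_chunk). */
	size_t reader = 64;		/* Stand-in for sizeof(struct rcu_reader). */
	size_t len = hdr + 8 * reader;	/* ARENA_INIT_ALLOC for 8 threads. */
	int i;

	for (i = 0; i < 4; i++, len <<= 1)
		printf("chunk %d: %zu bytes, room for %zu readers\n",
			i, len, (len - hdr) / reader);
	return 0;
}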
 
-/* Called with signals off and mutex locked */
-static void add_thread(void)
+static
+struct rcu_reader *arena_alloc(struct registry_arena *arena)
 {
+       struct registry_chunk *chunk;
        struct rcu_reader *rcu_reader_reg;
+       int expand_done = 0;    /* Only allow expanding once per alloc */
+       size_t len = sizeof(struct rcu_reader);
 
-       if (registry_arena.len
-           < registry_arena.used + sizeof(struct rcu_reader))
-               resize_arena(&registry_arena,
-               caa_max(registry_arena.len << 1, ARENA_INIT_ALLOC));
-       /*
-        * Find a free spot.
-        */
-       for (rcu_reader_reg = registry_arena.p;
-            (void *)rcu_reader_reg < registry_arena.p + registry_arena.len;
-            rcu_reader_reg++) {
-               if (!rcu_reader_reg->alloc)
-                       break;
+retry:
+       cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
+               if (chunk->data_len - chunk->used < len)
+                       continue;
+               /* Find spot */
+               for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
+                               rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
+                               rcu_reader_reg++) {
+                       if (!rcu_reader_reg->alloc) {
+                               rcu_reader_reg->alloc = 1;
+                               chunk->used += len;
+                               return rcu_reader_reg;
+                       }
+               }
        }
-       rcu_reader_reg->alloc = 1;
-       registry_arena.used += sizeof(struct rcu_reader);
+
+       if (!expand_done) {
+               expand_arena(arena);
+               expand_done = 1;
+               goto retry;
+       }
+
+       return NULL;
+}
+
+/* Called with signals off and mutex locked */
+static
+void add_thread(void)
+{
+       struct rcu_reader *rcu_reader_reg;
+
+       rcu_reader_reg = arena_alloc(&registry_arena);
+       if (!rcu_reader_reg)
+               abort();
 
        /* Add to registry */
        rcu_reader_reg->tid = pthread_self();
        assert(rcu_reader_reg->ctr == 0);
        cds_list_add(&rcu_reader_reg->node, &registry);
+       /*
+        * Reader threads point into the reader registry. This is why
+        * its memory must never be relocated.
+        */
        URCU_TLS(rcu_reader) = rcu_reader_reg;
 }
 
 /* Called with signals off and mutex locked */
 static void rcu_gc_registry(void)
 {
+       struct registry_chunk *chunk;
        struct rcu_reader *rcu_reader_reg;
-       pthread_t tid;
-       int ret;
 
-       for (rcu_reader_reg = registry_arena.p;
-            (void *)rcu_reader_reg < registry_arena.p + registry_arena.len;
-            rcu_reader_reg++) {
-               if (!rcu_reader_reg->alloc)
-                       continue;
-               tid = rcu_reader_reg->tid;
-               ret = pthread_kill(tid, 0);
-               assert(ret != EINVAL);
-               if (ret == ESRCH) {
-                       cds_list_del(&rcu_reader_reg->node);
-                       rcu_reader_reg->ctr = 0;
-                       rcu_reader_reg->alloc = 0;
-                       registry_arena.used -= sizeof(struct rcu_reader);
+       cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
+               for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
+                               rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
+                               rcu_reader_reg++) {
+                       pthread_t tid;
+                       int ret;
+
+                       if (!rcu_reader_reg->alloc)
+                               continue;
+                       tid = rcu_reader_reg->tid;
+                       ret = pthread_kill(tid, 0);
+                       assert(ret != EINVAL);
+                       if (ret == ESRCH) {
+                               cds_list_del(&rcu_reader_reg->node);
+                               rcu_reader_reg->ctr = 0;
+                               rcu_reader_reg->alloc = 0;
+                               chunk->used -= sizeof(struct rcu_reader);
+                       }
+
                }
        }
 }
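
pthread_kill() with signal number 0 sends nothing; it only performs error checking, so ESRCH identifies a thread that has already exited. A standalone sketch of the liveness probe used above (only meaningful as long as the thread ID has not been reused):

#include <pthread.h>
#include <signal.h>
#include <errno.h>

/* Sketch: nonzero if tid still names a live thread. */
static int thread_is_alive(pthread_t tid)
{
	return pthread_kill(tid, 0) != ESRCH;
}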
@@ -362,9 +453,9 @@ void rcu_bp_register(void)
        sigset_t newmask, oldmask;
        int ret;
 
-       ret = sigemptyset(&newmask);
+       ret = sigfillset(&newmask);
        assert(!ret);
-       ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
+       ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
        assert(!ret);
 
        /*
@@ -383,8 +474,12 @@ end:
 
 void rcu_bp_exit(void)
 {
-       if (registry_arena.p)
-               munmap(registry_arena.p, registry_arena.len);
+       struct registry_chunk *chunk, *tmp;
+
+       cds_list_for_each_entry_safe(chunk, tmp,
+                       &registry_arena.chunk_list, node) {
+               munmap(chunk, chunk->data_len + sizeof(struct registry_chunk));
+       }
 }
 
 /*
@@ -397,9 +492,9 @@ void rcu_bp_before_fork(void)
        sigset_t newmask, oldmask;
        int ret;
 
-       ret = sigemptyset(&newmask);
+       ret = sigfillset(&newmask);
        assert(!ret);
-       ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
+       ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
        assert(!ret);
        mutex_lock(&rcu_gp_lock);
        saved_fork_signal_mask = oldmask;
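
The same sigfillset()/SIG_BLOCK sequence now appears in synchronize_rcu(), rcu_bp_register(), and rcu_bp_before_fork(): block every signal while saving the previous mask, instead of replacing the mask outright with an empty set as the old SIG_SETMASK code did. A sketch of the pattern as hypothetical helpers:

#include <pthread.h>
#include <signal.h>
#include <assert.h>

static void block_all_signals(sigset_t *oldmask)
{
	sigset_t newmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	/* SIG_BLOCK adds to the caller's mask instead of replacing it. */
	ret = pthread_sigmask(SIG_BLOCK, &newmask, oldmask);
	assert(!ret);
}

static void restore_signal_mask(const sigset_t *oldmask)
{
	int ret;

	ret = pthread_sigmask(SIG_SETMASK, oldmask, NULL);
	assert(!ret);
}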