+
+__attribute__((visibility("protected")))
+struct cds_ja_shadow_node *rcuja_shadow_set(struct cds_lfht *ht,
+ struct cds_ja_inode_flag *new_node_flag,
+ struct cds_ja_shadow_node *inherit_from,
+ struct cds_ja *ja, int level)
+{
+ struct cds_ja_shadow_node *shadow_node;
+ struct cds_lfht_node *ret_node;
+ const struct rcu_flavor_struct *flavor;
+
+ shadow_node = calloc(sizeof(*shadow_node), 1);
+ if (!shadow_node)
+ return NULL;
+
+ shadow_node->node_flag = new_node_flag;
+ shadow_node->ja = ja;
+ /*
+ * Lock can be inherited from previous node at this position.
+ */
+ if (inherit_from) {
+ shadow_node->lock = inherit_from->lock;
+ shadow_node->level = inherit_from->level;
+ } else {
+ shadow_node->lock = calloc(sizeof(*shadow_node->lock), 1);
+ if (!shadow_node->lock) {
+ free(shadow_node);
+ return NULL;
+ }
+ pthread_mutex_init(shadow_node->lock, NULL);
+ shadow_node->level = level;
+ }
+
+ flavor = cds_lfht_rcu_flavor(ht);
+ flavor->read_lock();
+ ret_node = cds_lfht_add_unique(ht,
+ hash_pointer(new_node_flag, hash_seed),
+ match_pointer,
+ new_node_flag,
+ &shadow_node->ht_node);
+ flavor->read_unlock();
+
+ if (ret_node != &shadow_node->ht_node) {
+ free(shadow_node);
+ return NULL;
+ }
+ return shadow_node;
+}
+
+static
+void free_shadow_node(struct rcu_head *head)
+{
+ struct cds_ja_shadow_node *shadow_node =
+ caa_container_of(head, struct cds_ja_shadow_node, head);
+ free(shadow_node);
+}
+
+static
+void free_shadow_node_and_node(struct rcu_head *head)
+{
+ struct cds_ja_shadow_node *shadow_node =
+ caa_container_of(head, struct cds_ja_shadow_node, head);
+ free_cds_ja_node(shadow_node->ja, ja_node_ptr(shadow_node->node_flag));
+ free(shadow_node);
+}
+
+static
+void free_shadow_node_and_lock(struct rcu_head *head)
+{
+ struct cds_ja_shadow_node *shadow_node =
+ caa_container_of(head, struct cds_ja_shadow_node, head);
+ free(shadow_node->lock);
+ free(shadow_node);
+}
+
+static
+void free_shadow_node_and_node_and_lock(struct rcu_head *head)
+{
+ struct cds_ja_shadow_node *shadow_node =
+ caa_container_of(head, struct cds_ja_shadow_node, head);
+ assert(shadow_node->level);
+ free_cds_ja_node(shadow_node->ja, ja_node_ptr(shadow_node->node_flag));
+ free(shadow_node->lock);
+ free(shadow_node);
+}
+
/*
 * Remove the shadow node matching @node_flag from hash table @ht, and
 * queue reclaim of the shadow node — and, depending on @flags, of the
 * ja node and/or the mutex — after an RCU grace period.
 *
 * If @shadow_node is NULL, it is looked up from @node_flag and its
 * mutex is taken around the deletion.  NOTE(review): when the caller
 * passes @shadow_node in, the mutex is NOT taken here — presumably the
 * caller already holds it; confirm against call sites.
 *
 * @flags: RCUJA_SHADOW_CLEAR_FREE_NODE frees the ja node (only honored
 * for non-zero level shadow nodes); RCUJA_SHADOW_CLEAR_FREE_LOCK also
 * frees the mutex.
 *
 * Returns 0 on success, -ENOENT if no shadow node matches @node_flag,
 * or the nonzero return of cds_lfht_del().
 */
__attribute__((visibility("protected")))
int rcuja_shadow_clear(struct cds_lfht *ht,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		unsigned int flags)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *lookup_node;
	const struct rcu_flavor_struct *flavor;
	int ret, lockret;
	int lookup_shadow = 0;	/* nonzero: we took the mutex ourselves */

	/* Read-side lock protects hash table nodes vs concurrent resize. */
	flavor = cds_lfht_rcu_flavor(ht);
	flavor->read_lock();

	cds_lfht_lookup(ht, hash_pointer(node_flag, hash_seed),
			match_pointer, node_flag, &iter);
	lookup_node = cds_lfht_iter_get_node(&iter);
	if (!lookup_node) {
		ret = -ENOENT;
		goto rcu_unlock;
	}

	if (!shadow_node) {
		shadow_node = caa_container_of(lookup_node,
				struct cds_ja_shadow_node, ht_node);
		lockret = pthread_mutex_lock(shadow_node->lock);
		assert(!lockret);
		lookup_shadow = 1;
	}

	/*
	 * Holding the mutex across deletion, and by also re-checking if
	 * the node is deleted with mutex held at lookup ensure that we
	 * don't let RCU JA use a node being removed.
	 */
	ret = cds_lfht_del(ht, lookup_node);
	if (ret)
		goto unlock;
	/* Pick the RCU callback matching the requested reclaim scope. */
	if ((flags & RCUJA_SHADOW_CLEAR_FREE_NODE)
			&& shadow_node->level) {
		if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
			flavor->update_call_rcu(&shadow_node->head,
				free_shadow_node_and_node_and_lock);
		} else {
			flavor->update_call_rcu(&shadow_node->head,
				free_shadow_node_and_node);
		}
	} else {
		if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
			flavor->update_call_rcu(&shadow_node->head,
				free_shadow_node_and_lock);
		} else {
			flavor->update_call_rcu(&shadow_node->head,
				free_shadow_node);
		}
	}
unlock:
	/* Only unlock if we were the ones who locked (lookup path). */
	if (lookup_shadow) {
		lockret = pthread_mutex_unlock(shadow_node->lock);
		assert(!lockret);
	}
rcu_unlock:
	flavor->read_unlock();

	return ret;
}
+
/*
 * Delete all shadow nodes and nodes from hash table, along with their
 * associated lock.  @flags has the same semantics as in
 * rcuja_shadow_clear(): RCUJA_SHADOW_CLEAR_FREE_NODE frees the ja node
 * (non-zero level shadow nodes only), RCUJA_SHADOW_CLEAR_FREE_LOCK
 * also frees the mutex.  Reclaim is deferred past an RCU grace period.
 */
__attribute__((visibility("protected")))
void rcuja_shadow_prune(struct cds_lfht *ht,
		unsigned int flags)
{
	const struct rcu_flavor_struct *flavor;
	struct cds_ja_shadow_node *shadow_node;
	struct cds_lfht_iter iter;
	int ret, lockret;

	flavor = cds_lfht_rcu_flavor(ht);
	/*
	 * Read-side lock is needed to ensure hash table node existence
	 * vs concurrent resize.
	 */
	flavor->read_lock();
	cds_lfht_for_each_entry(ht, &iter, shadow_node, ht_node) {
		/* Mutex held across deletion; see rcuja_shadow_clear(). */
		lockret = pthread_mutex_lock(shadow_node->lock);
		assert(!lockret);

		ret = cds_lfht_del(ht, &shadow_node->ht_node);
		if (ret)
			goto unlock;
		/* Pick the RCU callback matching the requested reclaim scope. */
		if ((flags & RCUJA_SHADOW_CLEAR_FREE_NODE)
				&& shadow_node->level) {
			if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
				flavor->update_call_rcu(&shadow_node->head,
					free_shadow_node_and_node_and_lock);
			} else {
				flavor->update_call_rcu(&shadow_node->head,
					free_shadow_node_and_node);
			}
		} else {
			if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
				flavor->update_call_rcu(&shadow_node->head,
					free_shadow_node_and_lock);
			} else {
				flavor->update_call_rcu(&shadow_node->head,
					free_shadow_node);
			}
		}
	unlock:
		lockret = pthread_mutex_unlock(shadow_node->lock);
		assert(!lockret);
	}
	flavor->read_unlock();
}
+
+__attribute__((visibility("protected")))
+struct cds_lfht *rcuja_create_ht(const struct rcu_flavor_struct *flavor)
+{
+ return _cds_lfht_new(1, 1, 0,
+ CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING,
+ NULL, flavor, NULL);
+}
+
+__attribute__((visibility("protected")))
+int rcuja_delete_ht(struct cds_lfht *ht)
+{
+ return cds_lfht_destroy(ht, NULL);
+}
+
+__attribute__((constructor))
+void rcuja_ht_init(void)
+{
+ hash_seed = (unsigned long) time(NULL);
+}