/*
 * Userspace RCU library - RCU Judy Array Range Support
 *
 * Copyright 2012-2013 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>
#include <urcu/rcuja-range.h>
#include <urcu-flavor.h>

#include "rcuja-internal.h"
/*
 * Discussion about order of lookup/lock vs allocated node deletion.
 *
 * - If node deletion returns before call to
 *   cds_ja_range_lookup(), the node will not be found by lookup.
 * - If node deletion is called after cds_ja_range_lock() returns a
 *   non-NULL range, the deletion will wait until the lock is released
 *   before it takes place.
 * - If node deletion call/return overlaps with the call to
 *   cds_ja_range_lookup() and return from cds_ja_range_lock(), the node
 *   may or may not be found by each of cds_ja_range_lookup() and
 *   cds_ja_range_lock().
 */

/*
 * Discussion about order of lookup/lock vs allocated node add. Assuming
 * no concurrent delete.
 *
 * - If node add returns before call to
 *   cds_ja_range_lookup(), the node will be found by lookup.
 * - If node add is called after cds_ja_range_lookup returns, the node
 *   will not be found by lookup.
 * - If node add call/return overlaps with the call to and return from
 *   cds_ja_range_lookup(), the node may or may not be found.
 * - If node add call/return overlaps with call to cds_ja_range_lookup()
 *   and return from cds_ja_range_lock(), in the specific case where
 *   cds_ja_range_lookup() _does_ succeed, then cds_ja_range_lock() will
 *   succeed (still assuming no concurrent deletion).
 */

/*
 * Discussion: concurrent deletion of contiguous allocated ranges.
 *
 * Ensuring that merge of contiguous free ranges is always performed, we
 * need to ensure locking of concurrent removal of contiguous allocated
 * ranges one with respect to another. This is done by locking the
 * ranges prior to and after the range to remove, even if that range is
 * allocated. This serializes removal of contiguous ranges. The only
 * cases for which there is no range to lock is when removing an
 * allocated range starting at 0, and/or ending at the end of the key
 * space.
 */

/*
 * Discussion of the type state transitions.
 *
 * State transitions of "type" always go from either:
 *
 * CDS_JA_RANGE_FREE -> CDS_JA_RANGE_REMOVED
 * or
 * CDS_JA_RANGE_ALLOCATED -> CDS_JA_RANGE_REMOVED
 *
 * A range type never changes otherwise.
 */
/* Number of bits of the key space managed by range allocation. */
#define CDS_JA_RANGE_KEY_BITS	64

/*
 * Type of a range. Per the state-transition discussion above, a range
 * only ever transitions from ALLOCATED or FREE to REMOVED.
 */
enum cds_ja_range_type {
	CDS_JA_RANGE_ALLOCATED,	/* Range is in use by a caller. */
	CDS_JA_RANGE_FREE,	/* Range is available for allocation. */
	CDS_JA_RANGE_REMOVED,	/* Range removed, awaiting reclaim. */
};
104 * Range goes from start (inclusive) to end (inclusive).
105 * Range start is used as node key in the Judy array.
107 struct cds_ja_range
{
109 struct cds_ja_node ja_node
;
110 pthread_mutex_t lock
;
112 enum cds_ja_range_type type
;
114 /* not required on lookup fast-path */
116 struct rcu_head head
;
119 struct cds_ja_range
*cds_ja_range_lookup(struct cds_ja
*ja
, uint64_t key
)
121 struct cds_ja_node
*node
, *last_node
;
122 struct cds_ja_range
*range
;
124 node
= cds_ja_lookup_below_equal(ja
, key
, NULL
);
127 * Get the last of duplicate chain. Adding a node to Judy array
128 * duplicates inserts them at the end of the chain.
130 cds_ja_for_each_duplicate_rcu(node
)
132 range
= caa_container_of(last_node
, struct cds_ja_range
, ja_node
);
134 * If last node in the duplicates is removed or free, we can
135 * consider that either a removal or add operation is in
136 * progress, or removal is the last completed operation to
137 * update this range. We can therefore consider that this area
140 if (range
->type
!= CDS_JA_RANGE_ALLOCATED
)
143 * We found an allocated range. We can return it for use with
144 * RCU read-side protection for existence. However, we have no
145 * mutual exclusion against removal at this point.
151 * Provide mutual exclusion against removal.
153 struct cds_ja_range
*cds_ja_range_lock(struct cds_ja_range
*range
)
155 pthread_mutex_lock(&range
->lock
);
157 if (range
->type
== CDS_JA_RANGE_REMOVED
)
162 pthread_mutex_unlock(&range
->lock
);
166 void cds_ja_range_unlock(struct cds_ja_range
*range
)
168 pthread_mutex_unlock(&range
->lock
);
172 struct cds_ja_range
*range_create(
173 uint64_t start
, /* inclusive */
174 uint64_t end
, /* inclusive */
176 enum cds_ja_range_type type
)
178 struct cds_ja_range
*range
;
180 range
= calloc(sizeof(*range
), 1);
183 range
->start
= start
;
187 pthread_mutex_init(&range
->lock
, NULL
);
192 void free_range_cb(struct rcu_head
*head
)
194 struct cds_ja_range
*range
=
195 caa_container_of(head
, struct cds_ja_range
, head
);
200 void free_range(struct cds_ja_range
*range
)
206 void rcu_free_range(struct cds_ja
*ja
, struct cds_ja_range
*range
)
208 cds_lfht_rcu_flavor(ja
->ht
)->update_call_rcu(&range
->head
,
212 struct cds_ja_range
*cds_ja_range_add(struct cds_ja
*ja
,
213 uint64_t start
, /* inclusive */
214 uint64_t end
, /* inclusive */
217 struct cds_ja_node
*old_node
, *old_node_end
;
218 struct cds_ja_range
*old_range
, *old_range_end
, *new_range
, *ranges
[3];
219 unsigned int nr_ranges
, i
;
224 * Find if requested range is entirely contained within a single
227 old_node
= cds_ja_lookup_below_equal(ja
, start
, NULL
);
230 old_range
= caa_container_of(old_node
, struct cds_ja_range
, ja_node
);
231 switch (CMM_LOAD_SHARED(old_range
->type
)) {
232 case CDS_JA_RANGE_ALLOCATED
:
234 case CDS_JA_RANGE_FREE
:
236 case CDS_JA_RANGE_REMOVED
:
240 old_node_end
= cds_ja_lookup_below_equal(ja
, end
, NULL
);
241 assert(old_node_end
);
242 old_range_end
= caa_container_of(old_node_end
,
243 struct cds_ja_range
, ja_node
);
244 if (old_range_end
!= old_range
) {
245 switch (CMM_LOAD_SHARED(old_range
->type
)) {
246 case CDS_JA_RANGE_ALLOCATED
:
247 case CDS_JA_RANGE_FREE
: /* fall-through */
249 case CDS_JA_RANGE_REMOVED
:
254 pthread_mutex_lock(&old_range
->lock
);
256 if (old_range
->type
== CDS_JA_RANGE_REMOVED
) {
257 pthread_mutex_unlock(&old_range
->lock
);
261 /* Create replacement ranges: at most 2 free and 1 allocated */
262 if (start
== old_range
->start
) {
263 if (end
== old_range
->end
) {
265 ranges
[0] = new_range
= range_create(start
, end
,
266 priv
, CDS_JA_RANGE_ALLOCATED
);
270 ranges
[0] = new_range
= range_create(start
, end
,
271 priv
, CDS_JA_RANGE_ALLOCATED
);
272 ranges
[1] = range_create(end
+ 1, old_range
->end
,
273 NULL
, CDS_JA_RANGE_FREE
);
277 if (end
== old_range
->end
) {
279 ranges
[0] = range_create(old_range
->start
, start
- 1,
280 NULL
, CDS_JA_RANGE_FREE
);
281 ranges
[1] = new_range
= range_create(start
, end
,
282 priv
, CDS_JA_RANGE_ALLOCATED
);
286 ranges
[0] = range_create(old_range
->start
, start
- 1,
287 NULL
, CDS_JA_RANGE_FREE
);
288 ranges
[1] = new_range
= range_create(start
, end
,
289 priv
, CDS_JA_RANGE_ALLOCATED
);
290 ranges
[2] = range_create(end
+ 1, old_range
->end
,
291 NULL
, CDS_JA_RANGE_FREE
);
296 /* Add replacement ranges to Judy array */
297 for (i
= 0; i
< nr_ranges
; i
++) {
298 ret
= cds_ja_add(ja
, ranges
[i
]->start
, &ranges
[i
]->ja_node
);
303 * We add replacement ranges _before_ removing old ranges, so
304 * concurrent traversals will always see one or the other. This
305 * is OK because we temporarily have a duplicate key, and Judy
306 * arrays provide key existence guarantee for lookups performed
307 * concurrently with add followed by del of duplicate keys.
310 /* Remove old free range */
311 ret
= cds_ja_del(ja
, old_range
->start
, &old_range
->ja_node
);
313 old_range
->type
= CDS_JA_RANGE_REMOVED
;
314 pthread_mutex_unlock(&old_range
->lock
);
316 rcu_free_range(ja
, old_range
);
321 int cds_ja_range_del(struct cds_ja
*ja
, struct cds_ja_range
*range
)
323 struct cds_ja_node
*prev_node
, *next_node
;
324 struct cds_ja_range
*new_range
;
325 struct cds_ja_range
*merge_ranges
[3], *lock_ranges
[3];
326 unsigned int nr_merge
, nr_lock
, i
;
333 prev_node
= cds_ja_lookup_below_equal(ja
, range
->start
- 1, NULL
);
335 struct cds_ja_range
*prev_range
;
337 prev_range
= caa_container_of(prev_node
,
338 struct cds_ja_range
, ja_node
);
339 lock_ranges
[nr_lock
++] = prev_range
;
340 if (prev_range
->type
!= CDS_JA_RANGE_ALLOCATED
)
341 merge_ranges
[nr_merge
++] = prev_range
;
344 lock_ranges
[nr_lock
++] = range
;
345 merge_ranges
[nr_merge
++] = range
;
347 next_node
= cds_ja_lookup_above_equal(ja
, range
->end
+ 1, NULL
);
349 struct cds_ja_range
*next_range
;
351 next_range
= caa_container_of(next_node
,
352 struct cds_ja_range
, ja_node
);
353 lock_ranges
[nr_lock
++] = next_range
;
354 if (next_range
->type
!= CDS_JA_RANGE_ALLOCATED
)
355 merge_ranges
[nr_merge
++] = next_range
;
358 /* Acquire locks in increasing key order for range merge */
359 for (i
= 0; i
< nr_lock
; i
++)
360 pthread_mutex_lock(&lock_ranges
[i
]->lock
);
361 /* Ensure they are valid */
362 for (i
= 0; i
< nr_lock
; i
++) {
363 if (lock_ranges
[i
]->type
== CDS_JA_RANGE_REMOVED
)
367 /* Create new free range */
368 start
= merge_ranges
[0]->start
;
369 end
= merge_ranges
[nr_merge
- 1]->end
;
370 new_range
= range_create(start
, end
, NULL
, CDS_JA_RANGE_FREE
);
371 ret
= cds_ja_add(ja
, start
, &new_range
->ja_node
);
374 /* Remove old ranges */
375 for (i
= 0; i
< nr_merge
; i
++) {
376 ret
= cds_ja_del(ja
, merge_ranges
[i
]->start
,
377 &merge_ranges
[i
]->ja_node
);
379 merge_ranges
[i
]->type
= CDS_JA_RANGE_REMOVED
;
381 for (i
= 0; i
< nr_lock
; i
++)
382 pthread_mutex_unlock(&lock_ranges
[i
]->lock
);
383 /* Free old merged ranges */
384 for (i
= 0; i
< nr_merge
; i
++)
385 rcu_free_range(ja
, merge_ranges
[i
]);
391 for (i
= 0; i
< nr_lock
; i
++)
392 pthread_mutex_unlock(&lock_ranges
[i
]->lock
);
396 struct cds_ja
*_cds_ja_range_new(const struct rcu_flavor_struct
*flavor
)
398 struct cds_ja_range
*range
;
402 ja
= _cds_ja_new(CDS_JA_RANGE_KEY_BITS
, flavor
);
405 range
= range_create(0, UINT64_MAX
, NULL
, CDS_JA_RANGE_FREE
);
408 ret
= cds_ja_add(ja
, 0, &range
->ja_node
);
416 ret
= cds_ja_destroy(ja
);
421 int cds_ja_range_destroy(struct cds_ja
*ja
,
422 void (*free_priv
)(void *ptr
))
425 struct cds_ja_node
*ja_node
;
428 cds_ja_for_each_key_rcu(ja
, key
, ja_node
) {
429 struct cds_ja_node
*tmp_node
;
431 cds_ja_for_each_duplicate_safe_rcu(ja_node
, tmp_node
) {
432 struct cds_ja_range
*range
;
434 range
= caa_container_of(ja_node
,
435 struct cds_ja_range
, ja_node
);
436 ret
= cds_ja_del(ja
, key
, &range
->ja_node
);
440 free_priv(range
->priv
);
441 /* Alone using Judy array, OK to free now */
445 return cds_ja_destroy(ja
);