/*
 * Userspace RCU library - RCU Judy Array Range Support
 *
 * Copyright 2012-2013 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <pthread.h>

#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>
#include <urcu/rcuja-range.h>
#include <urcu-flavor.h>

#include "rcuja-internal.h"
/*
 * Discussion about order of lookup/lock vs allocated node deletion.
 *
 * - If node deletion returns before call to
 *   cds_ja_range_lookup(), the node will not be found by lookup.
 * - If node deletion is called after cds_ja_range_lock() returns a
 *   non-NULL range, the deletion will wait until the lock is released
 *   before it takes place.
 * - If node deletion call/return overlaps with the call to
 *   cds_ja_range_lookup() and return from cds_ja_range_lock(), the node
 *   may or may not be found by each of cds_ja_range_lookup() and
 *   cds_ja_range_lock().
 */
/*
 * Discussion about order of lookup/lock vs allocated node add. Assuming
 * no concurrent delete.
 *
 * - If node add returns before call to
 *   cds_ja_range_lookup(), the node will be found by lookup.
 * - If node add is called after cds_ja_range_lookup() returns, the node
 *   will not be found by lookup.
 * - If node add call/return overlaps with the call to and return from
 *   cds_ja_range_lookup(), the node may or may not be found.
 * - If node add call/return overlaps with call to cds_ja_range_lookup()
 *   and return from cds_ja_range_lock(), in the specific case where
 *   cds_ja_range_lookup() _does_ succeed, then cds_ja_range_lock() will
 *   succeed (still assuming no concurrent deletion).
 */
/*
 * Discussion of the type state transitions.
 *
 * State transitions of "type" always go from either:
 *
 * CDS_JA_RANGE_FREE -> CDS_JA_RANGE_REMOVED
 * or
 * CDS_JA_RANGE_ALLOCATED -> CDS_JA_RANGE_REMOVED
 *
 * A range type never changes otherwise.
 */
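
/*
 * Illustrative reader-side sketch of the lookup/lock ordering the
 * discussions above describe. This is not part of the library API:
 * names such as "my_ja" and "my_key" are hypothetical and error
 * handling is elided. Holding the range lock pins the range against
 * removal, so it stays valid even after rcu_read_unlock():
 *
 *	struct cds_ja_range *range;
 *
 *	rcu_read_lock();
 *	range = cds_ja_range_lookup(my_ja, my_key);
 *	if (range)
 *		range = cds_ja_range_lock(range);
 *	rcu_read_unlock();
 *	if (range) {
 *		... use the range; deletion waits for the unlock ...
 *		cds_ja_range_unlock(range);
 *	}
 */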
struct cds_ja_range *cds_ja_range_lookup(struct cds_ja *ja, uint64_t key)
{
	struct cds_ja_node *node, *last_node;
	struct cds_ja_range *range;

	node = cds_ja_lookup_below_equal(ja, key, NULL);
	assert(node);
	/*
	 * Get the last of duplicate chain. Adding a node to Judy array
	 * duplicates inserts them at the end of the chain.
	 */
	cds_ja_for_each_duplicate_rcu(node)
		last_node = node;
	range = caa_container_of(last_node, struct cds_ja_range, ja_node);
	/*
	 * If the last node in the duplicates is removed or free, we can
	 * consider that either a removal or add operation is in
	 * progress, or removal is the last completed operation to
	 * update this range. We can therefore consider that this area
	 * is not allocated.
	 */
	if (range->type != CDS_JA_RANGE_ALLOCATED)
		return NULL;
	/*
	 * We found an allocated range. We can return it for use with
	 * RCU read-side protection for existence. However, we have no
	 * mutual exclusion against removal at this point.
	 */
	return range;
}
/*
 * Provide mutual exclusion against removal.
 */
struct cds_ja_range *cds_ja_range_lock(struct cds_ja_range *range)
{
	pthread_mutex_lock(&range->lock);

	if (range->type == CDS_JA_RANGE_REMOVED)
		goto removed;
	return range;

removed:
	pthread_mutex_unlock(&range->lock);
	return NULL;
}
void cds_ja_range_unlock(struct cds_ja_range *range)
{
	pthread_mutex_unlock(&range->lock);
}
static
struct cds_ja_range *range_create(
		uint64_t start,		/* inclusive */
		uint64_t end,		/* inclusive */
		enum cds_ja_range_type type)
{
	struct cds_ja_range *range;

	range = calloc(sizeof(*range), 1);
	if (!range)
		return NULL;
	range->start = start;
	range->end = end;
	range->type = type;
	pthread_mutex_init(&range->lock, NULL);
	return range;
}
static
void free_range_cb(struct rcu_head *head)
{
	struct cds_ja_range *range =
		caa_container_of(head, struct cds_ja_range, head);

	free(range);
}
static
void free_range(struct cds_ja_range *range)
{
	free(range);
}
static
void rcu_free_range(struct cds_ja *ja, struct cds_ja_range *range)
{
	cds_lfht_rcu_flavor(ja->ht)->update_call_rcu(&range->head,
			free_range_cb);
}
struct cds_ja_range *cds_ja_range_add(struct cds_ja *ja,
		uint64_t start,		/* inclusive */
		uint64_t end)		/* inclusive */
{
	struct cds_ja_node *old_node, *old_node_end;
	struct cds_ja_range *old_range, *old_range_end, *new_range, *ranges[3];
	unsigned int nr_ranges, i;
	int ret;

retry:
	/*
	 * Find if requested range is entirely contained within a single
	 * free range.
	 */
	old_node = cds_ja_lookup_below_equal(ja, start, NULL);
	assert(old_node);

	old_range = caa_container_of(old_node, struct cds_ja_range, ja_node);
	switch (CMM_LOAD_SHARED(old_range->type)) {
	case CDS_JA_RANGE_ALLOCATED:
		return NULL;
	case CDS_JA_RANGE_FREE:
		break;
	case CDS_JA_RANGE_REMOVED:
		goto retry;
	}

	old_node_end = cds_ja_lookup_below_equal(ja, end, NULL);
	assert(old_node_end);
	old_range_end = caa_container_of(old_node_end,
			struct cds_ja_range, ja_node);
	if (old_range_end != old_range) {
		switch (CMM_LOAD_SHARED(old_range->type)) {
		case CDS_JA_RANGE_ALLOCATED:
		case CDS_JA_RANGE_FREE:		/* fall-through */
			/*
			 * TODO: think about free range merge.
			 */
			return NULL;
		case CDS_JA_RANGE_REMOVED:
			goto retry;
		}
	}

	pthread_mutex_lock(&old_range->lock);

	if (old_range->type == CDS_JA_RANGE_REMOVED) {
		pthread_mutex_unlock(&old_range->lock);
		goto retry;
	}

	/* Create replacement ranges: at most 2 free and 1 allocated */
	if (start == old_range->start) {
		if (end == old_range->end) {
			/* Request matches the old free range exactly */
			nr_ranges = 1;
			ranges[0] = new_range = range_create(start, end,
				CDS_JA_RANGE_ALLOCATED);
		} else {
			/* Request matches the start of the old free range */
			nr_ranges = 2;
			ranges[0] = new_range = range_create(start, end,
				CDS_JA_RANGE_ALLOCATED);
			ranges[1] = range_create(end + 1, old_range->end,
				CDS_JA_RANGE_FREE);
		}
	} else {
		if (end == old_range->end) {
			/* Request matches the end of the old free range */
			nr_ranges = 2;
			ranges[0] = range_create(old_range->start, start - 1,
				CDS_JA_RANGE_FREE);
			ranges[1] = new_range = range_create(start, end,
				CDS_JA_RANGE_ALLOCATED);
		} else {
			/* Request falls within the old free range */
			nr_ranges = 3;
			ranges[0] = range_create(old_range->start, start - 1,
				CDS_JA_RANGE_FREE);
			ranges[1] = new_range = range_create(start, end,
				CDS_JA_RANGE_ALLOCATED);
			ranges[2] = range_create(end + 1, old_range->end,
				CDS_JA_RANGE_FREE);
		}
	}

	/* Add replacement ranges to Judy array */
	for (i = 0; i < nr_ranges; i++) {
		ret = cds_ja_add(ja, ranges[i]->start, &ranges[i]->ja_node);
		assert(!ret);
	}

	/*
	 * We add replacement ranges _before_ removing old ranges, so
	 * concurrent traversals will always see one or the other. This
	 * is OK because we temporarily have a duplicate key, and Judy
	 * arrays provide key existence guarantee for lookups performed
	 * concurrently with add followed by del of duplicate keys.
	 */

	/* Remove old free range */
	ret = cds_ja_del(ja, old_range->start, &old_range->ja_node);
	assert(!ret);
	old_range->type = CDS_JA_RANGE_REMOVED;
	pthread_mutex_unlock(&old_range->lock);

	rcu_free_range(ja, old_range);

	return new_range;
}
int cds_ja_range_del(struct cds_ja *ja, struct cds_ja_range *range)
{
	struct cds_ja_node *prev_node, *next_node;
	struct cds_ja_range *prev_range, *next_range, *new_range;
	struct cds_ja_range *merge_ranges[3];
	unsigned int nr_merge, i;
	uint64_t start, end;
	int ret;

retry:
	nr_merge = 0;
	prev_node = cds_ja_lookup_below_equal(ja, range->start - 1, NULL);
	prev_range = caa_container_of(prev_node, struct cds_ja_range, ja_node);
	if (prev_node && prev_range->type != CDS_JA_RANGE_ALLOCATED)
		merge_ranges[nr_merge++] = prev_range;

	merge_ranges[nr_merge++] = range;

	next_node = cds_ja_lookup_above_equal(ja, range->end + 1, NULL);
	next_range = caa_container_of(next_node, struct cds_ja_range, ja_node);
	if (next_node && next_range->type != CDS_JA_RANGE_ALLOCATED)
		merge_ranges[nr_merge++] = next_range;

	/* Acquire locks in increasing key order for range merge */
	for (i = 0; i < nr_merge; i++)
		pthread_mutex_lock(&merge_ranges[i]->lock);
	/* Ensure they are valid */
	for (i = 0; i < nr_merge; i++) {
		if (merge_ranges[i]->type == CDS_JA_RANGE_REMOVED)
			goto unlock_retry;
	}

	/* Create new free range */
	start = merge_ranges[0]->start;
	end = merge_ranges[nr_merge - 1]->end;
	new_range = range_create(start, end, CDS_JA_RANGE_FREE);
	ret = cds_ja_add(ja, start, &new_range->ja_node);
	assert(!ret);

	/* Remove old ranges */
	for (i = 0; i < nr_merge; i++) {
		ret = cds_ja_del(ja, merge_ranges[i]->start,
				&merge_ranges[i]->ja_node);
		assert(!ret);
		merge_ranges[i]->type = CDS_JA_RANGE_REMOVED;
		pthread_mutex_unlock(&merge_ranges[i]->lock);
		rcu_free_range(ja, merge_ranges[i]);
	}
	return 0;

unlock_retry:
	for (i = 0; i < nr_merge; i++)
		pthread_mutex_unlock(&merge_ranges[i]->lock);
	goto retry;
}
int cds_ja_range_init(struct cds_ja *ja)
{
	struct cds_ja_node *node;
	struct cds_ja_range *range;
	int ret;

	/*
	 * Sanity check: should be empty.
	 */
	node = cds_ja_lookup_above_equal(ja, 0, NULL);
	if (node)
		return -EINVAL;
	range = range_create(0, UINT64_MAX, CDS_JA_RANGE_FREE);
	if (!range)
		return -ENOMEM;
	ret = cds_ja_add(ja, 0, &range->ja_node);
	return ret;
}
int cds_ja_range_fini(struct cds_ja *ja)
{
	uint64_t key;
	struct cds_ja_node *ja_node;
	int ret = 0;

	cds_ja_for_each_key_rcu(ja, key, ja_node) {
		struct cds_ja_node *tmp_node;

		cds_ja_for_each_duplicate_safe_rcu(ja_node, tmp_node) {
			struct cds_ja_range *range;

			range = caa_container_of(ja_node,
					struct cds_ja_range, ja_node);
			ret = cds_ja_del(ja, key, &range->ja_node);
			if (ret)
				goto end;
			/* Alone using Judy array, OK to free now */
			free_range(range);
		}
	}
end:
	return ret;
}
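
/*
 * Hedged end-to-end lifecycle sketch, assuming "my_ja" was created
 * with the matching rcuja API; names are hypothetical and error
 * handling is elided:
 *
 *	cds_ja_range_init(my_ja);	seeds the free range [0, UINT64_MAX]
 *	r = cds_ja_range_add(my_ja, start, end);
 *	...
 *	cds_ja_range_del(my_ja, r);
 *	cds_ja_range_fini(my_ja);	empties the array before destruction
 */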