/*
 * mmap/reservation based memory management for Lock-Free RCU Hash Table
 *
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <assert.h>
#include <stdlib.h>

#include "rculfhash-internal.h"
28 #define MAP_ANONYMOUS MAP_ANON
/*
 * Reserve an inaccessible (PROT_NONE) address range without committing
 * any memory.  Pages are committed piecewise later by memory_populate().
 */
static void *memory_map(size_t length)
{
	void *ret;

	ret = mmap(NULL, length, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/*
	 * Check unconditionally: a bare assert() is compiled out with
	 * NDEBUG and would let MAP_FAILED escape to callers.
	 */
	if (ret == MAP_FAILED)
		abort();
	return ret;
}
/* Release a previously reserved/populated range back to the kernel. */
static void memory_unmap(void *ptr, size_t length)
{
	/*
	 * Abort even with NDEBUG: silently ignoring a munmap() failure
	 * would leak the whole reserved range.
	 */
	if (munmap(ptr, length))
		abort();
}
/*
 * Commit read/write anonymous zero pages over part of a range previously
 * reserved by memory_map().
 */
static void memory_populate(void *ptr, size_t length)
{
	void *ret;

	ret = mmap(ptr, length, PROT_READ | PROT_WRITE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/*
	 * MAP_FIXED either maps exactly at ptr or fails; check the result
	 * unconditionally rather than with an NDEBUG-removable assert().
	 */
	if (ret != ptr)
		abort();
}
/*
 * Discard garbage memory and avoid the system saving it when trying to
 * swap it out.  The range stays reserved but becomes inaccessible
 * (PROT_NONE) again, so it must be re-populated before reuse.
 */
static void memory_discard(void *ptr, size_t length)
{
	void *ret;

	ret = mmap(ptr, length, PROT_NONE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/*
	 * MAP_FIXED either maps exactly at ptr or fails; check the result
	 * unconditionally rather than with an NDEBUG-removable assert().
	 */
	if (ret != ptr)
		abort();
}
75 void cds_lfht_alloc_bucket_table(struct cds_lfht
*ht
, unsigned long order
)
78 if (ht
->min_nr_alloc_buckets
== ht
->max_nr_buckets
) {
80 ht
->tbl_mmap
= calloc(ht
->max_nr_buckets
,
81 sizeof(*ht
->tbl_mmap
));
86 ht
->tbl_mmap
= memory_map(ht
->max_nr_buckets
87 * sizeof(*ht
->tbl_mmap
));
88 memory_populate(ht
->tbl_mmap
,
89 ht
->min_nr_alloc_buckets
* sizeof(*ht
->tbl_mmap
));
90 } else if (order
> ht
->min_alloc_buckets_order
) {
92 unsigned long len
= 1UL << (order
- 1);
94 assert(ht
->min_nr_alloc_buckets
< ht
->max_nr_buckets
);
95 memory_populate(ht
->tbl_mmap
+ len
,
96 len
* sizeof(*ht
->tbl_mmap
));
98 /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
102 * cds_lfht_free_bucket_table() should be called with decreasing order.
103 * When cds_lfht_free_bucket_table(0) is called, it means the whole
107 void cds_lfht_free_bucket_table(struct cds_lfht
*ht
, unsigned long order
)
110 if (ht
->min_nr_alloc_buckets
== ht
->max_nr_buckets
) {
112 poison_free(ht
->tbl_mmap
);
116 memory_unmap(ht
->tbl_mmap
,
117 ht
->max_nr_buckets
* sizeof(*ht
->tbl_mmap
));
118 } else if (order
> ht
->min_alloc_buckets_order
) {
120 unsigned long len
= 1UL << (order
- 1);
122 assert(ht
->min_nr_alloc_buckets
< ht
->max_nr_buckets
);
123 memory_discard(ht
->tbl_mmap
+ len
, len
* sizeof(*ht
->tbl_mmap
));
125 /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
129 struct cds_lfht_node
*bucket_at(struct cds_lfht
*ht
, unsigned long index
)
131 return &ht
->tbl_mmap
[index
];
135 struct cds_lfht
*alloc_cds_lfht(unsigned long min_nr_alloc_buckets
,
136 unsigned long max_nr_buckets
)
138 unsigned long page_bucket_size
;
140 page_bucket_size
= getpagesize() / sizeof(struct cds_lfht_node
);
141 if (max_nr_buckets
<= page_bucket_size
) {
143 min_nr_alloc_buckets
= max_nr_buckets
;
146 min_nr_alloc_buckets
= max(min_nr_alloc_buckets
,
150 return __default_alloc_cds_lfht(
151 &cds_lfht_mm_mmap
, sizeof(struct cds_lfht
),
152 min_nr_alloc_buckets
, max_nr_buckets
);
155 const struct cds_lfht_mm_type cds_lfht_mm_mmap
= {
156 .alloc_cds_lfht
= alloc_cds_lfht
,
157 .alloc_bucket_table
= cds_lfht_alloc_bucket_table
,
158 .free_bucket_table
= cds_lfht_free_bucket_table
,
159 .bucket_at
= bucket_at
,