4 * mmap/reservation based memory management for Lock-Free RCU Hash Table
6 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#include <unistd.h>
#include <sys/mman.h>

#include "rculfhash-internal.h"
/*
 * Reserve an inaccessible address range without committing any memory.
 * Pages become usable only after memory_populate().
 */
static void *memory_map(size_t length)
{
	void *addr;

	addr = mmap(NULL, length, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(addr != MAP_FAILED);
	return addr;
}
/* Return a previously mapped/reserved range to the kernel. */
static void memory_unmap(void *ptr, size_t length)
{
	/* Result is only consumed by assert(); unused under NDEBUG. */
	int unmap_ret __attribute__((unused));

	unmap_ret = munmap(ptr, length);
	assert(unmap_ret == 0);
}
/*
 * Commit [ptr, ptr + length) of a reserved range as readable/writable
 * anonymous memory (MAP_FIXED remap over the PROT_NONE reservation).
 */
static void memory_populate(void *ptr, size_t length)
{
	/* Result is only consumed by assert(); unused under NDEBUG. */
	void *map_ret __attribute__((unused));

	map_ret = mmap(ptr, length, PROT_READ | PROT_WRITE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(map_ret == ptr);
}
/*
 * Discard the page contents so the kernel need not preserve or swap
 * them out, while keeping the range reserved and inaccessible
 * (MAP_FIXED remap back to PROT_NONE).
 */
static void memory_discard(void *ptr, size_t length)
{
	/* Result is only consumed by assert(); unused under NDEBUG. */
	void *map_ret __attribute__((unused));

	map_ret = mmap(ptr, length, PROT_NONE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(map_ret == ptr);
}
71 void cds_lfht_alloc_bucket_table(struct cds_lfht
*ht
, unsigned long order
)
74 if (ht
->min_nr_alloc_buckets
== ht
->max_nr_buckets
) {
76 ht
->tbl_mmap
= calloc(ht
->max_nr_buckets
,
77 sizeof(*ht
->tbl_mmap
));
82 ht
->tbl_mmap
= memory_map(ht
->max_nr_buckets
83 * sizeof(*ht
->tbl_mmap
));
84 memory_populate(ht
->tbl_mmap
,
85 ht
->min_nr_alloc_buckets
* sizeof(*ht
->tbl_mmap
));
86 } else if (order
> ht
->min_alloc_buckets_order
) {
88 unsigned long len
= 1UL << (order
- 1);
90 assert(ht
->min_nr_alloc_buckets
< ht
->max_nr_buckets
);
91 memory_populate(ht
->tbl_mmap
+ len
,
92 len
* sizeof(*ht
->tbl_mmap
));
94 /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
98 * cds_lfht_free_bucket_table() should be called with decreasing order.
99 * When cds_lfht_free_bucket_table(0) is called, it means the whole
103 void cds_lfht_free_bucket_table(struct cds_lfht
*ht
, unsigned long order
)
106 if (ht
->min_nr_alloc_buckets
== ht
->max_nr_buckets
) {
108 poison_free(ht
->tbl_mmap
);
112 memory_unmap(ht
->tbl_mmap
,
113 ht
->max_nr_buckets
* sizeof(*ht
->tbl_mmap
));
114 } else if (order
> ht
->min_alloc_buckets_order
) {
116 unsigned long len
= 1UL << (order
- 1);
118 assert(ht
->min_nr_alloc_buckets
< ht
->max_nr_buckets
);
119 memory_discard(ht
->tbl_mmap
+ len
, len
* sizeof(*ht
->tbl_mmap
));
121 /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
125 struct cds_lfht_node
*bucket_at(struct cds_lfht
*ht
, unsigned long index
)
127 return &ht
->tbl_mmap
[index
];
131 struct cds_lfht
*alloc_cds_lfht(unsigned long min_nr_alloc_buckets
,
132 unsigned long max_nr_buckets
)
134 unsigned long page_bucket_size
;
136 page_bucket_size
= getpagesize() / sizeof(struct cds_lfht_node
);
137 if (max_nr_buckets
<= page_bucket_size
) {
139 min_nr_alloc_buckets
= max_nr_buckets
;
142 min_nr_alloc_buckets
= max(min_nr_alloc_buckets
,
146 return __default_alloc_cds_lfht(
147 &cds_lfht_mm_mmap
, sizeof(struct cds_lfht
),
148 min_nr_alloc_buckets
, max_nr_buckets
);
151 const struct cds_lfht_mm_type cds_lfht_mm_mmap
= {
152 .alloc_cds_lfht
= alloc_cds_lfht
,
153 .alloc_bucket_table
= cds_lfht_alloc_bucket_table
,
154 .free_bucket_table
= cds_lfht_free_bucket_table
,
155 .bucket_at
= bucket_at
,