/*
 * rculfhash-mm-mmap.c
 *
 * mmap/reservation based memory management for Lock-Free RCU Hash Table
 *
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

#include "rculfhash-internal.h"

/*
 * Reserve an inaccessible (PROT_NONE) address range without allocating
 * any backing memory.  Pages are made accessible on demand later by
 * memory_populate().
 *
 * Aborts on mmap failure: callers have no error path and rely on a
 * valid reservation.  An explicit check is used instead of assert() so
 * the error is still caught in NDEBUG builds.
 */
static void *memory_map(size_t length)
{
	void *ret = mmap(NULL, length, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ret == MAP_FAILED)
		abort();
	return ret;
}

/*
 * Release an address-space reservation previously created by
 * memory_map().
 *
 * Aborts on munmap failure so the error is caught even in NDEBUG
 * builds (the previous assert()-only check was compiled out under
 * NDEBUG, which also left the "ret" local unused and warned).
 */
static void memory_unmap(void *ptr, size_t length)
{
	if (munmap(ptr, length))
		abort();
}

/*
 * Back [ptr, ptr + length) of a reservation with readable/writable
 * anonymous memory.  MAP_FIXED atomically replaces the PROT_NONE
 * mapping installed by memory_map(), so the range stays reserved at
 * all times.
 *
 * Aborts on failure: callers have no error path, and an explicit
 * check (unlike assert()) survives NDEBUG builds.
 */
static void memory_populate(void *ptr, size_t length)
{
	void *ret = mmap(ptr, length, PROT_READ | PROT_WRITE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ret != ptr)
		abort();
}

/*
 * Discard the contents of [ptr, ptr + length): the backing pages are
 * dropped (so the system will not try to save them when swapping out)
 * and the range reverts to a reserved, inaccessible PROT_NONE mapping,
 * exactly as after memory_map().
 *
 * Aborts on failure: callers have no error path, and an explicit
 * check (unlike assert()) survives NDEBUG builds.
 */
static void memory_discard(void *ptr, size_t length)
{
	void *ret = mmap(ptr, length, PROT_NONE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ret != ptr)
		abort();
}

64 static
65 void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
66 {
67 if (order == 0) {
68 if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
69 /* small table */
70 ht->tbl_mmap = calloc(ht->max_nr_buckets,
71 sizeof(*ht->tbl_mmap));
72 assert(ht->tbl_mmap);
73 return;
74 }
75 /* large table */
76 ht->tbl_mmap = memory_map(ht->max_nr_buckets
77 * sizeof(*ht->tbl_mmap));
78 memory_populate(ht->tbl_mmap,
79 ht->min_nr_alloc_buckets * sizeof(*ht->tbl_mmap));
80 } else if (order > ht->min_alloc_buckets_order) {
81 /* large table */
82 unsigned long len = 1UL << (order - 1);
83
84 assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
85 memory_populate(ht->tbl_mmap + len,
86 len * sizeof(*ht->tbl_mmap));
87 }
88 /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
89 }
90
91 /*
92 * cds_lfht_free_bucket_table() should be called with decreasing order.
93 * When cds_lfht_free_bucket_table(0) is called, it means the whole
94 * lfht is destroyed.
95 */
96 static
97 void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
98 {
99 if (order == 0) {
100 if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
101 /* small table */
102 poison_free(ht->tbl_mmap);
103 return;
104 }
105 /* large table */
106 memory_unmap(ht->tbl_mmap,
107 ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
108 } else if (order > ht->min_alloc_buckets_order) {
109 /* large table */
110 unsigned long len = 1UL << (order - 1);
111
112 assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
113 memory_discard(ht->tbl_mmap + len, len * sizeof(*ht->tbl_mmap));
114 }
115 /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
116 }
117
118 static
119 struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
120 {
121 return &ht->tbl_mmap[index];
122 }
123
124 static
125 struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
126 unsigned long max_nr_buckets)
127 {
128 struct cds_lfht *ht;
129 unsigned long page_bucket_size = getpagesize() / sizeof(*ht->tbl_mmap);
130
131 if (max_nr_buckets <= page_bucket_size) {
132 /* small table */
133 min_nr_alloc_buckets = max_nr_buckets;
134 } else {
135 /* large table */
136 min_nr_alloc_buckets = max(min_nr_alloc_buckets,
137 page_bucket_size);
138 }
139
140 ht = calloc(1, sizeof(struct cds_lfht));
141 assert(ht);
142
143 ht->mm = &cds_lfht_mm_mmap;
144
145 ht->min_nr_alloc_buckets = min_nr_alloc_buckets;
146 ht->min_alloc_buckets_order =
147 get_count_order_ulong(min_nr_alloc_buckets);
148 ht->max_nr_buckets = max_nr_buckets;
149
150 ht->bucket_at = bucket_at;
151
152 return ht;
153 }
154
/*
 * Memory-management operations table exported to the generic rculfhash
 * code; selects the mmap/reservation scheme implemented in this file.
 */
const struct cds_lfht_mm_type cds_lfht_mm_mmap = {
	.alloc_cds_lfht = alloc_cds_lfht,
	.alloc_bucket_table = cds_lfht_alloc_bucket_table,
	.free_bucket_table = cds_lfht_free_bucket_table,
	.bucket_at = bucket_at,
};