[urcu.git] / rculfhash-mm-mmap.c
/*
 * rculfhash-mm-mmap.c
 *
 * mmap/reservation based memory management for Lock-Free RCU Hash Table
 *
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>	/* assert() is used directly below */
#include <stdlib.h>	/* calloc() */
#include <unistd.h>
#include <sys/mman.h>
#include "rculfhash-internal.h"

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS	MAP_ANON
#endif

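/*
 * Memory reservation scheme: the bucket table is reserved at its maximum
 * size as a single PROT_NONE mapping, with no physical memory committed.
 * Ranges are switched to PROT_READ | PROT_WRITE as the table grows, and
 * returned to the reserved, inaccessible state when it shrinks, always at
 * page granularity.
 */
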
/* Reserve inaccessible memory space without allocating any memory. */
static void *memory_map(size_t length)
{
	void *ret = mmap(NULL, length, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	assert(ret != MAP_FAILED);
	return ret;
}

static void memory_unmap(void *ptr, size_t length)
{
	int ret __attribute__((unused));

	ret = munmap(ptr, length);

	assert(ret == 0);
}

static void memory_populate(void *ptr, size_t length)
{
	void *ret __attribute__((unused));

	ret = mmap(ptr, length, PROT_READ | PROT_WRITE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	assert(ret == ptr);
}

/*
 * Discard garbage memory so the kernel does not need to preserve its
 * contents (e.g. by swapping it out). The range stays reserved but
 * becomes inaccessible again.
 */
static void memory_discard(void *ptr, size_t length)
{
	void *ret __attribute__((unused));

	ret = mmap(ptr, length, PROT_NONE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	assert(ret == ptr);
}

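/*
 * Bucket table growth is driven by "order": order 0 allocates the initial
 * table (indexes [0, min_nr_alloc_buckets)), and each order above
 * min_alloc_buckets_order populates the index range [2^(order-1), 2^order)
 * of the flat tbl_mmap array. Orders from 1 up to min_alloc_buckets_order
 * need no work, since those indexes are already covered by the initial
 * population.
 */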
static
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	if (order == 0) {
		if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
			/* small table */
			ht->tbl_mmap = calloc(ht->max_nr_buckets,
				sizeof(*ht->tbl_mmap));
			assert(ht->tbl_mmap);
			return;
		}
		/* large table */
		ht->tbl_mmap = memory_map(ht->max_nr_buckets
			* sizeof(*ht->tbl_mmap));
		memory_populate(ht->tbl_mmap,
			ht->min_nr_alloc_buckets * sizeof(*ht->tbl_mmap));
	} else if (order > ht->min_alloc_buckets_order) {
		/* large table */
		unsigned long len = 1UL << (order - 1);

		assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
		memory_populate(ht->tbl_mmap + len,
			len * sizeof(*ht->tbl_mmap));
	}
	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}

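/*
 * Shrinking a large table goes through memory_discard() rather than
 * munmap(), so the address range stays reserved and can simply be
 * repopulated if the table grows again.
 */
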
/*
 * cds_lfht_free_bucket_table() should be called with decreasing order.
 * When cds_lfht_free_bucket_table(0) is called, it means the whole
 * lfht is destroyed.
 */
static
void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	if (order == 0) {
		if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
			/* small table */
			poison_free(ht->tbl_mmap);
			return;
		}
		/* large table */
		memory_unmap(ht->tbl_mmap,
			ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
	} else if (order > ht->min_alloc_buckets_order) {
		/* large table */
		unsigned long len = 1UL << (order - 1);

		assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
		memory_discard(ht->tbl_mmap + len, len * sizeof(*ht->tbl_mmap));
	}
	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}

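/*
 * Because the whole maximum-size table lives in one contiguous
 * reservation, bucket lookup is a plain index into the tbl_mmap array.
 */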
static
struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
{
	return &ht->tbl_mmap[index];
}

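/*
 * Tables that fit within a single page skip the reservation scheme and
 * are simply calloc()'d (min == max marks the "small table" case above).
 * For larger tables, the minimum allocation is raised to at least one
 * page worth of bucket nodes, matching the page granularity of mmap().
 */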
static
struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
		unsigned long max_nr_buckets)
{
	unsigned long page_bucket_size;

	page_bucket_size = getpagesize() / sizeof(struct cds_lfht_node);
	if (max_nr_buckets <= page_bucket_size) {
		/* small table */
		min_nr_alloc_buckets = max_nr_buckets;
	} else {
		/* large table */
		min_nr_alloc_buckets = max(min_nr_alloc_buckets,
					page_bucket_size);
	}

	return __default_alloc_cds_lfht(
			&cds_lfht_mm_mmap, sizeof(struct cds_lfht),
			min_nr_alloc_buckets, max_nr_buckets);
}

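/*
 * Memory-management operations exported to the generic lfht code. This
 * mmap-based manager is one of the pluggable cds_lfht_mm_type
 * implementations selected when the hash table is created, presumably
 * alongside the order- and chunk-based managers.
 */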
const struct cds_lfht_mm_type cds_lfht_mm_mmap = {
	.alloc_cds_lfht = alloc_cds_lfht,
	.alloc_bucket_table = cds_lfht_alloc_bucket_table,
	.free_bucket_table = cds_lfht_free_bucket_table,
	.bucket_at = bucket_at,
};