#include <sys/mman.h>
#include "rculfhash-internal.h"
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

/*
 * The allocation scheme used by the mmap based RCU hash table is to make a
 * large unaccessible mapping to reserve memory without allocating it.
 * Then smaller chunks are allocated by overlapping read/write mappings which
 * do allocate memory. Deallocation is done by an overlapping unaccessible
 * mapping.
 *
 * This scheme was tested on Linux, macOS and Solaris. However, on Cygwin the
 * mmap wrapper is based on the Windows NtMapViewOfSection API which doesn't
 * support overlapping mappings.
 *
 * An alternative to the overlapping mappings is to use mprotect to change the
 * protection on chunks of the large mapping, read/write to allocate and none
 * to deallocate. This works perfectly on Cygwin and Solaris but on Linux a
 * call to madvise is also required to deallocate and it just doesn't work on
 * macOS.
 *
 * For this reason, we keep the original scheme on all platforms except Cygwin.
 */

/*
 * Reserve inaccessible memory space without allocating it: PROT_NONE keeps
 * the range reserved in the address space while no physical pages are
 * committed until memory_populate() is called on a sub-range.
 */
static
void *memory_map(size_t length)
{
	void *ret = mmap(NULL, length, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* mmap reports failure as MAP_FAILED, not NULL */
	assert(ret != MAP_FAILED);
	return ret;
}
/* Release a reservation made by memory_map() */
static
void memory_unmap(void *ptr, size_t length)
{
	/*
	 * Result is only consumed by assert(); mark it unused so NDEBUG
	 * builds compile without a set-but-unused warning.
	 */
	int ret __attribute__((unused));

	ret = munmap(ptr, length);
	assert(ret == 0);
}
#ifdef __CYGWIN__
/*
 * Cygwin's mmap cannot overlap existing mappings, so allocation is done by
 * flipping page protection instead (see the scheme comment above).
 */

/* Set protection to read/write to allocate a memory chunk */
static
void memory_populate(void *ptr, size_t length)
{
	/* Result only consumed by assert(); unused under NDEBUG */
	int ret __attribute__((unused));

	ret = mprotect(ptr, length, PROT_READ | PROT_WRITE);

	assert(!ret);
}

/* Set protection to none to deallocate a memory chunk */
static
void memory_discard(void *ptr, size_t length)
{
	/* Result only consumed by assert(); unused under NDEBUG */
	int ret __attribute__((unused));

	ret = mprotect(ptr, length, PROT_NONE);

	assert(!ret);
}

#else /* __CYGWIN__ */

/*
 * Allocate a memory chunk by overlapping the reserved PROT_NONE range with
 * a read/write MAP_FIXED mapping.
 */
static
void memory_populate(void *ptr, size_t length)
{
	/* Result only consumed by assert(); unused under NDEBUG */
	void *ret __attribute__((unused));

	ret = mmap(ptr, length, PROT_READ | PROT_WRITE,
		MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(ret == ptr);
}

/*
 * Discard garbage memory and avoid the system saving it when trying to swap
 * it out. Keep the range reserved but inaccessible by overlapping it with a
 * fresh PROT_NONE mapping.
 */
static
void memory_discard(void *ptr, size_t length)
{
	/* Result only consumed by assert(); unused under NDEBUG */
	void *ret __attribute__((unused));

	ret = mmap(ptr, length, PROT_NONE,
		MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(ret == ptr);
}
#endif /* __CYGWIN__ */
static
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
/*
 * Allocate an mmap-backed lfht. Tables whose maximum size fits within one
 * page of bucket nodes are allocated at their final size up front; larger
 * tables reserve at least a page worth of buckets and grow page by page.
 * Construction of the struct itself is delegated to the shared
 * __default_alloc_cds_lfht() helper.
 *
 * NOTE(review): the "large table" branch was partially lost in this chunk;
 * the max() clamp below reconstructs it from the surviving
 * "page_bucket_size);" line — confirm against upstream.
 */
struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
		unsigned long max_nr_buckets)
{
	unsigned long page_bucket_size;

	page_bucket_size = getpagesize() / sizeof(struct cds_lfht_node);
	if (max_nr_buckets <= page_bucket_size) {
		/* small table */
		min_nr_alloc_buckets = max_nr_buckets;
	} else {
		/* large table */
		min_nr_alloc_buckets = max(min_nr_alloc_buckets,
					page_bucket_size);
	}

	return __default_alloc_cds_lfht(
			&cds_lfht_mm_mmap, sizeof(struct cds_lfht),
			min_nr_alloc_buckets, max_nr_buckets);
}
const struct cds_lfht_mm_type cds_lfht_mm_mmap = {