uatomic/x86: Remove redundant memory barriers
[urcu.git] / doc / examples / rculfhash / jhash.h
1 // SPDX-FileCopyrightText: 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
2 //
3 // SPDX-License-Identifier: MIT
4
5 #ifndef _JHASH_H
6 #define _JHASH_H
7
8 #if defined(__FreeBSD__)
9 #include <sys/endian.h>
10 #endif
11
12 /*
13 * Example hash function.
14 */
15
16 /*
17 * Hash function
18 * Source: http://burtleburtle.net/bob/c/lookup3.c
19 * Originally Public Domain
20 */
21
/*
 * rot() - rotate the 32-bit value x left by k bits.
 * NOTE(review): k must satisfy 0 < k < 32 (k == 0 would shift by 32 bits,
 * which is undefined behavior in C); every call site below obeys this.
 */
#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))

/*
 * mix() - reversibly mix three 32-bit state words (lookup3 inner step).
 * Wrapped in do { } while (0) so "mix(a, b, c);" acts as one statement.
 */
#define mix(a, b, c) \
do { \
	a -= c; a ^= rot(c, 4); c += b; \
	b -= a; b ^= rot(a, 6); a += c; \
	c -= b; c ^= rot(b, 8); b += a; \
	a -= c; a ^= rot(c, 16); c += b; \
	b -= a; b ^= rot(a, 19); a += c; \
	c -= b; c ^= rot(b, 4); b += a; \
} while (0)

/*
 * final() - final avalanche mixing of the three state words.
 * Fixed to use do { } while (0) (was a bare brace block): a bare block
 * followed by the caller's ';' misparses inside an unbraced if/else, and
 * the do/while form is consistent with mix() above. The seven mixing
 * steps are unchanged.
 */
#define final(a, b, c) \
do { \
	c ^= b; c -= rot(b, 14); \
	a ^= c; a -= rot(c, 11); \
	b ^= a; b -= rot(a, 25); \
	c ^= b; c -= rot(b, 16); \
	a ^= c; a -= rot(c, 4); \
	b ^= a; b -= rot(a, 14); \
	c ^= b; c -= rot(b, 24); \
} while (0)
44
45 #if (BYTE_ORDER == LITTLE_ENDIAN)
46 #define HASH_LITTLE_ENDIAN 1
47 #else
48 #define HASH_LITTLE_ENDIAN 0
49 #endif
50
51 /*
52 *
53 * hashlittle() -- hash a variable-length key into a 32-bit value
54 * k : the key (the unaligned variable-length array of bytes)
55 * length : the length of the key, counting by bytes
56 * initval : can be any 4-byte value
57 * Returns a 32-bit value. Every bit of the key affects every bit of
58 * the return value. Two keys differing by one or two bits will have
59 * totally different hash values.
60 *
61 * The best hash table sizes are powers of 2. There is no need to do
62 * mod a prime (mod is sooo slow!). If you need less than 32 bits,
63 * use a bitmask. For example, if you need only 10 bits, do
64 * h = (h & hashmask(10));
65 * In which case, the hash table should have hashsize(10) elements.
66 *
67 * If you are hashing n strings (uint8_t **)k, do it like this:
68 * for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h);
69 *
70 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
71 * code any way you wish, private, educational, or commercial. It's free.
72 *
73 * Use for hash table lookup, or anything where one collision in 2^^32 is
74 * acceptable. Do NOT use for cryptographic purposes.
75 */
static
uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
{
	uint32_t a, b, c; /* internal state */
	union {
		const void *ptr;
		size_t i;	/* integer view of the pointer, used only to test alignment */
	} u;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

	u.ptr = key;
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		/* Fast path: little-endian host and 4-byte-aligned key. */
		const uint32_t *k = (const uint32_t *) key; /* read 32-bit chunks */

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a, b, c);
			length -= 12;
			k += 3;
		}

		/*----------------------------- handle the last (probably partial) block */
		/*
		 * "k[2]&0xffffff" actually reads beyond the end of the string, but
		 * then masks off the part it's not allowed to read. Because the
		 * string is aligned, the masked-off tail is in the same word as the
		 * rest of the string. Every machine with memory protection I've seen
		 * does it on word boundaries, so is OK with this. But VALGRIND will
		 * still catch it and complain. The masking trick does make the hash
		 * noticeably faster for short strings (like English words).
		 */
#ifndef VALGRIND

		switch (length) {
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
		case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
		case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
		case 6 : b+=k[1]&0xffff; a+=k[0]; break;
		case 5 : b+=k[1]&0xff; a+=k[0]; break;
		case 4 : a+=k[0]; break;
		case 3 : a+=k[0]&0xffffff; break;
		case 2 : a+=k[0]&0xffff; break;
		case 1 : a+=k[0]&0xff; break;
		case 0 : return c; /* zero length strings require no mixing */
		}

#else /* make valgrind happy */
		/*
		 * Byte-exact tail handling: never reads past the end of the key,
		 * at the cost of more (byte-sized) loads than the masking trick.
		 */
		{
			const uint8_t *k8;

			k8 = (const uint8_t *) k;
			switch (length) {
			case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
			case 11: c+=((uint32_t) k8[10])<<16; /* fall through */
			case 10: c+=((uint32_t) k8[9])<<8; /* fall through */
			case 9 : c+=k8[8]; /* fall through */
			case 8 : b+=k[1]; a+=k[0]; break;
			case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */
			case 6 : b+=((uint32_t) k8[5])<<8; /* fall through */
			case 5 : b+=k8[4]; /* fall through */
			case 4 : a+=k[0]; break;
			case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */
			case 2 : a+=((uint32_t) k8[1])<<8; /* fall through */
			case 1 : a+=k8[0]; break;
			case 0 : return c;
			}
		}
#endif /* !valgrind */

	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		/* Little-endian host, key only 2-byte aligned: read 16 bits at a time. */
		const uint16_t *k = (const uint16_t *) key; /* read 16-bit chunks */
		const uint8_t *k8;

		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12)
		{
			a += k[0] + (((uint32_t) k[1])<<16);
			b += k[2] + (((uint32_t) k[3])<<16);
			c += k[4] + (((uint32_t) k[5])<<16);
			mix(a, b, c);
			length -= 12;
			k += 6;
		}

		/*----------------------------- handle the last (probably partial) block */
		k8 = (const uint8_t *) k;
		switch(length)
		{
		case 12: c+=k[4]+(((uint32_t) k[5])<<16);
			b+=k[2]+(((uint32_t) k[3])<<16);
			a+=k[0]+(((uint32_t) k[1])<<16);
			break;
		case 11: c+=((uint32_t) k8[10])<<16; /* fall through */
		case 10: c+=k[4];
			b+=k[2]+(((uint32_t) k[3])<<16);
			a+=k[0]+(((uint32_t) k[1])<<16);
			break;
		case 9 : c+=k8[8]; /* fall through */
		case 8 : b+=k[2]+(((uint32_t) k[3])<<16);
			a+=k[0]+(((uint32_t) k[1])<<16);
			break;
		case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */
		case 6 : b+=k[2];
			a+=k[0]+(((uint32_t) k[1])<<16);
			break;
		case 5 : b+=k8[4]; /* fall through */
		case 4 : a+=k[0]+(((uint32_t) k[1])<<16);
			break;
		case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */
		case 2 : a+=k[0];
			break;
		case 1 : a+=k8[0];
			break;
		case 0 : return c; /* zero length requires no mixing */
		}

	} else { /* need to read the key one byte at a time */
		/* Portable path: big-endian host or unaligned key. */
		const uint8_t *k = (const uint8_t *)key;

		/*--------------- all but the last block: affect some 32 bits of (a, b, c) */
		while (length > 12) {
			a += k[0];
			a += ((uint32_t) k[1])<<8;
			a += ((uint32_t) k[2])<<16;
			a += ((uint32_t) k[3])<<24;
			b += k[4];
			b += ((uint32_t) k[5])<<8;
			b += ((uint32_t) k[6])<<16;
			b += ((uint32_t) k[7])<<24;
			c += k[8];
			c += ((uint32_t) k[9])<<8;
			c += ((uint32_t) k[10])<<16;
			c += ((uint32_t) k[11])<<24;
			mix(a,b,c);
			length -= 12;
			k += 12;
		}

		/*-------------------------------- last block: affect all 32 bits of (c) */
		switch (length) { /* all the case statements fall through */
		case 12: c+=((uint32_t) k[11])<<24; /* fall through */
		case 11: c+=((uint32_t) k[10])<<16; /* fall through */
		case 10: c+=((uint32_t) k[9])<<8; /* fall through */
		case 9 : c+=k[8]; /* fall through */
		case 8 : b+=((uint32_t) k[7])<<24; /* fall through */
		case 7 : b+=((uint32_t) k[6])<<16; /* fall through */
		case 6 : b+=((uint32_t) k[5])<<8; /* fall through */
		case 5 : b+=k[4]; /* fall through */
		case 4 : a+=((uint32_t) k[3])<<24; /* fall through */
		case 3 : a+=((uint32_t) k[2])<<16; /* fall through */
		case 2 : a+=((uint32_t) k[1])<<8; /* fall through */
		case 1 : a+=k[0];
			break;
		case 0 : return c;
		}
	}

	final(a, b, c);
	return c;
}
244
/*
 * jhash - hash a variable-length key into a 32-bit value.
 *
 * Thin convenience wrapper over hashlittle(); see that function's
 * comment block for the full contract (not for cryptographic use).
 *
 * @key:    pointer to the bytes to hash (may be unaligned)
 * @length: number of bytes to hash
 * @seed:   initial value folded into the hash state
 *
 * Returns the 32-bit hash of the key.
 */
static inline
uint32_t jhash(const void *key, size_t length, uint32_t seed)
{
	const uint32_t hash = hashlittle(key, length, seed);

	return hash;
}
250
251 #endif /* _JHASH_H */
This page took 0.036709 seconds and 5 git commands to generate.