StarPU Internal Handbook
uthash.h
Go to the documentation of this file.
1 /*
2 Copyright (c) 2003-2010, Troy D. Hanson http://uthash.sourceforge.net
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7 
8  * Redistributions of source code must retain the above copyright
9  notice, this list of conditions and the following disclaimer.
10 
11 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
13 TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
14 PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
15 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
16 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
17 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
18 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
19 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 */
23 
24 #ifndef UTHASH_H
25 #define UTHASH_H
26 
#include <string.h> /* memcmp,strlen */
#include <stddef.h> /* ptrdiff_t */
#include <stdlib.h> /* malloc,free,exit - used by uthash_malloc/uthash_free/uthash_fatal */
31 
/* These macros use decltype or the earlier __typeof GNU extension.
 As decltype is only available in newer compilers (VS2010 or gcc 4.3+
 when compiling c++ source) this code uses whatever method is needed
 or, for VS2008 where neither is available, uses casting workarounds. */
#ifdef _MSC_VER /* MS compiler */
#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */
#define DECLTYPE(x) (decltype(x))
#else /* VS2008 or older (or VS2010 in C mode) */
#define NO_DECLTYPE
#define DECLTYPE(x)
#endif
#else /* GNU, Sun and other compilers */
#define DECLTYPE(x) (__typeof(x))
#endif

/* DECLTYPE_ASSIGN(dst,src): assign src to dst without knowing dst's type
 * at preprocessing time. With decltype/__typeof the value is cast to
 * dst's own type; the NO_DECLTYPE fallback smuggles the value through a
 * char** — this assumes dst is a pointer lvalue, which holds for all
 * uses in this file (hash-head and handle pointers). */
#ifdef NO_DECLTYPE
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  char **_da_dst = (char**)(&(dst)); \
  *_da_dst = (char*)(src); \
} while(0)
#else
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  (dst) = DECLTYPE(dst)(src); \
} while(0)
#endif
59 
/* a number of the hash function use uint32_t which isn't defined on win32 */
#ifdef _MSC_VER
/* pre-2010 MSVC ships no <inttypes.h>; on win32 `unsigned int` is 32 bits */
typedef unsigned int uint32_t;
#else
#include <inttypes.h> /* uint32_t */
#endif

/* Keep uthash's symbols out of the DSO export table (this is an internal
 * StarPU header). NOTE(review): the matching `pop` is not visible in this
 * chunk — confirm it appears at the end of the file. */
#pragma GCC visibility push(hidden)

#define UTHASH_VERSION 1.9.3

/* Allocation/failure hooks: an application may #define these before
 * including uthash.h to plug in its own allocator or error policy. */
#define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */
#define uthash_malloc(sz) malloc(sz) /* malloc fcn */
#define uthash_free(ptr,sz) free(ptr) /* free fcn */

/* Optional logging hooks invoked when bucket expansion is vetoed / done. */
#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
#define uthash_expand_fyi(tbl) /* can be defined to log expands */

/* Table sizing: bucket counts stay powers of two so hash values map to
 * buckets with a simple mask (see HASH_TO_BKT). */
#define HASH_INITIAL_NUM_BUCKETS 32 /* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5 /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10 /* expand when bucket count reaches */
82 
/* calculate the element whose hash handle address is hhp: the handle lives
 * at byte offset tbl->hho inside the user structure, so step back by hho. */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))

/* Look up the key at keyptr/keylen through handle `hh`; `out` is set to
 * the matching element or NULL. The bloom filter (when enabled) is
 * consulted first so definite misses skip the bucket walk entirely. */
#define HASH_FIND(hh,head,keyptr,keylen,out) \
do { \
  unsigned _hf_bkt=0,_hf_hashv=0; \
  out=NULL; \
  if (head) { \
    HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \
    if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) { \
      HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \
                       keyptr,keylen,out); \
    } \
  } \
} while (0)
98 
#ifdef HASH_BLOOM
/* Optional Bloom-filter front end: HASH_BLOOM is the log2 of the filter
 * size in bits. A negative test lets HASH_FIND skip the bucket walk; a
 * false positive only costs the normal walk. Bits are never cleared, so
 * deletions leave stale positives (harmless, by design). */
#define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM)
/* Bytes needed to hold HASH_BLOOM_BITLEN bits, rounded up. The whole
 * expansion is now parenthesized so it composes safely inside larger
 * expressions (e.g. 2*HASH_BLOOM_BYTELEN) — macro hygiene fix. */
#define HASH_BLOOM_BYTELEN ((HASH_BLOOM_BITLEN/8) + ((HASH_BLOOM_BITLEN%8) ? 1:0))
/* Allocate and zero the bit vector; record nbits and a signature. */
#define HASH_BLOOM_MAKE(tbl) \
do { \
  (tbl)->bloom_nbits = HASH_BLOOM; \
  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
  if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \
  memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
  (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
} while (0)

#define HASH_BLOOM_FREE(tbl) \
do { \
  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0)

/* Set / test a single bit; `bv` and `idx` parenthesized (hygiene fix). */
#define HASH_BLOOM_BITSET(bv,idx) ((bv)[(idx)/8] |= (1U << ((idx)%8)))
#define HASH_BLOOM_BITTEST(bv,idx) ((bv)[(idx)/8] & (1U << ((idx)%8)))

/* Fold the 32-bit hash into the filter's index range by masking with
 * (2^bloom_nbits - 1); `hashv` parenthesized (hygiene fix). */
#define HASH_BLOOM_ADD(tbl,hashv) \
  HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#define HASH_BLOOM_TEST(tbl,hashv) \
  HASH_BLOOM_BITTEST((tbl)->bloom_bv, ((hashv) & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#else
/* Bloom filter disabled: make/free/add are no-ops and the test always
 * reports "maybe present" so HASH_FIND walks the bucket. */
#define HASH_BLOOM_MAKE(tbl)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#endif
131 
/* First insertion into an empty hash: allocate and zero the shared
 * UT_hash_table, then record the app-order tail, initial bucket counts,
 * the byte offset `hho` of the handle inside the user struct (used by
 * ELMT_FROM_HH), the zeroed bucket array, the optional bloom filter and
 * a signature for sanity checking. */
#define HASH_MAKE_TABLE(hh,head) \
do { \
  (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \
                   sizeof(UT_hash_table)); \
  if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
  memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
  (head)->hh.tbl->tail = &((head)->hh); \
  (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
  (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
  (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
  (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
  if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
  memset((head)->hh.tbl->buckets, 0, \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
  HASH_BLOOM_MAKE((head)->hh.tbl); \
  (head)->hh.tbl->signature = HASH_SIGNATURE; \
} while(0)
150 
/* Add element `add` keyed on its own structure member `fieldname`,
 * through handle `hh`. `add` is now parenthesized before the member
 * access (macro hygiene) so expressions may be passed safely. */
#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
  HASH_ADD_KEYPTR(hh,head,&((add)->fieldname),keylen_in,add)
153 
#ifdef STARPU_DEBUG
/* Check that we don't insert the same key several times (debug builds
 * only; compiles to nothing otherwise).
 * NOTE(review): __typeof__ is a GNU extension — a STARPU_DEBUG build
 * with MSVC would need DECLTYPE instead; confirm gcc-only usage. */
#define HASH_CHECK_KEY(hh,head,keyptr,keylen,out) \
do { \
  __typeof__(out) _out; \
  HASH_FIND(hh,head,keyptr,keylen,_out); \
  STARPU_ASSERT_MSG(!_out,"Cannot insert the same key twice"); \
} while(0)
#else
#define HASH_CHECK_KEY(hh,head,keyptr,keylen,out)
#endif
165 
/* Add element `add` whose key is stored outside the structure at
 * `keyptr` (the key is NOT copied — the caller must keep it alive while
 * the item is in the hash). The item is appended to the app-order
 * doubly-linked list (creating the table on first insert), counted,
 * hashed, linked into its bucket, and recorded in the bloom filter;
 * the optional key-emission and integrity-check hooks run last. */
#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
do { \
  unsigned _ha_bkt=0; \
  HASH_CHECK_KEY(hh,head,keyptr,keylen_in,add); \
  (add)->hh.next = NULL; \
  (add)->hh.key = (char*)keyptr; \
  (add)->hh.keylen = keylen_in; \
  if (!(head)) { \
    head = (add); \
    (head)->hh.prev = NULL; \
    HASH_MAKE_TABLE(hh,head); \
  } else { \
    (head)->hh.tbl->tail->next = (add); \
    (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
    (head)->hh.tbl->tail = &((add)->hh); \
  } \
  (head)->hh.tbl->num_items++; \
  (add)->hh.tbl = (head)->hh.tbl; \
  HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \
          (add)->hh.hashv, _ha_bkt); \
  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \
  HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \
  HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \
  HASH_FSCK(hh,head); \
} while(0)
191 
/* Map a hash value to a bucket index. Bucket counts are always powers
 * of two (the table doubles on expansion), so masking with num_bkts-1
 * is an exact, cheap modulo. The `bkt` lvalue is now parenthesized
 * (macro hygiene). */
#define HASH_TO_BKT( hashv, num_bkts, bkt ) \
do { \
  (bkt) = ((hashv) & ((num_bkts) - 1)); \
} while(0)
196 
/* delete "delptr" from the hash table.
 * "the usual" patch-up process for the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation.
 * These used to be expressed using (delptr) but that led to a bug
 * if someone used the same symbol for the head and deletee, like
 * HASH_DELETE(hh,users,users);
 * We want that to work, but by changing the head (users) below
 * we were forfeiting our ability to further refer to the deletee (users)
 * in the patch-up process. Solution: use scratch space to
 * copy the deletee pointer, then the latter references are via that
 * scratch pointer rather than through the repointed (users) symbol.
 *
 * If the deletee is the only element, the whole table (buckets, bloom
 * filter, table struct) is torn down and head becomes NULL. Otherwise
 * the app-order list, the tail pointer and the bucket chain are all
 * patched; note the bloom filter is NOT updated on delete (bloom bits
 * cannot be unset), so stale positives are possible — by design.
 */
#define HASH_DELETE(hh,head,delptr) \
do { \
  unsigned _hd_bkt; \
  struct UT_hash_handle *_hd_hh_del; \
  if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
    uthash_free((head)->hh.tbl->buckets, \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
    HASH_BLOOM_FREE((head)->hh.tbl); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    head = NULL; \
  } else { \
    _hd_hh_del = &((delptr)->hh); \
    if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
      (head)->hh.tbl->tail = \
        (UT_hash_handle*)((char*)((delptr)->hh.prev) + \
                          (head)->hh.tbl->hho); \
    } \
    if ((delptr)->hh.prev) { \
      ((UT_hash_handle*)((char*)((delptr)->hh.prev) + \
                         (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
    } else { \
      DECLTYPE_ASSIGN(head,(delptr)->hh.next); \
    } \
    if (_hd_hh_del->next) { \
      ((UT_hash_handle*)((char*)_hd_hh_del->next + \
                         (head)->hh.tbl->hho))->prev = \
        _hd_hh_del->prev; \
    } \
    HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
    HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
    (head)->hh.tbl->num_items--; \
  } \
  HASH_FSCK(hh,head); \
} while (0)
243 
244 
/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL for the common key
 * kinds, all using the default handle name `hh`:
 *  - *_STR: key is the contents of a NUL-terminated string field
 *    (`strfield[0]` makes this work for array and pointer fields alike)
 *  - *_INT: key is an int field
 *  - *_PTR: key is the pointer value itself (not what it points to)
 */
#define HASH_FIND_STR(head,findstr,out) \
  HASH_FIND(hh,head,findstr,strlen(findstr),out)
#define HASH_ADD_STR(head,strfield,add) \
  HASH_ADD(hh,head,strfield[0],strlen(add->strfield),add)
#define HASH_FIND_INT(head,findint,out) \
  HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
  HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_FIND_PTR(head,findptr,out) \
  HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
  HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_DEL(head,delptr) \
  HASH_DELETE(hh,head,delptr)
260 
/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
 * It walks every bucket chain verifying hh_prev back-links and per-bucket
 * counts, then walks the app-order list verifying prev links and that both
 * walks agree with tbl->num_items; any mismatch aborts via HASH_OOPS.
 */
#ifdef HASH_DEBUG
#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
#define HASH_FSCK(hh,head) \
do { \
  unsigned _bkt_i; \
  unsigned _count, _bkt_count; \
  char *_prev; \
  struct UT_hash_handle *_thh; \
  if (head) { \
    _count = 0; \
    for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
      _bkt_count = 0; \
      _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
      _prev = NULL; \
      while (_thh) { \
        if (_prev != (char*)(_thh->hh_prev)) { \
          HASH_OOPS("invalid hh_prev %p, actual %p\n", \
                    _thh->hh_prev, _prev ); \
        } \
        _bkt_count++; \
        _prev = (char*)(_thh); \
        _thh = _thh->hh_next; \
      } \
      _count += _bkt_count; \
      if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
        HASH_OOPS("invalid bucket count %u, actual %u\n", \
                  (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
      } \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("invalid hh item count %u, actual %u\n", \
                (head)->hh.tbl->num_items, _count ); \
    } \
    /* traverse hh in app order; check next/prev integrity, count */ \
    _count = 0; \
    _prev = NULL; \
    _thh = &(head)->hh; \
    while (_thh) { \
      _count++; \
      if (_prev !=(char*)(_thh->prev)) { \
        HASH_OOPS("invalid prev %p, actual %p\n", \
                  _thh->prev, _prev ); \
      } \
      _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
      _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
                                              (head)->hh.tbl->hho) : NULL ); \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("invalid app item count %u, actual %u\n", \
                (head)->hh.tbl->num_items, _count ); \
    } \
  } \
} while (0)
#else
#define HASH_FSCK(hh,head)
#endif
320 
/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
 * the descriptor to which this macro is defined for tuning the hash function.
 * The app can #include <unistd.h> to get the prototype for write(2).
 * NOTE(review): write(2) return values are deliberately ignored — this is
 * a development/tuning aid, not production I/O. */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
do { \
  unsigned _klen = fieldlen; \
  write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
  write(HASH_EMIT_KEYS, keyptr, fieldlen); \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif
334 
/* default to Jenkins's hash unless overridden, e.g. -DHASH_FUNCTION=HASH_SAX */
#ifdef HASH_FUNCTION
#define HASH_FCN HASH_FUNCTION
#else
#define HASH_FCN HASH_JEN
#endif
341 
/* The Bernstein hash function, used in Perl prior to v5.6: the classic
 * rolling h = 33*h + c over the key bytes; bkt masks the result to the
 * (power-of-two) bucket count. All parameter uses are now parenthesized
 * (macro hygiene) so expressions such as `len + 1` can be passed. */
#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _hb_keylen=(keylen); \
  char *_hb_key=(char*)(key); \
  (hashv) = 0; \
  while (_hb_keylen--) { (hashv) = ((hashv) * 33) + *_hb_key++; } \
  (bkt) = (hashv) & ((num_bkts)-1); \
} while (0)
351 
352 
/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
/* Shift-add-xor hash. NOTE(review): bytes are read through plain `char`,
 * so values >= 0x80 sign-extend where char is signed; hash values are
 * used only for bucket selection, so this affects distribution, not
 * correctness. */
#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _sx_i; \
  char *_hs_key=(char*)(key); \
  hashv = 0; \
  for(_sx_i=0; _sx_i < keylen; _sx_i++) \
      hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
  bkt = hashv & (num_bkts-1); \
} while (0)
364 
/* 32-bit FNV-1 hash (offset basis 2166136261, prime 16777619:
 * multiply-then-xor per byte). NOTE(review): bytes sign-extend via
 * plain `char` on signed-char targets — distribution-only effect. */
#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _fn_i; \
  char *_hf_key=(char*)(key); \
  hashv = 2166136261UL; \
  for(_fn_i=0; _fn_i < keylen; _fn_i++) \
      hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \
  bkt = hashv & (num_bkts-1); \
} while(0)
374 
/* Bob Jenkins' "one-at-a-time" hash: add/shift/xor per byte, followed by
 * a final three-step avalanche. */
#define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _ho_i; \
  char *_ho_key=(char*)(key); \
  hashv = 0; \
  for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
      hashv += _ho_key[_ho_i]; \
      hashv += (hashv << 10); \
      hashv ^= (hashv >> 6); \
  } \
  hashv += (hashv << 3); \
  hashv ^= (hashv >> 11); \
  hashv += (hashv << 15); \
  bkt = hashv & (num_bkts-1); \
} while(0)
390 
/* Bob Jenkins' reversible 96-bit mixing step, the core of HASH_JEN.
 * Arguments must be simple lvalues with no side effects — each one is
 * read and written many times per expansion. */
#define HASH_JEN_MIX(a,b,c) \
do { \
  a -= b; a -= c; a ^= ( c >> 13 ); \
  b -= c; b -= a; b ^= ( a << 8 ); \
  c -= a; c -= b; c ^= ( b >> 13 ); \
  a -= b; a -= c; a ^= ( c >> 12 ); \
  b -= c; b -= a; b ^= ( a << 16 ); \
  c -= a; c -= b; c ^= ( b >> 5 ); \
  a -= b; a -= c; a ^= ( c >> 3 ); \
  b -= c; b -= a; b ^= ( a << 10 ); \
  c -= a; c -= b; c ^= ( b >> 15 ); \
} while (0)
403 
/* Bob Jenkins' "lookup" hash — the default HASH_FCN. Consumes the key
 * twelve bytes per round through HASH_JEN_MIX, then folds the length and
 * the remaining 0-11 tail bytes in before one final mix. */
#define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _hj_i,_hj_j,_hj_k; \
  char *_hj_key=(char*)(key); \
  hashv = 0xfeedbeef; \
  _hj_i = _hj_j = 0x9e3779b9; /* the golden ratio, an arbitrary seed */ \
  _hj_k = keylen; \
  while (_hj_k >= 12) { \
    _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
        + ( (unsigned)_hj_key[2] << 16 ) \
        + ( (unsigned)_hj_key[3] << 24 ) ); \
    _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
        + ( (unsigned)_hj_key[6] << 16 ) \
        + ( (unsigned)_hj_key[7] << 24 ) ); \
    hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
        + ( (unsigned)_hj_key[10] << 16 ) \
        + ( (unsigned)_hj_key[11] << 24 ) ); \
    \
    HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
    \
    _hj_key += 12; \
    _hj_k -= 12; \
  } \
  hashv += keylen; \
  switch ( _hj_k ) { \
  case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); \
  /* FALLTHRU */ \
  case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); \
  /* FALLTHRU */ \
  case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); \
  /* FALLTHRU */ \
  case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); \
  /* FALLTHRU */ \
  case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); \
  /* FALLTHRU */ \
  case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); \
  /* FALLTHRU */ \
  case 5: _hj_j += _hj_key[4]; \
  /* FALLTHRU */ \
  case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); \
  /* FALLTHRU */ \
  case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); \
  /* FALLTHRU */ \
  case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); \
  /* FALLTHRU */ \
  case 1: _hj_i += _hj_key[0]; \
  /* FALLTHRU */ \
  default: break; \
  } \
  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
  bkt = hashv & (num_bkts-1); \
} while(0)
456 
/* The Paul Hsieh hash function */
#undef get16bits
/* get16bits(d): load 16 bits starting at byte pointer d. On targets
 * listed below, which tolerate unaligned 16-bit loads, a direct
 * dereference is used; elsewhere the two bytes are combined manually
 * in little-endian order. */
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
                       +(uint32_t)(((const uint8_t *)(d))[0]) )
#endif
/* Paul Hsieh's SuperFastHash: consumes the key four bytes (two 16-bit
 * loads) per iteration, handles the 0-3 byte tail, then avalanches. */
#define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \
do { \
  char *_sfh_key=(char*)(key); \
  uint32_t _sfh_tmp, _sfh_len = keylen; \
  \
  int _sfh_rem = _sfh_len & 3; \
  _sfh_len >>= 2; \
  hashv = 0xcafebabe; \
  \
  /* Main loop */ \
  for (;_sfh_len > 0; _sfh_len--) { \
    hashv += get16bits (_sfh_key); \
    _sfh_tmp = (get16bits (_sfh_key+2) << 11) ^ hashv; \
    hashv = (hashv << 16) ^ _sfh_tmp; \
    _sfh_key += 2*sizeof (uint16_t); \
    hashv += hashv >> 11; \
  } \
  \
  /* Handle end cases */ \
  switch (_sfh_rem) { \
  case 3: hashv += get16bits (_sfh_key); \
          hashv ^= hashv << 16; \
          hashv ^= _sfh_key[sizeof (uint16_t)] << 18; \
          hashv += hashv >> 11; \
          break; \
  case 2: hashv += get16bits (_sfh_key); \
          hashv ^= hashv << 11; \
          hashv += hashv >> 17; \
          break; \
  case 1: hashv += *_sfh_key; \
          hashv ^= hashv << 10; \
          hashv += hashv >> 1; \
          break; \
  default: break; \
  } \
  \
  /* Force "avalanching" of final 127 bits */ \
  hashv ^= hashv << 3; \
  hashv += hashv >> 5; \
  hashv ^= hashv << 4; \
  hashv += hashv >> 17; \
  hashv ^= hashv << 25; \
  hashv += hashv >> 6; \
  bkt = hashv & (num_bkts-1); \
} while(0)
513 
#ifdef HASH_USING_NO_STRICT_ALIASING
/* The MurmurHash exploits some CPU's (e.g. x86) tolerance for unaligned reads.
 * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error.
 * So MurmurHash comes in two versions, the faster unaligned one and the slower
 * aligned one. We only use the faster one on CPU's where we know it's safe.
 *
 * Note the preprocessor built-in defines can be emitted using:
 *
 * gcc -m64 -dM -E - < /dev/null (on gcc)
 * cc -## a.c (where a.c is a simple test file) (Sun Studio)
 */
/* Pick the variant at compile time based on the target's tolerance for
 * unaligned 32-bit loads. */
#if (defined(__i386__) || defined(__x86_64__))
#define HASH_MUR HASH_MUR_UNALIGNED
#else
#define HASH_MUR HASH_MUR_ALIGNED
#endif
530 
531 /* Appleby's MurmurHash fast version for unaligned-tolerant archs like i386 */
532 #define HASH_MUR_UNALIGNED(key,keylen,num_bkts,hashv,bkt) \
533 do { \
534  const unsigned int _mur_m = 0x5bd1e995; \
535  const int _mur_r = 24; \
536  hashv = 0xcafebabe ^ keylen; \
537  char *_mur_key = (char *)(key); \
538  uint32_t _mur_tmp, _mur_len = keylen; \
539  \
540  for (;_mur_len >= 4; _mur_len-=4) { \
541  _mur_tmp = *(uint32_t *)_mur_key; \
542  _mur_tmp *= _mur_m; \
543  _mur_tmp ^= _mur_tmp >> _mur_r; \
544  _mur_tmp *= _mur_m; \
545  hashv *= _mur_m; \
546  hashv ^= _mur_tmp; \
547  _mur_key += 4; \
548  } \
549  \
550  switch(_mur_len) \
551  { \
552  case 3: hashv ^= _mur_key[2] << 16; \
553  /* FALLTHRU */ \
554  case 2: hashv ^= _mur_key[1] << 8; \
555  /* FALLTHRU */ \
556  case 1: hashv ^= _mur_key[0]; \
557  hashv *= _mur_m; \
558  /* FALLTHRU */ \
559  default: break; \
560  }; \
561  \
562  hashv ^= hashv >> 13; \
563  hashv *= _mur_m; \
564  hashv ^= hashv >> 15; \
565  \
566  bkt = hashv & (num_bkts-1); \
567 } while(0)
568 
569 /* Appleby's MurmurHash version for alignment-sensitive archs like Sparc */
570 #define HASH_MUR_ALIGNED(key,keylen,num_bkts,hashv,bkt) \
571 do { \
572  const unsigned int _mur_m = 0x5bd1e995; \
573  const int _mur_r = 24; \
574  hashv = 0xcafebabe ^ (keylen); \
575  char *_mur_key = (char *)(key); \
576  uint32_t _mur_len = keylen; \
577  int _mur_align = (int)_mur_key & 3; \
578  \
579  if (_mur_align && (_mur_len >= 4)) { \
580  unsigned _mur_t = 0, _mur_d = 0; \
581  switch(_mur_align) { \
582  case 1: _mur_t |= _mur_key[2] << 16; \
583  /* FALLTHRU */ \
584  case 2: _mur_t |= _mur_key[1] << 8; \
585  /* FALLTHRU */ \
586  case 3: _mur_t |= _mur_key[0]; \
587  /* FALLTHRU */ \
588  default: break; \
589  } \
590  _mur_t <<= (8 * _mur_align); \
591  _mur_key += 4-_mur_align; \
592  _mur_len -= 4-_mur_align; \
593  int _mur_sl = 8 * (4-_mur_align); \
594  int _mur_sr = 8 * _mur_align; \
595  \
596  for (;_mur_len >= 4; _mur_len-=4) { \
597  _mur_d = *(unsigned *)_mur_key; \
598  _mur_t = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
599  unsigned _mur_k = _mur_t; \
600  _mur_k *= _mur_m; \
601  _mur_k ^= _mur_k >> _mur_r; \
602  _mur_k *= _mur_m; \
603  hashv *= _mur_m; \
604  hashv ^= _mur_k; \
605  _mur_t = _mur_d; \
606  _mur_key += 4; \
607  } \
608  _mur_d = 0; \
609  if(_mur_len >= _mur_align) { \
610  switch(_mur_align) { \
611  case 3: _mur_d |= _mur_key[2] << 16; \
612  /* FALLTHRU */ \
613  case 2: _mur_d |= _mur_key[1] << 8; \
614  /* FALLTHRU */ \
615  case 1: _mur_d |= _mur_key[0]; \
616  /* FALLTHRU */ \
617  default: break; \
618  } \
619  unsigned _mur_k = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
620  _mur_k *= _mur_m; \
621  _mur_k ^= _mur_k >> _mur_r; \
622  _mur_k *= _mur_m; \
623  hashv *= _mur_m; \
624  hashv ^= _mur_k; \
625  _mur_k += _mur_align; \
626  _mur_len -= _mur_align; \
627  \
628  switch(_mur_len) \
629  { \
630  case 3: hashv ^= _mur_key[2] << 16; \
631  /* FALLTHRU */ \
632  case 2: hashv ^= _mur_key[1] << 8; \
633  /* FALLTHRU */ \
634  case 1: hashv ^= _mur_key[0]; \
635  hashv *= _mur_m; \
636  /* FALLTHRU */ \
637  default: break; \
638  } \
639  } else { \
640  switch(_mur_len) \
641  { \
642  case 3: _mur_d ^= _mur_key[2] << 16; \
643  /* FALLTHRU */ \
644  case 2: _mur_d ^= _mur_key[1] << 8; \
645  /* FALLTHRU */ \
646  case 1: _mur_d ^= _mur_key[0]; \
647  /* FALLTHRU */ \
648  case 0: hashv ^= (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
649  hashv *= _mur_m; \
650  /* FALLTHRU */ \
651  default: break; \
652  } \
653  } \
654  \
655  hashv ^= hashv >> 13; \
656  hashv *= _mur_m; \
657  hashv ^= hashv >> 15; \
658  } else { \
659  for (;_mur_len >= 4; _mur_len-=4) { \
660  unsigned _mur_k = *(unsigned*)_mur_key; \
661  _mur_k *= _mur_m; \
662  _mur_k ^= _mur_k >> _mur_r; \
663  _mur_k *= _mur_m; \
664  hashv *= _mur_m; \
665  hashv ^= _mur_k; \
666  _mur_key += 4; \
667  } \
668  switch(_mur_len) \
669  { \
670  case 3: hashv ^= _mur_key[2] << 16; \
671  /* FALLTHRU */ \
672  case 2: hashv ^= _mur_key[1] << 8; \
673  /* FALLTHRU */ \
674  case 1: hashv ^= _mur_key[0]; \
675  hashv *= _mur_m; \
676  /* FALLTHRU */ \
677  default: break; \
678  } \
679  \
680  hashv ^= hashv >> 13; \
681  hashv *= _mur_m; \
682  hashv ^= hashv >> 15; \
683  } \
684  bkt = hashv & (num_bkts-1); \
685 } while(0)
686 #endif /* HASH_USING_NO_STRICT_ALIASING */
687 
/* key comparison function; return 0 if keys equal (may be overridden
 * by defining HASH_KEYCMP before inclusion; memcmp by default) */
#define HASH_KEYCMP(a,b,len) memcmp(a,b,len)

/* iterate over items in a known bucket to find desired item: walk the
 * bucket chain comparing the cached key length first, then the bytes.
 * `out` ends as the matching element or NULL. Note: the `hh` parameter
 * names the handle field, so `out->hh.x` follows the caller's handle
 * after macro substitution. */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \
do { \
  if (head.hh_head) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); \
  else out=NULL; \
  while (out) { \
    if (out->hh.keylen == keylen_in) { \
      if ((HASH_KEYCMP(out->hh.key,keyptr,keylen_in)) == 0) break; \
    } \
    if (out->hh.hh_next) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,out->hh.hh_next)); \
    else out = NULL; \
  } \
} while(0)
704 
/* add an item to a bucket: prepend hash handle `addhh` to bucket `head`
 * (bucket chains are LIFO). If the chain length reaches the per-bucket
 * threshold — scaled by expand_mult for chronically overfull buckets —
 * and expansion has not been vetoed (tbl->noexpand), double the bucket
 * array. The `head` parameter is now parenthesized consistently at
 * every use (macro hygiene). */
#define HASH_ADD_TO_BKT(head,addhh) \
do { \
  (head).count++; \
  (addhh)->hh_next = (head).hh_head; \
  (addhh)->hh_prev = NULL; \
  if ((head).hh_head) { (head).hh_head->hh_prev = (addhh); } \
  (head).hh_head = (addhh); \
  if ((head).count >= (((head).expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \
      && (addhh)->tbl->noexpand != 1) { \
    HASH_EXPAND_BUCKETS((addhh)->tbl); \
  } \
} while(0)
718 
/* remove an item from a given bucket: decrement the count and unlink
 * hash handle `hh_del` from the bucket's doubly-linked chain. The `hh`
 * parameter is unused (kept for interface compatibility with callers).
 * Fix: the macro previously expanded to a bare statement sequence,
 * which misbehaves inside an unbraced `if`/loop body; it is now wrapped
 * in the conventional do { } while (0). */
#define HASH_DEL_IN_BKT(hh,head,hh_del) \
do { \
  (head).count--; \
  if ((head).hh_head == hh_del) { \
    (head).hh_head = hh_del->hh_next; \
  } \
  if (hh_del->hh_prev) { \
    hh_del->hh_prev->hh_next = hh_del->hh_next; \
  } \
  if (hh_del->hh_next) { \
    hh_del->hh_next->hh_prev = hh_del->hh_prev; \
  } \
} while (0)
731 
/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items,b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 * ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 *
 */
/* Allocate a doubled, zeroed bucket array and rehash every handle into
 * it, tracking over-long chains; if two consecutive expansions fail to
 * improve distribution, further expansion is disabled (noexpand). */
#define HASH_EXPAND_BUCKETS(tbl) \
do { \
  unsigned _he_bkt; \
  unsigned _he_bkt_i; \
  struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
  UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
  _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
           2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
  if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
  memset(_he_new_buckets, 0, \
          2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
  tbl->ideal_chain_maxlen = \
     (tbl->num_items >> (tbl->log2_num_buckets+1)) + \
     ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0); \
  tbl->nonideal_items = 0; \
  for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
  { \
    _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
    while (_he_thh) { \
      _he_hh_nxt = _he_thh->hh_next; \
      HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \
      _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
      if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
        tbl->nonideal_items++; \
        _he_newbkt->expand_mult = _he_newbkt->count / \
                                  tbl->ideal_chain_maxlen; \
      } \
      _he_thh->hh_prev = NULL; \
      _he_thh->hh_next = _he_newbkt->hh_head; \
      if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \
           _he_thh; \
      _he_newbkt->hh_head = _he_thh; \
      _he_thh = _he_hh_nxt; \
    } \
  } \
  uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
  tbl->num_buckets *= 2; \
  tbl->log2_num_buckets++; \
  tbl->buckets = _he_new_buckets; \
  tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
      (tbl->ineff_expands+1) : 0; \
  if (tbl->ineff_expands > 1) { \
    tbl->noexpand=1; \
    uthash_noexpand_fyi(tbl); \
  } \
  uthash_expand_fyi(tbl); \
} while(0)
808 
809 
/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in.
 * Sorts the app-order list in place by repeatedly merging runs of
 * doubling size (_hs_insize); bucket chains are untouched, so lookups
 * remain valid. cmpfcn receives two element pointers and returns
 * <0/0/>0, strcmp-style. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn) \
do { \
  unsigned _hs_i; \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
  if (head) { \
    _hs_insize = 1; \
    _hs_looping = 1; \
    _hs_list = &((head)->hh); \
    while (_hs_looping) { \
      _hs_p = _hs_list; \
      _hs_list = NULL; \
      _hs_tail = NULL; \
      _hs_nmerges = 0; \
      while (_hs_p) { \
        _hs_nmerges++; \
        _hs_q = _hs_p; \
        _hs_psize = 0; \
        for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
          _hs_psize++; \
          _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                  ((void*)((char*)(_hs_q->next) + \
                  (head)->hh.tbl->hho)) : NULL); \
          if (! (_hs_q) ) break; \
        } \
        _hs_qsize = _hs_insize; \
        while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \
          if (_hs_psize == 0) { \
            _hs_e = _hs_q; \
            _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                    ((void*)((char*)(_hs_q->next) + \
                    (head)->hh.tbl->hho)) : NULL); \
            _hs_qsize--; \
          } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \
            _hs_e = _hs_p; \
            _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
                    ((void*)((char*)(_hs_p->next) + \
                    (head)->hh.tbl->hho)) : NULL); \
            _hs_psize--; \
          } else if (( \
            cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
                   DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
            ) <= 0) { \
            _hs_e = _hs_p; \
            _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
                    ((void*)((char*)(_hs_p->next) + \
                    (head)->hh.tbl->hho)) : NULL); \
            _hs_psize--; \
          } else { \
            _hs_e = _hs_q; \
            _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                    ((void*)((char*)(_hs_q->next) + \
                    (head)->hh.tbl->hho)) : NULL); \
            _hs_qsize--; \
          } \
          if ( _hs_tail ) { \
            _hs_tail->next = ((_hs_e) ? \
                  ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
          } else { \
            _hs_list = _hs_e; \
          } \
          _hs_e->prev = ((_hs_tail) ? \
                ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
          _hs_tail = _hs_e; \
        } \
        _hs_p = _hs_q; \
      } \
      _hs_tail->next = NULL; \
      if ( _hs_nmerges <= 1 ) { \
        _hs_looping=0; \
        (head)->hh.tbl->tail = _hs_tail; \
        DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
      } \
      _hs_insize *= 2; \
    } \
    HASH_FSCK(hh,head); \
  } \
} while (0)
892 
/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * hash handle that must be present in the structure.
 * `cond(elt)` is evaluated for each source element; matches are
 * appended to dst's app-order list and buckets via handle hh_dst,
 * sharing the key bytes and cached hash with the source handle. */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
do { \
  unsigned _src_bkt, _dst_bkt; \
  void *_last_elt=NULL, *_elt; \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
  if (src) { \
    for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
      for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
          _src_hh; \
          _src_hh = _src_hh->hh_next) { \
        _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
        if (cond(_elt)) { \
          _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
          _dst_hh->key = _src_hh->key; \
          _dst_hh->keylen = _src_hh->keylen; \
          _dst_hh->hashv = _src_hh->hashv; \
          _dst_hh->prev = _last_elt; \
          _dst_hh->next = NULL; \
          if (_last_elt_hh) { _last_elt_hh->next = _elt; } \
          if (!dst) { \
            DECLTYPE_ASSIGN(dst,_elt); \
            HASH_MAKE_TABLE(hh_dst,dst); \
          } else { \
            _dst_hh->tbl = (dst)->hh_dst.tbl; \
          } \
          HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
          HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \
          (dst)->hh_dst.tbl->num_items++; \
          _last_elt = _elt; \
          _last_elt_hh = _dst_hh; \
        } \
      } \
    } \
  } \
  HASH_FSCK(hh_dst,dst); \
} while (0)
935 
/* Free the hash table's internal storage (bucket array, table struct and --
 * when HASH_BLOOM is enabled -- the Bloom filter bit-vector) and reset the
 * head pointer to NULL. The user elements themselves are NOT freed: uthash
 * never owns them, so free them separately (e.g. via HASH_ITER) first.
 *
 * Fix: the original omitted HASH_BLOOM_FREE, leaking the Bloom filter
 * allocated by HASH_MAKE_TABLE whenever HASH_BLOOM is defined (the macro
 * expands to nothing when HASH_BLOOM is not defined, so this is free
 * otherwise). */
#define HASH_CLEAR(hh,head)                                                      \
do {                                                                             \
  if (head) {                                                                    \
    HASH_BLOOM_FREE((head)->hh.tbl);                                             \
    uthash_free((head)->hh.tbl->buckets,                                         \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket));      \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                          \
    (head)=NULL;                                                                 \
  }                                                                              \
} while(0)
945 
#ifdef NO_DECLTYPE
/* Deletion-safe iteration over the hash in application order: 'tmp' always
 * holds the element after 'el', so 'el' may be removed (or freed) inside the
 * loop body. This variant assigns through a char** alias because no
 * decltype/__typeof is available to cast the next pointer. */
#define HASH_ITER(hh,head,el,tmp)                                                \
for((el)=(head), (*(char**)(&(tmp)))=(char*)((head)?(head)->hh.next:NULL);       \
  el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL))
#else
/* Same deletion-safe iteration, using DECLTYPE to cast hh.next back to the
 * element's pointer type. */
#define HASH_ITER(hh,head,el,tmp)                                                \
for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL);                 \
  el; (el)=(tmp),(tmp)=DECLTYPE(el)((tmp)?(tmp)->hh.next:NULL))
#endif
955 
/* Obtain the number of items currently stored in the hash; evaluates to 0
 * when the head pointer is NULL (empty hash). */
#define HASH_COUNT(head) HASH_CNT(hh, head)
#define HASH_CNT(hh, head) ((head) != NULL ? (head)->hh.tbl->num_items : 0)
959 
/* One bucket of the hash table: the head of a doubly-linked chain of hash
 * handles (linked via hh_prev/hh_next) together with the chain's length. */
typedef struct UT_hash_bucket {
  struct UT_hash_handle *hh_head;   /* first hash handle in this bucket's chain */
  unsigned count;                   /* number of items currently chained here */

  /* expand_mult is normally set to 0. In this situation, the max chain length
   * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
   * the bucket's chain exceeds this length, bucket expansion is triggered).
   * However, setting expand_mult to a non-zero value delays bucket expansion
   * (that would be triggered by additions to this particular bucket)
   * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
   * (The multiplier is simply expand_mult+1). The whole idea of this
   * multiplier is to reduce bucket expansions, since they are expensive, in
   * situations where we know that a particular bucket tends to be overused.
   * It is better to let its chain length grow to a longer yet-still-bounded
   * value, than to do an O(n) bucket expansion too often.
   */
  unsigned expand_mult;

} UT_hash_bucket;  /* fix: this closing line was dropped from the file, leaving the typedef unterminated */
979 
/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1
/* random signature stored alongside a Bloom filter, likewise only for
 * locating/validating it in external analysis (see bloom_sig below) */
#define HASH_BLOOM_SIGNATURE 0xb12220f2
983 
/* Per-hash bookkeeping structure, shared (via each handle's tbl pointer) by
 * every element in one hash. */
typedef struct UT_hash_table {
  UT_hash_bucket *buckets;              /* array of num_buckets buckets */
  unsigned num_buckets, log2_num_buckets;
  unsigned num_items;
  struct UT_hash_handle *tail;          /* tail hh in app order, for fast append */
  ptrdiff_t hho;                        /* hash handle offset (byte pos of hash handle in element) */

  /* in an ideal situation (all buckets used equally), no bucket would have
   * more than ceil(#items/#buckets) items. that's the ideal chain length. */
  unsigned ideal_chain_maxlen;

  /* nonideal_items is the number of items in the hash whose chain position
   * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
   * hash distribution; reaching them in a chain traversal takes >ideal steps */
  unsigned nonideal_items;

  /* ineffective expands occur when a bucket doubling was performed, but
   * afterward, more than half the items in the hash had nonideal chain
   * positions. If this happens on two consecutive expansions we inhibit any
   * further expansion, as it's not helping; this happens when the hash
   * function isn't a good fit for the key domain. When expansion is inhibited
   * the hash will still work, albeit no longer in constant time. */
  unsigned ineff_expands, noexpand;

  uint32_t signature; /* used only to find hash tables in external analysis */
#ifdef HASH_BLOOM
  uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
  uint8_t *bloom_bv;  /* Bloom filter bit-vector */
  char bloom_nbits;   /* presumably log2 of the filter size in bits -- confirm against HASH_BLOOM_MAKE */
#endif

} UT_hash_table;
1016 
/* Hash handle: embedded by the user inside each hashable structure. It links
 * the element into both the application-order list (prev/next, which point at
 * user elements) and the per-bucket chain (hh_prev/hh_next, which point at
 * handles). */
typedef struct UT_hash_handle {
  struct UT_hash_table *tbl;       /* table shared by all elements of this hash */
  void *prev;                      /* prev element in app order */
  void *next;                      /* next element in app order */
  struct UT_hash_handle *hh_prev;  /* previous hh in bucket order */
  struct UT_hash_handle *hh_next;  /* next hh in bucket order */
  void *key;                       /* ptr to enclosing struct's key */
  unsigned keylen;                 /* enclosing struct's key len */
  unsigned hashv;                  /* result of hash-fcn(key) */
} UT_hash_handle;
1027 
1028 #pragma GCC visibility pop
1029 
1030 #endif /* UTHASH_H */
/* (doxygen extraction residue, preserved as a comment so the header stays valid C)
 * UT_hash_bucket -- definition at uthash.h:960
 * UT_hash_handle -- definition at uthash.h:1017
 * UT_hash_table  -- definition at uthash.h:984 */