1 /* 2 Copyright (c) 2003-2017, Troy D. Hanson http://troydhanson.github.com/uthash/ 3 All rights reserved. 4 5 Redistribution and use in source and binary forms, with or without 6 modification, are permitted provided that the following conditions are met: 7 8 * Redistributions of source code must retain the above copyright 9 notice, this list of conditions and the following disclaimer. 10 11 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 12 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 13 TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 14 PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER 15 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 16 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 17 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 18 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 19 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 21 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 */ 23 24 #ifndef UTHASH_H 25 #define UTHASH_H 26 27 #define UTHASH_VERSION 2.0.2 28 29 #include <string.h> /* memcmp,strlen */ 30 #include <stddef.h> /* ptrdiff_t */ 31 #include <stdlib.h> /* exit() */ 32 33 /* These macros use decltype or the earlier __typeof GNU extension. 34 As decltype is only available in newer compilers (VS2010 or gcc 4.3+ 35 when compiling c++ source) this code uses whatever method is needed 36 or, for VS2008 where neither is available, uses casting workarounds. 
*/ 37 #if !defined(DECLTYPE) && !defined(NO_DECLTYPE) 38 #if defined(_MSC_VER) /* MS compiler */ 39 #if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ 40 #define DECLTYPE(x) (decltype(x)) 41 #else /* VS2008 or older (or VS2010 in C mode) */ 42 #define NO_DECLTYPE 43 #endif 44 #elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__) 45 #define NO_DECLTYPE 46 #else /* GNU, Sun and other compilers */ 47 #define DECLTYPE(x) (__typeof(x)) 48 #endif 49 #endif 50 51 #ifdef NO_DECLTYPE 52 #define DECLTYPE(x) 53 #define DECLTYPE_ASSIGN(dst,src) \ 54 do { \ 55 char **_da_dst = (char**)(&(dst)); \ 56 *_da_dst = (char*)(src); \ 57 } while (0) 58 #else 59 #define DECLTYPE_ASSIGN(dst,src) \ 60 do { \ 61 (dst) = DECLTYPE(dst)(src); \ 62 } while (0) 63 #endif 64 65 /* a number of the hash function use uint32_t which isn't defined on Pre VS2010 */ 66 #if defined(_WIN32) 67 #if defined(_MSC_VER) && _MSC_VER >= 1600 68 #include <stdint.h> 69 #elif defined(__WATCOMC__) || defined(__MINGW32__) || defined(__CYGWIN__) 70 #include <stdint.h> 71 #else 72 typedef unsigned int uint32_t; 73 typedef unsigned char uint8_t; 74 #endif 75 #elif defined(__GNUC__) && !defined(__VXWORKS__) 76 #include <stdint.h> 77 #else 78 typedef unsigned int uint32_t; 79 typedef unsigned char uint8_t; 80 #endif 81 82 #ifndef uthash_fatal 83 #define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */ 84 #endif 85 #ifndef uthash_malloc 86 #define uthash_malloc(sz) malloc(sz) /* malloc fcn */ 87 #endif 88 #ifndef uthash_free 89 #define uthash_free(ptr,sz) free(ptr) /* free fcn */ 90 #endif 91 #ifndef uthash_strlen 92 #define uthash_strlen(s) strlen(s) 93 #endif 94 #ifndef uthash_memcmp 95 #define uthash_memcmp(a,b,n) memcmp(a,b,n) 96 #endif 97 98 #ifndef uthash_noexpand_fyi 99 #define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */ 100 #endif 101 #ifndef uthash_expand_fyi 102 #define uthash_expand_fyi(tbl) /* can be defined 
to log expands */ 103 #endif 104 105 /* initial number of buckets */ 106 #define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */ 107 #define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */ 108 #define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */ 109 110 /* calculate the element whose hash handle address is hhp */ 111 #define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho))) 112 /* calculate the hash handle from element address elp */ 113 #define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle *)(((char*)(elp)) + ((tbl)->hho))) 114 115 #define HASH_VALUE(keyptr,keylen,hashv) \ 116 do { \ 117 HASH_FCN(keyptr, keylen, hashv); \ 118 } while (0) 119 120 #define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \ 121 do { \ 122 (out) = NULL; \ 123 if (head) { \ 124 unsigned _hf_bkt; \ 125 HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \ 126 if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \ 127 HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \ 128 } \ 129 } \ 130 } while (0) 131 132 #define HASH_FIND(hh,head,keyptr,keylen,out) \ 133 do { \ 134 unsigned _hf_hashv; \ 135 HASH_VALUE(keyptr, keylen, _hf_hashv); \ 136 HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \ 137 } while (0) 138 139 #ifdef HASH_BLOOM 140 #define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM) 141 #define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 
1UL : 0UL) 142 #define HASH_BLOOM_MAKE(tbl) \ 143 do { \ 144 (tbl)->bloom_nbits = HASH_BLOOM; \ 145 (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \ 146 if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \ 147 memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \ 148 (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \ 149 } while (0) 150 151 #define HASH_BLOOM_FREE(tbl) \ 152 do { \ 153 uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ 154 } while (0) 155 156 #define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U))) 157 #define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U))) 158 159 #define HASH_BLOOM_ADD(tbl,hashv) \ 160 HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U))) 161 162 #define HASH_BLOOM_TEST(tbl,hashv) \ 163 HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U))) 164 165 #else 166 #define HASH_BLOOM_MAKE(tbl) 167 #define HASH_BLOOM_FREE(tbl) 168 #define HASH_BLOOM_ADD(tbl,hashv) 169 #define HASH_BLOOM_TEST(tbl,hashv) (1) 170 #define HASH_BLOOM_BYTELEN 0U 171 #endif 172 173 #define HASH_MAKE_TABLE(hh,head) \ 174 do { \ 175 (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \ 176 sizeof(UT_hash_table)); \ 177 if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \ 178 memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \ 179 (head)->hh.tbl->tail = &((head)->hh); \ 180 (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \ 181 (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \ 182 (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \ 183 (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \ 184 HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ 185 if (! 
(head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \ 186 memset((head)->hh.tbl->buckets, 0, \ 187 HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ 188 HASH_BLOOM_MAKE((head)->hh.tbl); \ 189 (head)->hh.tbl->signature = HASH_SIGNATURE; \ 190 } while (0) 191 192 #define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \ 193 do { \ 194 (replaced) = NULL; \ 195 HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ 196 if (replaced) { \ 197 HASH_DELETE(hh, head, replaced); \ 198 } \ 199 HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \ 200 } while (0) 201 202 #define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \ 203 do { \ 204 (replaced) = NULL; \ 205 HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ 206 if (replaced) { \ 207 HASH_DELETE(hh, head, replaced); \ 208 } \ 209 HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \ 210 } while (0) 211 212 #define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \ 213 do { \ 214 unsigned _hr_hashv; \ 215 HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \ 216 HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \ 217 } while (0) 218 219 #define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \ 220 do { \ 221 unsigned _hr_hashv; \ 222 HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \ 223 HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \ 224 } while (0) 225 226 #define HASH_APPEND_LIST(hh, head, add) \ 227 do { \ 228 (add)->hh.next = NULL; \ 229 (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \ 230 (head)->hh.tbl->tail->next = (add); \ 231 (head)->hh.tbl->tail = &((add)->hh); \ 232 } while (0) 233 234 #define 
HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ 235 do { \ 236 do { \ 237 if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) \ 238 break; \ 239 } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \ 240 } while (0) 241 242 #ifdef NO_DECLTYPE 243 #undef HASH_AKBI_INNER_LOOP 244 #define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ 245 do { \ 246 char *_hs_saved_head = (char*)(head); \ 247 do { \ 248 DECLTYPE_ASSIGN(head, _hs_iter); \ 249 if (cmpfcn(head, add) > 0) { \ 250 DECLTYPE_ASSIGN(head, _hs_saved_head); \ 251 break; \ 252 } \ 253 DECLTYPE_ASSIGN(head, _hs_saved_head); \ 254 } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \ 255 } while (0) 256 #endif 257 258 #define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \ 259 do { \ 260 unsigned _ha_bkt; \ 261 (add)->hh.hashv = (hashval); \ 262 (add)->hh.key = (char*) (keyptr); \ 263 (add)->hh.keylen = (unsigned) (keylen_in); \ 264 if (!(head)) { \ 265 (add)->hh.next = NULL; \ 266 (add)->hh.prev = NULL; \ 267 (head) = (add); \ 268 HASH_MAKE_TABLE(hh, head); \ 269 } else { \ 270 void *_hs_iter = (head); \ 271 (add)->hh.tbl = (head)->hh.tbl; \ 272 HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \ 273 if (_hs_iter) { \ 274 (add)->hh.next = _hs_iter; \ 275 if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \ 276 HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \ 277 } else { \ 278 (head) = (add); \ 279 } \ 280 HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \ 281 } else { \ 282 HASH_APPEND_LIST(hh, head, add); \ 283 } \ 284 } \ 285 (head)->hh.tbl->num_items++; \ 286 HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \ 287 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \ 288 HASH_BLOOM_ADD((head)->hh.tbl, hashval); \ 289 HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ 290 HASH_FSCK(hh, head); \ 291 } while (0) 292 293 #define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \ 294 do { \ 295 
unsigned _hs_hashv; \ 296 HASH_VALUE(keyptr, keylen_in, _hs_hashv); \ 297 HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \ 298 } while (0) 299 300 #define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \ 301 HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn) 302 303 #define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \ 304 HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn) 305 306 #define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \ 307 do { \ 308 unsigned _ha_bkt; \ 309 (add)->hh.hashv = (hashval); \ 310 (add)->hh.key = (char*) (keyptr); \ 311 (add)->hh.keylen = (unsigned) (keylen_in); \ 312 if (!(head)) { \ 313 (add)->hh.next = NULL; \ 314 (add)->hh.prev = NULL; \ 315 (head) = (add); \ 316 HASH_MAKE_TABLE(hh, head); \ 317 } else { \ 318 (add)->hh.tbl = (head)->hh.tbl; \ 319 HASH_APPEND_LIST(hh, head, add); \ 320 } \ 321 (head)->hh.tbl->num_items++; \ 322 HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \ 323 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \ 324 HASH_BLOOM_ADD((head)->hh.tbl, hashval); \ 325 HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ 326 HASH_FSCK(hh, head); \ 327 } while (0) 328 329 #define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \ 330 do { \ 331 unsigned _ha_hashv; \ 332 HASH_VALUE(keyptr, keylen_in, _ha_hashv); \ 333 HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \ 334 } while (0) 335 336 #define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \ 337 HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add) 338 339 #define HASH_ADD(hh,head,fieldname,keylen_in,add) \ 340 HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add) 341 342 #define HASH_TO_BKT(hashv,num_bkts,bkt) \ 343 do { \ 344 bkt = ((hashv) & ((num_bkts) - 1U)); \ 345 } while (0) 346 347 /* 
delete "delptr" from the hash table. 348 * "the usual" patch-up process for the app-order doubly-linked-list. 349 * The use of _hd_hh_del below deserves special explanation. 350 * These used to be expressed using (delptr) but that led to a bug 351 * if someone used the same symbol for the head and deletee, like 352 * HASH_DELETE(hh,users,users); 353 * We want that to work, but by changing the head (users) below 354 * we were forfeiting our ability to further refer to the deletee (users) 355 * in the patch-up process. Solution: use scratch space to 356 * copy the deletee pointer, then the latter references are via that 357 * scratch pointer rather than through the repointed (users) symbol. 358 */ 359 #define HASH_DELETE(hh,head,delptr) \ 360 do { \ 361 struct UT_hash_handle *_hd_hh_del; \ 362 if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \ 363 uthash_free((head)->hh.tbl->buckets, \ 364 (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \ 365 HASH_BLOOM_FREE((head)->hh.tbl); \ 366 uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ 367 head = NULL; \ 368 } else { \ 369 unsigned _hd_bkt; \ 370 _hd_hh_del = &((delptr)->hh); \ 371 if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \ 372 (head)->hh.tbl->tail = \ 373 (UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \ 374 (head)->hh.tbl->hho); \ 375 } \ 376 if ((delptr)->hh.prev != NULL) { \ 377 ((UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \ 378 (head)->hh.tbl->hho))->next = (delptr)->hh.next; \ 379 } else { \ 380 DECLTYPE_ASSIGN(head,(delptr)->hh.next); \ 381 } \ 382 if (_hd_hh_del->next != NULL) { \ 383 ((UT_hash_handle*)((ptrdiff_t)_hd_hh_del->next + \ 384 (head)->hh.tbl->hho))->prev = \ 385 _hd_hh_del->prev; \ 386 } \ 387 HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ 388 HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \ 389 (head)->hh.tbl->num_items--; \ 390 } \ 391 HASH_FSCK(hh,head); \ 392 } while (0) 393 394 395 
/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */ 396 #define HASH_FIND_STR(head,findstr,out) \ 397 HASH_FIND(hh,head,findstr,(unsigned)uthash_strlen(findstr),out) 398 #define HASH_ADD_STR(head,strfield,add) \ 399 HASH_ADD(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add) 400 #define HASH_REPLACE_STR(head,strfield,add,replaced) \ 401 HASH_REPLACE(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add,replaced) 402 #define HASH_FIND_INT(head,findint,out) \ 403 HASH_FIND(hh,head,findint,sizeof(int),out) 404 #define HASH_ADD_INT(head,intfield,add) \ 405 HASH_ADD(hh,head,intfield,sizeof(int),add) 406 #define HASH_REPLACE_INT(head,intfield,add,replaced) \ 407 HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced) 408 #define HASH_FIND_PTR(head,findptr,out) \ 409 HASH_FIND(hh,head,findptr,sizeof(void *),out) 410 #define HASH_ADD_PTR(head,ptrfield,add) \ 411 HASH_ADD(hh,head,ptrfield,sizeof(void *),add) 412 #define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \ 413 HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced) 414 #define HASH_DEL(head,delptr) \ 415 HASH_DELETE(hh,head,delptr) 416 417 /* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined. 418 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined. 419 */ 420 #ifdef HASH_DEBUG 421 #define HASH_OOPS(...) 
do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0) 422 #define HASH_FSCK(hh,head) \ 423 do { \ 424 struct UT_hash_handle *_thh; \ 425 if (head) { \ 426 unsigned _bkt_i; \ 427 unsigned _count; \ 428 char *_prev; \ 429 _count = 0; \ 430 for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \ 431 unsigned _bkt_count = 0; \ 432 _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \ 433 _prev = NULL; \ 434 while (_thh) { \ 435 if (_prev != (char*)(_thh->hh_prev)) { \ 436 HASH_OOPS("invalid hh_prev %p, actual %p\n", \ 437 _thh->hh_prev, _prev ); \ 438 } \ 439 _bkt_count++; \ 440 _prev = (char*)(_thh); \ 441 _thh = _thh->hh_next; \ 442 } \ 443 _count += _bkt_count; \ 444 if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \ 445 HASH_OOPS("invalid bucket count %u, actual %u\n", \ 446 (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \ 447 } \ 448 } \ 449 if (_count != (head)->hh.tbl->num_items) { \ 450 HASH_OOPS("invalid hh item count %u, actual %u\n", \ 451 (head)->hh.tbl->num_items, _count ); \ 452 } \ 453 /* traverse hh in app order; check next/prev integrity, count */ \ 454 _count = 0; \ 455 _prev = NULL; \ 456 _thh = &(head)->hh; \ 457 while (_thh) { \ 458 _count++; \ 459 if (_prev !=(char*)(_thh->prev)) { \ 460 HASH_OOPS("invalid prev %p, actual %p\n", \ 461 _thh->prev, _prev ); \ 462 } \ 463 _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \ 464 _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \ 465 (head)->hh.tbl->hho) : NULL ); \ 466 } \ 467 if (_count != (head)->hh.tbl->num_items) { \ 468 HASH_OOPS("invalid app item count %u, actual %u\n", \ 469 (head)->hh.tbl->num_items, _count ); \ 470 } \ 471 } \ 472 } while (0) 473 #else 474 #define HASH_FSCK(hh,head) 475 #endif 476 477 /* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to 478 * the descriptor to which this macro is defined for tuning the hash function. 479 * The app can #include <unistd.h> to get the prototype for write(2). 
*/ 480 #ifdef HASH_EMIT_KEYS 481 #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \ 482 do { \ 483 unsigned _klen = fieldlen; \ 484 write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \ 485 write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \ 486 } while (0) 487 #else 488 #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) 489 #endif 490 491 /* default to Jenkin's hash unless overridden e.g. DHASH_FUNCTION=HASH_SAX */ 492 #ifdef HASH_FUNCTION 493 #define HASH_FCN HASH_FUNCTION 494 #else 495 #define HASH_FCN HASH_JEN 496 #endif 497 498 /* The Bernstein hash function, used in Perl prior to v5.6. Note (x<<5+x)=x*33. */ 499 #define HASH_BER(key,keylen,hashv) \ 500 do { \ 501 unsigned _hb_keylen=(unsigned)keylen; \ 502 const unsigned char *_hb_key=(const unsigned char*)(key); \ 503 (hashv) = 0; \ 504 while (_hb_keylen-- != 0U) { \ 505 (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \ 506 } \ 507 } while (0) 508 509 510 /* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at 511 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */ 512 #define HASH_SAX(key,keylen,hashv) \ 513 do { \ 514 unsigned _sx_i; \ 515 const unsigned char *_hs_key=(const unsigned char*)(key); \ 516 hashv = 0; \ 517 for(_sx_i=0; _sx_i < keylen; _sx_i++) { \ 518 hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \ 519 } \ 520 } while (0) 521 /* FNV-1a variation */ 522 #define HASH_FNV(key,keylen,hashv) \ 523 do { \ 524 unsigned _fn_i; \ 525 const unsigned char *_hf_key=(const unsigned char*)(key); \ 526 hashv = 2166136261U; \ 527 for(_fn_i=0; _fn_i < keylen; _fn_i++) { \ 528 hashv = hashv ^ _hf_key[_fn_i]; \ 529 hashv = hashv * 16777619U; \ 530 } \ 531 } while (0) 532 533 #define HASH_OAT(key,keylen,hashv) \ 534 do { \ 535 unsigned _ho_i; \ 536 const unsigned char *_ho_key=(const unsigned char*)(key); \ 537 hashv = 0; \ 538 for(_ho_i=0; _ho_i < keylen; _ho_i++) { \ 539 hashv += _ho_key[_ho_i]; \ 540 hashv += (hashv << 10); \ 541 hashv ^= (hashv >> 6); \ 542 } \ 543 
hashv += (hashv << 3); \ 544 hashv ^= (hashv >> 11); \ 545 hashv += (hashv << 15); \ 546 } while (0) 547 548 #define HASH_JEN_MIX(a,b,c) \ 549 do { \ 550 a -= b; a -= c; a ^= ( c >> 13 ); \ 551 b -= c; b -= a; b ^= ( a << 8 ); \ 552 c -= a; c -= b; c ^= ( b >> 13 ); \ 553 a -= b; a -= c; a ^= ( c >> 12 ); \ 554 b -= c; b -= a; b ^= ( a << 16 ); \ 555 c -= a; c -= b; c ^= ( b >> 5 ); \ 556 a -= b; a -= c; a ^= ( c >> 3 ); \ 557 b -= c; b -= a; b ^= ( a << 10 ); \ 558 c -= a; c -= b; c ^= ( b >> 15 ); \ 559 } while (0) 560 561 #define HASH_JEN(key,keylen,hashv) \ 562 do { \ 563 unsigned _hj_i,_hj_j,_hj_k; \ 564 unsigned const char *_hj_key=(unsigned const char*)(key); \ 565 hashv = 0xfeedbeefu; \ 566 _hj_i = _hj_j = 0x9e3779b9u; \ 567 _hj_k = (unsigned)(keylen); \ 568 while (_hj_k >= 12U) { \ 569 _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \ 570 + ( (unsigned)_hj_key[2] << 16 ) \ 571 + ( (unsigned)_hj_key[3] << 24 ) ); \ 572 _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \ 573 + ( (unsigned)_hj_key[6] << 16 ) \ 574 + ( (unsigned)_hj_key[7] << 24 ) ); \ 575 hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \ 576 + ( (unsigned)_hj_key[10] << 16 ) \ 577 + ( (unsigned)_hj_key[11] << 24 ) ); \ 578 \ 579 HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ 580 \ 581 _hj_key += 12; \ 582 _hj_k -= 12U; \ 583 } \ 584 hashv += (unsigned)(keylen); \ 585 switch ( _hj_k ) { \ 586 case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \ 587 case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \ 588 case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \ 589 case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \ 590 case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \ 591 case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \ 592 case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \ 593 case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \ 594 case 3: _hj_i += ( (unsigned)_hj_key[2] << 
16 ); /* FALLTHROUGH */ \ 595 case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \ 596 case 1: _hj_i += _hj_key[0]; \ 597 } \ 598 HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ 599 } while (0) 600 601 /* The Paul Hsieh hash function */ 602 #undef get16bits 603 #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ 604 || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) 605 #define get16bits(d) (*((const uint16_t *) (d))) 606 #endif 607 608 #if !defined (get16bits) 609 #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \ 610 +(uint32_t)(((const uint8_t *)(d))[0]) ) 611 #endif 612 #define HASH_SFH(key,keylen,hashv) \ 613 do { \ 614 unsigned const char *_sfh_key=(unsigned const char*)(key); \ 615 uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \ 616 \ 617 unsigned _sfh_rem = _sfh_len & 3U; \ 618 _sfh_len >>= 2; \ 619 hashv = 0xcafebabeu; \ 620 \ 621 /* Main loop */ \ 622 for (;_sfh_len > 0U; _sfh_len--) { \ 623 hashv += get16bits (_sfh_key); \ 624 _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \ 625 hashv = (hashv << 16) ^ _sfh_tmp; \ 626 _sfh_key += 2U*sizeof (uint16_t); \ 627 hashv += hashv >> 11; \ 628 } \ 629 \ 630 /* Handle end cases */ \ 631 switch (_sfh_rem) { \ 632 case 3: hashv += get16bits (_sfh_key); \ 633 hashv ^= hashv << 16; \ 634 hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \ 635 hashv += hashv >> 11; \ 636 break; \ 637 case 2: hashv += get16bits (_sfh_key); \ 638 hashv ^= hashv << 11; \ 639 hashv += hashv >> 17; \ 640 break; \ 641 case 1: hashv += *_sfh_key; \ 642 hashv ^= hashv << 10; \ 643 hashv += hashv >> 1; \ 644 } \ 645 \ 646 /* Force "avalanching" of final 127 bits */ \ 647 hashv ^= hashv << 3; \ 648 hashv += hashv >> 5; \ 649 hashv ^= hashv << 4; \ 650 hashv += hashv >> 17; \ 651 hashv ^= hashv << 25; \ 652 hashv += hashv >> 6; \ 653 } while (0) 654 655 #ifdef HASH_USING_NO_STRICT_ALIASING 656 /* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for 
unaligned reads. 657 * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error. 658 * MurmurHash uses the faster approach only on CPU's where we know it's safe. 659 * 660 * Note the preprocessor built-in defines can be emitted using: 661 * 662 * gcc -m64 -dM -E - < /dev/null (on gcc) 663 * cc -## a.c (where a.c is a simple test file) (Sun Studio) 664 */ 665 #if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86)) 666 #define MUR_GETBLOCK(p,i) p[i] 667 #else /* non intel */ 668 #define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 3UL) == 0UL) 669 #define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 3UL) == 1UL) 670 #define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL) 671 #define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL) 672 #define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL)) 673 #if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__)) 674 #define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24)) 675 #define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16)) 676 #define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8)) 677 #else /* assume little endian non-intel */ 678 #define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24)) 679 #define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16)) 680 #define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8)) 681 #endif 682 #define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \ 683 (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \ 684 (MUR_PLUS2_ALIGNED(p) ? 
MUR_TWO_TWO(p) : \ 685 MUR_ONE_THREE(p)))) 686 #endif 687 #define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) 688 #define MUR_FMIX(_h) \ 689 do { \ 690 _h ^= _h >> 16; \ 691 _h *= 0x85ebca6bu; \ 692 _h ^= _h >> 13; \ 693 _h *= 0xc2b2ae35u; \ 694 _h ^= _h >> 16; \ 695 } while (0) 696 697 #define HASH_MUR(key,keylen,hashv) \ 698 do { \ 699 const uint8_t *_mur_data = (const uint8_t*)(key); \ 700 const int _mur_nblocks = (int)(keylen) / 4; \ 701 uint32_t _mur_h1 = 0xf88D5353u; \ 702 uint32_t _mur_c1 = 0xcc9e2d51u; \ 703 uint32_t _mur_c2 = 0x1b873593u; \ 704 uint32_t _mur_k1 = 0; \ 705 const uint8_t *_mur_tail; \ 706 const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+(_mur_nblocks*4)); \ 707 int _mur_i; \ 708 for(_mur_i = -_mur_nblocks; _mur_i!=0; _mur_i++) { \ 709 _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \ 710 _mur_k1 *= _mur_c1; \ 711 _mur_k1 = MUR_ROTL32(_mur_k1,15); \ 712 _mur_k1 *= _mur_c2; \ 713 \ 714 _mur_h1 ^= _mur_k1; \ 715 _mur_h1 = MUR_ROTL32(_mur_h1,13); \ 716 _mur_h1 = (_mur_h1*5U) + 0xe6546b64u; \ 717 } \ 718 _mur_tail = (const uint8_t*)(_mur_data + (_mur_nblocks*4)); \ 719 _mur_k1=0; \ 720 switch((keylen) & 3U) { \ 721 case 3: _mur_k1 ^= (uint32_t)_mur_tail[2] << 16; /* FALLTHROUGH */ \ 722 case 2: _mur_k1 ^= (uint32_t)_mur_tail[1] << 8; /* FALLTHROUGH */ \ 723 case 1: _mur_k1 ^= (uint32_t)_mur_tail[0]; \ 724 _mur_k1 *= _mur_c1; \ 725 _mur_k1 = MUR_ROTL32(_mur_k1,15); \ 726 _mur_k1 *= _mur_c2; \ 727 _mur_h1 ^= _mur_k1; \ 728 } \ 729 _mur_h1 ^= (uint32_t)(keylen); \ 730 MUR_FMIX(_mur_h1); \ 731 hashv = _mur_h1; \ 732 } while (0) 733 #endif /* HASH_USING_NO_STRICT_ALIASING */ 734 735 /* iterate over items in a known bucket to find desired item */ 736 #define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \ 737 do { \ 738 if ((head).hh_head != NULL) { \ 739 DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \ 740 } else { \ 741 (out) = NULL; \ 742 } \ 743 while ((out) != NULL) { \ 744 if ((out)->hh.hashv == (hashval) && 
(out)->hh.keylen == (keylen_in)) { \ 745 if (uthash_memcmp((out)->hh.key, keyptr, keylen_in) == 0) { \ 746 break; \ 747 } \ 748 } \ 749 if ((out)->hh.hh_next != NULL) { \ 750 DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \ 751 } else { \ 752 (out) = NULL; \ 753 } \ 754 } \ 755 } while (0) 756 757 /* add an item to a bucket */ 758 #define HASH_ADD_TO_BKT(head,addhh) \ 759 do { \ 760 head.count++; \ 761 (addhh)->hh_next = head.hh_head; \ 762 (addhh)->hh_prev = NULL; \ 763 if (head.hh_head != NULL) { (head).hh_head->hh_prev = (addhh); } \ 764 (head).hh_head=addhh; \ 765 if ((head.count >= ((head.expand_mult+1U) * HASH_BKT_CAPACITY_THRESH)) \ 766 && ((addhh)->tbl->noexpand != 1U)) { \ 767 HASH_EXPAND_BUCKETS((addhh)->tbl); \ 768 } \ 769 } while (0) 770 771 /* remove an item from a given bucket */ 772 #define HASH_DEL_IN_BKT(hh,head,hh_del) \ 773 (head).count--; \ 774 if ((head).hh_head == hh_del) { \ 775 (head).hh_head = hh_del->hh_next; \ 776 } \ 777 if (hh_del->hh_prev) { \ 778 hh_del->hh_prev->hh_next = hh_del->hh_next; \ 779 } \ 780 if (hh_del->hh_next) { \ 781 hh_del->hh_next->hh_prev = hh_del->hh_prev; \ 782 } 783 784 /* Bucket expansion has the effect of doubling the number of buckets 785 * and redistributing the items into the new buckets. Ideally the 786 * items will distribute more or less evenly into the new buckets 787 * (the extent to which this is true is a measure of the quality of 788 * the hash function as it applies to the key domain). 789 * 790 * With the items distributed into more buckets, the chain length 791 * (item count) in each bucket is reduced. Thus by expanding buckets 792 * the hash keeps a bound on the chain length. This bounded chain 793 * length is the essence of how a hash provides constant time lookup. 794 * 795 * The calculation of tbl->ideal_chain_maxlen below deserves some 796 * explanation. First, keep in mind that we're calculating the ideal 797 * maximum chain length based on the *new* (doubled) bucket count. 
798 * In fractions this is just n/b (n=number of items,b=new num buckets). 799 * Since the ideal chain length is an integer, we want to calculate 800 * ceil(n/b). We don't depend on floating point arithmetic in this 801 * hash, so to calculate ceil(n/b) with integers we could write 802 * 803 * ceil(n/b) = (n/b) + ((n%b)?1:0) 804 * 805 * and in fact a previous version of this hash did just that. 806 * But now we have improved things a bit by recognizing that b is 807 * always a power of two. We keep its base 2 log handy (call it lb), 808 * so now we can write this with a bit shift and logical AND: 809 * 810 * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0) 811 * 812 */ 813 #define HASH_EXPAND_BUCKETS(tbl) \ 814 do { \ 815 unsigned _he_bkt; \ 816 unsigned _he_bkt_i; \ 817 struct UT_hash_handle *_he_thh, *_he_hh_nxt; \ 818 UT_hash_bucket *_he_new_buckets, *_he_newbkt; \ 819 _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \ 820 2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ 821 if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \ 822 memset(_he_new_buckets, 0, \ 823 2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ 824 tbl->ideal_chain_maxlen = \ 825 (tbl->num_items >> (tbl->log2_num_buckets+1U)) + \ 826 (((tbl->num_items & ((tbl->num_buckets*2U)-1U)) != 0U) ? 
1U : 0U); \ 827 tbl->nonideal_items = 0; \ 828 for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \ 829 { \ 830 _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \ 831 while (_he_thh != NULL) { \ 832 _he_hh_nxt = _he_thh->hh_next; \ 833 HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2U, _he_bkt); \ 834 _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \ 835 if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \ 836 tbl->nonideal_items++; \ 837 _he_newbkt->expand_mult = _he_newbkt->count / \ 838 tbl->ideal_chain_maxlen; \ 839 } \ 840 _he_thh->hh_prev = NULL; \ 841 _he_thh->hh_next = _he_newbkt->hh_head; \ 842 if (_he_newbkt->hh_head != NULL) { _he_newbkt->hh_head->hh_prev = \ 843 _he_thh; } \ 844 _he_newbkt->hh_head = _he_thh; \ 845 _he_thh = _he_hh_nxt; \ 846 } \ 847 } \ 848 uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \ 849 tbl->num_buckets *= 2U; \ 850 tbl->log2_num_buckets++; \ 851 tbl->buckets = _he_new_buckets; \ 852 tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \ 853 (tbl->ineff_expands+1U) : 0U; \ 854 if (tbl->ineff_expands > 1U) { \ 855 tbl->noexpand=1; \ 856 uthash_noexpand_fyi(tbl); \ 857 } \ 858 uthash_expand_fyi(tbl); \ 859 } while (0) 860 861 862 /* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */ 863 /* Note that HASH_SORT assumes the hash handle name to be hh. 864 * HASH_SRT was added to allow the hash handle name to be passed in. 
 */

/* Sort the items of the hash into cmpfcn order.
 *
 * cmpfcn(a,b) receives pointers to two user structures and returns a
 * strcmp-style result (<0, 0, >0).  The sort is a bottom-up mergesort run
 * over the application-order prev/next list: it allocates nothing, runs in
 * O(n log n), and is stable (on ties the <=0 comparison prefers the
 * element from the earlier run).  Only the app-order links (prev/next),
 * the list head and tbl->tail are rewritten; the bucket chains
 * (hh_prev/hh_next) are untouched, so lookups are unaffected.
 *
 * HASH_SRT takes the hash-handle field name explicitly; HASH_SORT assumes
 * the conventional name "hh".  The (char*)+hho arithmetic converts between
 * an element pointer and its embedded hash handle. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn)                                               \
do {                                                                           \
  unsigned _hs_i;                                                              \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize;             \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail;          \
  if (head != NULL) {                                                          \
      _hs_insize = 1;                                                          \
      _hs_looping = 1;                                                         \
      _hs_list = &((head)->hh);                                                \
      /* each pass merges pairs of adjacent runs of length _hs_insize; */      \
      /* the pass that performs <=1 merge leaves the list fully sorted */      \
      while (_hs_looping != 0U) {                                              \
          _hs_p = _hs_list;                                                    \
          _hs_list = NULL;                                                     \
          _hs_tail = NULL;                                                     \
          _hs_nmerges = 0;                                                     \
          while (_hs_p != NULL) {                                              \
              _hs_nmerges++;                                                   \
              /* q starts up to _hs_insize elements past p */                  \
              _hs_q = _hs_p;                                                   \
              _hs_psize = 0;                                                   \
              for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) {                 \
                  _hs_psize++;                                                 \
                  _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ?            \
                          ((void*)((char*)(_hs_q->next) +                      \
                          (head)->hh.tbl->hho)) : NULL);                       \
                  if (! (_hs_q) ) { break; }                                   \
              }                                                                \
              _hs_qsize = _hs_insize;                                          \
              /* merge the p-run and q-run, appending to the new list */       \
              while ((_hs_psize > 0U) || ((_hs_qsize > 0U) && (_hs_q != NULL))) {\
                  if (_hs_psize == 0U) {                                       \
                      _hs_e = _hs_q;                                           \
                      _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ?        \
                              ((void*)((char*)(_hs_q->next) +                  \
                              (head)->hh.tbl->hho)) : NULL);                   \
                      _hs_qsize--;                                             \
                  } else if ( (_hs_qsize == 0U) || (_hs_q == NULL) ) {         \
                      _hs_e = _hs_p;                                           \
                      if (_hs_p != NULL){                                      \
                          _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ?    \
                                  ((void*)((char*)(_hs_p->next) +              \
                                  (head)->hh.tbl->hho)) : NULL);               \
                      }                                                        \
                      _hs_psize--;                                             \
                  } else if ((                                                 \
                      cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
                             DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
                             ) <= 0) {                                         \
                      _hs_e = _hs_p;                                           \
                      if (_hs_p != NULL){                                      \
                          _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ?    \
                                  ((void*)((char*)(_hs_p->next) +              \
                                  (head)->hh.tbl->hho)) : NULL);               \
                      }                                                        \
                      _hs_psize--;                                             \
                  } else {                                                     \
                      _hs_e = _hs_q;                                           \
                      _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ?        \
                              ((void*)((char*)(_hs_q->next) +                  \
                              (head)->hh.tbl->hho)) : NULL);                   \
                      _hs_qsize--;                                             \
                  }                                                            \
                  /* append the chosen element _hs_e to the merged list */     \
                  if ( _hs_tail != NULL ) {                                    \
                      _hs_tail->next = ((_hs_e != NULL) ?                      \
                            ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL);        \
                  } else {                                                     \
                      _hs_list = _hs_e;                                        \
                  }                                                            \
                  if (_hs_e != NULL) {                                         \
                      _hs_e->prev = ((_hs_tail != NULL) ?                      \
                         ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL);        \
                  }                                                            \
                  _hs_tail = _hs_e;                                            \
              }                                                                \
              _hs_p = _hs_q;                                                   \
          }                                                                    \
          if (_hs_tail != NULL){                                               \
              _hs_tail->next = NULL;                                           \
          }                                                                    \
          if ( _hs_nmerges <= 1U ) {                                           \
              _hs_looping=0;                                                   \
              (head)->hh.tbl->tail = _hs_tail;                                 \
              DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list));    \
          }                                                                    \
          _hs_insize *= 2U;                                                    \
      }                                                                        \
      HASH_FSCK(hh,head);                                                      \
  }                                                                            \
} while (0)

/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * hash handle that must be present in the structure.
*/ 958 #define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \ 959 do { \ 960 unsigned _src_bkt, _dst_bkt; \ 961 void *_last_elt=NULL, *_elt; \ 962 UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \ 963 ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \ 964 if (src != NULL) { \ 965 for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \ 966 for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \ 967 _src_hh != NULL; \ 968 _src_hh = _src_hh->hh_next) { \ 969 _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \ 970 if (cond(_elt)) { \ 971 _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \ 972 _dst_hh->key = _src_hh->key; \ 973 _dst_hh->keylen = _src_hh->keylen; \ 974 _dst_hh->hashv = _src_hh->hashv; \ 975 _dst_hh->prev = _last_elt; \ 976 _dst_hh->next = NULL; \ 977 if (_last_elt_hh != NULL) { _last_elt_hh->next = _elt; } \ 978 if (dst == NULL) { \ 979 DECLTYPE_ASSIGN(dst,_elt); \ 980 HASH_MAKE_TABLE(hh_dst,dst); \ 981 } else { \ 982 _dst_hh->tbl = (dst)->hh_dst.tbl; \ 983 } \ 984 HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \ 985 HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \ 986 (dst)->hh_dst.tbl->num_items++; \ 987 _last_elt = _elt; \ 988 _last_elt_hh = _dst_hh; \ 989 } \ 990 } \ 991 } \ 992 } \ 993 HASH_FSCK(hh_dst,dst); \ 994 } while (0) 995 996 #define HASH_CLEAR(hh,head) \ 997 do { \ 998 if (head != NULL) { \ 999 uthash_free((head)->hh.tbl->buckets, \ 1000 (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \ 1001 HASH_BLOOM_FREE((head)->hh.tbl); \ 1002 uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ 1003 (head)=NULL; \ 1004 } \ 1005 } while (0) 1006 1007 #define HASH_OVERHEAD(hh,head) \ 1008 ((head != NULL) ? 
( \ 1009 (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \ 1010 ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \ 1011 sizeof(UT_hash_table) + \ 1012 (HASH_BLOOM_BYTELEN))) : 0U) 1013 1014 #ifdef NO_DECLTYPE 1015 #define HASH_ITER(hh,head,el,tmp) \ 1016 for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \ 1017 (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL))) 1018 #else 1019 #define HASH_ITER(hh,head,el,tmp) \ 1020 for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \ 1021 (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL))) 1022 #endif 1023 1024 /* obtain a count of items in the hash */ 1025 #define HASH_COUNT(head) HASH_CNT(hh,head) 1026 #define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U) 1027 1028 typedef struct UT_hash_bucket { 1029 struct UT_hash_handle *hh_head; 1030 unsigned count; 1031 1032 /* expand_mult is normally set to 0. In this situation, the max chain length 1033 * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If 1034 * the bucket's chain exceeds this length, bucket expansion is triggered). 1035 * However, setting expand_mult to a non-zero value delays bucket expansion 1036 * (that would be triggered by additions to this particular bucket) 1037 * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH. 1038 * (The multiplier is simply expand_mult+1). The whole idea of this 1039 * multiplier is to reduce bucket expansions, since they are expensive, in 1040 * situations where we know that a particular bucket tends to be overused. 1041 * It is better to let its chain length grow to a longer yet-still-bounded 1042 * value, than to do an O(n) bucket expansion too often. 
1043 */ 1044 unsigned expand_mult; 1045 1046 } UT_hash_bucket; 1047 1048 /* random signature used only to find hash tables in external analysis */ 1049 #define HASH_SIGNATURE 0xa0111fe1u 1050 #define HASH_BLOOM_SIGNATURE 0xb12220f2u 1051 1052 typedef struct UT_hash_table { 1053 UT_hash_bucket *buckets; 1054 unsigned num_buckets, log2_num_buckets; 1055 unsigned num_items; 1056 struct UT_hash_handle *tail; /* tail hh in app order, for fast append */ 1057 ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */ 1058 1059 /* in an ideal situation (all buckets used equally), no bucket would have 1060 * more than ceil(#items/#buckets) items. that's the ideal chain length. */ 1061 unsigned ideal_chain_maxlen; 1062 1063 /* nonideal_items is the number of items in the hash whose chain position 1064 * exceeds the ideal chain maxlen. these items pay the penalty for an uneven 1065 * hash distribution; reaching them in a chain traversal takes >ideal steps */ 1066 unsigned nonideal_items; 1067 1068 /* ineffective expands occur when a bucket doubling was performed, but 1069 * afterward, more than half the items in the hash had nonideal chain 1070 * positions. If this happens on two consecutive expansions we inhibit any 1071 * further expansion, as it's not helping; this happens when the hash 1072 * function isn't a good fit for the key domain. When expansion is inhibited 1073 * the hash will still work, albeit no longer in constant time. 
*/ 1074 unsigned ineff_expands, noexpand; 1075 1076 uint32_t signature; /* used only to find hash tables in external analysis */ 1077 #ifdef HASH_BLOOM 1078 uint32_t bloom_sig; /* used only to test bloom exists in external analysis */ 1079 uint8_t *bloom_bv; 1080 uint8_t bloom_nbits; 1081 #endif 1082 1083 } UT_hash_table; 1084 1085 typedef struct UT_hash_handle { 1086 struct UT_hash_table *tbl; 1087 void *prev; /* prev element in app order */ 1088 void *next; /* next element in app order */ 1089 struct UT_hash_handle *hh_prev; /* previous hh in bucket order */ 1090 struct UT_hash_handle *hh_next; /* next hh in bucket order */ 1091 void *key; /* ptr to enclosing struct's key */ 1092 unsigned keylen; /* enclosing struct's key len */ 1093 unsigned hashv; /* result of hash-fcn(key) */ 1094 } UT_hash_handle; 1095 1096 #endif /* UTHASH_H */ 1097