1 /* 2 Copyright (c) 2003-2021, Troy D. Hanson http://troydhanson.github.io/uthash/ 3 All rights reserved. 4 5 Redistribution and use in source and binary forms, with or without 6 modification, are permitted provided that the following conditions are met: 7 8 * Redistributions of source code must retain the above copyright 9 notice, this list of conditions and the following disclaimer. 10 11 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 12 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 13 TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 14 PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER 15 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 16 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 17 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 18 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 19 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 21 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 */ 23 24 #ifndef UTHASH_H 25 #define UTHASH_H 26 27 #define UTHASH_VERSION 2.3.0 28 29 #include <string.h> /* memcmp, memset, strlen */ 30 #include <stddef.h> /* ptrdiff_t */ 31 #include <stdlib.h> /* exit */ 32 33 #if defined(HASH_DEFINE_OWN_STDINT) && HASH_DEFINE_OWN_STDINT 34 /* This codepath is provided for backward compatibility, but I plan to remove it. */ 35 #warning "HASH_DEFINE_OWN_STDINT is deprecated; please use HASH_NO_STDINT instead" 36 typedef unsigned int uint32_t; 37 typedef unsigned char uint8_t; 38 #elif defined(HASH_NO_STDINT) && HASH_NO_STDINT 39 #else 40 #include <stdint.h> /* uint8_t, uint32_t */ 41 #endif 42 43 /* These macros use decltype or the earlier __typeof GNU extension. 44 As decltype is only available in newer compilers (VS2010 or gcc 4.3+ 45 when compiling c++ source) this code uses whatever method is needed 46 or, for VS2008 where neither is available, uses casting workarounds. */ 47 #if !defined(DECLTYPE) && !defined(NO_DECLTYPE) 48 #if defined(_MSC_VER) /* MS compiler */ 49 #if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ 50 #define DECLTYPE(x) (decltype(x)) 51 #else /* VS2008 or older (or VS2010 in C mode) */ 52 #define NO_DECLTYPE 53 #endif 54 #elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__) 55 #define NO_DECLTYPE 56 #else /* GNU, Sun and other compilers */ 57 #define DECLTYPE(x) (__typeof(x)) 58 #endif 59 #endif 60 61 #ifdef NO_DECLTYPE 62 #define DECLTYPE(x) 63 #define DECLTYPE_ASSIGN(dst,src) \ 64 do { \ 65 char **_da_dst = (char**)(&(dst)); \ 66 *_da_dst = (char*)(src); \ 67 } while (0) 68 #else 69 #define DECLTYPE_ASSIGN(dst,src) \ 70 do { \ 71 (dst) = DECLTYPE(dst)(src); \ 72 } while (0) 73 #endif 74 75 #ifndef uthash_malloc 76 #define uthash_malloc(sz) malloc(sz) /* malloc fcn */ 77 #endif 78 #ifndef uthash_free 79 #define uthash_free(ptr,sz) free(ptr) /* free fcn */ 80 #endif 81 #ifndef uthash_bzero 82 #define uthash_bzero(a,n) memset(a,'\0',n) 83 #endif 84 #ifndef uthash_strlen 85 #define uthash_strlen(s) strlen(s) 86 #endif 87 88 #ifndef HASH_FUNCTION 89 #define HASH_FUNCTION(keyptr,keylen,hashv) HASH_JEN(keyptr, keylen, hashv) 90 #endif 91 92 #ifndef HASH_KEYCMP 93 #define HASH_KEYCMP(a,b,n) memcmp(a,b,n) 94 #endif 95 96 #ifndef uthash_noexpand_fyi 97 #define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */ 98 #endif 99 #ifndef uthash_expand_fyi 100 #define uthash_expand_fyi(tbl) /* can be defined to log expands */ 101 #endif 102 103 #ifndef HASH_NONFATAL_OOM 104 #define HASH_NONFATAL_OOM 0 105 #endif 106 107 #if HASH_NONFATAL_OOM 108 /* malloc failures can be recovered from */ 109 110 #ifndef uthash_nonfatal_oom 111 #define uthash_nonfatal_oom(obj) do {} while (0) /* non-fatal OOM error */ 112 #endif 113 114 #define HASH_RECORD_OOM(oomed) do { (oomed) = 1; } while (0) 115 #define IF_HASH_NONFATAL_OOM(x) x 116 117 #else 118 /* malloc failures result in lost memory, hash tables are unusable */ 119 120 #ifndef uthash_fatal 121 #define uthash_fatal(msg) exit(-1) /* fatal OOM error */ 122 #endif 123 124 #define HASH_RECORD_OOM(oomed) uthash_fatal("out of memory") 125 #define IF_HASH_NONFATAL_OOM(x) 126 127 #endif 128 129 /* initial number of buckets */ 130 #define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */ 131 #define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */ 132 #define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */ 133 134 /* calculate the element whose hash handle address is hhp */ 135 #define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho))) 136 /* calculate the hash handle from element address elp */ 137 #define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle*)(void*)(((char*)(elp)) + ((tbl)->hho))) 138 139 #define HASH_ROLLBACK_BKT(hh, head, itemptrhh) \ 140 do { \ 141 struct UT_hash_handle *_hd_hh_item = (itemptrhh); \ 142 unsigned _hd_bkt; \ 143 HASH_TO_BKT(_hd_hh_item->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ 144 (head)->hh.tbl->buckets[_hd_bkt].count++; \ 145 _hd_hh_item->hh_next = NULL; \ 146 _hd_hh_item->hh_prev = NULL; \ 147 } while (0) 148 149 #define HASH_VALUE(keyptr,keylen,hashv) \ 150 do { \ 151 HASH_FUNCTION(keyptr, keylen, hashv); \ 152 } while (0) 153 154 #define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \ 155 do { \ 156 (out) = NULL; \ 157 if (head) { \ 158 unsigned _hf_bkt; \ 159 HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \ 160 if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \ 161 HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \ 162 } \ 163 } \ 164 } while (0) 165 166 #define HASH_FIND(hh,head,keyptr,keylen,out) \ 167 do { \ 168 (out) = NULL; \ 169 if (head) { \ 170 unsigned _hf_hashv; \ 171 HASH_VALUE(keyptr, keylen, _hf_hashv); \ 172 HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \ 173 } \ 174 } while (0) 175 176 #ifdef HASH_BLOOM 177 #define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM) 178 #define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL) 179 #define HASH_BLOOM_MAKE(tbl,oomed) \ 180 do { \ 181 (tbl)->bloom_nbits = HASH_BLOOM; \ 182 (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \ 183 if (!(tbl)->bloom_bv) { \ 184 HASH_RECORD_OOM(oomed); \ 185 } else { \ 186 uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ 187 (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \ 188 } \ 189 } while (0) 190 191 #define HASH_BLOOM_FREE(tbl) \ 192 do { \ 193 uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ 194 } while (0) 195 196 #define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U))) 197 #define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U))) 198 199 #define HASH_BLOOM_ADD(tbl,hashv) \ 200 HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U))) 201 202 #define HASH_BLOOM_TEST(tbl,hashv) \ 203 HASH_BLOOM_BITTEST((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U))) 204 205 #else 206 #define HASH_BLOOM_MAKE(tbl,oomed) 207 #define HASH_BLOOM_FREE(tbl) 208 #define HASH_BLOOM_ADD(tbl,hashv) 209 #define HASH_BLOOM_TEST(tbl,hashv) (1) 210 #define HASH_BLOOM_BYTELEN 0U 211 #endif 212 213 #define HASH_MAKE_TABLE(hh,head,oomed) \ 214 do { \ 215 (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table)); \ 216 if (!(head)->hh.tbl) { \ 217 HASH_RECORD_OOM(oomed); \ 218 } else { \ 219 uthash_bzero((head)->hh.tbl, sizeof(UT_hash_table)); \ 220 (head)->hh.tbl->tail = &((head)->hh); \ 221 (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \ 222 (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \ 223 (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \ 224 (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \ 225 HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \ 226 (head)->hh.tbl->signature = HASH_SIGNATURE; \ 227 if (!(head)->hh.tbl->buckets) { \ 228 HASH_RECORD_OOM(oomed); \ 229 uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ 230 } else { \ 231 uthash_bzero((head)->hh.tbl->buckets, \ 232 HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \ 233 HASH_BLOOM_MAKE((head)->hh.tbl, oomed); \ 234 IF_HASH_NONFATAL_OOM( \ 235 if (oomed) { \ 236 uthash_free((head)->hh.tbl->buckets, \ 237 HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ 238 uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ 239 } \ 240 ) \ 241 } \ 242 } \ 243 } while (0) 244 245 #define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \ 246 do { \ 247 (replaced) = NULL; \ 248 HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ 249 if (replaced) { \ 250 HASH_DELETE(hh, head, replaced); \ 251 } \ 252 HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \ 253 } while (0) 254 255 #define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \ 256 do { \ 257 (replaced) = NULL; \ 258 HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ 259 if (replaced) { \ 260 HASH_DELETE(hh, head, replaced); \ 261 } \ 262 HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \ 263 } while (0) 264 265 #define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \ 266 do { \ 267 unsigned _hr_hashv; \ 268 HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \ 269 HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \ 270 } while (0) 271 272 #define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \ 273 do { \ 274 unsigned _hr_hashv; \ 275 HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \ 276 HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \ 277 } while (0) 278 279 #define HASH_APPEND_LIST(hh, head, add) \ 280 do { \ 281 (add)->hh.next = NULL; \ 282 (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \ 283 (head)->hh.tbl->tail->next = (add); \ 284 (head)->hh.tbl->tail = &((add)->hh); \ 285 } while (0) 286 287 #define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ 288 do { \ 289 do { \ 290 if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) { \ 291 break; \ 292 } \ 293 } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \ 294 } while (0) 295 296 #ifdef NO_DECLTYPE 297 #undef HASH_AKBI_INNER_LOOP 298 #define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ 299 do { \ 300 char *_hs_saved_head = (char*)(head); \ 301 do { \ 302 DECLTYPE_ASSIGN(head, _hs_iter); \ 303 if (cmpfcn(head, add) > 0) { \ 304 DECLTYPE_ASSIGN(head, _hs_saved_head); \ 305 break; \ 306 } \ 307 DECLTYPE_ASSIGN(head, _hs_saved_head); \ 308 } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \ 309 } while (0) 310 #endif 311 312 #if HASH_NONFATAL_OOM 313 314 #define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \ 315 do { \ 316 if (!(oomed)) { \ 317 unsigned _ha_bkt; \ 318 (head)->hh.tbl->num_items++; \ 319 HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \ 320 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \ 321 if (oomed) { \ 322 HASH_ROLLBACK_BKT(hh, head, &(add)->hh); \ 323 HASH_DELETE_HH(hh, head, &(add)->hh); \ 324 (add)->hh.tbl = NULL; \ 325 uthash_nonfatal_oom(add); \ 326 } else { \ 327 HASH_BLOOM_ADD((head)->hh.tbl, hashval); \ 328 HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ 329 } \ 330 } else { \ 331 (add)->hh.tbl = NULL; \ 332 uthash_nonfatal_oom(add); \ 333 } \ 334 } while (0) 335 336 #else 337 338 #define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \ 339 do { \ 340 unsigned _ha_bkt; \ 341 (head)->hh.tbl->num_items++; \ 342 HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \ 343 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \ 344 HASH_BLOOM_ADD((head)->hh.tbl, hashval); \ 345 HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ 346 } while (0) 347 348 #endif 349 350 351 #define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \ 352 do { \ 353 IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \ 354 (add)->hh.hashv = (hashval); \ 355 (add)->hh.key = (char*) (keyptr); \ 356 (add)->hh.keylen = (unsigned) (keylen_in); \ 357 if (!(head)) { \ 358 (add)->hh.next = NULL; \ 359 (add)->hh.prev = NULL; \ 360 HASH_MAKE_TABLE(hh, add, _ha_oomed); \ 361 IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \ 362 (head) = (add); \ 363 IF_HASH_NONFATAL_OOM( } ) \ 364 } else { \ 365 void *_hs_iter = (head); \ 366 (add)->hh.tbl = (head)->hh.tbl; \ 367 HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \ 368 if (_hs_iter) { \ 369 (add)->hh.next = _hs_iter; \ 370 if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \ 371 HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \ 372 } else { \ 373 (head) = (add); \ 374 } \ 375 HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \ 376 } else { \ 377 HASH_APPEND_LIST(hh, head, add); \ 378 } \ 379 } \ 380 HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \ 381 HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER"); \ 382 } while (0) 383 384 #define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \ 385 do { \ 386 unsigned _hs_hashv; \ 387 HASH_VALUE(keyptr, keylen_in, _hs_hashv); \ 388 HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \ 389 } while (0) 390 391 #define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \ 392 HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn) 393 394 #define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \ 395 HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn) 396 397 #define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \ 398 do { \ 399 IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \ 400 (add)->hh.hashv = (hashval); \ 401 (add)->hh.key = (const void*) (keyptr); \ 402 (add)->hh.keylen = (unsigned) (keylen_in); \ 403 if (!(head)) { \ 404 (add)->hh.next = NULL; \ 405 (add)->hh.prev = NULL; \ 406 HASH_MAKE_TABLE(hh, add, _ha_oomed); \ 407 IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \ 408 (head) = (add); \ 409 IF_HASH_NONFATAL_OOM( } ) \ 410 } else { \ 411 (add)->hh.tbl = (head)->hh.tbl; \ 412 HASH_APPEND_LIST(hh, head, add); \ 413 } \ 414 HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \ 415 HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE"); \ 416 } while (0) 417 418 #define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \ 419 do { \ 420 unsigned _ha_hashv; \ 421 HASH_VALUE(keyptr, keylen_in, _ha_hashv); \ 422 HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \ 423 } while (0) 424 425 #define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \ 426 HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add) 427 428 #define HASH_ADD(hh,head,fieldname,keylen_in,add) \ 429 HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add) 430 431 #define HASH_TO_BKT(hashv,num_bkts,bkt) \ 432 do { \ 433 bkt = ((hashv) & ((num_bkts) - 1U)); \ 434 } while (0) 435 436 /* delete "delptr" from the hash table. 437 * "the usual" patch-up process for the app-order doubly-linked-list. 438 * The use of _hd_hh_del below deserves special explanation. 439 * These used to be expressed using (delptr) but that led to a bug 440 * if someone used the same symbol for the head and deletee, like 441 * HASH_DELETE(hh,users,users); 442 * We want that to work, but by changing the head (users) below 443 * we were forfeiting our ability to further refer to the deletee (users) 444 * in the patch-up process. Solution: use scratch space to 445 * copy the deletee pointer, then the latter references are via that 446 * scratch pointer rather than through the repointed (users) symbol. 447 */ 448 #define HASH_DELETE(hh,head,delptr) \ 449 HASH_DELETE_HH(hh, head, &(delptr)->hh) 450 451 #define HASH_DELETE_HH(hh,head,delptrhh) \ 452 do { \ 453 struct UT_hash_handle *_hd_hh_del = (delptrhh); \ 454 if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \ 455 HASH_BLOOM_FREE((head)->hh.tbl); \ 456 uthash_free((head)->hh.tbl->buckets, \ 457 (head)->hh.tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ 458 uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ 459 (head) = NULL; \ 460 } else { \ 461 unsigned _hd_bkt; \ 462 if (_hd_hh_del == (head)->hh.tbl->tail) { \ 463 (head)->hh.tbl->tail = HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev); \ 464 } \ 465 if (_hd_hh_del->prev != NULL) { \ 466 HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev)->next = _hd_hh_del->next; \ 467 } else { \ 468 DECLTYPE_ASSIGN(head, _hd_hh_del->next); \ 469 } \ 470 if (_hd_hh_del->next != NULL) { \ 471 HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev; \ 472 } \ 473 HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ 474 HASH_DEL_IN_BKT((head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \ 475 (head)->hh.tbl->num_items--; \ 476 } \ 477 HASH_FSCK(hh, head, "HASH_DELETE_HH"); \ 478 } while (0) 479 480 /* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */ 481 #define HASH_FIND_STR(head,findstr,out) \ 482 do { \ 483 unsigned _uthash_hfstr_keylen = (unsigned)uthash_strlen(findstr); \ 484 HASH_FIND(hh, head, findstr, _uthash_hfstr_keylen, out); \ 485 } while (0) 486 #define HASH_ADD_STR(head,strfield,add) \ 487 do { \ 488 unsigned _uthash_hastr_keylen = (unsigned)uthash_strlen((add)->strfield); \ 489 HASH_ADD(hh, head, strfield[0], _uthash_hastr_keylen, add); \ 490 } while (0) 491 #define HASH_REPLACE_STR(head,strfield,add,replaced) \ 492 do { \ 493 unsigned _uthash_hrstr_keylen = (unsigned)uthash_strlen((add)->strfield); \ 494 HASH_REPLACE(hh, head, strfield[0], _uthash_hrstr_keylen, add, replaced); \ 495 } while (0) 496 #define HASH_FIND_INT(head,findint,out) \ 497 HASH_FIND(hh,head,findint,sizeof(int),out) 498 #define HASH_ADD_INT(head,intfield,add) \ 499 HASH_ADD(hh,head,intfield,sizeof(int),add) 500 #define HASH_REPLACE_INT(head,intfield,add,replaced) \ 501 HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced) 502 #define HASH_FIND_PTR(head,findptr,out) \ 503 HASH_FIND(hh,head,findptr,sizeof(void *),out) 504 #define HASH_ADD_PTR(head,ptrfield,add) \ 505 HASH_ADD(hh,head,ptrfield,sizeof(void *),add) 506 #define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \ 507 HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced) 508 #define HASH_DEL(head,delptr) \ 509 HASH_DELETE(hh,head,delptr) 510 511 /* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined. 512 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined. 513 */ 514 #ifdef HASH_DEBUG 515 #include <stdio.h> /* fprintf, stderr */ 516 #define HASH_OOPS(...) do { fprintf(stderr, __VA_ARGS__); exit(-1); } while (0) 517 #define HASH_FSCK(hh,head,where) \ 518 do { \ 519 struct UT_hash_handle *_thh; \ 520 if (head) { \ 521 unsigned _bkt_i; \ 522 unsigned _count = 0; \ 523 char *_prev; \ 524 for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; ++_bkt_i) { \ 525 unsigned _bkt_count = 0; \ 526 _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \ 527 _prev = NULL; \ 528 while (_thh) { \ 529 if (_prev != (char*)(_thh->hh_prev)) { \ 530 HASH_OOPS("%s: invalid hh_prev %p, actual %p\n", \ 531 (where), (void*)_thh->hh_prev, (void*)_prev); \ 532 } \ 533 _bkt_count++; \ 534 _prev = (char*)(_thh); \ 535 _thh = _thh->hh_next; \ 536 } \ 537 _count += _bkt_count; \ 538 if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \ 539 HASH_OOPS("%s: invalid bucket count %u, actual %u\n", \ 540 (where), (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \ 541 } \ 542 } \ 543 if (_count != (head)->hh.tbl->num_items) { \ 544 HASH_OOPS("%s: invalid hh item count %u, actual %u\n", \ 545 (where), (head)->hh.tbl->num_items, _count); \ 546 } \ 547 _count = 0; \ 548 _prev = NULL; \ 549 _thh = &(head)->hh; \ 550 while (_thh) { \ 551 _count++; \ 552 if (_prev != (char*)_thh->prev) { \ 553 HASH_OOPS("%s: invalid prev %p, actual %p\n", \ 554 (where), (void*)_thh->prev, (void*)_prev); \ 555 } \ 556 _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \ 557 _thh = (_thh->next ? HH_FROM_ELMT((head)->hh.tbl, _thh->next) : NULL); \ 558 } \ 559 if (_count != (head)->hh.tbl->num_items) { \ 560 HASH_OOPS("%s: invalid app item count %u, actual %u\n", \ 561 (where), (head)->hh.tbl->num_items, _count); \ 562 } \ 563 } \ 564 } while (0) 565 #else 566 #define HASH_FSCK(hh,head,where) 567 #endif 568 569 /* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to 570 * the descriptor to which this macro is defined for tuning the hash function. 571 * The app can #include <unistd.h> to get the prototype for write(2). */ 572 #ifdef HASH_EMIT_KEYS 573 #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \ 574 do { \ 575 unsigned _klen = fieldlen; \ 576 write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \ 577 write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \ 578 } while (0) 579 #else 580 #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) 581 #endif 582 583 /* The Bernstein hash function, used in Perl prior to v5.6. Note (x<<5+x)=x*33. */ 584 #define HASH_BER(key,keylen,hashv) \ 585 do { \ 586 unsigned _hb_keylen = (unsigned)keylen; \ 587 const unsigned char *_hb_key = (const unsigned char*)(key); \ 588 (hashv) = 0; \ 589 while (_hb_keylen-- != 0U) { \ 590 (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \ 591 } \ 592 } while (0) 593 594 595 /* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at 596 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */ 597 #define HASH_SAX(key,keylen,hashv) \ 598 do { \ 599 unsigned _sx_i; \ 600 const unsigned char *_hs_key = (const unsigned char*)(key); \ 601 hashv = 0; \ 602 for (_sx_i=0; _sx_i < keylen; _sx_i++) { \ 603 hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \ 604 } \ 605 } while (0) 606 /* FNV-1a variation */ 607 #define HASH_FNV(key,keylen,hashv) \ 608 do { \ 609 unsigned _fn_i; \ 610 const unsigned char *_hf_key = (const unsigned char*)(key); \ 611 (hashv) = 2166136261U; \ 612 for (_fn_i=0; _fn_i < keylen; _fn_i++) { \ 613 hashv = hashv ^ _hf_key[_fn_i]; \ 614 hashv = hashv * 16777619U; \ 615 } \ 616 } while (0) 617 618 #define HASH_OAT(key,keylen,hashv) \ 619 do { \ 620 unsigned _ho_i; \ 621 const unsigned char *_ho_key=(const unsigned char*)(key); \ 622 hashv = 0; \ 623 for(_ho_i=0; _ho_i < keylen; _ho_i++) { \ 624 hashv += _ho_key[_ho_i]; \ 625 hashv += (hashv << 10); \ 626 hashv ^= (hashv >> 6); \ 627 } \ 628 hashv += (hashv << 3); \ 629 hashv ^= (hashv >> 11); \ 630 hashv += (hashv << 15); \ 631 } while (0) 632 633 #define HASH_JEN_MIX(a,b,c) \ 634 do { \ 635 a -= b; a -= c; a ^= ( c >> 13 ); \ 636 b -= c; b -= a; b ^= ( a << 8 ); \ 637 c -= a; c -= b; c ^= ( b >> 13 ); \ 638 a -= b; a -= c; a ^= ( c >> 12 ); \ 639 b -= c; b -= a; b ^= ( a << 16 ); \ 640 c -= a; c -= b; c ^= ( b >> 5 ); \ 641 a -= b; a -= c; a ^= ( c >> 3 ); \ 642 b -= c; b -= a; b ^= ( a << 10 ); \ 643 c -= a; c -= b; c ^= ( b >> 15 ); \ 644 } while (0) 645 646 #define HASH_JEN(key,keylen,hashv) \ 647 do { \ 648 unsigned _hj_i,_hj_j,_hj_k; \ 649 unsigned const char *_hj_key=(unsigned const char*)(key); \ 650 hashv = 0xfeedbeefu; \ 651 _hj_i = _hj_j = 0x9e3779b9u; \ 652 _hj_k = (unsigned)(keylen); \ 653 while (_hj_k >= 12U) { \ 654 _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \ 655 + ( (unsigned)_hj_key[2] << 16 ) \ 656 + ( (unsigned)_hj_key[3] << 24 ) ); \ 657 _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \ 658 + ( (unsigned)_hj_key[6] << 16 ) \ 659 + ( (unsigned)_hj_key[7] << 24 ) ); \ 660 hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \ 661 + ( (unsigned)_hj_key[10] << 16 ) \ 662 + ( (unsigned)_hj_key[11] << 24 ) ); \ 663 \ 664 HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ 665 \ 666 _hj_key += 12; \ 667 _hj_k -= 12U; \ 668 } \ 669 hashv += (unsigned)(keylen); \ 670 switch ( _hj_k ) { \ 671 case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \ 672 case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \ 673 case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \ 674 case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \ 675 case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \ 676 case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \ 677 case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \ 678 case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \ 679 case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \ 680 case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \ 681 case 1: _hj_i += _hj_key[0]; /* FALLTHROUGH */ \ 682 default: ; \ 683 } \ 684 HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ 685 } while (0) 686 687 /* The Paul Hsieh hash function */ 688 #undef get16bits 689 #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ 690 || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) 691 #define get16bits(d) (*((const uint16_t *) (d))) 692 #endif 693 694 #if !defined (get16bits) 695 #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \ 696 +(uint32_t)(((const uint8_t *)(d))[0]) ) 697 #endif 698 #define HASH_SFH(key,keylen,hashv) \ 699 do { \ 700 unsigned const char *_sfh_key=(unsigned const char*)(key); \ 701 uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \ 702 \ 703 unsigned _sfh_rem = _sfh_len & 3U; \ 704 _sfh_len >>= 2; \ 705 hashv = 0xcafebabeu; \ 706 \ 707 /* Main loop */ \ 708 for (;_sfh_len > 0U; _sfh_len--) { \ 709 hashv += get16bits (_sfh_key); \ 710 _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \ 711 hashv = (hashv << 16) ^ _sfh_tmp; \ 712 _sfh_key += 2U*sizeof (uint16_t); \ 713 hashv += hashv >> 11; \ 714 } \ 715 \ 716 /* Handle end cases */ \ 717 switch (_sfh_rem) { \ 718 case 3: hashv += get16bits (_sfh_key); \ 719 hashv ^= hashv << 16; \ 720 hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \ 721 hashv += hashv >> 11; \ 722 break; \ 723 case 2: hashv += get16bits (_sfh_key); \ 724 hashv ^= hashv << 11; \ 725 hashv += hashv >> 17; \ 726 break; \ 727 case 1: hashv += *_sfh_key; \ 728 hashv ^= hashv << 10; \ 729 hashv += hashv >> 1; \ 730 break; \ 731 default: ; \ 732 } \ 733 \ 734 /* Force "avalanching" of final 127 bits */ \ 735 hashv ^= hashv << 3; \ 736 hashv += hashv >> 5; \ 737 hashv ^= hashv << 4; \ 738 hashv += hashv >> 17; \ 739 hashv ^= hashv << 25; \ 740 hashv += hashv >> 6; \ 741 } while (0) 742 743 /* iterate over items in a known bucket to find desired item */ 744 #define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \ 745 do { \ 746 if ((head).hh_head != NULL) { \ 747 DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \ 748 } else { \ 749 (out) = NULL; \ 750 } \ 751 while ((out) != NULL) { \ 752 if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \ 753 if (HASH_KEYCMP((out)->hh.key, keyptr, keylen_in) == 0) { \ 754 break; \ 755 } \ 756 } \ 757 if ((out)->hh.hh_next != NULL) { \ 758 DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \ 759 } else { \ 760 (out) = NULL; \ 761 } \ 762 } \ 763 } while (0) 764 765 /* add an item to a bucket */ 766 #define HASH_ADD_TO_BKT(head,hh,addhh,oomed) \ 767 do { \ 768 UT_hash_bucket *_ha_head = &(head); \ 769 _ha_head->count++; \ 770 (addhh)->hh_next = _ha_head->hh_head; \ 771 (addhh)->hh_prev = NULL; \ 772 if (_ha_head->hh_head != NULL) { \ 773 _ha_head->hh_head->hh_prev = (addhh); \ 774 } \ 775 _ha_head->hh_head = (addhh); \ 776 if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \ 777 && !(addhh)->tbl->noexpand) { \ 778 HASH_EXPAND_BUCKETS(addhh,(addhh)->tbl, oomed); \ 779 IF_HASH_NONFATAL_OOM( \ 780 if (oomed) { \ 781 HASH_DEL_IN_BKT(head,addhh); \ 782 } \ 783 ) \ 784 } \ 785 } while (0) 786 787 /* remove an item from a given bucket */ 788 #define HASH_DEL_IN_BKT(head,delhh) \ 789 do { \ 790 UT_hash_bucket *_hd_head = &(head); \ 791 _hd_head->count--; \ 792 if (_hd_head->hh_head == (delhh)) { \ 793 _hd_head->hh_head = (delhh)->hh_next; \ 794 } \ 795 if ((delhh)->hh_prev) { \ 796 (delhh)->hh_prev->hh_next = (delhh)->hh_next; \ 797 } \ 798 if ((delhh)->hh_next) { \ 799 (delhh)->hh_next->hh_prev = (delhh)->hh_prev; \ 800 } \ 801 } while (0) 802 803 /* Bucket expansion has the effect of doubling the number of buckets 804 * and redistributing the items into the new buckets. Ideally the 805 * items will distribute more or less evenly into the new buckets 806 * (the extent to which this is true is a measure of the quality of 807 * the hash function as it applies to the key domain). 808 * 809 * With the items distributed into more buckets, the chain length 810 * (item count) in each bucket is reduced. Thus by expanding buckets 811 * the hash keeps a bound on the chain length. This bounded chain 812 * length is the essence of how a hash provides constant time lookup. 813 * 814 * The calculation of tbl->ideal_chain_maxlen below deserves some 815 * explanation. First, keep in mind that we're calculating the ideal 816 * maximum chain length based on the *new* (doubled) bucket count. 817 * In fractions this is just n/b (n=number of items,b=new num buckets). 818 * Since the ideal chain length is an integer, we want to calculate 819 * ceil(n/b). We don't depend on floating point arithmetic in this 820 * hash, so to calculate ceil(n/b) with integers we could write 821 * 822 * ceil(n/b) = (n/b) + ((n%b)?1:0) 823 * 824 * and in fact a previous version of this hash did just that. 825 * But now we have improved things a bit by recognizing that b is 826 * always a power of two. We keep its base 2 log handy (call it lb), 827 * so now we can write this with a bit shift and logical AND: 828 * 829 * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0) 830 * 831 */ 832 #define HASH_EXPAND_BUCKETS(hh,tbl,oomed) \ 833 do { \ 834 unsigned _he_bkt; \ 835 unsigned _he_bkt_i; \ 836 struct UT_hash_handle *_he_thh, *_he_hh_nxt; \ 837 UT_hash_bucket *_he_new_buckets, *_he_newbkt; \ 838 _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \ 839 sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \ 840 if (!_he_new_buckets) { \ 841 HASH_RECORD_OOM(oomed); \ 842 } else { \ 843 uthash_bzero(_he_new_buckets, \ 844 sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \ 845 (tbl)->ideal_chain_maxlen = \ 846 ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) + \ 847 ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \ 848 (tbl)->nonideal_items = 0; \ 849 for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) { \ 850 _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head; \ 851 while (_he_thh != NULL) { \ 852 _he_hh_nxt = _he_thh->hh_next; \ 853 HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt); \ 854 _he_newbkt = &(_he_new_buckets[_he_bkt]); \ 855 if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) { \ 856 (tbl)->nonideal_items++; \ 857 if (_he_newbkt->count > _he_newbkt->expand_mult * (tbl)->ideal_chain_maxlen) { \ 858 _he_newbkt->expand_mult++; \ 859 } \ 860 } \ 861 _he_thh->hh_prev = NULL; \ 862 _he_thh->hh_next = _he_newbkt->hh_head; \ 863 if (_he_newbkt->hh_head != NULL) { \ 864 _he_newbkt->hh_head->hh_prev = _he_thh; \ 865 } \ 866 _he_newbkt->hh_head = _he_thh; \ 867 _he_thh = _he_hh_nxt; \ 868 } \ 869 } \ 870 uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \ 871 (tbl)->num_buckets *= 2U; \ 872 (tbl)->log2_num_buckets++; \ 873 (tbl)->buckets = _he_new_buckets; \ 874 (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ? \ 875 ((tbl)->ineff_expands+1U) : 0U; \ 876 if ((tbl)->ineff_expands > 1U) { \ 877 (tbl)->noexpand = 1; \ 878 uthash_noexpand_fyi(tbl); \ 879 } \ 880 uthash_expand_fyi(tbl); \ 881 } \ 882 } while (0) 883 884 885 /* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */ 886 /* Note that HASH_SORT assumes the hash handle name to be hh. 887 * HASH_SRT was added to allow the hash handle name to be passed in. */ 888 #define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn) 889 #define HASH_SRT(hh,head,cmpfcn) \ 890 do { \ 891 unsigned _hs_i; \ 892 unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \ 893 struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \ 894 if (head != NULL) { \ 895 _hs_insize = 1; \ 896 _hs_looping = 1; \ 897 _hs_list = &((head)->hh); \ 898 while (_hs_looping != 0U) { \ 899 _hs_p = _hs_list; \ 900 _hs_list = NULL; \ 901 _hs_tail = NULL; \ 902 _hs_nmerges = 0; \ 903 while (_hs_p != NULL) { \ 904 _hs_nmerges++; \ 905 _hs_q = _hs_p; \ 906 _hs_psize = 0; \ 907 for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) { \ 908 _hs_psize++; \ 909 _hs_q = ((_hs_q->next != NULL) ? \ 910 HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ 911 if (_hs_q == NULL) { \ 912 break; \ 913 } \ 914 } \ 915 _hs_qsize = _hs_insize; \ 916 while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) { \ 917 if (_hs_psize == 0U) { \ 918 _hs_e = _hs_q; \ 919 _hs_q = ((_hs_q->next != NULL) ? \ 920 HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ 921 _hs_qsize--; \ 922 } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) { \ 923 _hs_e = _hs_p; \ 924 if (_hs_p != NULL) { \ 925 _hs_p = ((_hs_p->next != NULL) ? \ 926 HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \ 927 } \ 928 _hs_psize--; \ 929 } else if ((cmpfcn( \ 930 DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_p)), \ 931 DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_q)) \ 932 )) <= 0) { \ 933 _hs_e = _hs_p; \ 934 if (_hs_p != NULL) { \ 935 _hs_p = ((_hs_p->next != NULL) ? \ 936 HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \ 937 } \ 938 _hs_psize--; \ 939 } else { \ 940 _hs_e = _hs_q; \ 941 _hs_q = ((_hs_q->next != NULL) ? \ 942 HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ 943 _hs_qsize--; \ 944 } \ 945 if ( _hs_tail != NULL ) { \ 946 _hs_tail->next = ((_hs_e != NULL) ? \ 947 ELMT_FROM_HH((head)->hh.tbl, _hs_e) : NULL); \ 948 } else { \ 949 _hs_list = _hs_e; \ 950 } \ 951 if (_hs_e != NULL) { \ 952 _hs_e->prev = ((_hs_tail != NULL) ? \ 953 ELMT_FROM_HH((head)->hh.tbl, _hs_tail) : NULL); \ 954 } \ 955 _hs_tail = _hs_e; \ 956 } \ 957 _hs_p = _hs_q; \ 958 } \ 959 if (_hs_tail != NULL) { \ 960 _hs_tail->next = NULL; \ 961 } \ 962 if (_hs_nmerges <= 1U) { \ 963 _hs_looping = 0; \ 964 (head)->hh.tbl->tail = _hs_tail; \ 965 DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \ 966 } \ 967 _hs_insize *= 2U; \ 968 } \ 969 HASH_FSCK(hh, head, "HASH_SRT"); \ 970 } \ 971 } while (0) 972 973 /* This function selects items from one hash into another hash. 974 * The end result is that the selected items have dual presence 975 * in both hashes. There is no copy of the items made; rather 976 * they are added into the new hash through a secondary hash 977 * hash handle that must be present in the structure. */ 978 #define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \ 979 do { \ 980 unsigned _src_bkt, _dst_bkt; \ 981 void *_last_elt = NULL, *_elt; \ 982 UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \ 983 ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \ 984 if ((src) != NULL) { \ 985 for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \ 986 for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \ 987 _src_hh != NULL; \ 988 _src_hh = _src_hh->hh_next) { \ 989 _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \ 990 if (cond(_elt)) { \ 991 IF_HASH_NONFATAL_OOM( int _hs_oomed = 0; ) \ 992 _dst_hh = (UT_hash_handle*)(void*)(((char*)_elt) + _dst_hho); \ 993 _dst_hh->key = _src_hh->key; \ 994 _dst_hh->keylen = _src_hh->keylen; \ 995 _dst_hh->hashv = _src_hh->hashv; \ 996 _dst_hh->prev = _last_elt; \ 997 _dst_hh->next = NULL; \ 998 if (_last_elt_hh != NULL) { \ 999 _last_elt_hh->next = _elt; \ 1000 } \ 1001 if ((dst) == NULL) { \ 1002 DECLTYPE_ASSIGN(dst, _elt); \ 1003 HASH_MAKE_TABLE(hh_dst, dst, _hs_oomed); \ 1004 IF_HASH_NONFATAL_OOM( \ 1005 if (_hs_oomed) { \ 1006 uthash_nonfatal_oom(_elt); \ 1007 (dst) = NULL; \ 1008 continue; \ 1009 } \ 1010 ) \ 1011 } else { \ 1012 _dst_hh->tbl = (dst)->hh_dst.tbl; \ 1013 } \ 1014 HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \ 1015 HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], hh_dst, _dst_hh, _hs_oomed); \ 1016 (dst)->hh_dst.tbl->num_items++; \ 1017 IF_HASH_NONFATAL_OOM( \ 1018 if (_hs_oomed) { \ 1019 HASH_ROLLBACK_BKT(hh_dst, dst, _dst_hh); \ 1020 HASH_DELETE_HH(hh_dst, dst, _dst_hh); \ 1021 _dst_hh->tbl = NULL; \ 1022 uthash_nonfatal_oom(_elt); \ 1023 continue; \ 1024 } \ 1025 ) \ 1026 HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv); \ 1027 _last_elt = _elt; \ 1028 _last_elt_hh = _dst_hh; \ 1029 } \ 1030 } \ 1031 } \ 1032 } \ 1033 HASH_FSCK(hh_dst, dst, "HASH_SELECT"); \ 1034 } while (0) 1035 1036 #define HASH_CLEAR(hh,head) \ 1037 do { \ 1038 if ((head) != NULL) { \ 1039 HASH_BLOOM_FREE((head)->hh.tbl); \ 1040 uthash_free((head)->hh.tbl->buckets, \ 1041 (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \ 1042 uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ 1043 (head) = NULL; \ 1044 } \ 1045 } while (0) 1046 1047 #define HASH_OVERHEAD(hh,head) \ 1048 (((head) != NULL) ? ( \ 1049 (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \ 1050 ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \ 1051 sizeof(UT_hash_table) + \ 1052 (HASH_BLOOM_BYTELEN))) : 0U) 1053 1054 #ifdef NO_DECLTYPE 1055 #define HASH_ITER(hh,head,el,tmp) \ 1056 for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \ 1057 (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL))) 1058 #else 1059 #define HASH_ITER(hh,head,el,tmp) \ 1060 for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \ 1061 (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL))) 1062 #endif 1063 1064 /* obtain a count of items in the hash */ 1065 #define HASH_COUNT(head) HASH_CNT(hh,head) 1066 #define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U) 1067 1068 typedef struct UT_hash_bucket { 1069 struct UT_hash_handle *hh_head; 1070 unsigned count; 1071 1072 /* expand_mult is normally set to 0. In this situation, the max chain length 1073 * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If 1074 * the bucket's chain exceeds this length, bucket expansion is triggered). 1075 * However, setting expand_mult to a non-zero value delays bucket expansion 1076 * (that would be triggered by additions to this particular bucket) 1077 * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH. 1078 * (The multiplier is simply expand_mult+1). The whole idea of this 1079 * multiplier is to reduce bucket expansions, since they are expensive, in 1080 * situations where we know that a particular bucket tends to be overused. 1081 * It is better to let its chain length grow to a longer yet-still-bounded 1082 * value, than to do an O(n) bucket expansion too often. 1083 */ 1084 unsigned expand_mult; 1085 1086 } UT_hash_bucket; 1087 1088 /* random signature used only to find hash tables in external analysis */ 1089 #define HASH_SIGNATURE 0xa0111fe1u 1090 #define HASH_BLOOM_SIGNATURE 0xb12220f2u 1091 1092 typedef struct UT_hash_table { 1093 UT_hash_bucket *buckets; 1094 unsigned num_buckets, log2_num_buckets; 1095 unsigned num_items; 1096 struct UT_hash_handle *tail; /* tail hh in app order, for fast append */ 1097 ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */ 1098 1099 /* in an ideal situation (all buckets used equally), no bucket would have 1100 * more than ceil(#items/#buckets) items. that's the ideal chain length. */ 1101 unsigned ideal_chain_maxlen; 1102 1103 /* nonideal_items is the number of items in the hash whose chain position 1104 * exceeds the ideal chain maxlen. these items pay the penalty for an uneven 1105 * hash distribution; reaching them in a chain traversal takes >ideal steps */ 1106 unsigned nonideal_items; 1107 1108 /* ineffective expands occur when a bucket doubling was performed, but 1109 * afterward, more than half the items in the hash had nonideal chain 1110 * positions. If this happens on two consecutive expansions we inhibit any 1111 * further expansion, as it's not helping; this happens when the hash 1112 * function isn't a good fit for the key domain. When expansion is inhibited 1113 * the hash will still work, albeit no longer in constant time. */ 1114 unsigned ineff_expands, noexpand; 1115 1116 uint32_t signature; /* used only to find hash tables in external analysis */ 1117 #ifdef HASH_BLOOM 1118 uint32_t bloom_sig; /* used only to test bloom exists in external analysis */ 1119 uint8_t *bloom_bv; 1120 uint8_t bloom_nbits; 1121 #endif 1122 1123 } UT_hash_table; 1124 1125 typedef struct UT_hash_handle { 1126 struct UT_hash_table *tbl; 1127 void *prev; /* prev element in app order */ 1128 void *next; /* next element in app order */ 1129 struct UT_hash_handle *hh_prev; /* previous hh in bucket order */ 1130 struct UT_hash_handle *hh_next; /* next hh in bucket order */ 1131 const void *key; /* ptr to enclosing struct's key */ 1132 unsigned keylen; /* enclosing struct's key len */ 1133 unsigned hashv; /* result of hash-fcn(key) */ 1134 } UT_hash_handle; 1135 1136 #endif /* UTHASH_H */ 1137