/*
Copyright (c) 2003-2009, Troy D. Hanson   http://uthash.sourceforge.net
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef UTHASH_H
#define UTHASH_H

#include <inttypes.h> /* uint32_t etc */
#include <stddef.h>   /* ptrdiff_t */
#include <stdlib.h>   /* malloc, free */
#include <string.h>   /* memcmp, strlen */

#define UTHASH_VERSION 1.8

/* C++ requires extra stringent casting */
#if defined __cplusplus
#define TYPEOF(x) (typeof(x))
#else
#define TYPEOF(x)
#endif

/* hooks the application may redefine before first use of the HASH_* macros */
#define uthash_fatal(msg)  /* fatal error (out of memory, etc) */
#define uthash_malloc(sz) malloc(sz) /* malloc fcn */
#define uthash_free(ptr) free(ptr)   /* free fcn */

#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
#define uthash_expand_fyi(tbl)   /* can be defined to log expands */
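/* Override sketch (illustrative only): because these hooks expand at the
 * point where the HASH_* macros are used, an application can re-point them
 * after including this header and before the first HASH_ADD. "my_alloc",
 * "my_release" and "my_oom_handler" are hypothetical application functions,
 * not part of uthash:
 *
 *   #include "uthash.h"
 *   #undef uthash_malloc
 *   #undef uthash_free
 *   #undef uthash_fatal
 *   #define uthash_malloc(sz) my_alloc(sz)
 *   #define uthash_free(ptr) my_release(ptr)
 *   #define uthash_fatal(msg) my_oom_handler(msg)
 */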
#define HASH_INITIAL_NUM_BUCKETS 32     /* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5 /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10     /* expand when bucket chain reaches this length */

/* calculate the element whose hash handle address is hhp */
#define ELMT_FROM_HH(tbl, hhp) ((void *)(((char *)hhp) - (tbl)->hho))

#define HASH_FIND(hh, head, keyptr, keylen, out) \
    do { \
        unsigned _hf_bkt, _hf_hashv; \
        out = TYPEOF(out) NULL; \
        if (head) { \
            HASH_FCN(keyptr, keylen, (head)->hh.tbl->num_buckets, _hf_hashv, \
                     _hf_bkt); \
            if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) { \
                HASH_FIND_IN_BKT((head)->hh.tbl, hh, \
                                 (head)->hh.tbl->buckets[_hf_bkt], keyptr, \
                                 keylen, out); \
            } \
        } \
    } while (0)

#ifdef HASH_BLOOM
#define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM)
#define HASH_BLOOM_BYTELEN \
    ((HASH_BLOOM_BITLEN / 8) + ((HASH_BLOOM_BITLEN % 8) ? 1 : 0))
#define HASH_BLOOM_MAKE(tbl) \
    do { \
        (tbl)->bloom_nbits = HASH_BLOOM; \
        (tbl)->bloom_bv = (uint8_t *)uthash_malloc(HASH_BLOOM_BYTELEN); \
        if (!((tbl)->bloom_bv)) { \
            uthash_fatal("out of memory"); \
        } \
        memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
        (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
    } while (0)

#define HASH_BLOOM_FREE(tbl) \
    do { \
        uthash_free((tbl)->bloom_bv); \
    } while (0)

#define HASH_BLOOM_BITSET(bv, idx) (bv[(idx) / 8] |= (1U << ((idx) % 8)))
#define HASH_BLOOM_BITTEST(bv, idx) (bv[(idx) / 8] & (1U << ((idx) % 8)))

#define HASH_BLOOM_ADD(tbl, hashv) \
    HASH_BLOOM_BITSET((tbl)->bloom_bv, \
                      (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#define HASH_BLOOM_TEST(tbl, hashv) \
    HASH_BLOOM_BITTEST((tbl)->bloom_bv, \
                       (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#else
#define HASH_BLOOM_MAKE(tbl)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl, hashv)
#define HASH_BLOOM_TEST(tbl, hashv) (1)
#endif

#define HASH_MAKE_TABLE(hh, head) \
    do { \
        (head)->hh.tbl = \
            (UT_hash_table *)uthash_malloc(sizeof(UT_hash_table)); \
        if (!((head)->hh.tbl)) { \
            uthash_fatal("out of memory"); \
        } \
        memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
        (head)->hh.tbl->tail = &((head)->hh); \
        (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
        (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
        (head)->hh.tbl->hho = (char *)(&(head)->hh) - (char *)(head); \
        (head)->hh.tbl->buckets = (UT_hash_bucket *)uthash_malloc( \
            HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
        if (!(head)->hh.tbl->buckets) { \
            uthash_fatal("out of memory"); \
        } \
        memset((head)->hh.tbl->buckets, 0, \
               HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
        HASH_BLOOM_MAKE((head)->hh.tbl); \
        (head)->hh.tbl->signature = HASH_SIGNATURE; \
    } while (0)

#define HASH_ADD(hh, head, fieldname, keylen_in, add) \
    HASH_ADD_KEYPTR(hh, head, &add->fieldname, keylen_in, add)

#define HASH_ADD_KEYPTR(hh, head, keyptr, keylen_in, add) \
    do { \
        unsigned _ha_bkt; \
        (add)->hh.next = NULL; \
        (add)->hh.key = (char *)keyptr; \
        (add)->hh.keylen = keylen_in; \
        if (!(head)) { \
            head = (add); \
            (head)->hh.prev = NULL; \
            HASH_MAKE_TABLE(hh, head); \
        } else { \
            (head)->hh.tbl->tail->next = (add); \
            (add)->hh.prev = \
                ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
            (head)->hh.tbl->tail = &((add)->hh); \
        } \
        (head)->hh.tbl->num_items++; \
        (add)->hh.tbl = (head)->hh.tbl; \
        HASH_FCN(keyptr, keylen_in, (head)->hh.tbl->num_buckets, \
                 (add)->hh.hashv, _ha_bkt); \
        HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \
        HASH_BLOOM_ADD((head)->hh.tbl, (add)->hh.hashv); \
        HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
        HASH_FSCK(hh, head); \
    } while (0)

#define HASH_TO_BKT(hashv, num_bkts, bkt) \
    do { \
        bkt = ((hashv) & ((num_bkts)-1)); \
    } while (0)
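/* Usage sketch for HASH_ADD_KEYPTR (illustrative only; "item_t" and its
 * "name" field are hypothetical). HASH_ADD_KEYPTR is the general form used
 * when the key is reached through a pointer, e.g. a string stored outside
 * the structure:
 *
 *   typedef struct {
 *       const char *name;   // key, allocated elsewhere
 *       UT_hash_handle hh;  // makes this structure hashable
 *   } item_t;
 *
 *   item_t *items = NULL;
 *   item_t *it = (item_t *)malloc(sizeof(item_t));
 *   it->name = "alpha";
 *   HASH_ADD_KEYPTR(hh, items, it->name, strlen(it->name), it);
 */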
/* Delete "delptr" from the hash table, doing "the usual" patch-up of the
 * app-order doubly-linked list.
 *
 * The use of _hd_hh_del below deserves special explanation. These
 * references used to be expressed via (delptr), but that led to a bug when
 * the same symbol was used for both the head and the deletee, as in
 *     HASH_DELETE(hh, users, users);
 * We want that to work, but by repointing the head (users) below we forfeit
 * the ability to refer further to the deletee (users) in the patch-up
 * process. Solution: copy the deletee's handle address into the scratch
 * pointer _hd_hh_del, and make the later references through that scratch
 * pointer rather than through the repointed (users) symbol.
 */
#define HASH_DELETE(hh, head, delptr) \
    do { \
        unsigned _hd_bkt; \
        struct UT_hash_handle *_hd_hh_del; \
        if (((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL)) { \
            uthash_free((head)->hh.tbl->buckets); \
            HASH_BLOOM_FREE((head)->hh.tbl); \
            uthash_free((head)->hh.tbl); \
            head = NULL; \
        } else { \
            _hd_hh_del = &((delptr)->hh); \
            if ((delptr) == \
                ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail)) { \
                (head)->hh.tbl->tail = \
                    (UT_hash_handle *)((char *)((delptr)->hh.prev) + \
                                       (head)->hh.tbl->hho); \
            } \
            if ((delptr)->hh.prev) { \
                ((UT_hash_handle *)((char *)((delptr)->hh.prev) + \
                                    (head)->hh.tbl->hho)) \
                    ->next = (delptr)->hh.next; \
            } else { \
                head = TYPEOF(head)((delptr)->hh.next); \
            } \
            if (_hd_hh_del->next) { \
                ((UT_hash_handle *)((char *)_hd_hh_del->next + \
                                    (head)->hh.tbl->hho)) \
                    ->prev = _hd_hh_del->prev; \
            } \
            HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, \
                        _hd_bkt); \
            HASH_DEL_IN_BKT(hh, (head)->hh.tbl->buckets[_hd_bkt], \
                            _hd_hh_del); \
            (head)->hh.tbl->num_items--; \
        } \
        HASH_FSCK(hh, head); \
    } while (0)

/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL (see the sketch below) */
#define HASH_FIND_STR(head, findstr, out) \
    HASH_FIND(hh, head, findstr, strlen(findstr), out)
#define HASH_ADD_STR(head, strfield, add) \
    HASH_ADD(hh, head, strfield, strlen(add->strfield), add)
#define HASH_FIND_INT(head, findint, out) \
    HASH_FIND(hh, head, findint, sizeof(int), out)
#define HASH_ADD_INT(head, intfield, add) \
    HASH_ADD(hh, head, intfield, sizeof(int), add)
#define HASH_DEL(head, delptr) HASH_DELETE(hh, head, delptr)
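/* A minimal add/find/delete sketch using the convenience forms above
 * (illustrative only; "user_t" and its fields are hypothetical):
 *
 *   typedef struct {
 *       int id;             // key
 *       char name[32];
 *       UT_hash_handle hh;  // makes this structure hashable
 *   } user_t;
 *
 *   user_t *users = NULL;
 *
 *   user_t *u = (user_t *)malloc(sizeof(user_t));
 *   u->id = 42;
 *   HASH_ADD_INT(users, id, u);         // key field is "id"
 *
 *   user_t *found;
 *   int key = 42;
 *   HASH_FIND_INT(users, &key, found);  // found == u
 *
 *   if (found) {
 *       HASH_DEL(users, found);  // unlink from the hash
 *       free(found);             // uthash never frees elements itself
 *   }
 */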
/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is
 * defined. This is for uthash developers only; it compiles away if
 * HASH_DEBUG isn't defined.
 */
#ifdef HASH_DEBUG
#include <stdio.h> /* fprintf */
#define HASH_OOPS(...) \
    do { \
        fprintf(stderr, __VA_ARGS__); \
    } while (0)
#define HASH_FSCK(hh, head) \
    do { \
        unsigned _bkt_i; \
        unsigned _count, _bkt_count; \
        char *_prev; \
        struct UT_hash_handle *_thh; \
        if (head) { \
            _count = 0; \
            for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; \
                 _bkt_i++) { \
                _bkt_count = 0; \
                _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
                _prev = NULL; \
                while (_thh) { \
                    if (_prev != (char *)(_thh->hh_prev)) { \
                        HASH_OOPS("invalid hh_prev %p, actual %p\n", \
                                  _thh->hh_prev, _prev); \
                    } \
                    _bkt_count++; \
                    _prev = (char *)(_thh); \
                    _thh = _thh->hh_next; \
                } \
                _count += _bkt_count; \
                if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
                    HASH_OOPS("invalid bucket count %u, actual %u\n", \
                              (head)->hh.tbl->buckets[_bkt_i].count, \
                              _bkt_count); \
                } \
            } \
            if (_count != (head)->hh.tbl->num_items) { \
                HASH_OOPS("invalid hh item count %u, actual %u\n", \
                          (head)->hh.tbl->num_items, _count); \
            } \
            /* traverse hh in app order; \
               check next/prev integrity, count */ \
            _count = 0; \
            _prev = NULL; \
            _thh = &(head)->hh; \
            while (_thh) { \
                _count++; \
                if (_prev != (char *)(_thh->prev)) { \
                    HASH_OOPS("invalid prev %p, actual %p\n", _thh->prev, \
                              _prev); \
                } \
                _prev = (char *)ELMT_FROM_HH((head)->hh.tbl, _thh); \
                _thh = (_thh->next \
                            ? (UT_hash_handle *)((char *)(_thh->next) + \
                                                 (head)->hh.tbl->hho) \
                            : NULL); \
            } \
            if (_count != (head)->hh.tbl->num_items) { \
                HASH_OOPS("invalid app item count %u, actual %u\n", \
                          (head)->hh.tbl->num_items, _count); \
            } \
        } \
    } while (0)
#else
#define HASH_FSCK(hh, head)
#endif

/* When compiled with -DHASH_EMIT_KEYS=<fd>, length-prefixed keys are emitted
 * to the given file descriptor, for tuning the hash function. The app can
 * #include <unistd.h> to get the prototype for write(2). */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh, head, keyptr, fieldlen) \
    do { \
        unsigned _klen = fieldlen; \
        write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
        write(HASH_EMIT_KEYS, keyptr, fieldlen); \
    } while (0)
#else
#define HASH_EMIT_KEY(hh, head, keyptr, fieldlen)
#endif
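/* Key-emission sketch (illustrative; file and program names are arbitrary).
 * Building with the emitter bound to descriptor 3 and redirecting that
 * descriptor captures every key added at runtime:
 *
 *   cc -DHASH_EMIT_KEYS=3 -o app app.c
 *   ./app 3> keys.bin
 *
 * The resulting stream of length-prefixed keys can then be replayed against
 * the hash functions below to compare their bucket distributions.
 */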
/* default to Jenkins's hash unless overridden, e.g. -DHASH_FUNCTION=HASH_SAX */
#ifdef HASH_FUNCTION
#define HASH_FCN HASH_FUNCTION
#else
#define HASH_FCN HASH_JEN
#endif

/* The Bernstein hash function, used in Perl prior to v5.6 */
#define HASH_BER(key, keylen, num_bkts, hashv, bkt) \
    do { \
        unsigned _hb_keylen = keylen; \
        char *_hb_key = (char *)key; \
        (hashv) = 0; \
        while (_hb_keylen--) { \
            (hashv) = ((hashv)*33) + *_hb_key++; \
        } \
        bkt = (hashv) & (num_bkts - 1); \
    } while (0)
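/* Worked example of HASH_BER (for illustration): hashing the two-byte key
 * "ab" with 32 buckets starts at hashv = 0, then folds in each byte:
 *
 *   hashv = 0*33  + 'a' (97) =   97
 *   hashv = 97*33 + 'b' (98) = 3299
 *   bkt   = 3299 & 31        =    3
 *
 * Because num_bkts is always a power of two here, masking with
 * (num_bkts - 1) is equivalent to taking the hash modulo the bucket count.
 */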
/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
#define HASH_SAX(key, keylen, num_bkts, hashv, bkt) \
    do { \
        unsigned _sx_i; \
        char *_hs_key = (char *)key; \
        hashv = 0; \
        for (_sx_i = 0; _sx_i < keylen; _sx_i++) \
            hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
        bkt = hashv & (num_bkts - 1); \
    } while (0)

#define HASH_FNV(key, keylen, num_bkts, hashv, bkt) \
    do { \
        unsigned _fn_i; \
        char *_hf_key = (char *)key; \
        hashv = 2166136261UL; \
        for (_fn_i = 0; _fn_i < keylen; _fn_i++) \
            hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \
        bkt = hashv & (num_bkts - 1); \
    } while (0)

#define HASH_OAT(key, keylen, num_bkts, hashv, bkt) \
    do { \
        unsigned _ho_i; \
        char *_ho_key = (char *)key; \
        hashv = 0; \
        for (_ho_i = 0; _ho_i < keylen; _ho_i++) { \
            hashv += _ho_key[_ho_i]; \
            hashv += (hashv << 10); \
            hashv ^= (hashv >> 6); \
        } \
        hashv += (hashv << 3); \
        hashv ^= (hashv >> 11); \
        hashv += (hashv << 15); \
        bkt = hashv & (num_bkts - 1); \
    } while (0)

#define HASH_JEN_MIX(a, b, c) \
    do { \
        a -= b; a -= c; a ^= (c >> 13); \
        b -= c; b -= a; b ^= (a << 8); \
        c -= a; c -= b; c ^= (b >> 13); \
        a -= b; a -= c; a ^= (c >> 12); \
        b -= c; b -= a; b ^= (a << 16); \
        c -= a; c -= b; c ^= (b >> 5); \
        a -= b; a -= c; a ^= (c >> 3); \
        b -= c; b -= a; b ^= (a << 10); \
        c -= a; c -= b; c ^= (b >> 15); \
    } while (0)

#define HASH_JEN(key, keylen, num_bkts, hashv, bkt) \
    do { \
        unsigned _hj_i, _hj_j, _hj_k; \
        char *_hj_key = (char *)key; \
        hashv = 0xfeedbeef; \
        _hj_i = _hj_j = 0x9e3779b9; \
        _hj_k = keylen; \
        while (_hj_k >= 12) { \
            _hj_i += \
                (_hj_key[0] + ((unsigned)_hj_key[1] << 8) + \
                 ((unsigned)_hj_key[2] << 16) + ((unsigned)_hj_key[3] << 24)); \
            _hj_j += \
                (_hj_key[4] + ((unsigned)_hj_key[5] << 8) + \
                 ((unsigned)_hj_key[6] << 16) + ((unsigned)_hj_key[7] << 24)); \
            hashv += (_hj_key[8] + ((unsigned)_hj_key[9] << 8) + \
                      ((unsigned)_hj_key[10] << 16) + \
                      ((unsigned)_hj_key[11] << 24)); \
            \
            HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
            \
            _hj_key += 12; \
            _hj_k -= 12; \
        } \
        hashv += keylen; \
        switch (_hj_k) { \
        case 11: \
            hashv += ((unsigned)_hj_key[10] << 24); \
            /* FALLTHROUGH */ \
        case 10: \
            hashv += ((unsigned)_hj_key[9] << 16); \
            /* FALLTHROUGH */ \
        case 9: \
            hashv += ((unsigned)_hj_key[8] << 8); \
            /* FALLTHROUGH */ \
        case 8: \
            _hj_j += ((unsigned)_hj_key[7] << 24); \
            /* FALLTHROUGH */ \
        case 7: \
            _hj_j += ((unsigned)_hj_key[6] << 16); \
            /* FALLTHROUGH */ \
        case 6: \
            _hj_j += ((unsigned)_hj_key[5] << 8); \
            /* FALLTHROUGH */ \
        case 5: \
            _hj_j += _hj_key[4]; \
            /* FALLTHROUGH */ \
        case 4: \
            _hj_i += ((unsigned)_hj_key[3] << 24); \
            /* FALLTHROUGH */ \
        case 3: \
            _hj_i += ((unsigned)_hj_key[2] << 16); \
            /* FALLTHROUGH */ \
        case 2: \
            _hj_i += ((unsigned)_hj_key[1] << 8); \
            /* FALLTHROUGH */ \
        case 1: \
            _hj_i += _hj_key[0]; \
        } \
        HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
        bkt = hashv & (num_bkts - 1); \
    } while (0)

/* The Paul Hsieh hash function */
#undef get16bits
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) || \
    defined(_MSC_VER) || defined(__BORLANDC__) || defined(__TURBOC__)
#define get16bits(d) (*((const uint16_t *)(d)))
#endif

#if !defined(get16bits)
#define get16bits(d) \
    ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) + \
     (uint32_t)(((const uint8_t *)(d))[0]))
#endif

#define HASH_SFH(key, keylen, num_bkts, hashv, bkt) \
    do { \
        char *_sfh_key = (char *)key; \
        uint32_t _sfh_tmp, _sfh_len = keylen; \
        \
        int _sfh_rem = _sfh_len & 3; \
        _sfh_len >>= 2; \
        hashv = 0xcafebabe; \
        \
        /* Main loop */ \
        for (; _sfh_len > 0; _sfh_len--) { \
            hashv += get16bits(_sfh_key); \
            _sfh_tmp = (get16bits(_sfh_key + 2) << 11) ^ hashv; \
            hashv = (hashv << 16) ^ _sfh_tmp; \
            _sfh_key += 2 * sizeof(uint16_t); \
            hashv += hashv >> 11; \
        } \
        \
        /* Handle end cases */ \
        switch (_sfh_rem) { \
        case 3: \
            hashv += get16bits(_sfh_key); \
            hashv ^= hashv << 16; \
            hashv ^= _sfh_key[sizeof(uint16_t)] << 18; \
            hashv += hashv >> 11; \
            break; \
        case 2: \
            hashv += get16bits(_sfh_key); \
            hashv ^= hashv << 11; \
            hashv += hashv >> 17; \
            break; \
        case 1: \
            hashv += *_sfh_key; \
            hashv ^= hashv << 10; \
            hashv += hashv >> 1; \
        } \
        \
        /* Force "avalanching" of final 127 bits */ \
        hashv ^= hashv << 3; \
        hashv += hashv >> 5; \
        hashv ^= hashv << 4; \
        hashv += hashv >> 17; \
        hashv ^= hashv << 25; \
        hashv += hashv >> 6; \
        bkt = hashv & (num_bkts - 1); \
    } while (0)
#ifdef HASH_USING_NO_STRICT_ALIASING
/* MurmurHash exploits the tolerance of some CPUs (e.g. x86) for unaligned
 * reads. On other CPUs (e.g. Sparc) an unaligned read causes a bus error,
 * so MurmurHash comes in two versions: the faster unaligned one and the
 * slower aligned one. We only use the faster one on CPUs where we know it's
 * safe.
 *
 * Note the preprocessor's built-in defines can be listed using:
 *
 *   gcc -m64 -dM -E - < /dev/null   (gcc)
 *   cc -## a.c                      (Sun Studio; a.c is a simple test file)
 */
#if (defined(__i386__) || defined(__x86_64__))
#define HASH_MUR HASH_MUR_UNALIGNED
#else
#define HASH_MUR HASH_MUR_ALIGNED
#endif
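/* Opt-in sketch (illustrative): the MurmurHash variants below are compiled
 * only when the application requests them, and selected via HASH_FUNCTION,
 * e.g.
 *
 *   cc -DHASH_USING_NO_STRICT_ALIASING -DHASH_FUNCTION=HASH_MUR -fno-strict-aliasing -o app app.c
 *
 * Passing -fno-strict-aliasing (gcc) is advisable here because the unaligned
 * variant reads the key through a cast to uint32_t *, which violates the
 * strict-aliasing assumptions the optimizer otherwise makes.
 */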
/* Appleby's MurmurHash fast version for unaligned-tolerant archs like i386 */
#define HASH_MUR_UNALIGNED(key, keylen, num_bkts, hashv, bkt) \
    do { \
        const unsigned int _mur_m = 0x5bd1e995; \
        const int _mur_r = 24; \
        char *_mur_key = (char *)key; \
        uint32_t _mur_tmp, _mur_len = keylen; \
        hashv = 0xcafebabe ^ keylen; \
        \
        for (; _mur_len >= 4; _mur_len -= 4) { \
            _mur_tmp = *(uint32_t *)_mur_key; \
            _mur_tmp *= _mur_m; \
            _mur_tmp ^= _mur_tmp >> _mur_r; \
            _mur_tmp *= _mur_m; \
            hashv *= _mur_m; \
            hashv ^= _mur_tmp; \
            _mur_key += 4; \
        } \
        \
        switch (_mur_len) { \
        case 3: \
            hashv ^= _mur_key[2] << 16; \
            /* FALLTHROUGH */ \
        case 2: \
            hashv ^= _mur_key[1] << 8; \
            /* FALLTHROUGH */ \
        case 1: \
            hashv ^= _mur_key[0]; \
            hashv *= _mur_m; \
        } \
        \
        hashv ^= hashv >> 13; \
        hashv *= _mur_m; \
        hashv ^= hashv >> 15; \
        \
        bkt = hashv & (num_bkts - 1); \
    } while (0)

/* Appleby's MurmurHash version for alignment-sensitive archs like Sparc */
#define HASH_MUR_ALIGNED(key, keylen, num_bkts, hashv, bkt) \
    do { \
        const unsigned int _mur_m = 0x5bd1e995; \
        const int _mur_r = 24; \
        char *_mur_key = (char *)key; \
        uint32_t _mur_len = keylen; \
        int _mur_align = (int)_mur_key & 3; \
        hashv = 0xcafebabe ^ keylen; \
        \
        if (_mur_align && (_mur_len >= 4)) { \
            unsigned _mur_t = 0, _mur_d = 0; \
            switch (_mur_align) { \
            case 1: \
                _mur_t |= _mur_key[2] << 16; \
                /* FALLTHROUGH */ \
            case 2: \
                _mur_t |= _mur_key[1] << 8; \
                /* FALLTHROUGH */ \
            case 3: \
                _mur_t |= _mur_key[0]; \
            } \
            _mur_t <<= (8 * _mur_align); \
            _mur_key += 4 - _mur_align; \
            _mur_len -= 4 - _mur_align; \
            int _mur_sl = 8 * (4 - _mur_align); \
            int _mur_sr = 8 * _mur_align; \
            \
            for (; _mur_len >= 4; _mur_len -= 4) { \
                _mur_d = *(unsigned *)_mur_key; \
                _mur_t = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
                unsigned _mur_k = _mur_t; \
                _mur_k *= _mur_m; \
                _mur_k ^= _mur_k >> _mur_r; \
                _mur_k *= _mur_m; \
                hashv *= _mur_m; \
                hashv ^= _mur_k; \
                _mur_t = _mur_d; \
                _mur_key += 4; \
            } \
            _mur_d = 0; \
            if (_mur_len >= _mur_align) { \
                switch (_mur_align) { \
                case 3: \
                    _mur_d |= _mur_key[2] << 16; \
                    /* FALLTHROUGH */ \
                case 2: \
                    _mur_d |= _mur_key[1] << 8; \
                    /* FALLTHROUGH */ \
                case 1: \
                    _mur_d |= _mur_key[0]; \
                } \
                unsigned _mur_k = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
                _mur_k *= _mur_m; \
                _mur_k ^= _mur_k >> _mur_r; \
                _mur_k *= _mur_m; \
                hashv *= _mur_m; \
                hashv ^= _mur_k; \
                _mur_key += _mur_align; /* step past the bytes just mixed */ \
                _mur_len -= _mur_align; \
                \
                switch (_mur_len) { \
                case 3: \
                    hashv ^= _mur_key[2] << 16; \
                    /* FALLTHROUGH */ \
                case 2: \
                    hashv ^= _mur_key[1] << 8; \
                    /* FALLTHROUGH */ \
                case 1: \
                    hashv ^= _mur_key[0]; \
                    hashv *= _mur_m; \
                } \
            } else { \
                switch (_mur_len) { \
                case 3: \
                    _mur_d ^= _mur_key[2] << 16; \
                    /* FALLTHROUGH */ \
                case 2: \
                    _mur_d ^= _mur_key[1] << 8; \
                    /* FALLTHROUGH */ \
                case 1: \
                    _mur_d ^= _mur_key[0]; \
                    /* FALLTHROUGH */ \
                case 0: \
                    hashv ^= (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
                    hashv *= _mur_m; \
                } \
            } \
            \
            hashv ^= hashv >> 13; \
            hashv *= _mur_m; \
            hashv ^= hashv >> 15; \
        } else { \
            for (; _mur_len >= 4; _mur_len -= 4) { \
                unsigned _mur_k = *(unsigned *)_mur_key; \
                _mur_k *= _mur_m; \
                _mur_k ^= _mur_k >> _mur_r; \
                _mur_k *= _mur_m; \
                hashv *= _mur_m; \
                hashv ^= _mur_k; \
                _mur_key += 4; \
            } \
            switch (_mur_len) { \
            case 3: \
                hashv ^= _mur_key[2] << 16; \
                /* FALLTHROUGH */ \
            case 2: \
                hashv ^= _mur_key[1] << 8; \
                /* FALLTHROUGH */ \
            case 1: \
                hashv ^= _mur_key[0]; \
                hashv *= _mur_m; \
            } \
            \
            hashv ^= hashv >> 13; \
            hashv *= _mur_m; \
            hashv ^= hashv >> 15; \
        } \
        bkt = hashv & (num_bkts - 1); \
    } while (0)
#endif /* HASH_USING_NO_STRICT_ALIASING */

/* key comparison function; return 0 if keys equal */
#define HASH_KEYCMP(a, b, len) memcmp(a, b, len)

/* iterate over items in a known bucket to find desired item */
#define HASH_FIND_IN_BKT(tbl, hh, head, keyptr, keylen_in, out) \
    out = \
        TYPEOF(out)((head.hh_head) ? ELMT_FROM_HH(tbl, head.hh_head) : NULL); \
    while (out) { \
        if (out->hh.keylen == keylen_in) { \
            if ((HASH_KEYCMP(out->hh.key, keyptr, keylen_in)) == 0) \
                break; \
        } \
        out = TYPEOF(out)( \
            (out->hh.hh_next) ? ELMT_FROM_HH(tbl, out->hh.hh_next) : NULL); \
    }

/* add an item to a bucket */
#define HASH_ADD_TO_BKT(head, addhh) \
    do { \
        head.count++; \
        (addhh)->hh_next = head.hh_head; \
        (addhh)->hh_prev = NULL; \
        if (head.hh_head) { \
            (head).hh_head->hh_prev = (addhh); \
        } \
        (head).hh_head = addhh; \
        if (head.count >= \
                ((head.expand_mult + 1) * HASH_BKT_CAPACITY_THRESH) && \
            (addhh)->tbl->noexpand != 1) { \
            HASH_EXPAND_BUCKETS((addhh)->tbl); \
        } \
    } while (0)

/* remove an item from a given bucket */
#define HASH_DEL_IN_BKT(hh, head, hh_del) \
    (head).count--; \
    if ((head).hh_head == hh_del) { \
        (head).hh_head = hh_del->hh_next; \
    } \
    if (hh_del->hh_prev) { \
        hh_del->hh_prev->hh_next = hh_del->hh_next; \
    } \
    if (hh_del->hh_next) { \
        hh_del->hh_next->hh_prev = hh_del->hh_prev; \
    }

/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items, b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 *   ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 *   ceil(n/b) = (n>>lb) + ((n & (b-1)) ? 1:0)
 */
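/* Worked example of the ceil(n/b) shortcut (for illustration): suppose
 * n = 100 items are being rehashed into b = 64 buckets (lb = 6). Then
 *
 *   n >> lb      = 100 >> 6 = 1
 *   n & (b - 1)  = 100 & 63 = 36   (nonzero, so add 1)
 *   ceil(100/64) = 1 + 1    = 2
 *
 * which matches the floating-point result ceil(1.5625) = 2.
 */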
#define HASH_EXPAND_BUCKETS(tbl) \
    do { \
        unsigned _he_bkt; \
        unsigned _he_bkt_i; \
        struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
        UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
        _he_new_buckets = (UT_hash_bucket *)uthash_malloc( \
            2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
        if (!_he_new_buckets) { \
            uthash_fatal("out of memory"); \
        } \
        memset(_he_new_buckets, 0, \
               2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
        tbl->ideal_chain_maxlen = \
            (tbl->num_items >> (tbl->log2_num_buckets + 1)) + \
            ((tbl->num_items & ((tbl->num_buckets * 2) - 1)) ? 1 : 0); \
        tbl->nonideal_items = 0; \
        for (_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) { \
            _he_thh = tbl->buckets[_he_bkt_i].hh_head; \
            while (_he_thh) { \
                _he_hh_nxt = _he_thh->hh_next; \
                HASH_TO_BKT(_he_thh->hashv, tbl->num_buckets * 2, _he_bkt); \
                _he_newbkt = &(_he_new_buckets[_he_bkt]); \
                if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
                    tbl->nonideal_items++; \
                    _he_newbkt->expand_mult = \
                        _he_newbkt->count / tbl->ideal_chain_maxlen; \
                } \
                _he_thh->hh_prev = NULL; \
                _he_thh->hh_next = _he_newbkt->hh_head; \
                if (_he_newbkt->hh_head) \
                    _he_newbkt->hh_head->hh_prev = _he_thh; \
                _he_newbkt->hh_head = _he_thh; \
                _he_thh = _he_hh_nxt; \
            } \
        } \
        tbl->num_buckets *= 2; \
        tbl->log2_num_buckets++; \
        uthash_free(tbl->buckets); \
        tbl->buckets = _he_new_buckets; \
        tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) \
                                 ? (tbl->ineff_expands + 1) \
                                 : 0; \
        if (tbl->ineff_expands > 1) { \
            tbl->noexpand = 1; \
            uthash_noexpand_fyi(tbl); \
        } \
        uthash_expand_fyi(tbl); \
    } while (0)

/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort.
 * Note that HASH_SORT assumes the hash handle name to be hh;
 * HASH_SRT was added to allow the hash handle name to be passed in. */
#define HASH_SORT(head, cmpfcn) HASH_SRT(hh, head, cmpfcn)
#define HASH_SRT(hh, head, cmpfcn) \
    do { \
        unsigned _hs_i; \
        unsigned _hs_looping, _hs_nmerges, _hs_insize, _hs_psize, _hs_qsize; \
        struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
        if (head) { \
            _hs_insize = 1; \
            _hs_looping = 1; \
            _hs_list = &((head)->hh); \
            while (_hs_looping) { \
                _hs_p = _hs_list; \
                _hs_list = NULL; \
                _hs_tail = NULL; \
                _hs_nmerges = 0; \
                while (_hs_p) { \
                    _hs_nmerges++; \
                    _hs_q = _hs_p; \
                    _hs_psize = 0; \
                    for (_hs_i = 0; _hs_i < _hs_insize; _hs_i++) { \
                        _hs_psize++; \
                        _hs_q = (UT_hash_handle *)( \
                            (_hs_q->next) \
                                ? ((void *)((char *)(_hs_q->next) + \
                                            (head)->hh.tbl->hho)) \
                                : NULL); \
                        if (!(_hs_q)) \
                            break; \
                    } \
                    _hs_qsize = _hs_insize; \
                    while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q)) { \
                        if (_hs_psize == 0) { \
                            _hs_e = _hs_q; \
                            _hs_q = (UT_hash_handle *)( \
                                (_hs_q->next) \
                                    ? ((void *)((char *)(_hs_q->next) + \
                                                (head)->hh.tbl->hho)) \
                                    : NULL); \
                            _hs_qsize--; \
                        } else if ((_hs_qsize == 0) || !(_hs_q)) { \
                            _hs_e = _hs_p; \
                            _hs_p = (UT_hash_handle *)( \
                                (_hs_p->next) \
                                    ? ((void *)((char *)(_hs_p->next) + \
                                                (head)->hh.tbl->hho)) \
                                    : NULL); \
                            _hs_psize--; \
                        } else if ((cmpfcn( \
                                       TYPEOF(head)(ELMT_FROM_HH( \
                                           (head)->hh.tbl, _hs_p)), \
                                       TYPEOF(head)(ELMT_FROM_HH( \
                                           (head)->hh.tbl, _hs_q)))) <= 0) { \
                            _hs_e = _hs_p; \
                            _hs_p = (UT_hash_handle *)( \
                                (_hs_p->next) \
                                    ? ((void *)((char *)(_hs_p->next) + \
                                                (head)->hh.tbl->hho)) \
                                    : NULL); \
                            _hs_psize--; \
                        } else { \
                            _hs_e = _hs_q; \
                            _hs_q = (UT_hash_handle *)( \
                                (_hs_q->next) \
                                    ? ((void *)((char *)(_hs_q->next) + \
                                                (head)->hh.tbl->hho)) \
                                    : NULL); \
                            _hs_qsize--; \
                        } \
                        if (_hs_tail) { \
                            _hs_tail->next = \
                                ((_hs_e) \
                                     ? ELMT_FROM_HH((head)->hh.tbl, _hs_e) \
                                     : NULL); \
                        } else { \
                            _hs_list = _hs_e; \
                        } \
                        _hs_e->prev = \
                            ((_hs_tail) \
                                 ? ELMT_FROM_HH((head)->hh.tbl, _hs_tail) \
                                 : NULL); \
                        _hs_tail = _hs_e; \
                    } \
                    _hs_p = _hs_q; \
                } \
                _hs_tail->next = NULL; \
                if (_hs_nmerges <= 1) { \
                    _hs_looping = 0; \
                    (head)->hh.tbl->tail = _hs_tail; \
                    (head) = \
                        TYPEOF(head) ELMT_FROM_HH((head)->hh.tbl, _hs_list); \
                } \
                _hs_insize *= 2; \
            } \
            HASH_FSCK(hh, head); \
        } \
    } while (0)
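/* Usage sketch for HASH_SORT (illustrative; reuses the hypothetical user_t
 * from the sketch above). The comparison function follows qsort
 * conventions, returning <0, 0 or >0:
 *
 *   int by_id(user_t *a, user_t *b) {
 *       return (a->id - b->id);
 *   }
 *   ...
 *   HASH_SORT(users, by_id);
 *
 * Sorting rewrites only the prev/next application-order links; the bucket
 * chains, and therefore lookup behavior, are unaffected.
 */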
/* This macro selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * handle that must be present in the structure. */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
    do { \
        unsigned _src_bkt, _dst_bkt; \
        void *_last_elt = NULL, *_elt; \
        UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh = NULL; \
        ptrdiff_t _dst_hho = ((char *)(&(dst)->hh_dst) - (char *)(dst)); \
        if (src) { \
            for (_src_bkt = 0; _src_bkt < (src)->hh_src.tbl->num_buckets; \
                 _src_bkt++) { \
                for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
                     _src_hh; _src_hh = _src_hh->hh_next) { \
                    _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
                    if (cond(_elt)) { \
                        _dst_hh = \
                            (UT_hash_handle *)(((char *)_elt) + _dst_hho); \
                        _dst_hh->key = _src_hh->key; \
                        _dst_hh->keylen = _src_hh->keylen; \
                        _dst_hh->hashv = _src_hh->hashv; \
                        _dst_hh->prev = _last_elt; \
                        _dst_hh->next = NULL; \
                        if (_last_elt_hh) { \
                            _last_elt_hh->next = _elt; \
                        } \
                        if (!dst) { \
                            dst = TYPEOF(dst) _elt; \
                            HASH_MAKE_TABLE(hh_dst, dst); \
                        } else { \
                            _dst_hh->tbl = (dst)->hh_dst.tbl; \
                        } \
                        HASH_TO_BKT(_dst_hh->hashv, \
                                    _dst_hh->tbl->num_buckets, _dst_bkt); \
                        HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], \
                                        _dst_hh); \
                        (dst)->hh_dst.tbl->num_items++; \
                        _last_elt = _elt; \
                        _last_elt_hh = _dst_hh; \
                    } \
                } \
            } \
        } \
        HASH_FSCK(hh_dst, dst); \
    } while (0)
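/* Usage sketch for HASH_SELECT (illustrative; "account_t", "admins" and
 * "is_admin" are hypothetical). The structure carries a second hash handle
 * (here "ah") so each item can live in both hashes at once, and the
 * condition is a macro or function taking a void pointer:
 *
 *   typedef struct {
 *       int id;
 *       int admin;
 *       UT_hash_handle hh;  // handle for the "accounts" hash
 *       UT_hash_handle ah;  // secondary handle for the "admins" hash
 *   } account_t;
 *
 *   #define is_admin(x) (((account_t *)(x))->admin)
 *
 *   account_t *accounts = NULL;  // assume populated via HASH_ADD_INT
 *   account_t *admins = NULL;
 *   HASH_SELECT(ah, admins, hh, accounts, is_admin);
 */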
#define HASH_CLEAR(hh, head) \
    do { \
        if (head) { \
            uthash_free((head)->hh.tbl->buckets); \
            HASH_BLOOM_FREE((head)->hh.tbl); \
            uthash_free((head)->hh.tbl); \
            (head) = NULL; \
        } \
    } while (0)

/* obtain a count of items in the hash */
#define HASH_COUNT(head) HASH_CNT(hh, head)
#define HASH_CNT(hh, head) ((head) ? ((head)->hh.tbl->num_items) : 0)

#define HASH_FOREACH(key, head, type) \
    for (type *key = (type *)head; key != NULL; key = (type *)key->hh.next)
#define HASH_FOREACH_NL(key, head, type) \
    for (key = (type *)head; key != NULL; key = (type *)key->hh.next)

typedef struct UT_hash_bucket {
    struct UT_hash_handle *hh_head;
    unsigned count;

    /* expand_mult is normally set to 0. In this situation, the max chain
     * length threshold is enforced at its default value,
     * HASH_BKT_CAPACITY_THRESH. (If the bucket's chain exceeds this length,
     * bucket expansion is triggered.) However, setting expand_mult to a
     * non-zero value delays bucket expansion (that would be triggered by
     * additions to this particular bucket) until its chain length reaches a
     * *multiple* of HASH_BKT_CAPACITY_THRESH. (The multiplier is simply
     * expand_mult + 1.) The whole idea of this multiplier is to reduce
     * bucket expansions, since they are expensive, in situations where we
     * know that a particular bucket tends to be overused. It is better to
     * let its chain length grow to a longer yet still bounded value than to
     * do an O(n) bucket expansion too often.
     */
    unsigned expand_mult;

} UT_hash_bucket;
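/* Worked example of expand_mult (for illustration): with the default
 * HASH_BKT_CAPACITY_THRESH of 10, a bucket with expand_mult == 0 triggers
 * an expansion once its chain reaches 10 items. If HASH_EXPAND_BUCKETS
 * later sets that bucket's expand_mult to 2, additions to it trigger the
 * next expansion only when the chain reaches (2 + 1) * 10 = 30 items.
 */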
/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1
#define HASH_BLOOM_SIGNATURE 0xb12220f2

typedef struct UT_hash_table {
    UT_hash_bucket *buckets;
    unsigned num_buckets, log2_num_buckets;
    unsigned num_items;
    struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
    ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */

    /* in an ideal situation (all buckets used equally), no bucket would have
     * more than ceil(#items/#buckets) items. that's the ideal chain length. */
    unsigned ideal_chain_maxlen;

    /* nonideal_items is the number of items in the hash whose chain position
     * exceeds the ideal chain maxlen. these items pay the penalty for an
     * uneven hash distribution; reaching them in a chain traversal takes
     * more than the ideal number of steps. */
    unsigned nonideal_items;

    /* ineffective expands occur when a bucket doubling was performed, but
     * afterward, more than half the items in the hash had nonideal chain
     * positions. If this happens on two consecutive expansions we inhibit
     * any further expansion, as it's not helping; this happens when the
     * hash function isn't a good fit for the key domain. When expansion is
     * inhibited the hash will still work, albeit no longer in constant
     * time. */
    unsigned ineff_expands, noexpand;

    uint32_t signature; /* used only to find hash tables in external analysis */
#ifdef HASH_BLOOM
    uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
    uint8_t *bloom_bv;
    char bloom_nbits;
#endif

} UT_hash_table;

typedef struct UT_hash_handle {
    struct UT_hash_table *tbl;
    void *prev;                     /* prev element in app order      */
    void *next;                     /* next element in app order      */
    struct UT_hash_handle *hh_prev; /* previous hh in bucket order    */
    struct UT_hash_handle *hh_next; /* next hh in bucket order        */
    void *key;                      /* ptr to enclosing struct's key  */
    unsigned keylen;                /* enclosing struct's key len     */
    unsigned hashv;                 /* result of hash-fcn(key)        */
} UT_hash_handle;

#endif /* UTHASH_H */

// kate: indent-mode cstyle; space-indent on; indent-width 0;