1 /*
2  * $Id: linkhash.c,v 1.4 2006/01/26 02:16:28 mclark Exp $
3  *
4  * Copyright (c) 2004, 2005 Metaparadigm Pte. Ltd.
5  * Michael Clark <michael@metaparadigm.com>
6  * Copyright (c) 2009 Hewlett-Packard Development Company, L.P.
7  *
8  * This library is free software; you can redistribute it and/or modify
9  * it under the terms of the MIT license. See COPYING for details.
10  *
11  */
12 
13 #include "config.h"
14 
15 #include <stdio.h>
16 #include <string.h>
17 #include <stdlib.h>
18 #include <stdarg.h>
19 #include <stddef.h>
20 #include <limits.h>
21 
22 #ifdef HAVE_ENDIAN_H
23 # include <endian.h>    /* attempt to define endianness */
24 #endif
25 
26 #if defined(_MSC_VER) || defined(__MINGW32__)
27 # define WIN32_LEAN_AND_MEAN
28 # include <windows.h>   /* Get InterlockedCompareExchange */
29 #endif
30 
31 #include "random_seed.h"
32 #include "linkhash.h"
33 
34 /* hash functions */
35 static unsigned long lh_char_hash(const void *k);
36 static unsigned long lh_perllike_str_hash(const void *k);
37 static lh_hash_fn *char_hash_fn = lh_char_hash;
38 
39 /* comparison functions */
40 int lh_char_equal(const void *k1, const void *k2);
41 int lh_ptr_equal(const void *k1, const void *k2);
42 
43 int
json_global_set_string_hash(const int h)44 json_global_set_string_hash(const int h)
45 {
46 	switch(h) {
47 	case JSON_C_STR_HASH_DFLT:
48 		char_hash_fn = lh_char_hash;
49 		break;
50 	case JSON_C_STR_HASH_PERLLIKE:
51 		char_hash_fn = lh_perllike_str_hash;
52 		break;
53 	default:
54 		return -1;
55 	}
56 	return 0;
57 }
58 
lh_ptr_hash(const void * k)59 static unsigned long lh_ptr_hash(const void *k)
60 {
61 	/* CAW: refactored to be 64bit nice */
62 	return (unsigned long)((((ptrdiff_t)k * LH_PRIME) >> 4) & ULONG_MAX);
63 }
64 
lh_ptr_equal(const void * k1,const void * k2)65 int lh_ptr_equal(const void *k1, const void *k2)
66 {
67 	return (k1 == k2);
68 }
69 
70 /*
71  * hashlittle from lookup3.c, by Bob Jenkins, May 2006, Public Domain.
72  * http://burtleburtle.net/bob/c/lookup3.c
73  * minor modifications to make functions static so no symbols are exported
 * minor modifications to compile with -Werror
75  */
76 
77 /*
78 -------------------------------------------------------------------------------
79 lookup3.c, by Bob Jenkins, May 2006, Public Domain.
80 
81 These are functions for producing 32-bit hashes for hash table lookup.
82 hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
83 are externally useful functions.  Routines to test the hash are included
84 if SELF_TEST is defined.  You can use this free for any purpose.  It's in
85 the public domain.  It has no warranty.
86 
87 You probably want to use hashlittle().  hashlittle() and hashbig()
hash byte arrays.  hashlittle() is faster than hashbig() on
89 little-endian machines.  Intel and AMD are little-endian machines.
90 On second thought, you probably want hashlittle2(), which is identical to
91 hashlittle() except it returns two 32-bit hashes for the price of one.
92 You could implement hashbig2() if you wanted but I haven't bothered here.
93 
94 If you want to find a hash of, say, exactly 7 integers, do
95   a = i1;  b = i2;  c = i3;
96   mix(a,b,c);
97   a += i4; b += i5; c += i6;
98   mix(a,b,c);
99   a += i7;
100   final(a,b,c);
101 then use c as the hash value.  If you have a variable length array of
102 4-byte integers to hash, use hashword().  If you have a byte array (like
103 a character string), use hashlittle().  If you have several byte arrays, or
104 a mix of things, see the comments above hashlittle().
105 
106 Why is this so big?  I read 12 bytes at a time into 3 4-byte integers,
107 then mix those integers.  This is fast (you can do a lot more thorough
108 mixing with 12*3 instructions on 3 integers than you can with 3 instructions
109 on 1 byte), but shoehorning those bytes into integers efficiently is messy.
110 -------------------------------------------------------------------------------
111 */
112 
113 /*
114  * My best guess at if you are big-endian or little-endian.  This may
115  * need adjustment.
116  */
/* NOTE: if neither branch matches, both macros are 0 and hashlittle()
 * always takes its portable byte-at-a-time path. */
#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
     __BYTE_ORDER == __LITTLE_ENDIAN) || \
    (defined(i386) || defined(__i386__) || defined(__i486__) || \
     defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
# define HASH_LITTLE_ENDIAN 1
# define HASH_BIG_ENDIAN 0
#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
       __BYTE_ORDER == __BIG_ENDIAN) || \
      (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 1
#else
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 0
#endif
132 
#define hashsize(n) ((uint32_t)1<<(n))           /* table size for n-bit hashes */
#define hashmask(n) (hashsize(n)-1)              /* mask keeping the low n bits */
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))  /* rotate x left by k (0<k<32) */
136 
137 /*
138 -------------------------------------------------------------------------------
139 mix -- mix 3 32-bit values reversibly.
140 
141 This is reversible, so any information in (a,b,c) before mix() is
142 still in (a,b,c) after mix().
143 
144 If four pairs of (a,b,c) inputs are run through mix(), or through
145 mix() in reverse, there are at least 32 bits of the output that
146 are sometimes the same for one pair and different for another pair.
147 This was tested for:
148 * pairs that differed by one bit, by two bits, in any combination
149   of top bits of (a,b,c), or in any combination of bottom bits of
150   (a,b,c).
151 * "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed
152   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
153   is commonly produced by subtraction) look like a single 1-bit
154   difference.
155 * the base values were pseudorandom, all zero but one bit set, or
156   all zero plus a counter that starts at zero.
157 
158 Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
159 satisfy this are
160     4  6  8 16 19  4
161     9 15  3 18 27 15
162    14  9  3  7 17  3
163 Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
164 for "differ" defined as + with a one-bit base and a two-bit delta.  I
165 used http://burtleburtle.net/bob/hash/avalanche.html to choose
166 the operations, constants, and arrangements of the variables.
167 
168 This does not achieve avalanche.  There are input bits of (a,b,c)
169 that fail to affect some output bits of (a,b,c), especially of a.  The
170 most thoroughly mixed value is c, but it doesn't really even achieve
171 avalanche in c.
172 
173 This allows some parallelism.  Read-after-writes are good at doubling
174 the number of bits affected, so the goal of mixing pulls in the opposite
175 direction as the goal of parallelism.  I did what I could.  Rotates
176 seem to cost as much as shifts on every machine I could lay my hands
177 on, and rotates are much kinder to the top and bottom bits, so I used
178 rotates.
179 -------------------------------------------------------------------------------
180 */
/* mix(): reversibly mix three 32-bit lanes; see the analysis above. */
#define mix(a,b,c) \
{ \
  a -= c;  a ^= rot(c, 4);  c += b; \
  b -= a;  b ^= rot(a, 6);  a += c; \
  c -= b;  c ^= rot(b, 8);  b += a; \
  a -= c;  a ^= rot(c,16);  c += b; \
  b -= a;  b ^= rot(a,19);  a += c; \
  c -= b;  c ^= rot(b, 4);  b += a; \
}
190 
191 /*
192 -------------------------------------------------------------------------------
193 final -- final mixing of 3 32-bit values (a,b,c) into c
194 
195 Pairs of (a,b,c) values differing in only a few bits will usually
196 produce values of c that look totally different.  This was tested for
197 * pairs that differed by one bit, by two bits, in any combination
198   of top bits of (a,b,c), or in any combination of bottom bits of
199   (a,b,c).
200 * "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed
201   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
202   is commonly produced by subtraction) look like a single 1-bit
203   difference.
204 * the base values were pseudorandom, all zero but one bit set, or
205   all zero plus a counter that starts at zero.
206 
207 These constants passed:
208  14 11 25 16 4 14 24
209  12 14 25 16 4 14 24
210 and these came close:
211   4  8 15 26 3 22 24
212  10  8 15 26 3 22 24
213  11  8 15 26 3 22 24
214 -------------------------------------------------------------------------------
215 */
/* final(): collapse (a,b,c) into c for the return value; see above. */
#define final(a,b,c) \
{ \
  c ^= b; c -= rot(b,14); \
  a ^= c; a -= rot(c,11); \
  b ^= a; b -= rot(a,25); \
  c ^= b; c -= rot(b,16); \
  a ^= c; a -= rot(c,4);  \
  b ^= a; b -= rot(a,14); \
  c ^= b; c -= rot(b,24); \
}
226 
227 
228 /*
229 -------------------------------------------------------------------------------
230 hashlittle() -- hash a variable-length key into a 32-bit value
231   k       : the key (the unaligned variable-length array of bytes)
232   length  : the length of the key, counting by bytes
233   initval : can be any 4-byte value
234 Returns a 32-bit value.  Every bit of the key affects every bit of
235 the return value.  Two keys differing by one or two bits will have
236 totally different hash values.
237 
238 The best hash table sizes are powers of 2.  There is no need to do
239 mod a prime (mod is sooo slow!).  If you need less than 32 bits,
240 use a bitmask.  For example, if you need only 10 bits, do
241   h = (h & hashmask(10));
242 In which case, the hash table should have hashsize(10) elements.
243 
244 If you are hashing n strings (uint8_t **)k, do it like this:
245   for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
246 
247 By Bob Jenkins, 2006.  bob_jenkins@burtleburtle.net.  You may use this
248 code any way you wish, private, educational, or commercial.  It's free.
249 
250 Use for hash table lookup, or anything where one collision in 2^^32 is
251 acceptable.  Do NOT use for cryptographic purposes.
252 -------------------------------------------------------------------------------
253 */
254 
/*
 * Bob Jenkins' lookup3 hash (see the block comment above).
 *   key     : bytes to hash; need not be aligned
 *   length  : number of bytes
 *   initval : seed folded into the result
 * Returns a 32-bit hash.  NOT for cryptographic use.
 * Picks a 4-byte-, 2-byte-, or 1-byte-at-a-time path based on the
 * pointer's alignment and HASH_LITTLE_ENDIAN.
 */
static uint32_t hashlittle( const void *key, size_t length, uint32_t initval)
{
  uint32_t a,b,c;                                          /* internal state */
  union { const void *ptr; size_t i; } u;     /* needed for Mac Powerbook G4 */

  /* Set up the internal state */
  a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

  u.ptr = key;
  if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
    const uint32_t *k = (const uint32_t *)key;         /* read 32-bit chunks */

    /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      b += k[1];
      c += k[2];
      mix(a,b,c);
      length -= 12;
      k += 3;
    }

    /*----------------------------- handle the last (probably partial) block */
    /*
     * "k[2]&0xffffff" actually reads beyond the end of the string, but
     * then masks off the part it's not allowed to read.  Because the
     * string is aligned, the masked-off tail is in the same word as the
     * rest of the string.  Every machine with memory protection I've seen
     * does it on word boundaries, so is OK with this.  But VALGRIND will
     * still catch it and complain.  The masking trick does make the hash
     * noticeably faster for short strings (like English words).
     * AddressSanitizer is similarly picky about overrunning
     * the buffer. (http://clang.llvm.org/docs/AddressSanitizer.html
     */
#ifdef VALGRIND
#    define PRECISE_MEMORY_ACCESS 1
#elif defined(__SANITIZE_ADDRESS__) /* GCC's ASAN */
#    define PRECISE_MEMORY_ACCESS 1
#elif defined(__has_feature)
#  if __has_feature(address_sanitizer) /* Clang's ASAN */
#    define PRECISE_MEMORY_ACCESS 1
#  endif
#endif
#ifndef PRECISE_MEMORY_ACCESS

    switch(length)
    {
    case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
    case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
    case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
    case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
    case 8 : b+=k[1]; a+=k[0]; break;
    case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
    case 6 : b+=k[1]&0xffff; a+=k[0]; break;
    case 5 : b+=k[1]&0xff; a+=k[0]; break;
    case 4 : a+=k[0]; break;
    case 3 : a+=k[0]&0xffffff; break;
    case 2 : a+=k[0]&0xffff; break;
    case 1 : a+=k[0]&0xff; break;
    case 0 : return c;              /* zero length strings require no mixing */
    }

#else /* make valgrind happy: read the tail strictly byte-by-byte */

    const uint8_t  *k8 = (const uint8_t *)k;
    switch(length)
    {
    case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
    case 11: c+=((uint32_t)k8[10])<<16;  /* fall through */
    case 10: c+=((uint32_t)k8[9])<<8;    /* fall through */
    case 9 : c+=k8[8];                   /* fall through */
    case 8 : b+=k[1]; a+=k[0]; break;
    case 7 : b+=((uint32_t)k8[6])<<16;   /* fall through */
    case 6 : b+=((uint32_t)k8[5])<<8;    /* fall through */
    case 5 : b+=k8[4];                   /* fall through */
    case 4 : a+=k[0]; break;
    case 3 : a+=((uint32_t)k8[2])<<16;   /* fall through */
    case 2 : a+=((uint32_t)k8[1])<<8;    /* fall through */
    case 1 : a+=k8[0]; break;
    case 0 : return c;
    }

#endif /* !valgrind */

  } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
    const uint16_t *k = (const uint16_t *)key;         /* read 16-bit chunks */
    const uint8_t  *k8;

    /*--------------- all but last block: aligned reads and different mixing */
    while (length > 12)
    {
      a += k[0] + (((uint32_t)k[1])<<16);
      b += k[2] + (((uint32_t)k[3])<<16);
      c += k[4] + (((uint32_t)k[5])<<16);
      mix(a,b,c);
      length -= 12;
      k += 6;
    }

    /*----------------------------- handle the last (probably partial) block */
    k8 = (const uint8_t *)k;
    switch(length)
    {
    case 12: c+=k[4]+(((uint32_t)k[5])<<16);
             b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 11: c+=((uint32_t)k8[10])<<16;     /* fall through */
    case 10: c+=k[4];
             b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 9 : c+=k8[8];                      /* fall through */
    case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 7 : b+=((uint32_t)k8[6])<<16;      /* fall through */
    case 6 : b+=k[2];
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 5 : b+=k8[4];                      /* fall through */
    case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 3 : a+=((uint32_t)k8[2])<<16;      /* fall through */
    case 2 : a+=k[0];
             break;
    case 1 : a+=k8[0];
             break;
    case 0 : return c;                     /* zero length requires no mixing */
    }

  } else {                        /* need to read the key one byte at a time */
    const uint8_t *k = (const uint8_t *)key;

    /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      a += ((uint32_t)k[1])<<8;
      a += ((uint32_t)k[2])<<16;
      a += ((uint32_t)k[3])<<24;
      b += k[4];
      b += ((uint32_t)k[5])<<8;
      b += ((uint32_t)k[6])<<16;
      b += ((uint32_t)k[7])<<24;
      c += k[8];
      c += ((uint32_t)k[9])<<8;
      c += ((uint32_t)k[10])<<16;
      c += ((uint32_t)k[11])<<24;
      mix(a,b,c);
      length -= 12;
      k += 12;
    }

    /*-------------------------------- last block: affect all 32 bits of (c) */
    switch(length)                   /* all the case statements fall through */
    {
    case 12: c+=((uint32_t)k[11])<<24; /* FALLTHRU */
    case 11: c+=((uint32_t)k[10])<<16; /* FALLTHRU */
    case 10: c+=((uint32_t)k[9])<<8; /* FALLTHRU */
    case 9 : c+=k[8]; /* FALLTHRU */
    case 8 : b+=((uint32_t)k[7])<<24; /* FALLTHRU */
    case 7 : b+=((uint32_t)k[6])<<16; /* FALLTHRU */
    case 6 : b+=((uint32_t)k[5])<<8; /* FALLTHRU */
    case 5 : b+=k[4]; /* FALLTHRU */
    case 4 : a+=((uint32_t)k[3])<<24; /* FALLTHRU */
    case 3 : a+=((uint32_t)k[2])<<16; /* FALLTHRU */
    case 2 : a+=((uint32_t)k[1])<<8; /* FALLTHRU */
    case 1 : a+=k[0];
             break;
    case 0 : return c;
    }
  }

  final(a,b,c);
  return c;
}
433 
/* a simple hash function similar to what perl does for strings.
 * for good results, the string should not be excessively large.
 */
/* Perl-style string hash: seed 1, multiplier 33 (djb2-like).
 * Accumulates in an unsigned int, so the result wraps at UINT_MAX.
 */
static unsigned long lh_perllike_str_hash(const void *k)
{
    const char *s = (const char *)k;
    unsigned h = 1;

    for (; *s != '\0'; s++)
        h = h * 33 + *s;

    return h;
}
447 
/* Hash a NUL-terminated string with hashlittle(), seeded by a lazily
 * initialized per-process random value.  The seed is published with a
 * compare-and-swap where the platform provides one, so concurrent
 * first callers agree on a single seed.
 */
static unsigned long lh_char_hash(const void *k)
{
/* InterlockedCompareExchange on Windows operates on LONG */
#if defined _MSC_VER || defined __MINGW32__
#define RANDOM_SEED_TYPE LONG
#else
#define RANDOM_SEED_TYPE int
#endif
	static volatile RANDOM_SEED_TYPE random_seed = -1;

	if (random_seed == -1) {
		RANDOM_SEED_TYPE seed;
		/* we can't use -1 as it is the uninitialized sentinel */
		while ((seed = json_c_get_random_seed()) == -1);
/* pick the __sync builtin matching sizeof(int), if GCC provides one */
#if SIZEOF_INT == 8 && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
#define USE_SYNC_COMPARE_AND_SWAP 1
#endif
#if SIZEOF_INT == 4 && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
#define USE_SYNC_COMPARE_AND_SWAP 1
#endif
#if SIZEOF_INT == 2 && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2
#define USE_SYNC_COMPARE_AND_SWAP 1
#endif
#if defined USE_SYNC_COMPARE_AND_SWAP
		/* first writer wins; losers re-read the published seed */
		(void)__sync_val_compare_and_swap(&random_seed, -1, seed);
#elif defined _MSC_VER || defined __MINGW32__
		InterlockedCompareExchange(&random_seed, seed, -1);
#else
//#warning "racy random seed initialization if used by multiple threads"
		random_seed = seed; /* potentially racy */
#endif
	}

	return hashlittle((const char*)k, strlen((const char*)k), random_seed);
}
482 
/* String keys are equal iff their NUL-terminated contents match. */
int lh_char_equal(const void *k1, const void *k2)
{
	const char *s1 = (const char *)k1;
	const char *s2 = (const char *)k2;

	return strcmp(s1, s2) == 0;
}
487 
/* Create a linkhash table with <size> slots.
 * free_fn, if non-NULL, is invoked on each entry when it is deleted or
 * when the table is freed.  hash_fn/equal_fn define key semantics.
 * Returns NULL on invalid size or allocation failure; the caller owns
 * the result and must release it with lh_table_free().
 */
struct lh_table* lh_table_new(int size,
			      lh_entry_free_fn *free_fn,
			      lh_hash_fn *hash_fn,
			      lh_equal_fn *equal_fn)
{
	int i;
	struct lh_table *t;

	/* A non-positive size would later make the probe arithmetic
	 * (h % t->size) undefined and calloc() meaningless. */
	if (size <= 0)
		return NULL;

	t = (struct lh_table*)calloc(1, sizeof(struct lh_table));
	if (!t)
		return NULL;

	t->count = 0;
	t->size = size;
	t->table = (struct lh_entry*)calloc(size, sizeof(struct lh_entry));
	if (!t->table)
	{
		free(t);
		return NULL;
	}
	t->free_fn = free_fn;
	t->hash_fn = hash_fn;
	t->equal_fn = equal_fn;
	/* mark every slot unused */
	for(i = 0; i < size; i++) t->table[i].k = LH_EMPTY;
	return t;
}
514 
/* Convenience constructor: table keyed by NUL-terminated strings,
 * hashed with the globally selected string hash function. */
struct lh_table* lh_kchar_table_new(int size,
				    lh_entry_free_fn *free_fn)
{
	struct lh_table *t;

	t = lh_table_new(size, free_fn, char_hash_fn, lh_char_equal);
	return t;
}
520 
/* Convenience constructor: table keyed by raw pointer identity. */
struct lh_table* lh_kptr_table_new(int size,
				   lh_entry_free_fn *free_fn)
{
	struct lh_table *t;

	t = lh_table_new(size, free_fn, lh_ptr_hash, lh_ptr_equal);
	return t;
}
526 
/* Rebuild table t with new_size slots, rehashing every entry while
 * preserving insertion order (the head..tail list).
 * Returns 0 on success; on failure returns -1 and leaves t untouched.
 */
int lh_table_resize(struct lh_table *t, int new_size)
{
	struct lh_table *new_t;
	struct lh_entry *ent;

	/* Scratch table borrowing t's hash/equal functions; free_fn is
	 * NULL so freeing it on error won't destroy the shared keys/values. */
	new_t = lh_table_new(new_size, NULL, t->hash_fn, t->equal_fn);
	if (new_t == NULL)
		return -1;

	for (ent = t->head; ent != NULL; ent = ent->next)
	{
		unsigned long h = lh_get_hash(new_t, ent->k);
		unsigned int opts = 0;
		if (ent->k_is_constant)
			opts = JSON_C_OBJECT_KEY_IS_CONSTANT;
		if (lh_table_insert_w_hash(new_t, ent->k, ent->v, h, opts) != 0)
		{
			lh_table_free(new_t);
			return -1;
		}
	}
	/* Steal the scratch table's slot array and list pointers, then
	 * discard its (now empty) shell with plain free(). */
	free(t->table);
	t->table = new_t->table;
	t->size = new_size;
	t->head = new_t->head;
	t->tail = new_t->tail;
	free(new_t);

	return 0;
}
557 
lh_table_free(struct lh_table * t)558 void lh_table_free(struct lh_table *t)
559 {
560 	struct lh_entry *c;
561 	if(t->free_fn) {
562 		for(c = t->head; c != NULL; c = c->next)
563 			t->free_fn(c);
564 	}
565 	free(t->table);
566 	free(t);
567 }
568 
569 
/* Insert key k with value v using precomputed hash h.
 * opts may carry JSON_C_OBJECT_KEY_IS_CONSTANT, recorded on the entry.
 * Doubles the table once count reaches size * LH_LOAD_FACTOR.
 * Returns 0 on success, -1 if a required resize fails.
 * NOTE(review): duplicate keys are NOT detected here — the probe only
 * stops at an empty/freed slot, so inserting an existing key adds a
 * second entry.
 */
int lh_table_insert_w_hash(struct lh_table *t, const void *k, const void *v, const unsigned long h, const unsigned opts)
{
	unsigned long n;

	/* grow before exceeding the load factor */
	if (t->count >= t->size * LH_LOAD_FACTOR)
		if (lh_table_resize(t, t->size * 2) != 0)
			return -1;

	n = h % t->size;

	/* linear probe, wrapping, for an empty or tombstoned slot */
	while( 1 ) {
		if(t->table[n].k == LH_EMPTY || t->table[n].k == LH_FREED) break;
		if ((int)++n == t->size) n = 0;
	}

	t->table[n].k = k;
	t->table[n].k_is_constant = (opts & JSON_C_OBJECT_KEY_IS_CONSTANT);
	t->table[n].v = v;
	t->count++;

	/* append to the insertion-ordered doubly-linked list */
	if(t->head == NULL) {
		t->head = t->tail = &t->table[n];
		t->table[n].next = t->table[n].prev = NULL;
	} else {
		t->tail->next = &t->table[n];
		t->table[n].prev = t->tail;
		t->table[n].next = NULL;
		t->tail = &t->table[n];
	}

	return 0;
}
/* Insert (k, v), computing the hash with the table's hash_fn. */
int lh_table_insert(struct lh_table *t, const void *k, const void *v)
{
	unsigned long h = lh_get_hash(t, k);

	return lh_table_insert_w_hash(t, k, v, h, 0);
}
606 
607 
lh_table_lookup_entry_w_hash(struct lh_table * t,const void * k,const unsigned long h)608 struct lh_entry* lh_table_lookup_entry_w_hash(struct lh_table *t, const void *k, const unsigned long h)
609 {
610 	unsigned long n = h % t->size;
611 	int count = 0;
612 
613 	while( count < t->size ) {
614 		if(t->table[n].k == LH_EMPTY) return NULL;
615 		if(t->table[n].k != LH_FREED &&
616 		   t->equal_fn(t->table[n].k, k)) return &t->table[n];
617 		if ((int)++n == t->size) n = 0;
618 		count++;
619 	}
620 	return NULL;
621 }
622 
/* Find the entry for key k, computing the hash with the table's hash_fn. */
struct lh_entry* lh_table_lookup_entry(struct lh_table *t, const void *k)
{
	unsigned long h = lh_get_hash(t, k);

	return lh_table_lookup_entry_w_hash(t, k, h);
}
627 
lh_table_lookup_ex(struct lh_table * t,const void * k,void ** v)628 json_bool lh_table_lookup_ex(struct lh_table* t, const void* k, void **v)
629 {
630 	struct lh_entry *e = lh_table_lookup_entry(t, k);
631 	if (e != NULL) {
632 		if (v != NULL) *v = lh_entry_v(e);
633 		return 1; /* key found */
634 	}
635 	if (v != NULL) *v = NULL;
636 		return 0; /* key not found */
637 }
638 
/* Remove entry e (which must point into t->table) from the table.
 * Returns 0 on success, -1 if that slot is already empty/freed,
 * -2 if e lies before the start of the slot array.
 * The slot is marked LH_FREED rather than LH_EMPTY so that linear
 * probing in lookups continues past it.
 */
int lh_table_delete_entry(struct lh_table *t, struct lh_entry *e)
{
	ptrdiff_t n = (ptrdiff_t)(e - t->table); /* CAW: fixed to be 64bit nice, still need the crazy negative case... */

	/* CAW: this is bad, really bad, maybe stack goes other direction on this machine... */
	if(n < 0) { return -2; }

	if(t->table[n].k == LH_EMPTY || t->table[n].k == LH_FREED) return -1;
	t->count--;
	if(t->free_fn) t->free_fn(e);
	t->table[n].v = NULL;
	t->table[n].k = LH_FREED;
	/* unlink from the insertion-ordered list (4 cases: only element,
	 * head, tail, interior) */
	if(t->tail == &t->table[n] && t->head == &t->table[n]) {
		t->head = t->tail = NULL;
	} else if (t->head == &t->table[n]) {
		t->head->next->prev = NULL;
		t->head = t->head->next;
	} else if (t->tail == &t->table[n]) {
		t->tail->prev->next = NULL;
		t->tail = t->tail->prev;
	} else {
		t->table[n].prev->next = t->table[n].next;
		t->table[n].next->prev = t->table[n].prev;
	}
	t->table[n].next = t->table[n].prev = NULL;
	return 0;
}
666 
667 
lh_table_delete(struct lh_table * t,const void * k)668 int lh_table_delete(struct lh_table *t, const void *k)
669 {
670 	struct lh_entry *e = lh_table_lookup_entry(t, k);
671 	if(!e) return -1;
672 	return lh_table_delete_entry(t, e);
673 }
674 
lh_table_length(struct lh_table * t)675 int lh_table_length(struct lh_table *t)
676 {
677 	return t->count;
678 }
679