/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   - Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   - Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *   - Neither the name of Oracle nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This source code is provided to illustrate the usage of a given feature
 * or technique and has been deliberately simplified. Additional steps
 * required for a production-quality application, such as security checks,
 * input validation and proper error handling, might not be present in
 * this sample code.
 */

/* Lookup Table of generic elements. */

/*
 * Each table has a unique lock; all accesses are protected.
 *
 * Table elements are identified with a 32bit unsigned int.
 *   (Also see the HARE trick below, which makes the TableIndex unique
 *    per table.)
 *
 * Each element has a key (N bytes) and possibly additional info.
 *
 * Two elements with the same key are considered the same element.
 *
 * The storage for the Key and Info cannot move; the table itself can.
 *
 * The hash table will only be allocated if we have keys, and will resize
 *   when the table needs to resize. The hash buckets just provide the
 *   reference to the first TableIndex in the hash bucket; the next
 *   field of the TableElement takes you to the next item in the hash
 *   bucket. Lookups drift the looked-up item to the head of the list.
 *
 * The full 32bit hashcode and the key length are saved for comparisons;
 *   the last thing done is the actual comparison of the Key contents
 *   with keys_equal().
 *
 * Freed elements (not many tables actually free items) are managed with
 *   a bit vector and a low index where a freed element might be found.
 *   Bytes are inspected until a non-zero byte indicates a freed bit is
 *   set. A count of freed elements is also kept.
 *
 */
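
/*
 * Minimal sketch of how a table is typically used (illustrative only;
 * "MyInfo" and the literal sizes are made-up values, not taken from the
 * hprof sources):
 *
 *   LookupTable *t;
 *   TableIndex   ix;
 *   jboolean     is_new;
 *   MyInfo       info;
 *
 *   t  = table_initialize("MyTable", 512, 512, 511, (int)sizeof(MyInfo));
 *   ix = table_find_or_create_entry(t, key_ptr, key_len, &is_new, &info);
 *   ... use ((MyInfo*)table_get_info(t, ix)) ...
 *   table_free_entry(t, ix);          ( only some tables ever free )
 *   table_cleanup(t, NULL, NULL);
 *
 * Indexes handed back are never 0 and may carry a HARE tag (see below),
 * so callers should treat them as opaque handles.
 */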

#include "hprof.h"

/* Macros for bit vectors: unsigned char 2^3==8 OR unsigned int 2^5==32 */

#define BV_CHUNK_POWER_2         3 /* 2 to this power == BV_CHUNK_BITSIZE */
#define BV_CHUNK_TYPE            unsigned char

#define BV_CHUNK_BITSIZE         (((int)sizeof(BV_CHUNK_TYPE))<<3) /* x8 */
#define BV_CHUNK_INDEX_MASK      ( (1 << BV_CHUNK_POWER_2) - 1 )
#define BV_ELEMENT_COUNT(nelems) ((((nelems+1)) >> BV_CHUNK_POWER_2) + 1)

#define BV_CHUNK_ROUND(i)        ((i) & ~(BV_CHUNK_INDEX_MASK))
#define BV_CHUNK(ptr, i)         \
                (((BV_CHUNK_TYPE*)(ptr))[(i) >> BV_CHUNK_POWER_2])
#define BV_CHUNK_MASK(i)         \
                (1 << ((i) & BV_CHUNK_INDEX_MASK))
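
/*
 * Worked example (illustrative only, not part of the original sources):
 * with BV_CHUNK_TYPE == unsigned char and BV_CHUNK_POWER_2 == 3, a freed
 * TableIndex of 75 maps to chunk 75 >> 3 == 9 and bit 75 & 7 == 3, so
 * BV_CHUNK(bv, 75) selects byte 9 and BV_CHUNK_MASK(75) is 1 << 3 == 0x08.
 * BV_CHUNK_ROUND(75) == 72 is the first index covered by that byte, and
 * BV_ELEMENT_COUNT(n) over-allocates slightly so that index n itself
 * always has a byte to land in.
 */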

/* Hash code value */

typedef unsigned HashCode;

/* Basic key for an element. What makes the element unique. */

typedef struct TableKey {
    void        *ptr;    /* Pointer to arbitrary data that forms the key. */
    int          len;    /* Length in bytes of this key. */
} TableKey;

/* Basic TableElement (but only allocated if keys are used) */

typedef struct TableElement {
    TableKey     key;    /* The element key. */
    HashCode     hcode;  /* The full 32bit hashcode for the key. */
    TableIndex   next;   /* The next TableElement in the hash bucket chain. */
    void        *info;   /* Info pointer */
} TableElement;

/* Generic Lookup Table structure */

typedef struct LookupTable {
    char           name[48];          /* Name of table. */
    void          *table;             /* Pointer to array of elements. */
    TableIndex    *hash_buckets;      /* Pointer to hash bucket chains. */
    Blocks        *info_blocks;       /* Blocks space for info */
    Blocks        *key_blocks;        /* Blocks space for keys */
    TableIndex     next_index;        /* Next element available. */
    TableIndex     table_size;        /* Current size of table. */
    TableIndex     table_incr;        /* Suggested increment size. */
    TableIndex     hash_bucket_count; /* Number of hash buckets. */
    int            elem_size;         /* Size of element. */
    int            info_size;         /* Size of info structure (can be 0). */
    void          *freed_bv;          /* Freed element bit vector */
    int            freed_count;       /* Count of freed elements */
    TableIndex     freed_start;       /* First freed in table */
    int            resizes;           /* Count of table resizes done. */
    unsigned       bucket_walks;      /* Count of bucket walks. */
    jrawMonitorID  lock;              /* Lock for table access. */
    SerialNumber   serial_num;        /* Table serial number. */
    TableIndex     hare;              /* Rabbit (HARE) trick. */
} LookupTable;

/* To get a pointer to an element, regardless of element size. */

#define ELEMENT_PTR(ltable, i) \
        ((void*)(((char*)(ltable)->table) + (ltable)->elem_size * (i)))

/* Sanity, check all the time. */

#define SANITY_CHECK(condition) ( (condition) ? (void)0 : \
        HPROF_ERROR(JNI_FALSE, "SANITY IN QUESTION: " #condition))

/* To see if an index is valid. */

#define SANITY_CHECK_INDEX(ltable,i) SANITY_CHECK((i) < ltable->next_index)

/* Small rabbits (hares) can be hidden in the index value returned.
 *   Only the right rabbits are allowed in certain pens (LookupTables).
 *   When herding rabbits it's important to keep them separate;
 *   there are lots of rabbits, all different kinds and sizes, and
 *   keeping them all separate is important to avoid cross breeding.
 */

#define _SANITY_USE_HARE
#ifdef _SANITY_USE_HARE
    #define SANITY_ADD_HARE(i,hare)   (SANITY_REMOVE_HARE(i) | (hare))
    #define SANITY_REMOVE_HARE(i)     ((i) & 0x0FFFFFFF)
    #define SANITY_CHECK_HARE(i,hare) SANITY_CHECK(SANITY_ADD_HARE(i,hare)==(i))
#else
    #define SANITY_ADD_HARE(i,hare)   (i)
    #define SANITY_REMOVE_HARE(i)     (i)
    #define SANITY_CHECK_HARE(i,hare)
#endif
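
/*
 * Worked example of the HARE tag (illustrative only): table_initialize()
 * below sets hare = serial_num << 28, so a table with serial number 3 has
 * hare == 0x30000000. An index of 5 handed out by that table becomes
 * SANITY_ADD_HARE(5, 0x30000000) == 0x30000005. SANITY_REMOVE_HARE()
 * masks the tag back off (0x30000005 & 0x0FFFFFFF == 5), and
 * SANITY_CHECK_HARE() complains if an index stamped by one table is
 * passed to a table with a different serial number.
 */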

static jrawMonitorID
lock_create(char *name)
{
    jrawMonitorID stanley;

    stanley = createRawMonitor(name);
    return stanley;
}

static void
lock_destroy(jrawMonitorID stanley)
{
    if ( stanley != NULL ) {
        destroyRawMonitor(stanley);
    }
}

static void
lock_enter(jrawMonitorID stanley)
{
    if ( stanley != NULL ) {
        rawMonitorEnter(stanley);
    }
}

static void
lock_exit(jrawMonitorID stanley)
{
    if ( stanley != NULL ) {
        rawMonitorExit(stanley);
    }
}

static void
get_key(LookupTable *ltable, TableIndex index, void **pkey_ptr, int *pkey_len)
{
    *pkey_ptr = ((TableElement*)ELEMENT_PTR(ltable,index))->key.ptr;
    *pkey_len = ((TableElement*)ELEMENT_PTR(ltable,index))->key.len;
}

static void *
get_info(LookupTable *ltable, TableIndex index)
{
    TableElement *element;

    element = (TableElement*)ELEMENT_PTR(ltable,index);
    return element->info;
}

static void
hash_out(LookupTable *ltable, TableIndex index)
{
    if ( ltable->hash_bucket_count > 0 ) {
        TableElement *element;
        TableElement *prev_e;
        TableIndex    bucket;
        TableIndex    i;

        element = (TableElement*)ELEMENT_PTR(ltable,index);
        bucket = (element->hcode % ltable->hash_bucket_count);
        i = ltable->hash_buckets[bucket];
        HPROF_ASSERT(i!=0);
        prev_e = NULL;
        while ( i != 0 && i != index ) {
            prev_e = (TableElement*)ELEMENT_PTR(ltable,i);
            i = prev_e->next;
        }
        HPROF_ASSERT(i==index);
        if ( prev_e == NULL ) {
            ltable->hash_buckets[bucket] = element->next;
        } else {
            prev_e->next = element->next;
        }
        element->next = 0;
        element->hcode = 0;
    }
}

static jboolean
is_freed_entry(LookupTable *ltable, TableIndex index)
{
    if ( ltable->freed_bv == NULL ) {
        return JNI_FALSE;
    }
    if ( ( BV_CHUNK(ltable->freed_bv, index) & BV_CHUNK_MASK(index) ) != 0 ) {
        return JNI_TRUE;
    }
    return JNI_FALSE;
}

static void
set_freed_bit(LookupTable *ltable, TableIndex index)
{
    void *p;

    HPROF_ASSERT(!is_freed_entry(ltable, index));
    p = ltable->freed_bv;
    if ( p == NULL ) {
        int size;

        /* First time for a free */
        HPROF_ASSERT(ltable->freed_start==0);
        size = BV_ELEMENT_COUNT(ltable->table_size);
        p = HPROF_MALLOC(size*(int)sizeof(BV_CHUNK_TYPE));
        ltable->freed_bv = p;
        (void)memset(p, 0, size*(int)sizeof(BV_CHUNK_TYPE));
    }
    BV_CHUNK(p, index) |= BV_CHUNK_MASK(index);
    ltable->freed_count++;
    if ( ltable->freed_count == 1 ) {
        /* Set freed_start for first time. */
        HPROF_ASSERT(ltable->freed_start==0);
        ltable->freed_start = index;
    } else if ( index < ltable->freed_start ) {
        /* Set freed_start to smaller value so we can be smart about search */
        HPROF_ASSERT(ltable->freed_start!=0);
        ltable->freed_start = index;
    }
    HPROF_ASSERT(ltable->freed_start!=0);
    HPROF_ASSERT(ltable->freed_start < ltable->next_index);
    HPROF_ASSERT(is_freed_entry(ltable, index));
}

static TableIndex
find_freed_entry(LookupTable *ltable)
{
    if ( ltable->freed_count > 0 ) {
        TableIndex    i;
        TableIndex    istart;
        void         *p;
        BV_CHUNK_TYPE chunk;

        HPROF_ASSERT(BV_CHUNK_BITSIZE==(1<<BV_CHUNK_POWER_2));

        p = ltable->freed_bv;
        HPROF_ASSERT(p!=NULL);

        /* Go to beginning of chunk */
        HPROF_ASSERT(ltable->freed_start!=0);
        HPROF_ASSERT(ltable->freed_start < ltable->next_index);
        istart = BV_CHUNK_ROUND(ltable->freed_start);

        /* Find chunk with any bit set */
        chunk = 0;
        for( ; istart < ltable->next_index ; istart += BV_CHUNK_BITSIZE ) {
            chunk = BV_CHUNK(p, istart);
            if ( chunk != 0 ) {
                break;
            }
        }
        HPROF_ASSERT(chunk!=0);
        HPROF_ASSERT(chunk==BV_CHUNK(p,istart));
        HPROF_ASSERT(istart < ltable->next_index);

        /* Find bit in chunk and return index of freed item */
        for( i = istart ; i < (istart+BV_CHUNK_BITSIZE) ; i++) {
            BV_CHUNK_TYPE mask;

            mask = BV_CHUNK_MASK(i);
            if ( (chunk & mask) != 0 ) {
                HPROF_ASSERT(chunk==BV_CHUNK(p,i));
                chunk &= ~mask;
                BV_CHUNK(p, i) = chunk;
                ltable->freed_count--;
                HPROF_ASSERT(i < ltable->next_index);
                if ( ltable->freed_count > 0 ) {
                    /* Set freed_start so we can be smart about search */
                    HPROF_ASSERT((i+1) < ltable->next_index);
                    ltable->freed_start = i+1;
                } else {
                    /* Clear freed_start because there are no freed entries */
                    ltable->freed_start = 0;
                }
                HPROF_ASSERT(!is_freed_entry(ltable, i));
                return i;
            }
        }
        HPROF_ASSERT(0);
    }
    return 0;
}

static void
free_entry(LookupTable *ltable, TableIndex index)
{
    set_freed_bit(ltable, index);
    hash_out(ltable, index);
}

/* Fairly generic hash code generator (not a hash table index) */
static HashCode
hashcode(void *key_ptr, int key_len)
{
    unsigned char *p;
    HashCode       hcode;
    int            i;

    hcode = 0;
    if ( key_ptr == NULL || key_len == 0 ) {
        return hcode;
    }
    i = 0;
    p = (unsigned char*)key_ptr;
    for ( ; i < key_len-3 ; i += 4 ) {
        /* Do a little loop unrolling */
        hcode += (
            ( (unsigned)(p[i])   << 24 ) |
            ( (unsigned)(p[i+1]) << 16 ) |
            ( (unsigned)(p[i+2]) <<  8 ) |
            ( (unsigned)(p[i+3])       )
            );
    }
    for ( ; i < key_len ; i++ ) {
        hcode += (unsigned)(p[i]);
    }
    return hcode;
}
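
/*
 * Worked example (illustrative only): for the 5-byte key
 * { 0x01, 0x02, 0x03, 0x04, 0x05 }, the unrolled loop consumes the first
 * four bytes as one big-endian word, 0x01020304, and the trailing loop
 * adds the leftover byte, giving hcode == 0x01020304 + 0x05 == 0x01020309.
 * The value is a full 32-bit hash code; hash_in()/find_entry() reduce it
 * to a bucket with (hcode % hash_bucket_count).
 */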

static void
hash_in(LookupTable *ltable, TableIndex index, HashCode hcode)
{
    if ( ltable->hash_bucket_count > 0 ) {
        TableElement *element;
        TableIndex    bucket;

        bucket = (hcode % ltable->hash_bucket_count);
        element = (TableElement*)ELEMENT_PTR(ltable, index);
        element->hcode = hcode;
        element->next = ltable->hash_buckets[bucket];
        ltable->hash_buckets[bucket] = index;
    }
}

static void
resize_hash_buckets(LookupTable *ltable)
{
    /* Don't want to do this too often. */

    /* Hash table needs resizing when it's smaller than 1/16 the number of
     *   elements used in the table. This is just a guess.
     */
    if ( ( ltable->hash_bucket_count < (ltable->next_index >> 4) )
         && ( ltable->hash_bucket_count > 0 )
         && ( ( ltable->resizes % 10 ) == 0 )
         && ( ltable->bucket_walks > 1000*ltable->hash_bucket_count )
       ) {
        int         old_size;
        int         new_size;
        TableIndex *new_buckets;
        TableIndex *old_buckets;
        int         bucket;

        /* Increase size of hash_buckets array, and rehash all elements */

        LOG3("Table resize", ltable->name, ltable->resizes);

        old_size    = ltable->hash_bucket_count;
        old_buckets = ltable->hash_buckets;
        new_size    = (ltable->next_index >> 3); /* 1/8 current used count */
        SANITY_CHECK(new_size > old_size);
        new_buckets = HPROF_MALLOC(new_size*(int)sizeof(TableIndex));
        (void)memset(new_buckets, 0, new_size*(int)sizeof(TableIndex));
        ltable->hash_bucket_count = new_size;
        ltable->hash_buckets = new_buckets;

        for ( bucket = 0 ; bucket < old_size ; bucket++ ) {
            TableIndex index;

            index = old_buckets[bucket];
            while ( index != 0 ) {
                TableElement *element;
                TableIndex    next;

                element = (TableElement*)ELEMENT_PTR(ltable, index);
                next = element->next;
                element->next = 0;
                hash_in(ltable, index, element->hcode);
                index = next;
            }
        }
        HPROF_FREE(old_buckets);

        ltable->bucket_walks = 0;
    }
}
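
/*
 * Worked example of the rehash trigger above (illustrative numbers only):
 * a table created with 1000 buckets that has grown to next_index == 20000
 * satisfies 1000 < (20000 >> 4) == 1250; if this also happens on a table
 * resize whose count is a multiple of 10 and more than 1,000,000 bucket
 * walks have accumulated, the bucket array is rebuilt at
 * (20000 >> 3) == 2500 buckets and every element is rehashed via hash_in().
 */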

static void
resize(LookupTable *ltable)
{
    int   old_size;
    int   new_size;
    void *old_table;
    void *new_table;
    int   nbytes;
    int   obytes;

    LOG3("Table resize", ltable->name, ltable->resizes);

    /* Adjust increment on every resize
     *   Minimum is 1/4 the size of the current table or 512.
     */
    old_size = ltable->table_size;
    if ( ltable->table_incr < (unsigned)(old_size >> 2) ) {
        ltable->table_incr = (old_size >> 2);
    }
    if ( ltable->table_incr < 512 ) {
        ltable->table_incr = 512;
    }
    new_size = old_size + ltable->table_incr;

    /* Basic table element array */
    obytes = old_size * ltable->elem_size;
    nbytes = new_size * ltable->elem_size;
    old_table = ltable->table;
    new_table = HPROF_MALLOC(nbytes);
    (void)memcpy(new_table, old_table, obytes);
    (void)memset(((char*)new_table)+obytes, 0, nbytes-obytes);
    ltable->table = new_table;
    ltable->table_size = new_size;
    HPROF_FREE(old_table);

    /* Then bit vector for freed entries */
    if ( ltable->freed_bv != NULL ) {
        void *old_bv;
        void *new_bv;

        obytes = BV_ELEMENT_COUNT(old_size)*(int)sizeof(BV_CHUNK_TYPE);
        nbytes = BV_ELEMENT_COUNT(new_size)*(int)sizeof(BV_CHUNK_TYPE);
        old_bv = ltable->freed_bv;
        new_bv = HPROF_MALLOC(nbytes);
        (void)memcpy(new_bv, old_bv, obytes);
        (void)memset(((char*)new_bv)+obytes, 0, nbytes-obytes);
        ltable->freed_bv = new_bv;
        HPROF_FREE(old_bv);
    }

    /* Check to see if the hash table needs resizing */
    resize_hash_buckets(ltable);

    ltable->resizes++;
}
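
/*
 * Growth example (illustrative numbers only): a table created with
 * size == 4096 and incr == 256 grows on its first resize by
 * max(256, 4096 >> 2, 512) == 1024 entries to 5120, and the increment is
 * left at 1024 so later resizes grow by at least a quarter of the then
 * current size. Only the element array and freed bit vector are
 * reallocated and copied; key and info storage lives in Blocks and never
 * moves, as the header comment requires.
 */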

static jboolean
keys_equal(void *key_ptr1, void *key_ptr2, int key_len)
{
    unsigned char *p1;
    unsigned char *p2;
    int            i;

    if ( key_len == 0 ) {
        return JNI_TRUE;
    }

    /* We know these are aligned because we malloc'd them. */

    /* Compare word by word, then byte by byte */
    p1 = (unsigned char*)key_ptr1;
    p2 = (unsigned char*)key_ptr2;
    for ( i = 0 ; i < key_len-3 ; i += 4 ) {
        /*LINTED*/
        if ( *(unsigned*)(p1+i) != *(unsigned*)(p2+i) ) {
            return JNI_FALSE;
        }
    }
    for ( ; i < key_len ; i++ ) {
        if ( p1[i] != p2[i] ) {
            return JNI_FALSE;
        }
    }
    return JNI_TRUE;
}

static TableIndex
find_entry(LookupTable *ltable, void *key_ptr, int key_len, HashCode hcode)
{
    TableIndex index;

    HPROF_ASSERT(ltable!=NULL);

    index = 0;
    if ( ltable->hash_bucket_count > 0 ) {
        TableIndex bucket;
        TableIndex prev_index;

        HPROF_ASSERT(key_ptr!=NULL);
        HPROF_ASSERT(key_len>0);
        prev_index = 0;
        bucket = (hcode % ltable->hash_bucket_count);
        index = ltable->hash_buckets[bucket];
        while ( index != 0 ) {
            TableElement *element;
            TableElement *prev_element;

            element = (TableElement*)ELEMENT_PTR(ltable, index);
            if ( hcode == element->hcode &&
                 key_len == element->key.len &&
                 keys_equal(key_ptr, element->key.ptr, key_len) ) {
                /* Place this guy at the head of the bucket list */
                if ( prev_index != 0 ) {
                    prev_element = (TableElement*)ELEMENT_PTR(ltable, prev_index);
                    prev_element->next = element->next;
                    element->next = ltable->hash_buckets[bucket];
                    ltable->hash_buckets[bucket] = index;
                }
                break;
            }
            prev_index = index;
            index = element->next;
            ltable->bucket_walks++;
        }
    }
    return index;
}

static TableIndex
setup_new_entry(LookupTable *ltable, void *key_ptr, int key_len, void *info_ptr)
{
    TableIndex    index;
    TableElement *element;
    void         *info;
    void         *dup_key;

    /* Assume we need new allocations for key and info */
    dup_key = NULL;
    info    = NULL;

    /* Look for a freed element */
    index = 0;
    if ( ltable->freed_count > 0 ) {
        index = find_freed_entry(ltable);
    }
    if ( index != 0 ) {
        int old_key_len;

        /* Found a freed element, re-use what we can but clean it up. */
        element = (TableElement*)ELEMENT_PTR(ltable, index);
        dup_key     = element->key.ptr;
        old_key_len = element->key.len;
        info        = element->info;
        (void)memset(element, 0, ltable->elem_size);

        /* Toss the key space if size is too small to hold new key */
        if ( key_ptr != NULL ) {
            if ( old_key_len < key_len ) {
                /* This could leak space in the Blocks if keys are variable
                 *   in size AND the table does frees of elements.
                 */
                dup_key = NULL;
            }
        }
    } else {

        /* Brand new table element */
        if ( ltable->next_index >= ltable->table_size ) {
            resize(ltable);
        }
        index = ltable->next_index++;
        element = (TableElement*)ELEMENT_PTR(ltable, index);
    }

    /* Setup info area */
    if ( ltable->info_size > 0 ) {
        if ( info == NULL ) {
            info = blocks_alloc(ltable->info_blocks, ltable->info_size);
        }
        if ( info_ptr==NULL ) {
            (void)memset(info, 0, ltable->info_size);
        } else {
            (void)memcpy(info, info_ptr, ltable->info_size);
        }
    }

    /* Setup key area if one was provided */
    if ( key_ptr != NULL ) {
        if ( dup_key == NULL ) {
            dup_key = blocks_alloc(ltable->key_blocks, key_len);
        }
        (void)memcpy(dup_key, key_ptr, key_len);
    }

    /* Fill in element */
    element->key.ptr = dup_key;
    element->key.len = key_len;
    element->info    = info;

    return index;
}
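
/*
 * Reuse example (illustrative only): if an element that was freed with a
 * 16-byte key is handed out again for a 12-byte key, the old key space in
 * the key Blocks is reused and only key.len changes. A 20-byte key does
 * not fit, so dup_key is dropped and blocks_alloc() provides fresh space;
 * the old 16 bytes stay behind in the Blocks arena, which is the leak the
 * comment above warns about for tables that both free elements and use
 * variable-length keys.
 */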

LookupTable *
table_initialize(const char *name, int size, int incr, int bucket_count,
                 int info_size)
{
    LookupTable *ltable;
    char         lock_name[80];
    int          elem_size;
    int          key_size;

    HPROF_ASSERT(name!=NULL);
    HPROF_ASSERT(size>0);
    HPROF_ASSERT(incr>0);
    HPROF_ASSERT(bucket_count>=0);
    HPROF_ASSERT(info_size>=0);

    key_size = 1;
    ltable = (LookupTable *)HPROF_MALLOC((int)sizeof(LookupTable));
    (void)memset(ltable, 0, (int)sizeof(LookupTable));

    (void)strncpy(ltable->name, name, sizeof(ltable->name));

    elem_size = (int)sizeof(TableElement);

    ltable->next_index        = 1; /* Never use index 0 */
    ltable->table_size        = size;
    ltable->table_incr        = incr;
    ltable->hash_bucket_count = bucket_count;
    ltable->elem_size         = elem_size;
    ltable->info_size         = info_size;
    if ( info_size > 0 ) {
        ltable->info_blocks = blocks_init(8, info_size, incr);
    }
    if ( key_size > 0 ) {
        ltable->key_blocks = blocks_init(8, key_size, incr);
    }
    ltable->table = HPROF_MALLOC(size * elem_size);
    (void)memset(ltable->table, 0, size * elem_size);
    if ( bucket_count > 0 ) {
        int nbytes;

        nbytes = (int)(bucket_count*sizeof(TableIndex));
        ltable->hash_buckets = (TableIndex*)HPROF_MALLOC(nbytes);
        (void)memset(ltable->hash_buckets, 0, nbytes);
    }

    (void)md_snprintf(lock_name, sizeof(lock_name),
                "HPROF %s table lock", name);
    lock_name[sizeof(lock_name)-1] = 0;
    ltable->lock       = lock_create(lock_name);
    ltable->serial_num = gdata->table_serial_number_counter++;
    ltable->hare       = (ltable->serial_num << 28);

    LOG3("Table initialized", ltable->name, ltable->table_size);
    return ltable;
}

int
table_element_count(LookupTable *ltable)
{
    int nelems;

    HPROF_ASSERT(ltable!=NULL);

    lock_enter(ltable->lock); {
        nelems = ltable->next_index-1;
    } lock_exit(ltable->lock);

    return nelems;
}

void
table_free_entry(LookupTable *ltable, TableIndex index)
{
    HPROF_ASSERT(ltable!=NULL);
    SANITY_CHECK_HARE(index, ltable->hare);
    index = SANITY_REMOVE_HARE(index);
    SANITY_CHECK_INDEX(ltable, index);

    lock_enter(ltable->lock); {
        HPROF_ASSERT(!is_freed_entry(ltable, index));
        free_entry(ltable, index);
    } lock_exit(ltable->lock);
}

void
table_walk_items(LookupTable *ltable, LookupTableIterator func, void* arg)
{
    if ( ltable == NULL || ltable->next_index <= 1 ) {
        return;
    }
    HPROF_ASSERT(func!=NULL);

    lock_enter(ltable->lock); {
        TableIndex index;
        int        fcount;

        LOG3("table_walk_items() count+free", ltable->name, ltable->next_index);
        fcount = 0;
        for ( index = 1 ; index < ltable->next_index ; index++ ) {
            if ( ! is_freed_entry(ltable, index) ) {
                void *key_ptr;
                int   key_len;
                void *info;

                get_key(ltable, index, &key_ptr, &key_len);
                if ( ltable->info_size == 0 ) {
                    info = NULL;
                } else {
                    info = get_info(ltable, index);
                }
                (*func)(SANITY_ADD_HARE(index, ltable->hare), key_ptr, key_len, info, arg);
                if ( is_freed_entry(ltable, index) ) {
                    fcount++;
                }
            } else {
                fcount++;
            }
        }
        LOG3("table_walk_items() count-free", ltable->name, ltable->next_index);
        HPROF_ASSERT(fcount==ltable->freed_count);
    } lock_exit(ltable->lock);
}

void
table_cleanup(LookupTable *ltable, LookupTableIterator func, void *arg)
{
    if ( ltable == NULL ) {
        return;
    }

    if ( func != NULL ) {
        table_walk_items(ltable, func, arg);
    }

    lock_enter(ltable->lock); {

        HPROF_FREE(ltable->table);
        if ( ltable->hash_buckets != NULL ) {
            HPROF_FREE(ltable->hash_buckets);
        }
        if ( ltable->freed_bv != NULL ) {
            HPROF_FREE(ltable->freed_bv);
        }
        if ( ltable->info_blocks != NULL ) {
            blocks_term(ltable->info_blocks);
            ltable->info_blocks = NULL;
        }
        if ( ltable->key_blocks != NULL ) {
            blocks_term(ltable->key_blocks);
            ltable->key_blocks = NULL;
        }

    } lock_exit(ltable->lock);

    lock_destroy(ltable->lock);
    ltable->lock = NULL;

    HPROF_FREE(ltable);
    ltable = NULL;
}

TableIndex
table_create_entry(LookupTable *ltable, void *key_ptr, int key_len, void *info_ptr)
{
    TableIndex index;
    HashCode   hcode;

    HPROF_ASSERT(ltable!=NULL);

    /* Create hash code if needed */
    hcode = 0;
    if ( ltable->hash_bucket_count > 0 ) {
        hcode = hashcode(key_ptr, key_len);
    }

    /* Create a new entry */
    lock_enter(ltable->lock); {

        /* Need to create a new entry */
        index = setup_new_entry(ltable, key_ptr, key_len, info_ptr);

        /* Add to hash table if we have one */
        if ( ltable->hash_bucket_count > 0 ) {
            hash_in(ltable, index, hcode);
        }

    } lock_exit(ltable->lock);
    return SANITY_ADD_HARE(index, ltable->hare);
}

TableIndex
table_find_entry(LookupTable *ltable, void *key_ptr, int key_len)
{
    TableIndex index;
    HashCode   hcode;

    /* Create hash code if needed */
    hcode = 0;
    if ( ltable->hash_bucket_count > 0 ) {
        hcode = hashcode(key_ptr, key_len);
    }

    /* Look for element */
    lock_enter(ltable->lock); {
        index = find_entry(ltable, key_ptr, key_len, hcode);
    } lock_exit(ltable->lock);

    return index==0 ? index : SANITY_ADD_HARE(index, ltable->hare);
}

TableIndex
table_find_or_create_entry(LookupTable *ltable, void *key_ptr, int key_len,
                jboolean *pnew_entry, void *info_ptr)
{
    TableIndex index;
    HashCode   hcode;

    /* Assume it is NOT a new entry for now */
    if ( pnew_entry ) {
        *pnew_entry = JNI_FALSE;
    }

    /* Create hash code if needed */
    hcode = 0;
    if ( ltable->hash_bucket_count > 0 ) {
        hcode = hashcode(key_ptr, key_len);
    }

    /* Look for element */
    index = 0;
    lock_enter(ltable->lock); {
        if ( ltable->hash_bucket_count > 0 ) {
            index = find_entry(ltable, key_ptr, key_len, hcode);
        }
        if ( index == 0 ) {

            /* Need to create a new entry */
            index = setup_new_entry(ltable, key_ptr, key_len, info_ptr);

            /* Add to hash table if we have one */
            if ( ltable->hash_bucket_count > 0 ) {
                hash_in(ltable, index, hcode);
            }

            if ( pnew_entry ) {
                *pnew_entry = JNI_TRUE;
            }
        }
    } lock_exit(ltable->lock);

    return SANITY_ADD_HARE(index, ltable->hare);
}

void *
table_get_info(LookupTable *ltable, TableIndex index)
{
    void *info;

    HPROF_ASSERT(ltable!=NULL);
    HPROF_ASSERT(ltable->info_size > 0);
    SANITY_CHECK_HARE(index, ltable->hare);
    index = SANITY_REMOVE_HARE(index);
    SANITY_CHECK_INDEX(ltable, index);

    lock_enter(ltable->lock); {
        HPROF_ASSERT(!is_freed_entry(ltable, index));
        info = get_info(ltable,index);
    } lock_exit(ltable->lock);

    return info;
}

void
table_get_key(LookupTable *ltable, TableIndex index, void **pkey_ptr, int *pkey_len)
{
    HPROF_ASSERT(ltable!=NULL);
    HPROF_ASSERT(pkey_ptr!=NULL);
    HPROF_ASSERT(pkey_len!=NULL);
    SANITY_CHECK_HARE(index, ltable->hare);
    HPROF_ASSERT(ltable->elem_size!=0);
    index = SANITY_REMOVE_HARE(index);
    SANITY_CHECK_INDEX(ltable, index);

    lock_enter(ltable->lock); {
        HPROF_ASSERT(!is_freed_entry(ltable, index));
        get_key(ltable, index, pkey_ptr, pkey_len);
    } lock_exit(ltable->lock);
}

void
table_lock_enter(LookupTable *ltable)
{
    lock_enter(ltable->lock);
}

void
table_lock_exit(LookupTable *ltable)
{
    lock_exit(ltable->lock);
}