/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/protectionDomainCache.hpp"
#include "classfile/stringTable.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/numberSeq.hpp"


// This hashtable is implemented as an open hash table with a fixed number of buckets.

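// Return a recycled entry from the free list, or NULL if the free list is empty.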
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
  BasicHashtableEntry<F>* entry = NULL;
  if (_free_list != NULL) {
    entry = _free_list;
    _free_list = _free_list->next();
  }
  return entry;
}

// HashtableEntrys are allocated in blocks to reduce the space overhead.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
  BasicHashtableEntry<F>* entry = new_entry_free_list();

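  // No recycled entry is available: carve one out of the current allocation
  // block, allocating a fresh block first if the current one is exhausted.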
  if (entry == NULL) {
    if (_first_free_entry + _entry_size >= _end_block) {
      int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
      int len = _entry_size * block_size;
      len = 1 << log2_int(len); // round down to power of 2
      assert(len >= _entry_size, "");
      _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
      _end_block = _first_free_entry + len;
    }
    entry = (BasicHashtableEntry<F>*)_first_free_entry;
    _first_free_entry += _entry_size;
  }

  assert(_entry_size % HeapWordSize == 0, "");
  entry->set_hash(hashValue);
  return entry;
}


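// Allocate a new entry from the block allocator and install the literal.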
template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
  HashtableEntry<T, F>* entry;

  entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
  entry->set_literal(obj);
  return entry;
}

// Version of hashtable entry allocation that allocates in the C heap directly.
// The allocator in blocks is preferable but doesn't have free semantics.
template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::allocate_new_entry(unsigned int hashValue, T obj) {
  HashtableEntry<T, F>* entry = (HashtableEntry<T, F>*) NEW_C_HEAP_ARRAY(char, this->entry_size(), F);

  entry->set_hash(hashValue);
  entry->set_literal(obj);
  entry->set_next(NULL);
  return entry;
}

// Check to see if the hashtable is unbalanced.  The caller sets a flag to
// rehash at the next safepoint.  If this bucket is 60 times longer than the
// expected average bucket length, the hashtable is considered unbalanced.
// This is a somewhat arbitrary heuristic, but if one bucket reaches
// rehash_count (currently 100), something is probably wrong.

template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
  assert(this->table_size() != 0, "underflow");
  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
    // Set a flag for the next safepoint, which should be at some guaranteed
    // safepoint interval.
    return true;
  }
  return false;
}
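
// A minimal usage sketch (hypothetical caller, not taken from this file): a
// lookup path can count the entries it probes in a bucket and request a
// rehash when the bucket is pathologically long, e.g.
//
//   int probe_count = 0;
//   for (HashtableEntry<T, F>* e = this->bucket(index); e != NULL; e = e->next()) {
//     probe_count++;
//   }
//   if (check_rehash_table(probe_count)) {
//     needs_rehashing = true;  // flag examined at the next safepoint
//   }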

// Create a new table and, using the alternate hash code, populate the new
// table with the existing elements.  This can be used to change the hash
// code and could in the future also change the size of the table.

template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {

  // Initialize the global seed for hashing.
  _seed = AltHashing::compute_seed();
  assert(seed() != 0, "shouldn't be zero");

  int saved_entry_count = this->number_of_entries();

  // Iterate through the table and create a new entry for the new table
  for (int i = 0; i < new_table->table_size(); ++i) {
    for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
      HashtableEntry<T, F>* next = p->next();
      T string = p->literal();
      // Use alternate hashing algorithm on the symbol in the first table
      unsigned int hashValue = string->new_hash(seed());
      // Get a new index relative to the new table (can also change size)
      int index = new_table->hash_to_index(hashValue);
      p->set_hash(hashValue);
      // Keep the shared bit in the Hashtable entry to indicate that this entry
      // can't be deleted.  The shared bit is the LSB in the _next field, so
      // walking the hashtable past these entries requires a
      // BasicHashtableEntry::make_ptr() call.
      bool keep_shared = p->is_shared();
      this->unlink_entry(p);
      new_table->add_entry(index, p);
      if (keep_shared) {
        p->set_shared();
      }
      p = next;
    }
  }
  // give the new table the free list as well
  new_table->copy_freelist(this);

  // Destroy the memory used by the buckets in the hashtable.  The memory
  // for the elements is now used by the new table and is not destroyed.
  // Reusing this memory benefits resizing of the SystemDictionary by avoiding
  // a memory allocation spike at a safepoint.
  BasicHashtable<F>::free_buckets();
}

template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
  if (NULL != _buckets) {
    // Don't delete the buckets in the shared space.  They aren't
    // allocated by os::malloc
    if (!MetaspaceShared::is_in_shared_metaspace(_buckets)) {
       FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
    }
    _buckets = NULL;
  }
}

template <MEMFLAGS F> void BasicHashtable<F>::BucketUnlinkContext::free_entry(BasicHashtableEntry<F>* entry) {
  entry->set_next(_removed_head);
  _removed_head = entry;
  if (_removed_tail == NULL) {
    _removed_tail = entry;
  }
  _num_removed++;
}
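
// A minimal usage sketch (hypothetical caller, not taken from this file):
// dead entries are unlinked from their buckets into a BucketUnlinkContext and
// then returned to the free list in one batch, e.g.
//
//   BasicHashtable<F>::BucketUnlinkContext context;
//   // for each dead entry e: unlink it from its bucket, then context.free_entry(e);
//   table->bulk_free_entries(&context);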

template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkContext* context) {
  if (context->_num_removed == 0) {
    assert(context->_removed_head == NULL && context->_removed_tail == NULL,
           "Zero entries in the unlink context, but elements linked from " PTR_FORMAT " to " PTR_FORMAT,
           p2i(context->_removed_head), p2i(context->_removed_tail));
    return;
  }

  // MT-safe add of the list of BasicHashTableEntrys from the context to the free list.
  BasicHashtableEntry<F>* current = _free_list;
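  // Lock-free splice: point the tail of the removed list at the observed free
  // list head, then try to install the removed head with a CAS; on failure,
  // retry against the freshly observed head.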
  while (true) {
    context->_removed_tail->set_next(current);
    BasicHashtableEntry<F>* old = Atomic::cmpxchg(context->_removed_head, &_free_list, current);
    if (old == current) {
      break;
    }
    current = old;
  }
  Atomic::add(-context->_num_removed, &_number_of_entries);
}

// Copy the table to the shared space.
template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_table() {
  size_t bytes = 0;
  bytes += sizeof(intptr_t); // len

  for (int i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
         *p != NULL;
         p = (*p)->next_addr()) {
      bytes += entry_size();
    }
  }

  return bytes;
}

// Dump the hash table entries (into CDS archive)
template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char* top, char* end) {
  assert(is_aligned(top, sizeof(intptr_t)), "bad alignment");
  intptr_t *plen = (intptr_t*)(top);
  top += sizeof(*plen);

  int i;
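  // Copy each entry into the archive space and redirect the bucket pointer or
  // the previous entry's next pointer to the archived copy as we go.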
  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
         *p != NULL;
         p = (*p)->next_addr()) {
      *p = (BasicHashtableEntry<F>*)memcpy(top, (void*)*p, entry_size());
      top += entry_size();
    }
  }
  *plen = (char*)(top) - (char*)plen - sizeof(*plen);
  assert(top == end, "count_bytes_for_table is wrong");
  // Set the shared bit.

  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      p->set_shared();
    }
  }
}

// For oops and Strings the size of the literal is interesting. For other types, nobody cares.
static int literal_size(ConstantPool*) { return 0; }
static int literal_size(Klass*)        { return 0; }
static int literal_size(nmethod*)      { return 0; }

static int literal_size(Symbol *symbol) {
  return symbol->size() * HeapWordSize;
}

static int literal_size(oop obj) {
  // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
  // and the String.value array is shared by several Strings. However, starting from JDK8,
  // the String.value array is not shared anymore.
  if (obj == NULL) {
    return 0;
  } else if (obj->klass() == SystemDictionary::String_klass()) {
    return (obj->size() + java_lang_String::value(obj)->size()) * HeapWordSize;
  } else {
    return obj->size();
  }
}

static int literal_size(ClassLoaderWeakHandle v) {
  return literal_size(v.peek());
}

template <MEMFLAGS F> bool BasicHashtable<F>::resize(int new_size) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // Allocate new buckets
  HashtableBucket<F>* buckets_new = NEW_C_HEAP_ARRAY2_RETURN_NULL(HashtableBucket<F>, new_size, F, CURRENT_PC);
  if (buckets_new == NULL) {
    return false;
  }

  // Clear the new buckets
  for (int i = 0; i < new_size; i++) {
    buckets_new[i].clear();
  }

  int table_size_old = _table_size;
  // hash_to_index() uses _table_size, so switch the sizes now
  _table_size = new_size;

  // Move entries from the old table to a new table
  for (int index_old = 0; index_old < table_size_old; index_old++) {
    for (BasicHashtableEntry<F>* p = _buckets[index_old].get_entry(); p != NULL; ) {
      BasicHashtableEntry<F>* next = p->next();
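      // set_next() below clobbers the shared bit (the LSB of _next), so save
      // it here and restore it after the entry has been relinked.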
      bool keep_shared = p->is_shared();
      int index_new = hash_to_index(p->hash());

      p->set_next(buckets_new[index_new].get_entry());
      buckets_new[index_new].set_entry(p);

      if (keep_shared) {
        p->set_shared();
      }
      p = next;
    }
  }

  // The old buckets can now be released
  BasicHashtable<F>::free_buckets();

  // Switch to the new storage
  _buckets = buckets_new;

  return true;
}

// Dump footprint and bucket length statistics
//
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new function static int literal_size(MyNewType lit)
// because I can't get template <class T> int literal_size(T) to pick the specializations for Symbol and oop.
//
// The StringTable and SymbolTable dumps print how much footprint is used by the String and Symbol
// literals.

template <class T, MEMFLAGS F> void Hashtable<T, F>::print_table_statistics(outputStream* st,
                                                                            const char *table_name,
                                                                            T (*literal_load_barrier)(HashtableEntry<T, F>*)) {
  NumberSeq summary;
  int literal_bytes = 0;
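  // Walk every bucket, recording its length in the sequence and summing the
  // footprint of the literals.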
  for (int i = 0; i < this->table_size(); ++i) {
    int count = 0;
    for (HashtableEntry<T, F>* e = this->bucket(i);
         e != NULL; e = e->next()) {
      count++;
      T l = (literal_load_barrier != NULL) ? literal_load_barrier(e) : e->literal();
      literal_bytes += literal_size(l);
    }
    summary.add((double)count);
  }
  double num_buckets = summary.num();
  double num_entries = summary.sum();

  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
  int entry_bytes  = (int)num_entries * sizeof(HashtableEntry<T, F>);
  int total_bytes = literal_bytes +  bucket_bytes + entry_bytes;

  int bucket_size  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
  int entry_size   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets       : %9d = %9d bytes, each %d", (int)num_buckets, bucket_bytes,  bucket_size);
  st->print_cr("Number of entries       : %9d = %9d bytes, each %d", (int)num_entries, entry_bytes,   entry_size);
  if (literal_bytes != 0) {
    double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
    st->print_cr("Number of literals      : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
  }
  st->print_cr("Total footprint         : %9s = %9d bytes", "", total_bytes);
  st->print_cr("Average bucket size     : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size     : %9d", (int)summary.maximum());
}


// Dump the hash table buckets.

template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_buckets() {
  size_t bytes = 0;
  bytes += sizeof(intptr_t); // len
  bytes += sizeof(intptr_t); // _number_of_entries
  bytes += _table_size * sizeof(HashtableBucket<F>); // the buckets

  return bytes;
}

// Dump the buckets (into CDS archive)
template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char* top, char* end) {
  assert(is_aligned(top, sizeof(intptr_t)), "bad alignment");
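  // Archive layout: [byte length of the bucket array][_number_of_entries]
  // [the bucket array itself]; _buckets is then redirected to the copy.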
  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
  *(intptr_t*)(top) = len;
  top += sizeof(intptr_t);

  *(intptr_t*)(top) = _number_of_entries;
  top += sizeof(intptr_t);

  _buckets = (HashtableBucket<F>*)memcpy(top, (void*)_buckets, len);
  top += len;

  assert(top == end, "count_bytes_for_buckets is wrong");
}

#ifndef PRODUCT
template <class T> void print_literal(T l) {
  l->print();
}

static void print_literal(ClassLoaderWeakHandle l) {
  l.print();
}

template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
  ResourceMark rm;

  for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
    HashtableEntry<T, F>* entry = bucket(i);
    while (entry != NULL) {
      tty->print("%d : ", i);
      print_literal(entry->literal());
      tty->cr();
      entry = entry->next();
    }
  }
}

template <MEMFLAGS F>
template <class T> void BasicHashtable<F>::verify_table(const char* table_name) {
  int element_count = 0;
  int max_bucket_count = 0;
  int max_bucket_number = 0;
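  // Verify every entry and track the longest bucket for the log output below.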
  for (int index = 0; index < table_size(); index++) {
    int bucket_count = 0;
    for (T* probe = (T*)bucket(index); probe != NULL; probe = probe->next()) {
      probe->verify();
      bucket_count++;
    }
    element_count += bucket_count;
    if (bucket_count > max_bucket_count) {
      max_bucket_count = bucket_count;
      max_bucket_number = index;
    }
  }
  guarantee(number_of_entries() == element_count,
            "Verify of %s failed", table_name);

  // Log some statistics about the hashtable
  log_info(hashtables)("%s max bucket size %d bucket %d element count %d table size %d", table_name,
                       max_bucket_count, max_bucket_number, _number_of_entries, _table_size);
  if (_number_of_entries > 0 && log_is_enabled(Debug, hashtables)) {
    for (int index = 0; index < table_size(); index++) {
      int bucket_count = 0;
      for (T* probe = (T*)bucket(index); probe != NULL; probe = probe->next()) {
        log_debug(hashtables)("bucket %d hash " INTPTR_FORMAT, index, (intptr_t)probe->hash());
        bucket_count++;
      }
      if (bucket_count > 0) {
        log_debug(hashtables)("bucket %d count %d", index, bucket_count);
      }
    }
  }
}
#endif // PRODUCT

// Explicitly instantiate these types
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<InstanceKlass*, mtClass>;
template class Hashtable<ClassLoaderWeakHandle, mtClass>;
template class Hashtable<Symbol*, mtModule>;
template class Hashtable<oop, mtSymbol>;
template class Hashtable<ClassLoaderWeakHandle, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class HashtableEntry<ClassLoaderWeakHandle, mtSymbol>;
template class HashtableBucket<mtClass>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtClassShared>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
template class BasicHashtable<mtModule>;
template class BasicHashtable<mtCompiler>;

template void BasicHashtable<mtClass>::verify_table<DictionaryEntry>(char const*);
template void BasicHashtable<mtModule>::verify_table<ModuleEntry>(char const*);
template void BasicHashtable<mtModule>::verify_table<PackageEntry>(char const*);
template void BasicHashtable<mtClass>::verify_table<ProtectionDomainCacheEntry>(char const*);
template void BasicHashtable<mtClass>::verify_table<PlaceholderEntry>(char const*);