1 /*
2 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/altHashing.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/filemap.hpp"
30 #include "memory/resourceArea.hpp"
31 #include "oops/oop.inline.hpp"
32 #include "runtime/safepoint.hpp"
33 #include "utilities/dtrace.hpp"
34 #include "utilities/hashtable.hpp"
35 #include "utilities/hashtable.inline.hpp"
36 #include "utilities/numberSeq.hpp"
37
38
39 // This hashtable is implemented as an open hash table with a fixed number of buckets.
40
new_entry_free_list()41 template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
42 BasicHashtableEntry<F>* entry = NULL;
43 if (_free_list != NULL) {
44 entry = _free_list;
45 _free_list = _free_list->next();
46 }
47 return entry;
48 }
49
50 // HashtableEntrys are allocated in blocks to reduce the space overhead.
new_entry(unsigned int hashValue)51 template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
52 BasicHashtableEntry<F>* entry = new_entry_free_list();
53
54 if (entry == NULL) {
55 if (_first_free_entry + _entry_size >= _end_block) {
56 int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
57 int len = _entry_size * block_size;
58 len = 1 << log2_int(len); // round down to power of 2
59 assert(len >= _entry_size, "");
60 _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
61 _end_block = _first_free_entry + len;
62 }
63 entry = (BasicHashtableEntry<F>*)_first_free_entry;
64 _first_free_entry += _entry_size;
65 }
66
67 assert(_entry_size % HeapWordSize == 0, "");
68 entry->set_hash(hashValue);
69 return entry;
70 }
71
72
new_entry(unsigned int hashValue,T obj)73 template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
74 HashtableEntry<T, F>* entry;
75
76 entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
77 entry->set_literal(obj);
78 return entry;
79 }
80
81 // Check to see if the hashtable is unbalanced. The caller set a flag to
82 // rehash at the next safepoint. If this bucket is 60 times greater than the
83 // expected average bucket length, it's an unbalanced hashtable.
84 // This is somewhat an arbitrary heuristic but if one bucket gets to
85 // rehash_count which is currently 100, there's probably something wrong.
86
check_rehash_table(int count)87 template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
88 assert(this->table_size() != 0, "underflow");
89 if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
90 // Set a flag for the next safepoint, which should be at some guaranteed
91 // safepoint interval.
92 return true;
93 }
94 return false;
95 }
96
97 // Create a new table and using alternate hash code, populate the new table
98 // with the existing elements. This can be used to change the hash code
99 // and could in the future change the size of the table.
100
// Move every entry from this table into new_table, rehashing each literal
// with a freshly computed alternate-hash seed.  Entry memory is reused
// (entries are unlinked here and relinked into new_table), not copied.
template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {

  // Initialize the global seed for hashing.
  _seed = AltHashing::compute_seed();
  assert(seed() != 0, "shouldn't be zero");

  // Remembered so we can assert below that no entry was lost in the move.
  int saved_entry_count = this->number_of_entries();

  // Iterate through the table and create a new entry for the new table
  // NOTE(review): the loop bound is new_table->table_size() while buckets are
  // read from *this*; this assumes both tables have the same size -- confirm.
  for (int i = 0; i < new_table->table_size(); ++i) {
    for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
      // Save the successor first: unlink_entry()/add_entry() below rewrite
      // p's next pointer.
      HashtableEntry<T, F>* next = p->next();
      T string = p->literal();
      // Use alternate hashing algorithm on the symbol in the first table
      unsigned int hashValue = string->new_hash(seed());
      // Get a new index relative to the new table (can also change size)
      int index = new_table->hash_to_index(hashValue);
      p->set_hash(hashValue);
      // Keep the shared bit in the Hashtable entry to indicate that this entry
      // can't be deleted.  The shared bit is the LSB in the _next field so
      // walking the hashtable past these entries requires
      // BasicHashtableEntry::make_ptr() call.
      bool keep_shared = p->is_shared();
      this->unlink_entry(p);
      new_table->add_entry(index, p);
      if (keep_shared) {
        // Re-set the bit: add_entry() stored a plain next pointer.
        p->set_shared();
      }
      p = next;
    }
  }
  // give the new table the free list as well
  new_table->copy_freelist(this);
  assert(new_table->number_of_entries() == saved_entry_count, "lost entry on dictionary copy?");

  // Destroy memory used by the buckets in the hashtable.  The memory
  // for the elements has been used in a new table and is not
  // destroyed.  The memory reuse will benefit resizing the SystemDictionary
  // to avoid a memory allocation spike at safepoint.
  BasicHashtable<F>::free_buckets();
}
142
free_buckets()143 template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
144 if (NULL != _buckets) {
145 // Don't delete the buckets in the shared space. They aren't
146 // allocated by os::malloc
147 if (!UseSharedSpaces ||
148 !FileMapInfo::current_info()->is_in_shared_space(_buckets)) {
149 FREE_C_HEAP_ARRAY(HashtableBucket, _buckets, F);
150 }
151 _buckets = NULL;
152 }
153 }
154
155
156 // Reverse the order of elements in the hash buckets.
157
reverse()158 template <MEMFLAGS F> void BasicHashtable<F>::reverse() {
159
160 for (int i = 0; i < _table_size; ++i) {
161 BasicHashtableEntry<F>* new_list = NULL;
162 BasicHashtableEntry<F>* p = bucket(i);
163 while (p != NULL) {
164 BasicHashtableEntry<F>* next = p->next();
165 p->set_next(new_list);
166 new_list = p;
167 p = next;
168 }
169 *bucket_addr(i) = new_list;
170 }
171 }
172
free_entry(BasicHashtableEntry<F> * entry)173 template <MEMFLAGS F> void BasicHashtable<F>::BucketUnlinkContext::free_entry(BasicHashtableEntry<F>* entry) {
174 entry->set_next(_removed_head);
175 _removed_head = entry;
176 if (_removed_tail == NULL) {
177 _removed_tail = entry;
178 }
179 _num_removed++;
180 }
181
// Concurrently splice the context's list of removed entries onto this
// table's free list and decrement the entry count.  Multiple threads may
// call this, each with its own context.
template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkContext* context) {
  if (context->_num_removed == 0) {
    assert(context->_removed_head == NULL && context->_removed_tail == NULL,
           err_msg("Zero entries in the unlink context, but elements linked from " PTR_FORMAT " to " PTR_FORMAT,
                   p2i(context->_removed_head), p2i(context->_removed_tail)));
    return;
  }

  // MT-safe add of the list of BasicHashTableEntrys from the context to the free list.
  // Lock-free push of the whole sublist: link the observed free-list head
  // after our tail, then CAS the head from that observed value to our head.
  BasicHashtableEntry<F>* current = _free_list;
  while (true) {
    context->_removed_tail->set_next(current);
    BasicHashtableEntry<F>* old = (BasicHashtableEntry<F>*)Atomic::cmpxchg_ptr(context->_removed_head, &_free_list, current);
    if (old == current) {
      break;
    }
    // Lost the race: retry against the head another thread installed.
    current = old;
  }
  Atomic::add(-context->_num_removed, &_number_of_entries);
}
202
203 // Copy the table to the shared space.
204
// Copy all hashtable entries into the shared archive region at *top,
// rewriting each in-table link to point at the entry's new location, then
// mark every entry shared.  *top is advanced past the copied data.
template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {

  // Dump the hash table entries.

  // Reserve a word at *top for the total byte length, filled in after the copy.
  intptr_t *plen = (intptr_t*)(*top);
  *top += sizeof(*plen);

  int i;
  for (i = 0; i < _table_size; ++i) {
    // Walk each chain via next_addr() so the link slot (*p) itself can be
    // redirected to the relocated entry.
    for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
         *p != NULL;
         p = (*p)->next_addr()) {
      if (*top + entry_size() > end) {
        report_out_of_shared_space(SharedMiscData);
      }
      // memcpy returns the destination, so *p now points into shared space.
      *p = (BasicHashtableEntry<F>*)memcpy(*top, (void*)*p, entry_size());
      *top += entry_size();
    }
  }
  // Number of entry bytes written after the length word itself.
  *plen = (char*)(*top) - (char*)plen - sizeof(*plen);

  // Set the shared bit.

  for (i = 0; i < _table_size; ++i) {
    for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
      p->set_shared();
    }
  }
}
234
235
236
237 // Reverse the order of elements in the hash buckets.
238
reverse(void * boundary)239 template <class T, MEMFLAGS F> void Hashtable<T, F>::reverse(void* boundary) {
240
241 for (int i = 0; i < this->table_size(); ++i) {
242 HashtableEntry<T, F>* high_list = NULL;
243 HashtableEntry<T, F>* low_list = NULL;
244 HashtableEntry<T, F>* last_low_entry = NULL;
245 HashtableEntry<T, F>* p = bucket(i);
246 while (p != NULL) {
247 HashtableEntry<T, F>* next = p->next();
248 if ((void*)p->literal() >= boundary) {
249 p->set_next(high_list);
250 high_list = p;
251 } else {
252 p->set_next(low_list);
253 low_list = p;
254 if (last_low_entry == NULL) {
255 last_low_entry = p;
256 }
257 }
258 p = next;
259 }
260 if (low_list != NULL) {
261 *bucket_addr(i) = low_list;
262 last_low_entry->set_next(high_list);
263 } else {
264 *bucket_addr(i) = high_list;
265 }
266 }
267 }
268
literal_size(Symbol * symbol)269 template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
270 return symbol->size() * HeapWordSize;
271 }
272
literal_size(oop oop)273 template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
274 // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
275 // and the String.value array is shared by several Strings. However, starting from JDK8,
276 // the String.value array is not shared anymore.
277 assert(oop != NULL && oop->klass() == SystemDictionary::String_klass(), "only strings are supported");
278 return (oop->size() + java_lang_String::value(oop)->size()) * HeapWordSize;
279 }
280
281 // Dump footprint and bucket length statistics
282 //
283 // Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
284 // add a new function Hashtable<T, F>::literal_size(MyNewType lit)
285
// Print footprint and bucket-chain-length statistics for this table to st.
template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
  NumberSeq summary;      // distribution of bucket chain lengths
  int literal_bytes = 0;  // total bytes consumed by all literals
  // Walk every bucket, recording its chain length and summing literal sizes.
  for (int i = 0; i < this->table_size(); ++i) {
    int count = 0;
    for (HashtableEntry<T, F>* e = this->bucket(i);
         e != NULL; e = e->next()) {
      count++;
      literal_bytes += literal_size(e->literal());
    }
    summary.add((double)count);
  }
  // num() is the number of samples (buckets); sum() of chain lengths is the
  // total entry count.
  double num_buckets = summary.num();
  double num_entries = summary.sum();

  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
  int entry_bytes = (int)num_entries * sizeof(HashtableEntry<T, F>);
  int total_bytes = literal_bytes + bucket_bytes + entry_bytes;

  // Guard against division by zero for an empty table.
  double bucket_avg = (num_buckets <= 0) ? 0 : (bucket_bytes / num_buckets);
  double entry_avg = (num_entries <= 0) ? 0 : (entry_bytes / num_entries);
  double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets : %9d = %9d bytes, avg %7.3f", (int)num_buckets, bucket_bytes, bucket_avg);
  st->print_cr("Number of entries : %9d = %9d bytes, avg %7.3f", (int)num_entries, entry_bytes, entry_avg);
  st->print_cr("Number of literals : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
  st->print_cr("Total footprint : %9s = %9d bytes", "", total_bytes);
  st->print_cr("Average bucket size : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size : %9d", (int)summary.maximum());
}
319
320
321 // Dump the hash table buckets.
322
// Dump the bucket array into the shared region at *top: a length word, the
// entry count, then the raw bucket array.  _buckets is redirected to the
// shared copy and *top is advanced past the written data.
template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
  intptr_t len = _table_size * sizeof(HashtableBucket<F>);
  *(intptr_t*)(*top) = len;
  *top += sizeof(intptr_t);

  *(intptr_t*)(*top) = _number_of_entries;
  *top += sizeof(intptr_t);

  if (*top + len > end) {
    report_out_of_shared_space(SharedMiscData);
  }
  // memcpy returns the destination, so _buckets now points into shared space.
  _buckets = (HashtableBucket<F>*)memcpy(*top, (void*)_buckets, len);
  *top += len;
}
337
338
339 #ifndef PRODUCT
340
print()341 template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
342 ResourceMark rm;
343
344 for (int i = 0; i < BasicHashtable<F>::table_size(); i++) {
345 HashtableEntry<T, F>* entry = bucket(i);
346 while(entry != NULL) {
347 tty->print("%d : ", i);
348 entry->literal()->print();
349 tty->cr();
350 entry = entry->next();
351 }
352 }
353 }
354
355
verify()356 template <MEMFLAGS F> void BasicHashtable<F>::verify() {
357 int count = 0;
358 for (int i = 0; i < table_size(); i++) {
359 for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
360 ++count;
361 }
362 }
363 assert(count == number_of_entries(), "number of hashtable entries incorrect");
364 }
365
366
367 #endif // PRODUCT
368
369
370 #ifdef ASSERT
371
verify_lookup_length(double load)372 template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(double load) {
373 if ((double)_lookup_length / (double)_lookup_count > load * 2.0) {
374 warning("Performance bug: SystemDictionary lookup_count=%d "
375 "lookup_length=%d average=%lf load=%f",
376 _lookup_count, _lookup_length,
377 (double) _lookup_length / _lookup_count, load);
378 }
379 }
380
381 #endif
// Explicitly instantiate these types
// (The templates are defined in this .cpp file, so every specialization used
// elsewhere in the VM must be instantiated here.)
#if INCLUDE_ALL_GCS
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<oop, mtClass>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
409