/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/powerOfTwo.hpp"

// Implementation of Heap

CodeHeap::CodeHeap(const char* name, const int code_blob_type)
  : _code_blob_type(code_blob_type) {
  _name = name;
  _number_of_committed_segments = 0;
  _number_of_reserved_segments = 0;
  _segment_size = 0;
  _log2_segment_size = 0;
  _next_segment = 0;
  _freelist = NULL;
  _last_insert_point = NULL;
  _freelist_segments = 0;
  _freelist_length = 0;
  _max_allocated_capacity = 0;
  _blob_count = 0;
  _nmethod_count = 0;
  _adapter_count = 0;
  _full_count = 0;
  _fragmentation_count = 0;
}

// Dummy initialization of template array.
char CodeHeap::segmap_template[] = {0};

// This template array is used to (re)initialize the segmap,
// replacing a 1..254 loop.
void CodeHeap::init_segmap_template() {
  assert(free_sentinel == 255, "Segment map logic changed!");
  for (int i = 0; i <= free_sentinel; i++) {
    segmap_template[i] = i;
  }
}

// The segmap is marked free for that part of the heap
// which has not been allocated yet (beyond _next_segment).
// The range of segments to be marked is given by [beg..end).
// "Allocated" space in this context means there exists a
// HeapBlock or a FreeBlock describing this space.
// This method takes segment map indices as range boundaries.
void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // Don't do unpredictable things in PRODUCT build
  if (beg < end) {
    // setup _segmap pointers for faster indexing
    address p = (address)_segmap.low() + beg;
    address q = (address)_segmap.low() + end;
    // initialize interval
    memset(p, free_sentinel, q-p);
  }
}

// Don't get confused here.
// All existing blocks, no matter if they are used() or free(),
// have their segmap marked as used. This makes it possible to find the
// block header (HeapBlock or FreeBlock) for any pointer
// within the allocated range (upper limit: _next_segment).
// This method takes segment map indices as range boundaries.
// The range of segments to be marked is given by [beg..end).
void CodeHeap::mark_segmap_as_used(size_t beg, size_t end, bool is_FreeBlock_join) {
  assert(beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // Don't do unpredictable things in PRODUCT build
  if (beg < end) {
    // setup _segmap pointers for faster indexing
    address p = (address)_segmap.low() + beg;
    address q = (address)_segmap.low() + end;
    // initialize interval
    // If we are joining two free blocks, the segmap range for each
    // block is consistent. To create a consistent segmap range for
    // the blocks combined, we have three choices:
    //  1 - Do a full init from beg to end. Not very efficient because
    //      the segmap range for the left block is potentially initialized
    //      over and over again.
    //  2 - Carry over the last segmap element value of the left block
    //      and initialize the segmap range of the right block starting
    //      with that value. Saves initializing the left block's segmap
    //      over and over again. Very efficient if FreeBlocks mostly
    //      are appended to the right.
    //  3 - Take full advantage of the segmap being almost correct with
    //      the two blocks combined. Let's assume the left block consists
    //      of m segments. Then the segmap looks like
    //        ... (m-2) (m-1) (m) 0  1  2  3 ...
    //      By substituting the '0' by '1', we create a valid, but
    //      suboptimal, segmap range covering the two blocks combined.
    //      We introduced an extra hop for the find_block_for() iteration.
    //
    // When this method is called with is_FreeBlock_join == true, the
    // segmap index beg must select the first segment of the right block.
    // Otherwise, it has to select the first segment of the left block.
    // Variant 3 is used for all FreeBlock joins.
    if (is_FreeBlock_join && (beg > 0)) {
#ifndef PRODUCT
      FreeBlock* pBlock = (FreeBlock*)block_at(beg);
      assert(beg + pBlock->length() == end, "Internal error: (%d - %d) != %d", (unsigned int)end, (unsigned int)beg, (unsigned int)(pBlock->length()));
      assert(*p == 0, "Begin index does not select a block start segment, *p = %2.2x", *p);
#endif
      // If possible, extend the previous hop.
      if (*(p-1) < (free_sentinel-1)) {
        *p = *(p-1) + 1;
      } else {
        *p = 1;
      }
      if (_fragmentation_count++ >= fragmentation_limit) {
        defrag_segmap(true);
        _fragmentation_count = 0;
      }
    } else {
      size_t n_bulk = free_sentinel-1; // bulk processing uses template indices [1..254].
      // Use shortcut for blocks <= 255 segments.
      // Special case bulk processing: [0..254].
      if ((end - beg) <= n_bulk) {
        memcpy(p, &segmap_template[0], end - beg);
      } else {
        *p++ = 0; // block header marker
        while (p < q) {
          if ((p+n_bulk) <= q) {
            memcpy(p, &segmap_template[1], n_bulk);
            p += n_bulk;
          } else {
            memcpy(p, &segmap_template[1], q-p);
            p = q;
          }
        }
      }
    }
  }
}


void CodeHeap::invalidate(size_t beg, size_t end, size_t hdr_size) {
#ifndef PRODUCT
  // Fill the given range with some bad value.
  // length is expected to be in segment_size units.
  // This prevents inadvertent execution of code leftover from previous use.
  char* p = low_boundary() + segments_to_size(beg) + hdr_size;
  memset(p, badCodeHeapNewVal, segments_to_size(end-beg)-hdr_size);
#endif
}

void CodeHeap::clear(size_t beg, size_t end) {
  mark_segmap_as_free(beg, end);
  invalidate(beg, end, 0);
}

void CodeHeap::clear() {
  _next_segment = 0;
  clear(_next_segment, _number_of_committed_segments);
}


static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


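// Commit the initial portion of the reserved code space handed in via 'rs',
// size the segment map to cover the whole reserved range, and initialize
// both the heap memory and the segment map.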
bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
  assert(rs.size() >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
  assert_locked_or_safepoint(CodeCache_lock);

  _segment_size = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  const size_t page_size = rs.page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t c_size = align_up(committed_size, page_size);
  assert(c_size <= rs.size(), "alignment made committed size too large");

  os::trace_page_sizes(_name, c_size, rs.size(), page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  ReservedSpace seg_rs(reserved_segments_size);
  if (!_segmap.initialize(seg_rs, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size() >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
  assert(_segmap.reserved_size() >= _segmap.committed_size() , "just checking");

  // initialize remaining instance variables, heap memory and segmap
  clear();
  init_segmap_template();
  return true;
}


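// Commit additional pages so that the heap grows by roughly 'size' bytes
// (page aligned, capped at the reserved size), and grow the segment map to
// match. The newly committed range is initialized as free.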
bool CodeHeap::expand_by(size_t size) {
  assert_locked_or_safepoint(CodeCache_lock);

  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    // Use at least the available uncommitted space if 'size' is larger
    if (_memory.uncommitted_size() != 0 && dm > _memory.uncommitted_size()) {
      dm = _memory.uncommitted_size();
    }
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if ((ds > 0) && !_segmap.expand_by(ds)) {
      return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional space (heap memory and segmap)
    clear(i, _number_of_committed_segments);
  }
  return true;
}


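// Allocate 'instance_size' bytes (plus block header) from the heap.
// A best-fit block from the freelist is preferred; if none fits, a new
// block is carved from the so far unallocated space at _next_segment.
// Returns NULL if the request cannot be satisfied.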
void* CodeHeap::allocate(size_t instance_size) {
  size_t number_of_segments = size_to_segments(instance_size + header_size());
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
  assert_locked_or_safepoint(CodeCache_lock);

  // First check if we can satisfy request from freelist
  NOT_PRODUCT(verify());
  HeapBlock* block = search_freelist(number_of_segments);
  NOT_PRODUCT(verify());

  if (block != NULL) {
    assert(!block->free(), "must not be marked free");
    guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
              p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments, false);
    block = block_at(_next_segment);
    block->initialize(number_of_segments);
    _next_segment += number_of_segments;
    guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
              p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return block->allocated_space();
  } else {
    return NULL;
  }
}

// Split the given block into two at the given segment.
// This is helpful when a block was allocated larger than needed,
// so the unused space at the end can be trimmed off (interpreter).
// It also helps with splitting a large free block during allocation.
// Usage state (used or free) must be set by caller since
// we don't know if the resulting blocks will be used or free.
// split_at is the segment number (relative to segment_for(b))
// where the split happens. The segment with relative
// number split_at is the first segment of the split-off block.
HeapBlock* CodeHeap::split_block(HeapBlock* b, size_t split_at) {
  if (b == NULL) return NULL;
  // After the split, both blocks must have a size of at least CodeCacheMinBlockLength
  assert((split_at >= CodeCacheMinBlockLength) && (split_at + CodeCacheMinBlockLength <= b->length()),
         "split position(%d) out of range [0..%d]", (int)split_at, (int)b->length());
  size_t split_segment = segment_for(b) + split_at;
  size_t b_size = b->length();
  size_t newb_size = b_size - split_at;

  HeapBlock* newb = block_at(split_segment);
  newb->set_length(newb_size);
  mark_segmap_as_used(segment_for(newb), segment_for(newb) + newb_size, false);
  b->set_length(split_at);
  return newb;
}

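// Release the unused tail of an allocated block. The block is split at
// 'used_size' (plus header, rounded up to segments) and the trailing
// part is returned to the freelist.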
void CodeHeap::deallocate_tail(void* p, size_t used_size) {
  assert(p == find_start(p), "illegal deallocation");
  assert_locked_or_safepoint(CodeCache_lock);

  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");

  size_t actual_number_of_segments = b->length();
  size_t used_number_of_segments = size_to_segments(used_size + header_size());
  size_t unused_number_of_segments = actual_number_of_segments - used_number_of_segments;
  guarantee(used_number_of_segments <= actual_number_of_segments, "Must be!");

  HeapBlock* f = split_block(b, used_number_of_segments);
  add_to_freelist(f);
  NOT_PRODUCT(verify());
}

void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  assert_locked_or_safepoint(CodeCache_lock);

  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
  guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
            "The block to be deallocated " INTPTR_FORMAT " is not within the heap "
            "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
            p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
  add_to_freelist(b);
  NOT_PRODUCT(verify());
}

/**
 * The segment map is used to quickly find the start (header) of a
 * code block (e.g. nmethod) when only a pointer to a location inside the
 * code block is known. This works as follows:
 * - The storage reserved for the code heap is divided into 'segments'.
 * - The size of a segment is determined by -XX:CodeCacheSegmentSize=<#bytes>.
 * - The size must be a power of two to allow the use of shift operations
 *   to quickly convert between segment index and segment address.
 * - Segment start addresses should be aligned to be multiples of CodeCacheSegmentSize.
 * - It seems beneficial for CodeCacheSegmentSize to be equal to os::page_size().
 * - Allocation in the code cache can only happen at segment start addresses.
 * - Allocation in the code cache is in units of CodeCacheSegmentSize.
 * - A pointer in the code cache can be mapped to a segment by calling
 *   segment_for(addr).
 * - The segment map is a byte array where array element [i] is related
 *   to the i-th segment in the code heap.
 * - Each time memory is allocated/deallocated from the code cache,
 *   the segment map is updated accordingly.
 *   Note: deallocation does not cause the memory to become "free", as
 *         indicated by the segment map state "free_sentinel". Deallocation
 *         just changes the block state from "used" to "free".
 * - Elements of the segment map (byte) array are interpreted
 *   as unsigned integer.
 * - Element values normally identify an offset backwards (in segment
 *   size units) from the associated segment towards the start of
 *   the block.
 * - Some values have a special meaning:
 *       0 - This segment is the start of a block (HeapBlock or FreeBlock).
 *     255 - The free_sentinel value. This is a free segment, i.e. it is
 *           not yet allocated and thus does not belong to any block.
 * - The value of the current element has to be subtracted from the
 *   current index to get closer to the start.
 * - If the value of the then current element is zero, the block start
 *   segment is found and iteration stops. Otherwise, start over with the
 *   previous step.
 *
 * The following example illustrates a possible state of code cache
 * and the segment map: (seg -> segment, nm -> nmethod)
 *
 *          code cache        segmap
 *          -----------       ---------
 * seg 1    | nm 1    |  ->   |  0   |
 * seg 2    | nm 1    |  ->   |  1   |
 * ...      | nm 1    |  ->   |  ..  |
 * seg m-1  | nm 1    |  ->   | m-1  |
 * seg m    | nm 2    |  ->   |  0   |
 * seg m+1  | nm 2    |  ->   |  1   |
 * ...      | nm 2    |  ->   |  2   |
 * ...      | nm 2    |  ->   |  ..  |
 * ...      | nm 2    |  ->   | 0xFE | (free_sentinel-1)
 * ...      | nm 2    |  ->   |  1   |
 * seg m+n  | nm 2    |  ->   |  2   |
 * ...      | nm 2    |  ->   |      |
 *
 * How to read:
 * A value of '0' in the segmap indicates that this segment contains the
 * beginning of a CodeHeap block. Let's walk through a simple example:
 *
 * We want to find the start of the block that contains nm 1, and we are
 * given a pointer that points into segment m-2. We then read the value
 * of segmap[m-2]. The value is an offset that points to the segment
 * which contains the start of the block.
 *
 * Another example: We want to locate the start of nm 2, and we happen to
 * get a pointer that points into seg m+n. We first read seg[n+m], which
 * returns '2'. So we have to update our segment map index (ix -= segmap[n+m])
 * and start over.
 */

// Find block which contains the passed pointer,
// regardless of the block being used or free.
// NULL is returned if anything invalid is detected.
void* CodeHeap::find_block_for(void* p) const {
  // Check the pointer to be in committed range.
  if (!contains(p)) {
    return NULL;
  }

  address seg_map = (address)_segmap.low();
  size_t seg_idx = segment_for(p);

  // This may happen in special cases. Just ignore.
  // Example: PPC ICache stub generation.
  if (is_segment_unused(seg_map[seg_idx])) {
    return NULL;
  }

  // Iterate the segment map chain to find the start of the block.
  while (seg_map[seg_idx] > 0) {
    // Don't check each segment index to refer to a used segment.
    // This method is called extremely often. Therefore, any checking
    // has a significant impact on performance. Rely on CodeHeap::verify()
    // to do the job on request.
    seg_idx -= (int)seg_map[seg_idx];
  }

  return address_for(seg_idx);
}

// Find block which contains the passed pointer.
// The block must be used, i.e. must not be a FreeBlock.
// Return a pointer that points past the block header.
void* CodeHeap::find_start(void* p) const {
  HeapBlock* h = (HeapBlock*)find_block_for(p);
  return ((h == NULL) || h->free()) ? NULL : h->allocated_space();
}

// Find block which contains the passed pointer.
// Same as find_start(p), but with additional safety net.
CodeBlob* CodeHeap::find_blob_unsafe(void* start) const {
  CodeBlob* result = (CodeBlob*)CodeHeap::find_start(start);
  return (result != NULL && result->blob_contains((address)start)) ? result : NULL;
}

size_t CodeHeap::alignment_unit() const {
  // this will be a power of two
  return _segment_size;
}



size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit).
  return sizeof(HeapBlock) & (_segment_size - 1);
}

// Returns the current block if available and used.
// If not, it returns the subsequent block (if available), NULL otherwise.
// Free blocks are merged, therefore there is at most one free block
// between two used ones. As a result, the subsequent block (if available) is
// guaranteed to be used.
// The returned pointer points past the block header.
void* CodeHeap::next_used(HeapBlock* b) const {
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}

// Returns the first used HeapBlock
// The returned pointer points to the block header.
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}

// The returned pointer points to the block header.
HeapBlock* CodeHeap::block_start(void* q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}

// Returns the next Heap block.
// The returned pointer points to the block header.
HeapBlock* CodeHeap::next_block(HeapBlock* b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}


// Returns current capacity
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}

size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

int CodeHeap::allocated_segments() const {
  return (int)_next_segment;
}

size_t CodeHeap::allocated_capacity() const {
  // size of used heap - size on freelist
  return segments_to_size(_next_segment - _freelist_segments);
}

// Returns size of the unallocated heap block
size_t CodeHeap::heap_unallocated_capacity() const {
  // Total number of segments - number currently used
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}

// Free list management

FreeBlock* CodeHeap::following_block(FreeBlock* b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}

// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}

// Try to merge this block with the following block
bool CodeHeap::merge_right(FreeBlock* a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");

    // Remember linked (following) block. invalidate should only zap header of this block.
    size_t follower = segment_for(a->link());
    // Merge block a to include the following block.
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());

    // Update the segment map and invalidate block contents.
    mark_segmap_as_used(follower, segment_for(a) + a->length(), true);
    // Block contents have already been invalidated by add_to_freelist.
    // What's left is the header of the following block which now is
    // in the middle of the merged block. Just zap one segment.
    invalidate(follower, follower + 1, 0);

    _freelist_length--;
    return true;
  }
  return false;
}


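// Return block 'a' to the freelist: mark it free, invalidate its payload,
// and insert it into the address-ordered list, merging it with adjacent
// free blocks where possible. _last_insert_point serves as a search hint
// once the list grows beyond freelist_limit entries.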
void CodeHeap::add_to_freelist(HeapBlock* a) {
  FreeBlock* b = (FreeBlock*)a;
  size_t bseg = segment_for(b);
  _freelist_length++;

  _blob_count--;
  assert(_blob_count >= 0, "sanity");

  assert(b != _freelist, "cannot be removed twice");

  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();
  invalidate(bseg, bseg + b->length(), sizeof(FreeBlock));

  // First element in list?
  if (_freelist == NULL) {
    b->set_link(NULL);
    _freelist = b;
    return;
  }

  // Since the freelist is ordered (smaller addresses -> larger addresses) and the
  // element we want to insert into the freelist has a smaller address than the first
  // element, we can simply add 'b' as the first element and we are done.
  if (b < _freelist) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
    return;
  }

  // Scan for right place to put into list.
  // List is sorted by increasing addresses.
  FreeBlock* prev = _freelist;
  FreeBlock* cur = _freelist->link();
  if ((_freelist_length > freelist_limit) && (_last_insert_point != NULL)) {
    _last_insert_point = (FreeBlock*)find_block_for(_last_insert_point);
    if ((_last_insert_point != NULL) && _last_insert_point->free() && (_last_insert_point < b)) {
      prev = _last_insert_point;
      cur = prev->link();
    }
  }
  while (cur != NULL && cur < b) {
    assert(prev < cur, "Freelist must be ordered");
    prev = cur;
    cur = cur->link();
  }
  assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
  insert_after(prev, b);
  _last_insert_point = prev;
}


/**
 * Search freelist for an entry on the list with the best fit.
 * @return NULL, if no one was found
 */
HeapBlock* CodeHeap::search_freelist(size_t length) {
  FreeBlock* found_block = NULL;
  FreeBlock* found_prev = NULL;
  size_t found_length = _next_segment; // max it out to begin with

  HeapBlock* res = NULL;
  FreeBlock* prev = NULL;
  FreeBlock* cur = _freelist;

  length = length < CodeCacheMinBlockLength ? CodeCacheMinBlockLength : length;

  // Search for best-fitting block
  while (cur != NULL) {
    size_t cur_length = cur->length();
    if (cur_length == length) {
      // We have a perfect fit
      found_block = cur;
      found_prev = prev;
      found_length = cur_length;
      break;
    } else if ((cur_length > length) && (cur_length < found_length)) {
      // This is a new, closer fit. Remember block, its previous element, and its length
      found_block = cur;
      found_prev = prev;
      found_length = cur_length;
    }
    // Next element in list
    prev = cur;
    cur = cur->link();
  }

  if (found_block == NULL) {
    // None found
    return NULL;
  }

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (found_length - length < CodeCacheMinBlockLength) {
    _freelist_length--;
    length = found_length;
    if (found_prev == NULL) {
      assert(_freelist == found_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      assert((found_prev->link() == found_block), "sanity check");
      // Unmap element
      found_prev->set_link(found_block->link());
    }
    res = (HeapBlock*)found_block;
    // sizeof(HeapBlock) < sizeof(FreeBlock).
    // Invalidate the additional space that FreeBlock occupies.
    // The rest of the block should already be invalidated.
    // This is necessary due to a dubious assert in nmethod.cpp(PcDescCache::reset_to()).
    // Can't use invalidate() here because it works on segment_size units (too coarse).
    DEBUG_ONLY(memset((void*)res->allocated_space(), badCodeHeapNewVal, sizeof(FreeBlock) - sizeof(HeapBlock)));
  } else {
    // Truncate the free block and return the truncated part
    // as new HeapBlock. The remaining free block does not
    // need to be updated, except for its length. Truncating
    // the segment map does not invalidate the leading part.
    res = split_block(found_block, found_length - length);
  }

  res->set_used();
  _freelist_segments -= length;
  return res;
}

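// Walk all blocks and count the "extra hops" the segmap requires beyond the
// optimum (extra hops accumulate through the lazy FreeBlock join in
// mark_segmap_as_used). If do_defrag is true, rewrite the segmap of every
// affected block to the canonical layout. Returns the total number of extra hops.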
int CodeHeap::defrag_segmap(bool do_defrag) {
  int extra_hops_used = 0;
  int extra_hops_free = 0;
  int blocks_used = 0;
  int blocks_free = 0;
  for (HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
    size_t beg = segment_for(h);
    size_t end = segment_for(h) + h->length();
    int extra_hops = segmap_hops(beg, end);
    if (h->free()) {
      extra_hops_free += extra_hops;
      blocks_free++;
    } else {
      extra_hops_used += extra_hops;
      blocks_used++;
    }
    if (do_defrag && (extra_hops > 0)) {
      mark_segmap_as_used(beg, end, false);
    }
  }
  return extra_hops_used + extra_hops_free;
}

// Count the hops required to get from the last segment of a
// heap block to the block header segment. For the optimal case,
//   #hops = ((#segments-1)+(free_sentinel-2))/(free_sentinel-1)
// The range of segments to be checked is given by [beg..end).
// Return the number of extra hops required. There may be extra hops
// due to the is_FreeBlock_join optimization in mark_segmap_as_used().
int CodeHeap::segmap_hops(size_t beg, size_t end) {
  if (beg < end) {
    // setup _segmap pointers for faster indexing
    address p = (address)_segmap.low() + beg;
    int hops_expected = checked_cast<int>(((end-beg-1)+(free_sentinel-2))/(free_sentinel-1));
    int nhops = 0;
    size_t ix = end-beg-1;
    while (p[ix] > 0) {
      ix -= p[ix];
      nhops++;
    }
    return (nhops > hops_expected) ? nhops - hops_expected : 0;
  }
  return 0;
}

//----------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void CodeHeap::print() {
  tty->print_cr("The Heap");
}

void CodeHeap::verify() {
  if (VerifyCodeCache) {
    assert_locked_or_safepoint(CodeCache_lock);
    size_t len = 0;
    int count = 0;
    for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      len += b->length();
      count++;
      // Check if we have merged all free blocks
      assert(merge_right(b) == false, "Missed merging opportunity");
    }
    // Verify that freelist contains the right amount of free space
    assert(len == _freelist_segments, "wrong freelist");

    for (HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
      if (h->free()) count--;
    }
    // Verify that the freelist contains the same number of blocks
    // as free blocks found on the full list.
    assert(count == 0, "missing free blocks");

    //---< all free block memory must have been invalidated >---
    for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      for (char* c = (char*)b + sizeof(FreeBlock); c < (char*)b + segments_to_size(b->length()); c++) {
        assert(*c == (char)badCodeHeapNewVal, "FreeBlock@" PTR_FORMAT "(" PTR_FORMAT ") not invalidated @byte %d", p2i(b), b->length(), (int)(c - (char*)b));
      }
    }

    address seg_map = (address)_segmap.low();
    size_t nseg = 0;
    int extra_hops = 0;
    count = 0;
    for (HeapBlock* b = first_block(); b != NULL; b = next_block(b)) {
      size_t seg1 = segment_for(b);
      size_t segn = seg1 + b->length();
      extra_hops += segmap_hops(seg1, segn);
      count++;
      for (size_t i = seg1; i < segn; i++) {
        nseg++;
        //---< Verify segment map marking >---
        // All allocated segments, no matter if in a free or used block,
        // must be marked "in use".
        assert(!is_segment_unused(seg_map[i]), "CodeHeap: unused segment. seg_map[%d]([%d..%d]) = %d, %s block", (int)i, (int)seg1, (int)segn, seg_map[i], b->free()? "free":"used");
        assert((unsigned char)seg_map[i] < free_sentinel, "CodeHeap: seg_map[%d]([%d..%d]) = %d (out of range)", (int)i, (int)seg1, (int)segn, seg_map[i]);
      }
    }
    assert(nseg == _next_segment, "CodeHeap: segment count mismatch. found %d, expected %d.", (int)nseg, (int)_next_segment);
    assert(extra_hops <= _fragmentation_count, "CodeHeap: extra hops wrong. fragmentation: %d, extra hops: %d.", _fragmentation_count, extra_hops);
    if (extra_hops >= (16 + 2 * count)) {
      warning("CodeHeap: many extra hops due to optimization. blocks: %d, extra hops: %d.", count, extra_hops);
    }

    // Verify that the number of free blocks is not out of hand.
    static int free_block_threshold = 10000;
    if (count > free_block_threshold) {
      warning("CodeHeap: # of free blocks > %d", free_block_threshold);
      // Double the warning limit
      free_block_threshold *= 2;
    }
  }
}

#endif