/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/virtualspace.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
}

ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  bool has_preferred_page_size = preferred_page_size != 0;
  // Want to use large pages where possible and pad with small pages.
  size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  size_t alignment;
  if (large_pages && has_preferred_page_size) {
    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
    // ReservedSpace initialization requires size to be aligned to the given
    // alignment. Align the size up.
    size = align_up(size, alignment);
  } else {
    // Don't force the alignment to be large page aligned,
    // since that will waste memory.
    alignment = os::vm_allocation_granularity();
  }
  initialize(size, alignment, large_pages, NULL, false);
}
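// Illustrative example for the constructor above (not normative): with a 2M
// preferred page size and a requested size of 10M + 4K, the alignment becomes
// MAX2(2M, allocation granularity) = 2M and the size is padded up to 12M
// before initialize() runs; with no preferred page size, the allocation
// granularity is used and the size is passed through unchanged.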

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address) : _fd_for_heap(-1) {
  initialize(size, alignment, large, requested_address, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) : _fd_for_heap(-1) {
  initialize(size, alignment, large, NULL, executable);
}

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) : _fd_for_heap(-1) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}

// Helper method
static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  if (is_file_mapped) {
    if (!os::unmap_memory(base, size)) {
      fatal("os::unmap_memory failed");
    }
  } else if (!os::release_memory(base, size)) {
    fatal("os::release_memory failed");
  }
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special, bool is_file_mapped = false)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      unmap_or_release_memory(base, size, is_file_mapped);
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this space then whether
  // large pages are allocated is up to the filesystem of the backing file.
  // So we ignore the UseLargePages flag in this case.
  bool special = large && !os::can_commit_large_page_memory();
  if (special && _fd_for_heap != -1) {
    special = false;
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
    }
  }

  char* base = NULL;

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        log_debug(gc, heap, coops)("Reserve regular memory without large pages");
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important. If the requested space is not available, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
      if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);

      // Make sure that size is aligned
      size = align_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
  if (_fd_for_heap != -1) {
    _special = true;
  }
}

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      if (_fd_for_heap != -1) {
        os::unmap_memory(real_base, real_size);
      } else {
        os::release_memory_special(real_base, real_size);
      }
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _alignment = 0;
    _special = false;
    _executable = false;
  }
}

static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}
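// Illustrative arithmetic (assumed typical values): with a 4K page size and a
// 4K alignment, lcm(4K, 4K) = 4K, so the protected prefix is a single page;
// with a 16M heap alignment the prefix grows to lcm(4K, 16M) = 16M, keeping
// the shifted heap base aligned.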

void ReservedHeapSpace::establish_noaccess_prefix() {
  assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
  _noaccess_prefix = noaccess_prefix_size(_alignment);

  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& os::vm_page_size() != 64*K)) {
      // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on windows)
      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / " INTX_FORMAT " bytes",
                                 p2i(_base),
                                 _noaccess_prefix);
      assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
    } else {
      Universe::set_narrow_oop_use_implicit_null_checks(false);
    }
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}

// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory, this memory is freed first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         bool large,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this space then whether
  // large pages are allocated is up to the filesystem of the backing file.
  // So we ignore the UseLargePages flag in this case.
  bool special = large && !os::can_commit_large_page_memory();
  if (special && _fd_for_heap != -1) {
    special = false;
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
    }
  }
  char* base = NULL;

  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size " SIZE_FORMAT_HEX,
                             p2i(requested_address),
                             size);

  if (special) {
    base = os::reserve_memory_special(size, alignment, requested_address, false);

    if (base != NULL) {
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    }
  }

  if (base == NULL) {
    // Failed; try to reserve regular memory below
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    }

    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important. If the requested space is not available, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
    } else {
      base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
    }
  }
  if (base == NULL) { return; }

  // Done
  _base = base;
  _size = size;
  _alignment = alignment;

  // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
  if (_fd_for_heap != -1) {
    _special = true;
  }

  // Check alignment constraints
  if ((((size_t)base) & (alignment - 1)) != 0) {
    // Base not aligned, retry.
    release();
  }
}

void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                          char *lowest_start,
                                          size_t attach_point_alignment,
                                          char *aligned_heap_base_min_address,
                                          char *upper_bound,
                                          size_t size,
                                          size_t alignment,
                                          bool large) {
  const size_t attach_range = highest_start - lowest_start;
  // Cap num_attempts at the possible number of attempts.
  // At least one attempt is possible even for a zero-sized attach range.
  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

  const size_t stepsize = (attach_range == 0) ? // Only one try.
    (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);

  // Try attach points from top to bottom.
  char* attach_point = highest_start;
  while (attach_point >= lowest_start &&
         attach_point <= highest_start && // Avoid wrap around.
         ((_base == NULL) ||
          (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
    try_reserve_heap(size, alignment, large, attach_point);
    attach_point -= stepsize;
  }
}

#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
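// Note (informal): SIZE_32G is 2^35, the limit of a zero-based compressed-oops
// heap with a 3-bit shift; SIZE_64K * SIZE_32G is 2^51. The attach addresses
// below are multiples of these constants.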

// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// NULL terminated.
static char** get_attach_addresses_for_disjoint_mode() {
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };

  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress.
  // This assumes the array is sorted.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}

void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "can not allocate compressed oop heap for this size");
  guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");

  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  const size_t os_attach_point_alignment =
    AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
    NOT_AIX(os::vm_allocation_granularity());
  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
    noaccess_prefix_size(alignment) : 0;

  // Attempt to alloc at user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
    try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
      release();
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (_base == NULL) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested. Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.
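    //
    // Informal summary of the placement tiers tried below (standard
    // compressed-oops terminology, not defined in this file): "unscaled"
    // needs the heap to end below 4G (no base, no shift), "zerobased" below
    // 32G (no base, 3-bit shift), and "disjoint base" needs a base whose
    // bits do not overlap the shifted oop value, so decoding can avoid an add.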

    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

      // Calc the address range within which we try to attach (range of possible start addresses).
      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class space, which is allocated above
    // the heap.
    char *zerobased_max = (char *)OopEncodingHeapMax;
    const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
    // For small heaps, save some space for compressed class pointer
    // space so it can be decoded with no base.
    if (UseCompressedClassPointers && !UseSharedSpaces &&
        OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
        (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
      zerobased_max = (char *)OopEncodingHeapMax - class_space;
    }

    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretically possible.
        ((_base == NULL) ||                // No previous try succeeded.
         (_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address.

      // Calc the address range within which we try to attach (range of possible start addresses).
      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful about size being guaranteed to be less
      // than UnscaledOopHeapMax due to type constraints.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start = align_up(lowest_start, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, zerobased_max, size, alignment, large);
    }

    // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size(alignment);

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while (addresses[i] &&                                 // End of array not yet reached.
           ((_base == NULL) ||                             // No previous try succeeded.
            (_base + size > (char *)OopEncodingHeapMax &&  // Not zerobased or unscaled address.
             !Universe::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (_base == NULL) {
      log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
      initialize(size + noaccess_prefix, alignment, large, NULL, false);
    }
  }
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  if (heap_allocation_directory != NULL) {
    _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
    if (_fd_for_heap == -1) {
      vm_exit_during_initialization(
        err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
    }
  }

  // Heap size should be aligned to alignment, too.
  guarantee(is_aligned(size, alignment), "set by caller");

  if (UseCompressedOops) {
    initialize_compressed_heap(size, alignment, large);
    if (_size > size) {
      // We allocated heap with noaccess prefix.
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at arbitrary address.
      establish_noaccess_prefix();
    }
  } else {
    initialize(size, alignment, large, NULL, false);
  }

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

  if (base() != NULL) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  if (_fd_for_heap != -1) {
    os::close(_fd_for_heap);
  }
}

// Reserve space for code segment. Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false; // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages. This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system. Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
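  //
  // Illustrative layout (assumed page sizes: 4K small, 2M large): if the
  // space spans [0x10001000, 0x10601000), the lower region covers the 4K
  // pages up to the first 2M boundary (0x10200000), the middle region the
  // 2M-aligned interior [0x10200000, 0x10600000), and the upper region the
  // remaining 4K pages above 0x10600000.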
  _lower_alignment = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment = os::vm_page_size();

  // End of each region
  _lower_high_boundary = align_up(low_boundary(), middle_alignment());
  _middle_high_boundary = align_down(high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
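
// Illustrative usage (a sketch, not a call site in this file): a client
// reserves address space once, then grows the committed portion on demand.
//
//   ReservedSpace rs(2 * M);        // reserve only
//   VirtualSpace vs;
//   if (vs.initialize(rs, 0)) {     // nothing committed yet
//     if (vs.expand_by(512 * K)) {  // commit the first 512K
//       // ... use memory in [vs.low(), vs.high()) ...
//     }
//   }
//   rs.release();                   // VirtualSpace::release() does not unreserve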


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it reserved.
  // Caller must release via rs.release();
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low = pointer_delta(_lower_high, _low_boundary, sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary, sizeof(char));
  size_t committed_high = pointer_delta(_upper_high, _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower = pointer_delta(_lower_high_boundary, _low_boundary, sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary, sizeof(char));
  size_t upper = pointer_delta(_upper_high_boundary, _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

static void pretouch_expanded_memory(void* start, void* end) {
  assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
  assert(is_aligned(end, os::vm_page_size()), "Unexpected alignment");

  os::pretouch_memory(start, end);
}

static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
  if (os::commit_memory(start, size, alignment, executable)) {
    if (pre_touch || AlwaysPreTouch) {
      pretouch_expanded_memory(start, start + size);
    }
    return true;
  }

  debug_only(warning(
      "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
      " size=" SIZE_FORMAT ", executable=%d) failed",
      p2i(start), p2i(start + size), size, executable);)

  return false;
}

/*
   First we need to determine if a particular virtual space is using large
   pages. This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages. Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks. If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored. In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
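// Illustrative walk-through (assumed values, not from a real run): with 4K
// small pages and a 2M middle alignment, expanding a space whose high() sits
// in the lower region by 3M first commits the remaining 4K pages up to
// lower_high_boundary(), then 2M chunks in the middle region, and finally any
// 4K tail in the upper region; _high itself advances by exactly the requested
// 3M.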
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) {
    return false;
  }

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be. If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high = MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high = MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the region's alignment. lower and upper
  // alignment will always be default page size. middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high = align_up(unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high = align_up(unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high(). For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high(). By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
      return false;
    }
    _lower_high += lower_needs;
  }

  if (middle_needs > 0) {
    assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
      return false;
    }
    _middle_high += middle_needs;
  }

  if (upper_needs > 0) {
    assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
      return false;
    }
    _upper_high += upper_needs;
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high = align_up(unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high = align_up(unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(low()), p2i(high()));
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(low_boundary()), p2i(high_boundary()));
}

void VirtualSpace::print() {
  print_on(tty);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     (char *)NULL); // requested_address

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
             (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    if (size < alignment) {
      // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
      assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
      return;
    }

    assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space1(size, ag);
    test_reserved_space1(size * 2, ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp,     ag * 4, true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp,     lp    , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#define assert_equals(actual, expected)  \
  assert(actual == expected,             \
         "Got " SIZE_FORMAT " expected " \
         SIZE_FORMAT, actual, expected);

#define assert_ge(value1, value2)                   \
  assert(value1 >= value2,                          \
         "'" #value1 "': " SIZE_FORMAT " '"         \
         #value2 "': " SIZE_FORMAT, value1, value2);

#define assert_lt(value1, value2)                   \
  assert(value1 < value2,                           \
         "'" #value1 "': " SIZE_FORMAT " '"         \
         #value2 "': " SIZE_FORMAT, value1, value2);


class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                  os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that committing is done with small pages
    // if we force VirtualSpace to disable large pages.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif