/*
 * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/memRegion.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"

// For UseCompressedOops.
NarrowPtrStruct CompressedOops::_narrow_oop = { NULL, 0, true };
MemRegion       CompressedOops::_heap_address_range;

// Choose the heap base address and oop encoding mode
// when compressed oops are used:
// Unscaled  - Use 32-bit oops without encoding when
//     NarrowOopHeapBaseMin + heap_size < 4Gb
// ZeroBased - Use zero-based compressed oops with encoding when
//     NarrowOopHeapBaseMin + heap_size < 32Gb
// HeapBased - Use compressed oops with heap base + encoding.
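//
// A sketch of the decode arithmetic these modes feed (illustrative only;
// the actual implementation lives in compressedOops.inline.hpp):
//
//   oop o = (oop)(void*)((uintptr_t)base() + ((uintptr_t)narrow << shift()));
//
// Unscaled uses base == 0 and shift == 0, ZeroBased uses base == 0 with a
// non-zero shift, and HeapBased needs both the base add and the shift.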
void CompressedOops::initialize(const ReservedHeapSpace& heap_space) {
#ifdef _LP64
  // Subtract a page because something can get allocated at heap base.
  // This also makes implicit null checking work, because the
  // memory+1 page below heap_base needs to cause a signal.
  // See needs_explicit_null_check.
  // Only set the heap base for compressed oops because it indicates
  // compressed oops for pstack code.
  if ((uint64_t)heap_space.end() > UnscaledOopHeapMax) {
    // Didn't reserve heap below 4Gb.  Must shift.
    set_shift(LogMinObjAlignmentInBytes);
  }
  if ((uint64_t)heap_space.end() <= OopEncodingHeapMax) {
    // Did reserve heap below 32Gb. Can use base == 0.
    set_base(0);
  } else {
    set_base((address)heap_space.compressed_oop_base());
  }

  _heap_address_range = heap_space.region();

  LogTarget(Debug, gc, heap, coops) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    print_mode(&ls);
  }

  // Tell tests in which mode we run.
  Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
                                                 mode_to_string(mode()),
                                                 false));

  // base() is one page below the heap.
  assert((intptr_t)base() <= ((intptr_t)_heap_address_range.start() - os::vm_page_size()) ||
         base() == NULL, "invalid value");
  assert(shift() == LogMinObjAlignmentInBytes ||
         shift() == 0, "invalid value");
#endif
}

void CompressedOops::set_base(address base) {
  assert(UseCompressedOops, "no compressed oops?");
  _narrow_oop._base    = base;
}

void CompressedOops::set_shift(int shift) {
  _narrow_oop._shift   = shift;
}

void CompressedOops::set_use_implicit_null_checks(bool use) {
  assert(UseCompressedOops, "no compressed ptrs?");
  _narrow_oop._use_implicit_null_checks   = use;
}

bool CompressedOops::is_in(void* addr) {
  return _heap_address_range.contains(addr);
}

bool CompressedOops::is_in(MemRegion mr) {
  return _heap_address_range.contains(mr);
}

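// Derive the current mode from base and shift. The checks are ordered:
// a disjoint base wins over plain heap-based, a non-zero base over
// zero-based, and a non-zero shift over unscaled.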
CompressedOops::Mode CompressedOops::mode() {
  if (base_disjoint()) {
    return DisjointBaseNarrowOop;
  }

  if (base() != 0) {
    return HeapBasedNarrowOop;
  }

  if (shift() != 0) {
    return ZeroBasedNarrowOop;
  }

  return UnscaledNarrowOop;
}

const char* CompressedOops::mode_to_string(Mode mode) {
  switch (mode) {
    case UnscaledNarrowOop:
      return "32-bit";
    case ZeroBasedNarrowOop:
      return "Zero based";
    case DisjointBaseNarrowOop:
      return "Non-zero disjoint base";
    case HeapBasedNarrowOop:
      return "Non-zero based";
    default:
      ShouldNotReachHere();
      return "";
  }
}

// Test whether bits of addr and possible offsets into the heap overlap.
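// The mask keeps the low (32 + LogMinObjAlignmentInBytes) bits, i.e. every
//  bit a shifted 32-bit offset can occupy after decoding. As a worked
//  example with the default LogMinObjAlignmentInBytes == 3: the mask is
//  0xFFFFffffFFFFffff >> 29 == 0x00000007FFFFffff, so a disjoint base must
//  be aligned to 2^35 == 32G. A disjoint base can be combined with the
//  shifted offset by OR instead of ADD.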
bool CompressedOops::is_disjoint_heap_base_address(address addr) {
  return (((uint64_t)(intptr_t)addr) &
          (((uint64_t)UCONST64(0xFFFFffffFFFFffff)) >> (32-LogMinObjAlignmentInBytes))) == 0;
}

// Check for disjoint base compressed oops.
bool CompressedOops::base_disjoint() {
  return _narrow_oop._base != NULL && is_disjoint_heap_base_address(_narrow_oop._base);
}

// Check for real heap-based compressed oops.
// We must subtract the base as the bits overlap.
// If we negate the function above, we also get unscaled and zero-based oops.
bool CompressedOops::base_overlaps() {
  return _narrow_oop._base != NULL && !is_disjoint_heap_base_address(_narrow_oop._base);
}

void CompressedOops::print_mode(outputStream* st) {
  st->print("Heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
            p2i(_heap_address_range.start()), _heap_address_range.byte_size()/M);

  st->print(", Compressed Oops mode: %s", mode_to_string(mode()));

  if (base() != 0) {
    st->print(": " PTR_FORMAT, p2i(base()));
  }

  if (shift() != 0) {
    st->print(", Oop shift amount: %d", shift());
  }

  if (!use_implicit_null_checks()) {
    st->print(", no protected page in front of the heap");
  }
  st->cr();
}

// For UseCompressedClassPointers.
NarrowPtrStruct CompressedKlassPointers::_narrow_klass = { NULL, 0, true };

// CompressedClassSpaceSize is set to 1GB, but the class space may appear
// 3GB away from _narrow_ptrs_base during CDS dump.
// (Todo: we should #ifdef out CompressedKlassPointers for 32bit completely
//  and fix all call sites which are compiled for 32bit to LP64_ONLY).
size_t CompressedKlassPointers::_range = 0;


// Given an address range [addr, addr+len) which the encoding is supposed to
//  cover, choose base, shift and range.
//  The address range is the expected range of uncompressed Klass pointers we
//  will encounter (and the implicit promise that there will be no Klass
//  structures outside this range).
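// A sketch of the decode arithmetic these parameters feed, analogous to the
//  oop case (illustrative only; the actual implementation lives in
//  compressedOops.inline.hpp):
//
//    Klass* k = (Klass*)(void*)((uintptr_t)base() + ((uintptr_t)narrow_klass << shift()));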
void CompressedKlassPointers::initialize(address addr, size_t len) {
#ifdef _LP64
  assert(is_valid_base(addr), "Address must be a valid encoding base");
  address const end = addr + len;

  address base;
  int shift;
  size_t range;

  if (UseSharedSpaces || DumpSharedSpaces) {

    // Special requirements if CDS is active:
    // Encoding base and shift must be the same between dump and run time.
    //   CDS takes care that the SharedBaseAddress and CompressedClassSpaceSize
    //   are the same. The archive size will probably differ at runtime, but
    //   it can only be smaller than at dump time, never larger, since archives
    //   get shrunk at the end of the dump process.
    //   From that it follows that the range [addr, addr+len) we are handed at
    //   runtime will start at the same address as at dump time, and its len
    //   may be smaller at runtime than it was at dump time.
    //
    // To be very careful here, we avoid any optimizations and just keep using
    //  the same address and shift value. Specifically we avoid using zero-based
    //  encoding. We also set the expected value range to 4G (the encoding range
    //  cannot be larger than that).

    base = addr;

    // JDK-8265705
    // This is a temporary fix for aarch64: there, if the range-to-be-encoded is
    //  located below 32G, either the encoding base should be zero or the base
    //  should be aligned to 4G and the shift should be zero. The simplest way
    //  to fix this for now is to force the shift to zero for both runtime and
    //  dump time.
    // Note however that this is not a perfect solution. Ideally this whole
    //  function should be CDS agnostic; that would simplify it, and its
    //  testing, a lot. See JDK-8267141 for details.
    shift = 0;

    // This must be true since at dump time cds+ccs is 4G, at runtime it can
    //  only be smaller, see comment above.
    assert(len <= 4 * G, "Encoding range cannot be larger than 4G");
    range = 4 * G;

  } else {

    // Otherwise we attempt to use a zero base if the range fits in the lower 32G.
    if (end <= (address)KlassEncodingMetaspaceMax) {
      base = 0;
    } else {
      base = addr;
    }

    // Highest offset a Klass* can ever have in relation to base.
    range = end - base;

    // We may not even need a shift if the range fits into 32 bits:
    const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
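    // Note: UnscaledClassSpaceMax is 4G, since an unshifted 32-bit narrow
    //  Klass pointer can express offsets up to 4G - 1.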
    if (range < UnscaledClassSpaceMax) {
      shift = 0;
    } else {
      shift = LogKlassAlignmentInBytes;
    }

  }

  set_base(base);
  set_shift(shift);
  set_range(range);
#else
  fatal("64bit only.");
#endif
}

// Given an address p, return true if p can be used as an encoding base.
//  (Some platforms have restrictions on what constitutes a valid base address).
bool CompressedKlassPointers::is_valid_base(address p) {
#ifdef AARCH64
  // Below 32G, the base must be aligned to 4G.
  // Above that point, the base must be aligned to 32G.
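  // (With LogKlassAlignmentInBytes == 3, the expression below evaluates
  //  to (4 << 3) * G == 32G.)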
  if (p < (address)(32 * G)) {
    return is_aligned(p, 4 * G);
  }
  return is_aligned(p, (4 << LogKlassAlignmentInBytes) * G);
#else
  return true;
#endif
}

void CompressedKlassPointers::print_mode(outputStream* st) {
  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, "
               "Narrow klass range: " SIZE_FORMAT_HEX, p2i(base()), shift(),
               range());
}

void CompressedKlassPointers::set_base(address base) {
  assert(UseCompressedClassPointers, "no compressed klass ptrs?");
  _narrow_klass._base   = base;
}

void CompressedKlassPointers::set_shift(int shift) {
  assert(shift == 0 || shift == LogKlassAlignmentInBytes, "invalid shift for klass ptrs");
  _narrow_klass._shift   = shift;
}

void CompressedKlassPointers::set_range(size_t range) {
  assert(UseCompressedClassPointers, "no compressed klass ptrs?");
  _range = range;
}