1 /*
2 * Copyright (c) 2013, Red Hat Inc.
3 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "runtime/java.hpp"
31 #include "runtime/stubCodeGenerator.hpp"
32 #include "vm_version_aarch64.hpp"
33 #ifdef TARGET_OS_FAMILY_linux
34 # include "os_linux.inline.hpp"
35 #endif
36
37 #if defined (__linux__)
38 #include <sys/auxv.h>
39 #include <asm/hwcap.h>
40 #elif defined (__FreeBSD__)
41 #include <machine/elf.h>
42 #endif
43
// Fallback definitions of the AArch64 HWCAP feature bits for builds
// against older kernel headers that do not yet define them.  The
// values must match the kernel's asm/hwcap.h so that the bits read
// from getauxval(AT_HWCAP) are interpreted correctly.
#ifndef HWCAP_ASIMD
#define HWCAP_ASIMD (1<<1)
#endif

#ifndef HWCAP_AES
#define HWCAP_AES   (1<<3)
#endif

#ifndef HWCAP_PMULL
#define HWCAP_PMULL (1<<4)
#endif

#ifndef HWCAP_SHA1
#define HWCAP_SHA1  (1<<5)
#endif

#ifndef HWCAP_SHA2
#define HWCAP_SHA2  (1<<6)
#endif

#ifndef HWCAP_CRC32
#define HWCAP_CRC32 (1<<7)
#endif

#ifndef HWCAP_ATOMICS
#define HWCAP_ATOMICS (1<<8)
#endif
71
72 int VM_Version::_cpu;
73 int VM_Version::_model;
74 int VM_Version::_model2;
75 int VM_Version::_variant;
76 int VM_Version::_revision;
77 int VM_Version::_stepping;
78 int VM_Version::_cpuFeatures;
79 const char* VM_Version::_features_str = "";
80 VM_Version::PsrInfo VM_Version::_psr_info = { 0, };
81
82 static BufferBlob* stub_blob;
83 static const int stub_size = 550;
84
85 extern "C" {
86 typedef void (*getPsrInfo_stub_t)(void*);
87 }
88 static getPsrInfo_stub_t getPsrInfo_stub = NULL;
89
90
91 class VM_Version_StubGenerator: public StubCodeGenerator {
92 public:
93
VM_Version_StubGenerator(CodeBuffer * c)94 VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
95
generate_getPsrInfo()96 address generate_getPsrInfo() {
97 StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
98 # define __ _masm->
99 address start = __ pc();
100
101 // void getPsrInfo(VM_Version::PsrInfo* psr_info);
102
103 address entry = __ pc();
104
105 __ enter();
106
107 __ get_dczid_el0(rscratch1);
108 __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::dczid_el0_offset())));
109
110 __ get_ctr_el0(rscratch1);
111 __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::ctr_el0_offset())));
112
113 __ leave();
114 __ ret(lr);
115
116 # undef __
117
118 return start;
119 }
120 };
121
122
get_processor_features()123 void VM_Version::get_processor_features() {
124 _supports_cx8 = true;
125 _supports_atomic_getset4 = true;
126 _supports_atomic_getadd4 = true;
127 _supports_atomic_getset8 = true;
128 _supports_atomic_getadd8 = true;
129
130 getPsrInfo_stub(&_psr_info);
131
132 int dcache_line = VM_Version::dcache_line_size();
133
134 // Limit AllocatePrefetchDistance so that it does not exceed the
135 // constraint in AllocatePrefetchDistanceConstraintFunc.
136 if (FLAG_IS_DEFAULT(AllocatePrefetchDistance))
137 FLAG_SET_DEFAULT(AllocatePrefetchDistance, MIN2(512, 3*dcache_line));
138
139 if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize))
140 FLAG_SET_DEFAULT(AllocatePrefetchStepSize, dcache_line);
141 if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes))
142 FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 3*dcache_line);
143 if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes))
144 FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 3*dcache_line);
145
146 if (PrefetchCopyIntervalInBytes != -1 &&
147 ((PrefetchCopyIntervalInBytes & 7) || (PrefetchCopyIntervalInBytes >= 32768))) {
148 warning("PrefetchCopyIntervalInBytes must be -1, or a multiple of 8 and < 32768");
149 PrefetchCopyIntervalInBytes &= ~7;
150 if (PrefetchCopyIntervalInBytes >= 32768)
151 PrefetchCopyIntervalInBytes = 32760;
152 }
153
154 if (AllocatePrefetchDistance !=-1 && (AllocatePrefetchDistance & 7)) {
155 warning("AllocatePrefetchDistance must be multiple of 8");
156 AllocatePrefetchDistance &= ~7;
157 }
158
159 if (AllocatePrefetchStepSize & 7) {
160 warning("AllocatePrefetchStepSize must be multiple of 8");
161 AllocatePrefetchStepSize &= ~7;
162 }
163
164 FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
165
166 #if defined(__linux__)
167 unsigned long auxv = getauxval(AT_HWCAP);
168
169 char buf[512];
170
171 strcpy(buf, "simd");
172 if (auxv & HWCAP_CRC32) strcat(buf, ", crc");
173 if (auxv & HWCAP_AES) strcat(buf, ", aes");
174 if (auxv & HWCAP_SHA1) strcat(buf, ", sha1");
175 if (auxv & HWCAP_SHA2) strcat(buf, ", sha256");
176 if (auxv & HWCAP_ATOMICS) strcat(buf, ", lse");
177
178 _features_str = strdup(buf);
179 _cpuFeatures = auxv;
180
181 int cpu_lines = 0;
182 if (FILE *f = fopen("/proc/cpuinfo", "r")) {
183 char buf[128], *p;
184 while (fgets(buf, sizeof (buf), f) != NULL) {
185 if ((p = strchr(buf, ':')) != NULL) {
186 long v = strtol(p+1, NULL, 0);
187 if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) {
188 _cpu = v;
189 cpu_lines++;
190 } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) {
191 _variant = v;
192 } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) {
193 if (_model != v) _model2 = _model;
194 _model = v;
195 } else if (strncmp(buf, "CPU revision", sizeof "CPU revision" - 1) == 0) {
196 _revision = v;
197 }
198 }
199 }
200 fclose(f);
201 }
202 #elif defined(__FreeBSD__) || defined(__OpenBSD__)
203 char buf[512];
204 int cpu_lines = 0;
205 unsigned long auxv = os_get_processor_features();
206 #endif
207
208 // Enable vendor specific features
209 if (_cpu == CPU_CAVIUM) {
210 if (_variant == 0) _cpuFeatures |= CPU_DMB_ATOMICS;
211 if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
212 FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
213 }
214 if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
215 FLAG_SET_DEFAULT(UseSIMDForMemoryOps, (_variant > 0));
216 }
217 }
218 if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) _cpuFeatures |= CPU_A53MAC;
219 if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _cpuFeatures |= CPU_STXR_PREFETCH;
220 // If an olde style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07)
221 // we assume the worst and assume we could be on a big little system and have
222 // undisclosed A53 cores which we could be swapped to at any stage
223 if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _cpuFeatures |= CPU_A53MAC;
224
225 if (FLAG_IS_DEFAULT(UseCRC32)) {
226 UseCRC32 = (auxv & HWCAP_CRC32) != 0;
227 }
228 if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) {
229 warning("UseCRC32 specified, but not supported on this CPU");
230 }
231
232 if (auxv & HWCAP_ATOMICS) {
233 if (FLAG_IS_DEFAULT(UseLSE))
234 FLAG_SET_DEFAULT(UseLSE, true);
235 } else {
236 if (UseLSE) {
237 warning("UseLSE specified, but not supported on this CPU");
238 }
239 }
240
241 if (auxv & HWCAP_AES) {
242 UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
243 UseAESIntrinsics =
244 UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics));
245 if (UseAESIntrinsics && !UseAES) {
246 warning("UseAESIntrinsics enabled, but UseAES not, enabling");
247 UseAES = true;
248 }
249 } else {
250 if (UseAES) {
251 warning("UseAES specified, but not supported on this CPU");
252 }
253 if (UseAESIntrinsics) {
254 warning("UseAESIntrinsics specified, but not supported on this CPU");
255 }
256 }
257
258 if (auxv & HWCAP_PMULL) {
259 if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
260 FLAG_SET_DEFAULT(UseGHASHIntrinsics, true);
261 }
262 } else if (UseGHASHIntrinsics) {
263 warning("GHASH intrinsics are not available on this CPU");
264 FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
265 }
266
267 if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
268 UseCRC32Intrinsics = true;
269 }
270
271 if (auxv & (HWCAP_SHA1 | HWCAP_SHA2)) {
272 if (FLAG_IS_DEFAULT(UseSHA)) {
273 FLAG_SET_DEFAULT(UseSHA, true);
274 }
275 } else if (UseSHA) {
276 warning("SHA instructions are not available on this CPU");
277 FLAG_SET_DEFAULT(UseSHA, false);
278 }
279
280 if (!UseSHA) {
281 FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
282 FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
283 FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
284 } else {
285 if (auxv & HWCAP_SHA1) {
286 if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
287 FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
288 }
289 } else if (UseSHA1Intrinsics) {
290 warning("SHA1 instruction is not available on this CPU.");
291 FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
292 }
293 if (auxv & HWCAP_SHA2) {
294 if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
295 FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
296 }
297 } else if (UseSHA256Intrinsics) {
298 warning("SHA256 instruction (for SHA-224 and SHA-256) is not available on this CPU.");
299 FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
300 }
301 if (UseSHA512Intrinsics) {
302 warning("SHA512 instruction (for SHA-384 and SHA-512) is not available on this CPU.");
303 FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
304 }
305 }
306
307 if (is_zva_enabled()) {
308 if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
309 FLAG_SET_DEFAULT(UseBlockZeroing, true);
310 }
311 if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) {
312 FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
313 }
314 } else if (UseBlockZeroing) {
315 warning("DC ZVA is not available on this CPU");
316 FLAG_SET_DEFAULT(UseBlockZeroing, false);
317 }
318
319 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
320 UseMultiplyToLenIntrinsic = true;
321 }
322
323 if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) {
324 UseBarriersForVolatile = (_cpuFeatures & CPU_DMB_ATOMICS) != 0;
325 }
326
327 if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
328 UsePopCountInstruction = true;
329 }
330
331 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
332 UseMontgomeryMultiplyIntrinsic = true;
333 }
334 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
335 UseMontgomerySquareIntrinsic = true;
336 }
337
338 #ifdef COMPILER2
339 if (FLAG_IS_DEFAULT(OptoScheduling)) {
340 OptoScheduling = true;
341 }
342 #else
343 if (ReservedCodeCacheSize > 128*M) {
344 vm_exit_during_initialization("client compiler does not support ReservedCodeCacheSize > 128M");
345 }
346 #endif
347 }
348
initialize()349 void VM_Version::initialize() {
350 ResourceMark rm;
351
352 stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
353 if (stub_blob == NULL) {
354 vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
355 }
356
357 CodeBuffer c(stub_blob);
358 VM_Version_StubGenerator g(&c);
359 getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
360 g.generate_getPsrInfo());
361
362 get_processor_features();
363
364 if (CriticalJNINatives) {
365 if (FLAG_IS_CMDLINE(CriticalJNINatives)) {
366 warning("CriticalJNINatives specified, but not supported in this VM");
367 }
368 FLAG_SET_DEFAULT(CriticalJNINatives, false);
369 }
370 }
371