/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24 
25 #include "precompiled.hpp"
26 #include "jvm.h"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "runtime/java.hpp"
30 #include "runtime/os.inline.hpp"
31 #include "runtime/stubCodeGenerator.hpp"
32 #include "vm_version_arm.hpp"
33 
// Offset between a PC value stored to memory and the address of the storing
// instruction; default of 4 is replaced by the value measured at startup in
// initialize() via the get_cpu_info stub.
int  VM_Version::_stored_pc_adjustment = 4;
// Detected ARM architecture revision; conservative default of 5 until
// get_os_cpu_info() runs (see early_initialize()).
int  VM_Version::_arm_arch             = 5;
// Set to true at the very end of initialize().
bool VM_Version::_is_initialized       = false;
// Kernel user-helper version word; read in early_initialize(), 0 before that.
int VM_Version::_kuser_helper_version  = 0;

// Function-pointer types matching the probe stubs emitted by
// VM_Version_StubGenerator below.  Declared with "C" linkage so the
// generated machine code can be invoked through a plain function pointer.
extern "C" {
  typedef int (*get_cpu_info_t)();
  typedef bool (*check_vfp_t)(double *d);
  typedef bool (*check_simd_t)();
}
44 
#define __ _masm->

// Emits tiny machine-code probe stubs that initialize() executes to detect
// CPU features at runtime.  The stubs touching optional hardware (VFP, SIMD)
// are expected to fault on CPUs lacking the feature; the caller publishes
// each stub's entry address (check_*_fault_instr) so the expected fault can
// be recognized and recovered from elsewhere in platform code.
class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  // Measures, and returns in R0, the difference between a PC value stored
  // to memory (push) and a PC value read by a data-processing instruction
  // (mov).  On ARM, reading PC via ALU yields instruction+8, while the
  // value stored by a store of PC is implementation-defined (+8 or +12) --
  // this stub determines which, for the current CPU.
  address generate_get_cpu_info() {
    StubCodeMark mark(this, "VM_Version", "get_cpu_info");
    address start = __ pc();

    __ mov(R0, PC);      // R0 = PC as read by a data-processing instruction
    __ push(PC);         // store PC to the stack
    __ pop(R1);          // R1 = the PC value that was stored
    __ sub(R0, R1, R0);  // R0 = stored PC - ALU-read PC
    // return the result in R0
    __ bx(LR);

    return start;
  };

  // Probes for VFP: stores D0 to *R0.  Faults on hardware without VFP;
  // otherwise returns true (R0 = 1).
  address generate_check_vfp() {
    StubCodeMark mark(this, "VM_Version", "check_vfp");
    address start = __ pc();

    __ fstd(D0, Address(R0));
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  // Probes for VFPv3 with 32 double registers: stores D16, which only
  // exists on the D32 register-bank variant.  Faults on D16-only hardware;
  // otherwise returns true.
  address generate_check_vfp3_32() {
    StubCodeMark mark(this, "VM_Version", "check_vfp3_32");
    address start = __ pc();

    __ fstd(D16, Address(R0));
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  // Probes for SIMD/NEON: executes a vcnt (NEON population-count)
  // instruction.  Faults without NEON; otherwise returns true.
  address generate_check_simd() {
    StubCodeMark mark(this, "VM_Version", "check_simd");
    address start = __ pc();

    __ vcnt(Stemp, Stemp);
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };
};

#undef __
101 
102 
// Entry addresses of the feature-probe stubs, filled in by initialize().
// Defined in platform-specific code; presumably the signal handler compares
// the faulting PC against these to distinguish an expected probe fault from
// a real crash -- the handler itself is not visible in this file (confirm in
// the os_*_arm signal-handling code).
extern "C" address check_vfp3_32_fault_instr;
extern "C" address check_vfp_fault_instr;
extern "C" address check_simd_fault_instr;
106 
// Minimal feature setup that must happen very early in VM startup, before
// the first use of OrderAccess memory barriers.
void VM_Version::early_initialize() {

  // Make sure that _arm_arch is initialized so that any calls to OrderAccess will
  // use proper dmb instruction
  get_os_cpu_info();

  // Read the kernel's user-helper version word from its fixed address
  // (presumably the Linux ARM kuser helper page -- confirm against
  // KUSER_HELPER_VERSION_ADDR's definition).
  _kuser_helper_version = *(int*)KUSER_HELPER_VERSION_ADDR;
  // armv7 has the ldrexd instruction that can be used to implement cx8
  // armv5 with linux >= 3.1 can use kernel helper routine
  _supports_cx8 = (supports_ldrexd() || supports_kuser_cmpxchg64());
}
118 
// Full platform feature detection and flag reconciliation.  Runs once during
// VM startup, after early_initialize().  Generates and executes small probe
// stubs to detect VFP / VFPv3-D32 / SIMD, then forces off every intrinsic
// and feature flag this port does not implement, and applies port-specific
// compiler-tuning defaults.
void VM_Version::initialize() {
  ResourceMark rm;

  // Making this stub must be FIRST use of assembler
  const int stub_size = 128;
  BufferBlob* stub_blob = BufferBlob::create("get_cpu_info", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info stub");
  }

  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  address get_cpu_info_pc = g.generate_get_cpu_info();
  get_cpu_info_t get_cpu_info = CAST_TO_FN_PTR(get_cpu_info_t, get_cpu_info_pc);

  // Execute the stub: returns how far a PC value stored to memory is ahead
  // of a PC value read by a data-processing instruction on this CPU.
  int pc_adjustment = get_cpu_info();

  VM_Version::_stored_pc_adjustment = pc_adjustment;

#ifndef __SOFTFP__
  // Probe for VFP by executing a floating-point store.  On hardware without
  // VFP the instruction faults; the stub's address is published in
  // check_vfp_fault_instr first so the fault can be recognized as expected
  // (the recovery lives in platform signal-handling code, not in this file).
  address check_vfp_pc = g.generate_check_vfp();
  check_vfp_t check_vfp = CAST_TO_FN_PTR(check_vfp_t, check_vfp_pc);

  check_vfp_fault_instr = (address)check_vfp;
  double dummy;
  if (check_vfp(&dummy)) {
    _features |= vfp_m;
  }

#ifdef COMPILER2
  if (has_vfp()) {
    // Same probing scheme for VFPv3 with 32 double registers (the stub
    // touches D16, which only exists on the D32 variant)...
    address check_vfp3_32_pc = g.generate_check_vfp3_32();
    check_vfp_t check_vfp3_32 = CAST_TO_FN_PTR(check_vfp_t, check_vfp3_32_pc);
    check_vfp3_32_fault_instr = (address)check_vfp3_32;
    double dummy;  // shadows the outer 'dummy'; scratch store target only
    if (check_vfp3_32(&dummy)) {
      _features |= vfp3_32_m;
    }

    // ...and for SIMD/NEON (the stub executes a vcnt instruction).
    address check_simd_pc =g.generate_check_simd();
    check_simd_t check_simd = CAST_TO_FN_PTR(check_simd_t, check_simd_pc);
    check_simd_fault_instr = (address)check_simd;
    if (check_simd()) {
      _features |= simd_m;
    }
  }
#endif
#endif


  // None of the AES / SHA / CRC / FMA / Adler32 / vectorizedMismatch
  // intrinsics are implemented by this port: force each flag off, warning
  // when the user requested it explicitly.
  if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
    warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

  if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
    warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("vectorizedMismatch intrinsic is not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }

#ifdef COMPILER2
  // C2 is only supported on v7+ VFP at this time
  if (_arm_arch < 7 || !has_vfp()) {
    vm_exit_during_initialization("Server VM is only supported on ARMv7+ VFP");
  }
#endif

  // ARM doesn't have special instructions for these but ldrex/ldrexd
  // enable shorter instruction sequences than the ones based on cas.
  _supports_atomic_getset4 = supports_ldrex();
  _supports_atomic_getadd4 = supports_ldrex();
  _supports_atomic_getset8 = supports_ldrexd();
  _supports_atomic_getadd8 = supports_ldrexd();

#ifdef COMPILER2
  assert(_supports_cx8 && _supports_atomic_getset4 && _supports_atomic_getadd4
         && _supports_atomic_getset8 && _supports_atomic_getadd8, "C2: atomic operations must be supported");
#endif
  // Build the human-readable features string, e.g. "(ARMv7), vfp, simd".
  char buf[512];
  jio_snprintf(buf, sizeof(buf), "(ARMv%d)%s%s%s",
               _arm_arch,
               (has_vfp() ? ", vfp" : ""),
               (has_vfp3_32() ? ", vfp3-32" : ""),
               (has_simd() ? ", simd" : ""));

  // buf starts with "(ARMv<n>)" followed by one ", <name>" per detected feature
  _features_string = os::strdup(buf);

  // Popcount is implemented with the NEON vcnt instruction, so it is only
  // enabled by default when SIMD is present.
  if (has_simd()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      FLAG_SET_DEFAULT(UsePopCountInstruction, true);
    }
  } else {
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
    FLAG_SET_DEFAULT(AllocatePrefetchDistance, 128);
  }

#ifdef COMPILER2
  FLAG_SET_DEFAULT(UseFPUForSpilling, true);

  if (FLAG_IS_DEFAULT(MaxVectorSize)) {
    // FLAG_SET_DEFAULT(MaxVectorSize, has_simd() ? 16 : 8);
    // SIMD/NEON can use 16, but default is 8 because currently
    // larger than 8 will disable instruction scheduling
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  }

  if (MaxVectorSize > 16) {
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  }
#endif

  // Port-specific tiered-compilation thresholds, applied only when the user
  // has not overridden them on the command line.
  if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
    Tier4CompileThreshold = 10000;
  }
  if (FLAG_IS_DEFAULT(Tier3InvocationThreshold)) {
    Tier3InvocationThreshold = 1000;
  }
  if (FLAG_IS_DEFAULT(Tier3CompileThreshold)) {
    Tier3CompileThreshold = 5000;
  }
  if (FLAG_IS_DEFAULT(Tier3MinInvocationThreshold)) {
    Tier3MinInvocationThreshold = 500;
  }

  // Options not supported by this port.
  UNSUPPORTED_OPTION(TypeProfileLevel);
  UNSUPPORTED_OPTION(CriticalJNINatives);

  FLAG_SET_DEFAULT(TypeProfileLevel, 0); // unsupported

  // This machine does not allow unaligned memory accesses
  if (UseUnalignedAccesses) {
    if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
      warning("Unaligned memory access is not available on this CPU");
    FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
  }

  _is_initialized = true;
}
313 
use_biased_locking()314 bool VM_Version::use_biased_locking() {
315   get_os_cpu_info();
316   // The cost of CAS on uniprocessor ARM v6 and later is low compared to the
317   // overhead related to slightly longer Biased Locking execution path.
318   // Testing shows no improvement when running with Biased Locking enabled
319   // on an ARMv6 and higher uniprocessor systems.  The situation is different on
320   // ARMv5 and MP systems.
321   //
322   // Therefore the Biased Locking is enabled on ARMv5 and ARM MP only.
323   //
324   return (!os::is_MP() && (arm_arch() > 5)) ? false : true;
325 }
326