/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_LinearScan.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueType.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/linkResolver.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/arguments.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"


Compiler::Compiler() : AbstractCompiler(compiler_c1) {
}

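// One-time setup of the global C1 runtime state, run by the single thread
// that performs compiler initialization (see Compiler::initialize() below).
// The arena allocated here backs the shared ValueType and Interval tables.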
void Compiler::init_c1_runtime() {
  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  Arena* arena = new (mtCompiler) Arena(mtCompiler);
  Runtime1::initialize(buffer_blob);
  FrameMap::initialize();
  // initialize data structures
  ValueType::initialize(arena);
  GraphBuilder::initialize();
  // note: to use more than one instance of LinearScan at a time, this call
  //       has to be moved somewhere outside of this initialization function:
  Interval::initialize(arena);
}


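// Per-thread compiler initialization. Every C1 compiler thread allocates its
// own scratch buffer blob; exactly one thread (the one for which
// should_perform_init() returns true) additionally runs the global runtime
// setup and transitions the compiler from 'initializing' to 'initialized',
// or to 'failed' if the blob allocation did not succeed.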
void Compiler::initialize() {
  // A buffer blob must be allocated per C1 compiler thread at startup
  BufferBlob* buffer_blob = init_buffer_blob();

  if (should_perform_init()) {
    if (buffer_blob == NULL) {
      // We are in state 'initializing' when we get here; without a buffer
      // blob, the entire C1 compiler can be shut down.
      set_state(failed);
    } else {
      init_c1_runtime();
      set_state(initialized);
    }
  }
}

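// Size of each thread's scratch code buffer: enough for the largest code C1
// is expected to emit for one compilation, plus room for its constant table.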
int Compiler::code_buffer_size() {
  return Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size();
}

BufferBlob* Compiler::init_buffer_blob() {
  // Allocate the buffer blob once at startup, since allocating it for each
  // compilation seems to be too expensive (at least on Intel win32).
  assert(CompilerThread::current()->get_buffer_blob() == NULL, "Should initialize only once");

  // Set up the CodeBuffer: preallocate a BufferBlob of size
  // NMethodSizeLimit plus some extra space for constants.
  BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size());
  if (buffer_blob != NULL) {
    CompilerThread::current()->set_buffer_blob(buffer_blob);
  }

  return buffer_blob;
}

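// Returns whether C1 has an intrinsic implementation for the given method on
// the current platform. Synchronized methods are never intrinsified, and
// several intrinsics additionally require hardware capabilities that are
// probed through VM_Version.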
bool Compiler::is_intrinsic_supported(const methodHandle& method) {
  vmIntrinsics::ID id = method->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (method->is_synchronized()) {
    // C1 does not support intrinsification of synchronized methods.
    return false;
  }

  switch (id) {
  case vmIntrinsics::_compareAndSetLong:
    if (!VM_Version::supports_cx8()) return false;
    break;
  case vmIntrinsics::_getAndAddInt:
    if (!VM_Version::supports_atomic_getadd4()) return false;
    break;
  case vmIntrinsics::_getAndAddLong:
    if (!VM_Version::supports_atomic_getadd8()) return false;
    break;
  case vmIntrinsics::_getAndSetInt:
    if (!VM_Version::supports_atomic_getset4()) return false;
    break;
  case vmIntrinsics::_getAndSetLong:
    if (!VM_Version::supports_atomic_getset8()) return false;
    break;
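  // With compressed oops a reference is stored as a 32-bit narrow oop, so a
  // 4-byte atomic exchange suffices; with uncompressed oops on LP64 the
  // 8-byte form is required.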
  case vmIntrinsics::_getAndSetReference:
#ifdef _LP64
    if (!UseCompressedOops && !VM_Version::supports_atomic_getset8()) return false;
    if (UseCompressedOops && !VM_Version::supports_atomic_getset4()) return false;
#else
    if (!VM_Version::supports_atomic_getset4()) return false;
#endif
    break;
  case vmIntrinsics::_onSpinWait:
    if (!VM_Version::supports_on_spin_wait()) return false;
    break;
  case vmIntrinsics::_arraycopy:
  case vmIntrinsics::_currentTimeMillis:
  case vmIntrinsics::_nanoTime:
  case vmIntrinsics::_Reference_get:
    // Use the intrinsic version of Reference.get() so that the value in
    // the referent field can be registered by the G1 pre-barrier code.
    // Also to prevent commoning reads from this field across safepoints,
    // since GC can change its value.
  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence:
  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_longBitsToDouble:
  case vmIntrinsics::_getClass:
  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_currentThread:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dpow:
  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
  case vmIntrinsics::_getReference:
  case vmIntrinsics::_getBoolean:
  case vmIntrinsics::_getByte:
  case vmIntrinsics::_getShort:
  case vmIntrinsics::_getChar:
  case vmIntrinsics::_getInt:
  case vmIntrinsics::_getLong:
  case vmIntrinsics::_getFloat:
  case vmIntrinsics::_getDouble:
  case vmIntrinsics::_putReference:
  case vmIntrinsics::_putBoolean:
  case vmIntrinsics::_putByte:
  case vmIntrinsics::_putShort:
  case vmIntrinsics::_putChar:
  case vmIntrinsics::_putInt:
  case vmIntrinsics::_putLong:
  case vmIntrinsics::_putFloat:
  case vmIntrinsics::_putDouble:
  case vmIntrinsics::_getReferenceVolatile:
  case vmIntrinsics::_getBooleanVolatile:
  case vmIntrinsics::_getByteVolatile:
  case vmIntrinsics::_getShortVolatile:
  case vmIntrinsics::_getCharVolatile:
  case vmIntrinsics::_getIntVolatile:
  case vmIntrinsics::_getLongVolatile:
  case vmIntrinsics::_getFloatVolatile:
  case vmIntrinsics::_getDoubleVolatile:
  case vmIntrinsics::_putReferenceVolatile:
  case vmIntrinsics::_putBooleanVolatile:
  case vmIntrinsics::_putByteVolatile:
  case vmIntrinsics::_putShortVolatile:
  case vmIntrinsics::_putCharVolatile:
  case vmIntrinsics::_putIntVolatile:
  case vmIntrinsics::_putLongVolatile:
  case vmIntrinsics::_putFloatVolatile:
  case vmIntrinsics::_putDoubleVolatile:
  case vmIntrinsics::_getShortUnaligned:
  case vmIntrinsics::_getCharUnaligned:
  case vmIntrinsics::_getIntUnaligned:
  case vmIntrinsics::_getLongUnaligned:
  case vmIntrinsics::_putShortUnaligned:
  case vmIntrinsics::_putCharUnaligned:
  case vmIntrinsics::_putIntUnaligned:
  case vmIntrinsics::_putLongUnaligned:
  case vmIntrinsics::_checkIndex:
  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
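  // CRC32C intrinsics have C1 implementations only on the platforms below.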
#if defined(S390) || defined(PPC64) || defined(AARCH64)
  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
#endif
  case vmIntrinsics::_vectorizedMismatch:
  case vmIntrinsics::_compareAndSetInt:
  case vmIntrinsics::_compareAndSetReference:
  case vmIntrinsics::_getCharStringU:
  case vmIntrinsics::_putCharStringU:
#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime:
  case vmIntrinsics::_getEventWriter:
#if defined(_LP64) || !defined(TRACE_ID_SHIFT)
  case vmIntrinsics::_getClassId:
#endif
#endif
    break;
  case vmIntrinsics::_getObjectSize:
    break;
  default:
    return false; // Intrinsics not in the list above are not supported.
  }

  return true;
}

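// Compile a method using this thread's pre-allocated buffer blob. The actual
// compilation work is driven by the constructor of Compilation.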
void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci, bool install_code, DirectiveSet* directive) {
  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  assert(buffer_blob != NULL, "Must exist");
  // invoke compilation
  {
    // We use a nested scope here because we need the destructor of
    // Compilation to run before we release any competing compiler thread.
    ResourceMark rm;
    Compilation c(this, env, method, entry_bci, buffer_blob, install_code, directive);
  }
}


void Compiler::print_timers() {
  Compilation::print_timers();
}